source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
__init__.py | import struct, socket, threading, json, os, pickle
from essentials import tokening
import essentials
import copy
import time
from hashlib import sha1
import base64
import array
print("THIS MODULE IS DEPRECATED. PLEASE USE SOCKET_OPS_V2")
PYTHONIC = "python based"
WEB_BASED = "web based"
def SocketDownload(sock, data, usage=None):
    """
    Helper function for Socket Classes.

    Receives one length-prefixed pickled message from ``sock``.

    Parameters
    ----------
    sock: connected socket to read from.
    data (:obj:`bytes`): leftover bytes from a previous call; new bytes are appended.
    usage (optional): Data_Storage-like object; ``usage.add(frame_size)`` is
        called when provided.

    Returns ``(message, remaining_bytes)``, or None if the frame could not be
    unpickled. Raises ConnectionError on any socket failure.
    """
    try:
        # The peer sends a 4-byte big-endian length prefix before each frame.
        payload_size = struct.calcsize(">L")
        while len(data) < payload_size:
            chunk = sock.recv(4096)
            if not chunk:
                # Bug fix: recv() returning b"" means the peer closed the
                # connection; the old code busy-looped forever here.
                raise ConnectionError("Connection Error")
            data += chunk
        packed_msg_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack(">L", packed_msg_size)[0]
        # Keep reading until the entire frame has arrived.
        while len(data) < msg_size:
            chunk = sock.recv(4096)
            if not chunk:
                raise ConnectionError("Connection Error")
            data += chunk
        frame_data = data[:msg_size]
        data = data[msg_size:]
        # Bug fix: ``usage`` defaults to None; previously .add() was called
        # unconditionally, raising AttributeError (masked as ConnectionError).
        if usage is not None:
            usage.add(len(frame_data))
        try:
            # SECURITY NOTE: unpickling data from the network is unsafe with
            # untrusted peers; kept for wire-protocol compatibility.
            xData = pickle.loads(frame_data, fix_imports=True, encoding="bytes")
            return xData, data
        except Exception:
            print("EOF Error Caught.")
    except Exception as exc:
        # Narrowed from a bare except; keep the original cause attached.
        raise ConnectionError("Connection Error") from exc
def SocketUpload(sock, data):
    """
    Helper function for Socket Classes.

    Pickles ``data`` (protocol 0, matching SocketDownload's expectations) and
    sends it with a 4-byte big-endian length prefix.

    Raises ConnectionError if pickling or sending fails.
    """
    try:
        payload = pickle.dumps(data, 0)
        # Length prefix first so the receiver knows how many bytes to expect.
        sock.sendall(struct.pack(">L", len(payload)) + payload)
    except Exception as exc:
        # Narrowed from a bare except; keep the original cause attached.
        raise ConnectionError("Connection Error") from exc
def SocketUpload_WebBased(sock, data):
    """
    Helper function for Socket Classes.

    Sends ``data`` to a browser websocket client as one unmasked text frame
    (FIN=1, opcode=0x1, per RFC 6455).

    Raises ConnectionError if encoding or sending fails.
    """
    try:
        if type(data) != type(b""):
            print("WARNING: Web Sockets allow byte like data. Make sure your data is encoded next time.")
            # Bug fix: the encoded result was previously discarded, so any str
            # payload crashed at bytearray(data) below.
            data = data.encode()
        length = len(data)
        resp = bytearray([0b10000001])
        # Generalization: the old code wrote len(data) straight into the frame
        # header, which is only valid for payloads up to 125 bytes. Use the
        # RFC 6455 extended-length encodings beyond that.
        if length <= 125:
            resp.append(length)
        elif length <= 0xFFFF:
            resp.append(126)
            resp += struct.pack(">H", length)
        else:
            resp.append(127)
            resp += struct.pack(">Q", length)
        resp += data
        sock.send(resp)
    except Exception as e:
        raise ConnectionError("Connection Error: " + str(e))
def HostServer(HOST, PORT, connections=5, SO_REUSEADDR=True):
    """
    Helper function for Socket Classes.

    Creates, binds, and starts a listening TCP socket. A PORT environment
    variable, when present, overrides the ``PORT`` argument.
    """
    chosen_port = int(os.getenv('PORT', PORT))
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if SO_REUSEADDR == True:
        # Allow quick restarts without waiting for TIME_WAIT to expire.
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind((HOST, chosen_port))
    server.listen(connections)
    return server
def ConnectorSocket(HOST, PORT):
    """
    Helper function for Socket Classes.

    Opens and returns a TCP connection to ``HOST``:``PORT``.
    """
    connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    connection.connect((HOST, PORT))
    return connection
def WebSocket_Decode_Message(data):
    """
    Helper function for Socket Classes.

    Decodes a single masked websocket text frame into a str. Only short
    frames (payload length encoded in the second byte) are supported.
    """
    frame = bytearray(data)
    if len(frame) < 6:
        raise Exception("Error reading data")
    # FIN bit set, opcode 0x1 (text), and mask bit set are all required.
    assert ((0xFF & frame[0]) >> 7) == 0x1
    assert (0xF & frame[0]) == 0x1
    assert ((0xFF & frame[1]) >> 7) == 0x1
    length = 0x7F & frame[1]
    if length > 0:
        key = frame[2:6]
        body = frame[6:(6 + length)]
        # XOR each payload byte with the rotating 4-byte masking key.
        plain = bytearray(b ^ key[pos % 4] for pos, b in enumerate(body))
        return plain.decode("utf-8")
    return ""
class Transfer_Record(object):
    """Per-connection traffic totals, split into sent and received."""
    def __init__(self):
        # NOTE(review): the attribute name "recieved" (misspelled) is kept
        # as-is because other code in this module reads it.
        self.sent = Data_Storage()
        self.recieved = Data_Storage()
class Data_Storage(object):
    """Accumulates a byte total and a commit (message) count."""
    def __init__(self):
        self.bytes = 0    # total bytes recorded so far
        self.commits = 0  # number of add() calls (i.e. messages)
    def add(self, count, type="b"):
        """Record a transfer of ``count`` bytes.

        ``type`` is accepted for backward compatibility and ignored.
        """
        # Bug fix: previously ``count`` was ignored and 1 was added, so the
        # byte totals (and the megabytes/gigabyte properties) were wrong.
        self.bytes += count
        self.commits += 1
    @property
    def megabytes(self):
        # Decimal megabytes (1 MB = 1,000,000 bytes).
        return self.bytes * 0.000001
    @property
    def gigabyte(self):
        return self.megabytes * 0.001
class Socket_Server_Host:
    def __init__(self, HOST, PORT, on_connection_open, on_data_recv, on_question, on_connection_close=False, daemon=True, autorun=True, connections=5, SO_REUSEADDR=True, heart_beats=True, heart_beat_wait=20):
        """Host your own Socket server to allows connections to this computer.
        Parameters
        ----------
        HOST (:obj:`str`): Your hosting IP Address for this server.
        PORT (:obj:`int`): Which port you'd like to host this server on.
        on_connection_open (:obj:`def`): The function to call when you get a new connection. Gives Socket_Server_Client Class
        on_data_recv (:obj:`def`): The function to call when you receive data from a connection.
        on_question (:obj:`def`): The function to call when you receive a question from a connection.
        on_connection_close (:obj:`def`, optional): The function to call when a connection is closed.
        daemon (:obj:`bool`, optional): If you'd like the server to close when the python file closes or is interrupted.
        autorun (:obj:`bool`, optional): Will run the server on init.
        connections (:obj:`int`, optional): How many connections to allow at one time. To be used with autorun = True
        Attributes
        ----------
        running (:obj:`bool`): Is the server still running.
        connections (:obj:`dict`): Holds all connection threads.
        on_connection_open (:obj:`def`): Holds the function you specified to use, can be over written. NOTE: Overwriting this will not overwrite old connection values.
        on_connection_close (:obj:`def`): Holds the function you specified to use, can be over written. NOTE: Overwriting this will not overwrite old connection values.
        on_data_recv (:obj:`def`): Holds the function you specified to use, can be over written. NOTE: Overwriting this will not overwrite old connection values.
        """
        self.on_connection_open = on_connection_open
        self.on_connection_close = on_connection_close
        self.on_data_recv = on_data_recv
        self.HOST = HOST
        self.PORT = PORT
        self.heart_beats = heart_beats          # forwarded to every new client
        self.heart_beat_wait = heart_beat_wait  # seconds between keep-alive frames
        self.connections = {}                   # conID -> Socket_Server_Client
        self.on_question = on_question
        self.running = False
        if autorun:
            self.Run(connections, daemon, SO_REUSEADDR)
    @property
    def connection_count(self):
        # Number of currently open client connections.
        return len(self.connections)
    def Run(self, connections=5, daemon=True, SO_REUSEADDR=True):
        """
        Will start the server on the specified host, port and listening count.
        This setup allows you to shutdown, change, and restart the server.
        Parameters
        ----------
        connections (:obj:`int`): How many connections to accept at one time
        :rtype: None
        """
        self.server = HostServer(self.HOST, self.PORT, connections, SO_REUSEADDR)
        self.running = True
        # Accept connections on a background thread so Run() returns at once.
        self.broker = threading.Thread(target=self.ConnectionBroker, daemon=daemon)
        self.broker.start()
    def ConnectionBroker(self):
        """
        Server background task for accepting connections, you'll not need to use this.
        :rtype: None
        """
        while self.running:
            try:
                conn, addr = self.server.accept()
                # Shutdown() may have flipped the flag while accept() blocked.
                if self.running == False:
                    conn.close()
                    return
                # Token is keyed against the live table to guarantee uniqueness.
                conID = tokening.CreateToken(12, self.connections)
                connector = Socket_Server_Client(conn, addr, conID, self.on_data_recv, on_question=self.on_question, on_close=self.close_connection, Heart_Beat=self.heart_beats, Heart_Beat_Wait=self.heart_beat_wait)
                self.connections[conID] = connector
                self.on_connection_open(connector)
                time.sleep(0.05)
            except Exception as e:
                # Any accept failure stops the broker; note the raise happens
                # inside this background thread only.
                self.running = False
                raise e
    def close_connection(self, connection):
        """
        Server background task for clearing connections and notifying the parent file, you'll not need to use this.
        :rtype: None
        """
        try:
            self.on_connection_close(connection)
        except:
            # on_connection_close may be False (unset) or may raise; the
            # connection is removed from the table regardless.
            pass
        del self.connections[connection.conID]
    def Shutdown(self):
        """
        Shutdown the server and close all connections.
        :rtype: None
        """
        self.running = False
        # Snapshot the keys: shutdown() callbacks mutate self.connections.
        keys = list(self.connections.keys())
        for con in keys:
            try:
                self.connections[con].shutdown()
            except:
                pass
        self.connections = {}
        try:
            self.server.close()
        except:
            pass
    def CloseConnection(self, conID):
        """
        Shortcut to close a certain connection.
        Can also be used as Server.connections[conID].shutdown()
        :rtype: None
        """
        self.connections[conID].shutdown()
class Socket_Server_Client:
    def __init__(self, sock, addr, conID, on_data, on_question, on_close, Heart_Beat=True, Heart_Beat_Wait=20):
        """CLIENT for Socket_Server_Host

        Wraps a single accepted connection. A background thread first sniffs
        whether the peer is a pythonic client or a browser websocket, then
        starts the receive loop (and, for pythonic peers, the heart beat).
        """
        self.socket = sock
        self.addr = addr
        self.conID = conID
        self.on_data = on_data
        self.on_close = on_close
        self.running = True
        self.meta = {}        # free-form storage for the using program
        self.recv_data = b""  # buffered bytes not yet parsed
        self.data_usage = Transfer_Record()
        self.on_question = on_question
        self.__ask_list__ = {}  # ask() token -> False until the reply lands
        self.created = essentials.TimeStamp()
        self.heart_beat_wait = Heart_Beat_Wait
        threading.Thread(target=self.__detect_client_type__, args=[Heart_Beat]).start()
    def __detect_client_type__(self, Heart_Beat):
        """Sniff the first bytes to tell python clients from websockets."""
        self.socket.settimeout(1)
        while True:
            try:
                # Bug fix: this previously read from the *module* object
                # (socket.recv), which raised AttributeError immediately, so
                # the sniff buffer was always empty and websocket clients
                # were never detected.
                self.recv_data += self.socket.recv(1)
            except:
                # Timeout (or closed socket): the client has sent all it will.
                break
        if b"permessage-deflate" in self.recv_data:
            # Browser websocket handshake: reply with the RFC 6455 accept key.
            self.client_type = WEB_BASED
            GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
            msg = self.recv_data.decode("utf-8")
            vals = msg.replace("\r", "").split("\n")
            headers = {}
            for item in vals:
                if item != "" and ":" in item:
                    headers[item.split(":")[0]] = item.split(": ")[1]
            self.web_based_headers = headers
            key = headers['Sec-WebSocket-Key']
            sha1f = sha1()
            sha1f.update(key.encode('utf-8') + GUID.encode('utf-8'))
            response_key = base64.b64encode(sha1f.digest()).decode('utf-8')
            websocket_answer = (
                'HTTP/1.1 101 Switching Protocols',
                'Upgrade: websocket',
                'Connection: Upgrade',
                'Sec-WebSocket-Accept: {key}\r\n\r\n',
            )
            response = '\r\n'.join(websocket_answer).format(key=response_key)
            self.socket.send(response.encode('utf-8'))
        else:
            self.client_type = PYTHONIC
        threading.Thread(target=self.__data_rev__, daemon=True).start()
        if Heart_Beat == True and self.client_type == PYTHONIC:
            # Remove the sniffing timeout before the blocking recv loop.
            self.socket.settimeout(None)
            threading.Thread(target=self.__heart_beat__, daemon=True).start()
    def __heart_beat__(self):
        # Periodic no-op frame so dead connections are noticed by send().
        while self.running:
            self.send({"heart_beat_function": True})
            time.sleep(self.heart_beat_wait)
    def shutdown(self):
        """
        Shuts down this connection and removes any place it is still stored. Completes the on_close event.
        :rtype: None
        """
        try:
            self.on_close(self)
        except:
            pass
        self.running = False
        try:
            self.socket.shutdown(socket.SHUT_RDWR)
        except:
            pass
        try:
            self.socket.close()
        except:
            pass
    def send(self, data):
        """
        Send data to the remote connection.
        :rtype: None
        """
        if self.running == False:
            raise ConnectionResetError("No Connection")
        if self.client_type == PYTHONIC:
            try:
                SocketUpload(self.socket, data)
            except:
                self.shutdown()
        elif self.client_type == WEB_BASED:
            try:
                SocketUpload_WebBased(self.socket, data)
            except:
                self.shutdown()
    def ask(self, data, timeout=5):
        """Send ``data`` and block until the peer answers or ``timeout`` seconds pass."""
        if self.client_type == WEB_BASED:
            print("WARNING: ask for Web Based Clients is not currently supported.")
            return False
        tok = essentials.CreateToken(20, self.__ask_list__)
        self.__ask_list__[tok] = False
        self.send({"function_ask_question": tok, "data": data})
        # Poll until __data_rev__ stores the matching response under our token.
        while self.__ask_list__[tok] == False:
            time.sleep(0.01)
            timeout -= 0.01
            if timeout <= 0:
                raise TimeoutError("No response within time.")
        copyed = copy.deepcopy(self.__ask_list__[tok])
        del self.__ask_list__[tok]
        return copyed['data']
    def __data_rev__(self):
        """
        Server background task for accepting data and run the on_data event, you'll not need to use this.
        :rtype: None
        """
        if self.client_type == PYTHONIC:
            while self.running:
                try:
                    data, temp = SocketDownload(self.socket, self.recv_data, self.data_usage.recieved)
                    self.recv_data = temp
                except:
                    self.shutdown()
                    return
                # Route protocol frames; everything else goes to the callback.
                if type(data) == type({}) and 'heart_beat_function' in data:
                    pass
                elif type(data) == type({}) and 'function_ask_response' in data:
                    self.__ask_list__[data['function_ask_response']] = data
                elif type(data) == type({}) and 'function_ask_question' in data:
                    threading.Thread(target=self.on_question, args=[Socket_Question(data['data'], self, data['function_ask_question'])], daemon=True).start()
                else:
                    threading.Thread(target=self.on_data, args=[data, self], daemon=True).start()
                time.sleep(0.05)
        elif self.client_type == WEB_BASED:
            while self.running:
                msg = b""
                conti = True
                # Accumulate newline-terminated chunks until the 1s read
                # timeout fires, then treat whatever arrived as one frame.
                while conti:
                    buffer = b""
                    while b"\n" not in buffer:
                        try:
                            buffer += self.socket.recv(1)
                        except:
                            conti = False
                            break
                    msg += buffer
                if msg != b"":
                    self.data_usage.recieved.add(len(msg))
                    threading.Thread(target=self.on_data, args=[WebSocket_Decode_Message(msg), self], daemon=True).start()
class Socket_Question(object):
    """Wraps a question received from a peer so it can be answered in place."""
    def __init__(self, data, client, tok):
        # Payload the peer attached to the question.
        self.data = data
        # Connection object the question arrived on.
        self.questioner = client
        # Token used to route the reply back to the peer's waiting ask() call.
        self.__answer_token__ = tok
    def answer(self, data):
        """Send ``data`` back to the peer as the response to this question."""
        reply = {"function_ask_response": self.__answer_token__, "data": data}
        self.questioner.send(reply)
class Socket_Connector:
    def __init__(self, HOST, PORT, on_data_recv, on_question, on_connection_close, Heart_Beat=True, Heart_Beat_Wait=10, legacy=False, legacy_buffer_size=1024):
        """Host your own Socket server to allows connections to this computer.
        Parameters
        ----------
        HOST (:obj:`str`): The hosting IP Address for the server.
        PORT (:obj:`int`): The port the server is using.
        on_data_recv (:obj:`def`): The function to call when you receive data from a connection.
        on_question (:obj:`def`): The function to call when you receive Socket_Question from a connection.
        on_connection_close (:obj:`def`, optional): The function to call when a connection is closed.
        Attributes
        ----------
        running (:obj:`bool`): Is the server still running.
        on_connection_close (:obj:`def`): Holds the function you specified to use, can be over written.
        on_data_recv (:obj:`def`): Holds the function you specified to use, can be over written.
        """
        self.running = True
        self.HOST = HOST
        self.legacy = legacy                          # raw-bytes mode (no pickle framing)
        self.legacy_buffer_size = legacy_buffer_size  # recv() size in legacy mode
        self.PORT = PORT
        self.recv_data = b""   # buffered bytes not yet parsed
        self.__ask_list__ = {} # ask() token -> False until the reply lands
        self.on_question = on_question
        self.on_connection_close = on_connection_close
        self.socket = ConnectorSocket(HOST, PORT)
        self.on_data_recv = on_data_recv
        threading.Thread(target=self.__data_rev__, daemon=True).start()
        if Heart_Beat == True:
            self.heart_beat_wait = Heart_Beat_Wait
            threading.Thread(target=self.__heart_beat__, daemon=True).start()
    def __heart_beat__(self):
        # Periodic no-op frame so a dead server is noticed by send().
        while self.running:
            self.send({"heart_beat_function": True})
            time.sleep(self.heart_beat_wait)
    def ask(self, data, timeout=5):
        """Send ``data`` and block until the server answers or ``timeout`` seconds pass."""
        if self.legacy:
            print("Can't ask questions to legacy connections")
            return
        tok = essentials.CreateToken(20, self.__ask_list__)
        self.__ask_list__[tok] = False
        self.send({"function_ask_question": tok, "data": data})
        # Poll until __data_rev__ stores the matching response under our token.
        while self.__ask_list__[tok] == False:
            time.sleep(0.01)
            timeout -= 0.01
            if timeout <= 0:
                raise TimeoutError("No response within time.")
        copyed = copy.deepcopy(self.__ask_list__[tok])
        del self.__ask_list__[tok]
        return copyed['data']
    def send(self, data):
        """
        Send data to the remote connection.
        :rtype: None
        """
        if self.running == False:
            raise ConnectionResetError("No Connection")
        try:
            if self.legacy:
                # Legacy mode sends the caller's bytes as-is.
                self.socket.sendall(data)
            else:
                SocketUpload(self.socket, data)
        except Exception as e:
            print(e)
            self.shutdown()
    def shutdown(self):
        """
        Shuts down this connection. Completes the on_close event.
        :rtype: None
        """
        self.running = False
        try:
            # Consistency fix: guard the user callback the same way the
            # server-side class does, so a faulty handler cannot abort the
            # socket cleanup below. (Also removed a leftover debug print.)
            self.on_connection_close(self)
        except:
            pass
        try:
            self.socket.shutdown(socket.SHUT_RDWR)
        except:
            pass
        try:
            self.socket.close()
        except:
            pass
    def __data_rev__(self):
        """
        Client background task for accepting data and run the on_data event, you'll not need to use this.
        :rtype: None
        """
        while self.running:
            if self.legacy:
                # Legacy mode: forward raw chunks straight to the callback.
                self.on_data_recv(self.socket.recv(self.legacy_buffer_size))
            else:
                try:
                    data, temp = SocketDownload(self.socket, self.recv_data)
                    self.recv_data = temp
                except:
                    self.shutdown()
                    return
                # Route protocol frames; everything else goes to the callback.
                if type(data) == type({}) and 'heart_beat_function' in data:
                    pass
                elif type(data) == type({}) and 'function_ask_response' in data:
                    self.__ask_list__[data['function_ask_response']] = data
                elif type(data) == type({}) and 'function_ask_question' in data:
                    self.on_question(Socket_Question(data['data'], self, data['function_ask_question']))
                else:
                    self.on_data_recv(data)
"""
The following is intended to record package loads.
Nothing about your person, location, or IP Address is recorded.
This task:
Runs in the background,
Keeps a maximum open time of 3 seconds,
Won't run if there is no internet.
Won't keep your program running if your program finishes before it does.
Boosts my morale to keep this package free and up to date.
This specific placement is to determine how many programs are using this script.
If you wish to not be a part of this program, please delete these next lines or change true to false.
"""
if True:  # change to False (or delete this block) to opt out of the ping below
    try:
        import threading
        def bg():
            # Best-effort, anonymous usage ping; see the notice string above.
            try:
                import requests
                response = requests.get("https://analyticscom.mknxgn.pro/rpg/mknxgn_essentials_SOP_V1", timeout=3)
                # If you ever feel like deleting this, uncomment the line below...
                #print(response.text)
            except:
                # No internet (or requests missing): silently skip the ping.
                pass
        threading.Thread(target=bg, daemon=True).start()
    except:
        pass
PyQt-Client.py | import array
import threading
import numpy as np
import time
import pyqtgraph as pg
import socket
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout, QLabel
from PyQt5.QtCore import Qt
i = 0
bpm = 0
temp_d = 0
def tcpLink(sock, addr):
    """Per-connection worker: reads ECG sample frames from ``sock`` and
    updates the module-level plotting state (``data``, ``bpm``, ``temp_d``).
    """
    print('Accept new connection from %s:%s...' % addr)
    sock.send('Welcome!'.encode())  # greet the client when the connection opens
    while True:
        data_b = sock.recv(100)  # read at most 100 bytes per frame
        time.sleep(0.08)
        # Bug fix: recv() returns bytes, so the old comparison with the str
        # 'exit' could never be true in Python 3; also stop when the peer
        # closes the connection (empty read) instead of spinning forever.
        if not data_b or data_b == b'exit':
            break
        global i
        global bpm
        global temp_d
        # Bytes 0..62 are individual ECG samples.
        for j in range(0, len(data_b[0:63])):
            raw_data1 = int.from_bytes(data_b[j:j+1], byteorder='little')
            raw_data1 = raw_data1 & 0xff
            if i < historyLength:
                data[i] = raw_data1
                i = i + 1
            else:
                # Buffer full: scroll everything left and append at the end.
                data[:-1] = data[1:]
                data[i-1] = raw_data1
        print(len(data_b))
        # Byte 64 carries the heart rate.
        bpm = int.from_bytes(data_b[64:65], byteorder='little')
        last_temp = temp_d
        # Bytes 65..66 carry the temperature in hundredths of a degree.
        temp_d = int.from_bytes(data_b[65:67], byteorder='little')
        temp_d = temp_d & 0xffff
        temp_d = temp_d / 100.0
        if temp_d > 50:
            # Reject implausible readings (sensor glitch); keep the last value.
            temp_d = last_temp
    sock.close()
    print('Connection from %s:%s closed.' % addr)
def plotData():
    """Timer callback: push the latest buffers into the plot widgets."""
    global temp_d
    global bpm
    curve.setData(data)      # main ECG trace
    curvezoom.setData(data)  # zoomed-in trace
    text2.setNum(bpm)        # heart-rate readout
    text4.setNum(temp_d)     # temperature readout
if __name__ == "__main__":
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Listening endpoint:
    host = socket.gethostname()  # local host name (currently unused)
    s.bind(("192.168.137.1", 9999))  # NOTE: hard-coded hotspot IP — must match this machine's address
    s.listen(5)
    print('Waiting for connection...')
    pg.setConfigOption('background', 'w')
    app = pg.mkQApp()  # create the Qt application
    win = pg.GraphicsWindow()  # create the window
    win.setWindowTitle(u'心电曲线服务器端')
    win.resize(800, 500)  # window size
    data = array.array('i')  # placeholder; replaced by the numpy buffer below
    historyLength = 4096  # number of samples along the x axis
    a = 0
    data=np.zeros(historyLength).__array__('d')  # fixed-length double buffer (overwrites the array above)
    p = win.addPlot(pen=(255,0,0),row=0,col=0,)  # add plot p to the window
    p.showGrid(x=True, y=True)  # show x and y grid lines
    p.setRange(xRange=[0, historyLength], yRange=[0, 256], padding=0)
    p.setLabel(axis='left', text='y / Data')  # left axis
    p.setLabel(axis='bottom', text='x / Samples')
    p.setTitle('ECG')  # plot title
    curve = p.plot(pen=(128,0,0))  # main curve
    curve.setData(data)
    # Draggable region selecting which sample range the zoom plot shows.
    zoom = pg.LinearRegionItem([512,1024])
    zoom.setZValue(-10)
    p.addItem(zoom)
    l1 = win.addLayout(row=1,col=0)
    pzoom = l1.addPlot(title='Zooming')
    pzoom.setRange(xRange=[512,1024],yRange=[0,256])
    curvezoom = pzoom.plot(pen=(255,0,0))
    curvezoom.setData(data)
    # text = l1.addViewBox(row=0,col=1)
    # Qt labels are embedded in the pyqtgraph layout through proxy widgets.
    proxy1 = pg.QtGui.QGraphicsProxyWidget()
    proxy2 = pg.QtGui.QGraphicsProxyWidget()
    proxy3 = pg.QtGui.QGraphicsProxyWidget()
    proxy4 = pg.QtGui.QGraphicsProxyWidget()
    text1 = QLabel('BPM:')
    text2 = QLabel('0000000')
    text3 = QLabel('Tmp:')
    text4 = QLabel('0000000')
    text1.setStyleSheet("QLabel{font-size:24px;font-family: \"Microsoft YaHei\";}")
    text2.setStyleSheet("QLabel{font-size:24px;font-family: \"Microsoft YaHei\";}")
    text3.setStyleSheet("QLabel{font-size:24px;font-family: \"Microsoft YaHei\";}")
    text4.setStyleSheet("QLabel{font-size:24px;font-family: \"Microsoft YaHei\";}")
    proxy1.setWidget(text1)
    l1.addItem(proxy1,row=0,col=1)
    proxy2.setWidget(text2)
    l1.addItem(proxy2,row=0,col=2)
    proxy3.setWidget(text3)
    l1.addItem(proxy3,row=1,col=1)
    proxy4.setWidget(text4)
    l1.addItem(proxy4,row=1,col=2)
    # box = l1.addViewBox()
    def updatePlot():
        # Keep the zoom plot's x range in sync with the selected region.
        pzoom.setXRange(*zoom.getRegion(), padding=0)
    def updateRegion():
        # Keep the region selector in sync when the zoom plot is panned.
        zoom.setRegion(pzoom.getViewBox().viewRange()[0])
    zoom.sigRegionChanged.connect(updatePlot)
    pzoom.sigXRangeChanged.connect(updateRegion)
    while True:
        # Accept one connection:
        sock, addr = s.accept()
        # Handle the TCP link in a new thread:
        t = threading.Thread(target=tcpLink, args=(sock, addr))
        t.start()
        timer = pg.QtCore.QTimer()
        timer.timeout.connect(plotData)  # refresh the display periodically
        timer.start(50)  # refresh interval in ms
        app.exec_()
        break  # only one client is served; the program exits afterwards
main.py | from quixstreaming import QuixStreamingClient
from quixstreaming.app import App
from in_memory_view import InMemoryView
import os
from dash import dcc, Input, Output, Dash, html
import dash_table as dt
import dash_bootstrap_components as dbc
import threading
client = QuixStreamingClient()
# Subscribe to the configured topic under the "dashboard" consumer group.
input_topic = client.open_input_topic(os.environ["input"], "dashboard")
# In-memory materialized view of the stream; its .state is a DataFrame-like table.
in_memory_view = InMemoryView(input_topic)
external_stylesheets = [dbc.themes.FLATLY]
app = Dash(external_stylesheets=external_stylesheets)
# Single-page layout: a title, a live data table, and a 1-second refresh timer.
app.layout = dbc.Container([
    html.H1("Dashboard"),
    dt.DataTable(
        id='tbl', data=in_memory_view.state.to_dict('records'),
        columns=[{"name": i, "id": i} for i in in_memory_view.state.columns],
    ),
    dcc.Interval(id="interval", interval=1000),
])
@app.callback(Output('tbl', 'data'), [Input('interval', 'n_intervals')])
def update_data(n_intervals):
    """Refresh the table rows from the in-memory view on every interval tick."""
    return in_memory_view.state.to_dict('records')
@app.callback(Output('tbl', 'columns'), [Input('interval', 'n_intervals')])
def update_columns(n_intervals):
    """Refresh the table's column set, which can grow as new fields arrive."""
    return [{"name": i, "id": i} for i in in_memory_view.state.columns]
def web_server():
    """Run the Dash app; debug is off so it behaves in a background thread."""
    app.run_server(debug=False, host="0.0.0.0", port=80)
# Serve the dashboard in the background while the streaming app runs.
t1 = threading.Thread(target=web_server)
t1.start()
App.run()
|
test_bz2.py | from test import support
from test.support import bigmemtest, _4G
import array
import unittest
from io import BytesIO, DEFAULT_BUFFER_SIZE
import os
import pickle
import glob
import tempfile
import pathlib
import random
import shutil
import subprocess
import threading
from test.support import unlink
import _compression
import sys
# Skip tests if the bz2 module doesn't exist.
bz2 = support.import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
has_cmdline_bunzip2 = None
def ext_decompress(data):
    """Decompress ``data`` with the external bunzip2 tool when available,
    falling back to the bz2 module otherwise."""
    global has_cmdline_bunzip2
    if has_cmdline_bunzip2 is None:
        # Probe for the command-line tool once and cache the answer.
        has_cmdline_bunzip2 = bool(shutil.which('bunzip2'))
    if not has_cmdline_bunzip2:
        return bz2.decompress(data)
    return subprocess.check_output(['bunzip2'], input=data)
class BaseTest(unittest.TestCase):
    "Base for other testcases."
    # Fixture text: an /etc/passwd-style listing, joined into TEXT below.
    TEXT_LINES = [
        b'root:x:0:0:root:/root:/bin/bash\n',
        b'bin:x:1:1:bin:/bin:\n',
        b'daemon:x:2:2:daemon:/sbin:\n',
        b'adm:x:3:4:adm:/var/adm:\n',
        b'lp:x:4:7:lp:/var/spool/lpd:\n',
        b'sync:x:5:0:sync:/sbin:/bin/sync\n',
        b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\n',
        b'halt:x:7:0:halt:/sbin:/sbin/halt\n',
        b'mail:x:8:12:mail:/var/spool/mail:\n',
        b'news:x:9:13:news:/var/spool/news:\n',
        b'uucp:x:10:14:uucp:/var/spool/uucp:\n',
        b'operator:x:11:0:operator:/root:\n',
        b'games:x:12:100:games:/usr/games:\n',
        b'gopher:x:13:30:gopher:/usr/lib/gopher-data:\n',
        b'ftp:x:14:50:FTP User:/var/ftp:/bin/bash\n',
        b'nobody:x:65534:65534:Nobody:/home:\n',
        b'postfix:x:100:101:postfix:/var/spool/postfix:\n',
        b'niemeyer:x:500:500::/home/niemeyer:/bin/bash\n',
        b'postgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\n',
        b'mysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\n',
        b'www:x:103:104::/var/www:/bin/false\n',
    ]
    TEXT = b''.join(TEXT_LINES)
    # DATA is TEXT pre-compressed offline, so read tests do not depend on the
    # compressor working.
    DATA = b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
    # A valid bz2 stream whose decompressed payload is empty.
    EMPTY_DATA = b'BZh9\x17rE8P\x90\x00\x00\x00\x00'
    BAD_DATA = b'this is not a valid bzip2 file'
    # Some tests need more than one block of uncompressed data. Since one block
    # is at least 100,000 bytes, we gather some data dynamically and compress it.
    # Note that this assumes that compression works correctly, so we cannot
    # simply use the bigger test data for all tests.
    test_size = 0
    BIG_TEXT = bytearray(128*1024)
    # Fill BIG_TEXT with the source of neighbouring .py files at import time.
    for fname in glob.glob(os.path.join(glob.escape(os.path.dirname(__file__)), '*.py')):
        with open(fname, 'rb') as fh:
            test_size += fh.readinto(memoryview(BIG_TEXT)[test_size:])
        if test_size > 128*1024:
            break
    BIG_DATA = bz2.compress(BIG_TEXT, compresslevel=1)
    def setUp(self):
        # Fresh scratch path per test; the fd is closed immediately because
        # tests reopen the path themselves.
        fd, self.filename = tempfile.mkstemp()
        os.close(fd)
    def tearDown(self):
        unlink(self.filename)
class BZ2FileTest(BaseTest):
"Test the BZ2File class."
def createTempFile(self, streams=1, suffix=b""):
    # Write ``streams`` concatenated bz2 streams (plus an optional raw
    # suffix) to the scratch file created in setUp().
    with open(self.filename, "wb") as f:
        f.write(self.DATA * streams)
        f.write(suffix)
def testBadArgs(self):
    # Constructor validation: bad filename type, bad modes, and
    # out-of-range compresslevel values.
    self.assertRaises(TypeError, BZ2File, 123.456)
    self.assertRaises(ValueError, BZ2File, os.devnull, "z")
    self.assertRaises(ValueError, BZ2File, os.devnull, "rx")
    self.assertRaises(ValueError, BZ2File, os.devnull, "rbt")
    self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=0)
    self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=10)
    # compresslevel is keyword-only
    self.assertRaises(TypeError, BZ2File, os.devnull, "r", 3)
def testRead(self):
    # Whole-file read returns the original text.
    self.createTempFile()
    with BZ2File(self.filename) as bz2f:
        self.assertRaises(TypeError, bz2f.read, float())
        self.assertEqual(bz2f.read(), self.TEXT)
def testReadBadFile(self):
    # A file with no valid stream at all raises OSError on read.
    self.createTempFile(streams=0, suffix=self.BAD_DATA)
    with BZ2File(self.filename) as bz2f:
        self.assertRaises(OSError, bz2f.read)
def testReadMultiStream(self):
    # Concatenated streams decompress to the concatenated text.
    self.createTempFile(streams=5)
    with BZ2File(self.filename) as bz2f:
        self.assertRaises(TypeError, bz2f.read, float())
        self.assertEqual(bz2f.read(), self.TEXT * 5)
def testReadMonkeyMultiStream(self):
    # Test BZ2File.read() on a multi-stream archive where a stream
    # boundary coincides with the end of the raw read buffer.
    buffer_size = _compression.BUFFER_SIZE
    _compression.BUFFER_SIZE = len(self.DATA)
    try:
        self.createTempFile(streams=5)
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.read, float())
            self.assertEqual(bz2f.read(), self.TEXT * 5)
    finally:
        # Always restore the module-level buffer size.
        _compression.BUFFER_SIZE = buffer_size
def testReadTrailingJunk(self):
    # Junk after the stream is ignored.
    self.createTempFile(suffix=self.BAD_DATA)
    with BZ2File(self.filename) as bz2f:
        self.assertEqual(bz2f.read(), self.TEXT)
def testReadMultiStreamTrailingJunk(self):
    # Junk after multiple streams is ignored too.
    self.createTempFile(streams=5, suffix=self.BAD_DATA)
    with BZ2File(self.filename) as bz2f:
        self.assertEqual(bz2f.read(), self.TEXT * 5)
def testRead0(self):
    # read(0) returns an empty bytes object without consuming data.
    self.createTempFile()
    with BZ2File(self.filename) as bz2f:
        self.assertRaises(TypeError, bz2f.read, float())
        self.assertEqual(bz2f.read(0), b"")
def testReadChunk10(self):
    # Read the file 10 bytes at a time and verify the reassembled text.
    self.createTempFile()
    with BZ2File(self.filename) as bz2f:
        text = b''
        while True:
            # Renamed from ``str``: the local previously shadowed the builtin.
            chunk = bz2f.read(10)
            if not chunk:
                break
            text += chunk
        self.assertEqual(text, self.TEXT)
def testReadChunk10MultiStream(self):
    # Chunked reads must also cross stream boundaries transparently.
    self.createTempFile(streams=5)
    with BZ2File(self.filename) as bz2f:
        text = b''
        while True:
            # Renamed from ``str``: the local previously shadowed the builtin.
            chunk = bz2f.read(10)
            if not chunk:
                break
            text += chunk
        self.assertEqual(text, self.TEXT * 5)
def testRead100(self):
    # A bounded read returns exactly the first 100 bytes.
    self.createTempFile()
    with BZ2File(self.filename) as bz2f:
        self.assertEqual(bz2f.read(100), self.TEXT[:100])
def testPeek(self):
    # peek() returns some prefix of the text without advancing the stream.
    self.createTempFile()
    with BZ2File(self.filename) as bz2f:
        pdata = bz2f.peek()
        self.assertNotEqual(len(pdata), 0)
        self.assertTrue(self.TEXT.startswith(pdata))
        self.assertEqual(bz2f.read(), self.TEXT)
def testReadInto(self):
    # readinto() fills the caller's buffer and reports bytes written,
    # including a short count at end of stream.
    self.createTempFile()
    with BZ2File(self.filename) as bz2f:
        n = 128
        b = bytearray(n)
        self.assertEqual(bz2f.readinto(b), n)
        self.assertEqual(b, self.TEXT[:n])
        n = len(self.TEXT) - n
        b = bytearray(len(self.TEXT))
        self.assertEqual(bz2f.readinto(b), n)
        self.assertEqual(b[:n], self.TEXT[-n:])
def testReadLine(self):
    # readline() yields the fixture lines one at a time.
    self.createTempFile()
    with BZ2File(self.filename) as bz2f:
        self.assertRaises(TypeError, bz2f.readline, None)
        for line in self.TEXT_LINES:
            self.assertEqual(bz2f.readline(), line)
def testReadLineMultiStream(self):
    # readline() works across stream boundaries as well.
    self.createTempFile(streams=5)
    with BZ2File(self.filename) as bz2f:
        self.assertRaises(TypeError, bz2f.readline, None)
        for line in self.TEXT_LINES * 5:
            self.assertEqual(bz2f.readline(), line)
def testReadLines(self):
    # readlines() returns all lines at once.
    self.createTempFile()
    with BZ2File(self.filename) as bz2f:
        self.assertRaises(TypeError, bz2f.readlines, None)
        self.assertEqual(bz2f.readlines(), self.TEXT_LINES)
def testReadLinesMultiStream(self):
    self.createTempFile(streams=5)
    with BZ2File(self.filename) as bz2f:
        self.assertRaises(TypeError, bz2f.readlines, None)
        self.assertEqual(bz2f.readlines(), self.TEXT_LINES * 5)
def testIterator(self):
    # Iterating a BZ2File yields its lines.
    self.createTempFile()
    with BZ2File(self.filename) as bz2f:
        self.assertEqual(list(iter(bz2f)), self.TEXT_LINES)
def testIteratorMultiStream(self):
    self.createTempFile(streams=5)
    with BZ2File(self.filename) as bz2f:
        self.assertEqual(list(iter(bz2f)), self.TEXT_LINES * 5)
def testClosedIteratorDeadlock(self):
    # Issue #3309: Iteration on a closed BZ2File should release the lock.
    self.createTempFile()
    bz2f = BZ2File(self.filename)
    bz2f.close()
    self.assertRaises(ValueError, next, bz2f)
    # This call will deadlock if the above call failed to release the lock.
    self.assertRaises(ValueError, bz2f.readlines)
def testWrite(self):
    # Round-trip: write with BZ2File, verify with the external decompressor.
    with BZ2File(self.filename, "w") as bz2f:
        self.assertRaises(TypeError, bz2f.write)
        bz2f.write(self.TEXT)
    with open(self.filename, 'rb') as f:
        self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteChunks10(self):
    # Write the text 10 bytes at a time; the result must decompress whole.
    with BZ2File(self.filename, "w") as bz2f:
        n = 0
        while True:
            # Renamed from ``str``: the local previously shadowed the builtin.
            chunk = self.TEXT[n*10:(n+1)*10]
            if not chunk:
                break
            bz2f.write(chunk)
            n += 1
    with open(self.filename, 'rb') as f:
        self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteNonDefaultCompressLevel(self):
    # A non-default compresslevel must produce byte-identical output to
    # bz2.compress at the same level.
    expected = bz2.compress(self.TEXT, compresslevel=5)
    with BZ2File(self.filename, "w", compresslevel=5) as bz2f:
        bz2f.write(self.TEXT)
    with open(self.filename, "rb") as f:
        self.assertEqual(f.read(), expected)
def testWriteLines(self):
    with BZ2File(self.filename, "w") as bz2f:
        self.assertRaises(TypeError, bz2f.writelines)
        bz2f.writelines(self.TEXT_LINES)
    # Issue #1535500: Calling writelines() on a closed BZ2File
    # should raise an exception.
    self.assertRaises(ValueError, bz2f.writelines, ["a"])
    with open(self.filename, 'rb') as f:
        self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteMethodsOnReadOnlyFile(self):
    # Writing to a file opened for reading must fail with OSError.
    with BZ2File(self.filename, "w") as bz2f:
        bz2f.write(b"abc")
    with BZ2File(self.filename, "r") as bz2f:
        self.assertRaises(OSError, bz2f.write, b"a")
        self.assertRaises(OSError, bz2f.writelines, [b"a"])
def testAppend(self):
    # Append mode adds a second stream; decompression yields TEXT twice.
    with BZ2File(self.filename, "w") as bz2f:
        self.assertRaises(TypeError, bz2f.write)
        bz2f.write(self.TEXT)
    with BZ2File(self.filename, "a") as bz2f:
        self.assertRaises(TypeError, bz2f.write)
        bz2f.write(self.TEXT)
    with open(self.filename, 'rb') as f:
        self.assertEqual(ext_decompress(f.read()), self.TEXT * 2)
def testSeekForward(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekForwardAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(len(self.TEXT) + 150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwards(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def testSeekBackwardsAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
readto = len(self.TEXT) + 100
while readto > 0:
readto -= len(bz2f.read(readto))
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[100-150:] + self.TEXT)
def testSeekBackwardsFromEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150, 2)
self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])
def testSeekBackwardsFromEndAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-1000, 2)
self.assertEqual(bz2f.read(), (self.TEXT * 2)[-1000:])
def testSeekPostEnd(self):
    """Seeking past EOF clamps the position to the end of the data."""
    self.createTempFile()
    with BZ2File(self.filename) as bz2f:
        bz2f.seek(150000)
        self.assertEqual(bz2f.tell(), len(self.TEXT))
        self.assertEqual(bz2f.read(), b"")
def testSeekPostEndMultiStream(self):
    """Seeking past EOF clamps to the total size of all five streams."""
    self.createTempFile(streams=5)
    with BZ2File(self.filename) as bz2f:
        bz2f.seek(150000)
        self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
        self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwice(self):
    """A second seek past EOF is a no-op once already clamped at the end."""
    self.createTempFile()
    with BZ2File(self.filename) as bz2f:
        bz2f.seek(150000)
        bz2f.seek(150000)
        self.assertEqual(bz2f.tell(), len(self.TEXT))
        self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwiceMultiStream(self):
    """Double past-EOF seek on a multi-stream file still clamps correctly."""
    self.createTempFile(streams=5)
    with BZ2File(self.filename) as bz2f:
        bz2f.seek(150000)
        bz2f.seek(150000)
        self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
        self.assertEqual(bz2f.read(), b"")
def testSeekPreStart(self):
    """Seeking to a negative absolute offset clamps the position to 0."""
    self.createTempFile()
    with BZ2File(self.filename) as bz2f:
        bz2f.seek(-150)
        self.assertEqual(bz2f.tell(), 0)
        self.assertEqual(bz2f.read(), self.TEXT)
def testSeekPreStartMultiStream(self):
    """Negative absolute seek clamps to 0 for multi-stream files too."""
    self.createTempFile(streams=2)
    with BZ2File(self.filename) as bz2f:
        bz2f.seek(-150)
        self.assertEqual(bz2f.tell(), 0)
        self.assertEqual(bz2f.read(), self.TEXT * 2)
def testFileno(self):
    """fileno() delegates to the wrapped file and raises ValueError once closed."""
    self.createTempFile()
    with open(self.filename, 'rb') as rawf:
        bz2f = BZ2File(rawf)
        try:
            self.assertEqual(bz2f.fileno(), rawf.fileno())
        finally:
            bz2f.close()
        self.assertRaises(ValueError, bz2f.fileno)
def testSeekable(self):
    """seekable() tracks the underlying object and raises ValueError when closed."""
    # Readable file backed by a seekable BytesIO: seekable before and after a read.
    bz2f = BZ2File(BytesIO(self.DATA))
    try:
        self.assertTrue(bz2f.seekable())
        bz2f.read()
        self.assertTrue(bz2f.seekable())
    finally:
        bz2f.close()
    self.assertRaises(ValueError, bz2f.seekable)
    # Write-mode files are never seekable.
    bz2f = BZ2File(BytesIO(), "w")
    try:
        self.assertFalse(bz2f.seekable())
    finally:
        bz2f.close()
    self.assertRaises(ValueError, bz2f.seekable)
    # A non-seekable source makes the BZ2File non-seekable as well.
    src = BytesIO(self.DATA)
    src.seekable = lambda: False
    bz2f = BZ2File(src)
    try:
        self.assertFalse(bz2f.seekable())
    finally:
        bz2f.close()
    self.assertRaises(ValueError, bz2f.seekable)
def testReadable(self):
    """readable() is True in read mode, False in write mode, ValueError when closed."""
    bz2f = BZ2File(BytesIO(self.DATA))
    try:
        self.assertTrue(bz2f.readable())
        bz2f.read()
        self.assertTrue(bz2f.readable())
    finally:
        bz2f.close()
    self.assertRaises(ValueError, bz2f.readable)
    bz2f = BZ2File(BytesIO(), "w")
    try:
        self.assertFalse(bz2f.readable())
    finally:
        bz2f.close()
    self.assertRaises(ValueError, bz2f.readable)
def testWritable(self):
    """writable() is False in read mode, True in write mode, ValueError when closed."""
    bz2f = BZ2File(BytesIO(self.DATA))
    try:
        self.assertFalse(bz2f.writable())
        bz2f.read()
        self.assertFalse(bz2f.writable())
    finally:
        bz2f.close()
    self.assertRaises(ValueError, bz2f.writable)
    bz2f = BZ2File(BytesIO(), "w")
    try:
        self.assertTrue(bz2f.writable())
    finally:
        bz2f.close()
    self.assertRaises(ValueError, bz2f.writable)
def testOpenDel(self):
    """Repeated open/del cycles must not leak resources or crash in __del__."""
    self.createTempFile()
    for i in range(10000):
        o = BZ2File(self.filename)
        del o
def testOpenNonexistent(self):
    """Opening a missing path raises OSError."""
    self.assertRaises(OSError, BZ2File, "/non/existent")
def testReadlinesNoNewline(self):
    """readlines() on data without a trailing newline (Issue #1191043)."""
    # Pre-compressed bz2 payload whose decompressed content is b'Test'.
    data = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
    with open(self.filename, "wb") as f:
        f.write(data)
    with BZ2File(self.filename) as bz2f:
        lines = bz2f.readlines()
        self.assertEqual(lines, [b'Test'])
    with BZ2File(self.filename) as bz2f:
        xlines = list(bz2f.readlines())
        self.assertEqual(xlines, [b'Test'])
def testContextProtocol(self):
    """BZ2File supports the context-manager protocol and propagates exceptions."""
    f = None
    with BZ2File(self.filename, "wb") as f:
        f.write(b"xxx")
    f = BZ2File(self.filename, "rb")
    f.close()
    # Entering a closed file must raise ValueError.
    try:
        with f:
            pass
    except ValueError:
        pass
    else:
        self.fail("__enter__ on a closed file didn't raise an exception")
    # Exceptions raised inside the with-body must propagate out.
    try:
        with BZ2File(self.filename, "wb") as f:
            1/0
    except ZeroDivisionError:
        pass
    else:
        self.fail("1/0 didn't raise an exception")
def testThreading(self):
    # Issue #7205: Using a BZ2File from several threads shouldn't deadlock.
    data = b"1" * 2**20
    nthreads = 10
    with BZ2File(self.filename, 'wb') as f:
        # Each thread writes through the shared file object; internal
        # locking must serialize the writes without deadlocking.
        def comp():
            for i in range(5):
                f.write(data)
        threads = [threading.Thread(target=comp) for i in range(nthreads)]
        with support.start_threads(threads):
            pass
def testMixedIterationAndReads(self):
    """Interleaving next()/readline()/read() keeps a consistent file position."""
    self.createTempFile()
    linelen = len(self.TEXT_LINES[0])
    halflen = linelen // 2
    with BZ2File(self.filename) as bz2f:
        bz2f.read(halflen)
        self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:])
        self.assertEqual(bz2f.read(), self.TEXT[linelen:])
    with BZ2File(self.filename) as bz2f:
        bz2f.readline()
        self.assertEqual(next(bz2f), self.TEXT_LINES[1])
        self.assertEqual(bz2f.readline(), self.TEXT_LINES[2])
    with BZ2File(self.filename) as bz2f:
        bz2f.readlines()
        self.assertRaises(StopIteration, next, bz2f)
        self.assertEqual(bz2f.readlines(), [])
def testMultiStreamOrdering(self):
    # Test the ordering of streams when reading a multi-stream archive.
    data1 = b"foo" * 1000
    data2 = b"bar" * 1000
    with BZ2File(self.filename, "w") as bz2f:
        bz2f.write(data1)
    with BZ2File(self.filename, "a") as bz2f:
        bz2f.write(data2)
    with BZ2File(self.filename) as bz2f:
        self.assertEqual(bz2f.read(), data1 + data2)
def testOpenBytesFilename(self):
    """BZ2File accepts a bytes filename."""
    str_filename = self.filename
    try:
        bytes_filename = str_filename.encode("ascii")
    except UnicodeEncodeError:
        self.skipTest("Temporary file name needs to be ASCII")
    with BZ2File(bytes_filename, "wb") as f:
        f.write(self.DATA)
    with BZ2File(bytes_filename, "rb") as f:
        self.assertEqual(f.read(), self.DATA)
    # Sanity check that we are actually operating on the right file.
    with BZ2File(str_filename, "rb") as f:
        self.assertEqual(f.read(), self.DATA)
def testOpenPathLikeFilename(self):
    """BZ2File accepts an os.PathLike (pathlib.Path) filename."""
    filename = pathlib.Path(self.filename)
    with BZ2File(filename, "wb") as f:
        f.write(self.DATA)
    with BZ2File(filename, "rb") as f:
        self.assertEqual(f.read(), self.DATA)
def testDecompressLimited(self):
    """Decompressed data buffering should be limited"""
    # A tiny compressed "bomb" expands to 2 MB; reading 1 byte must not
    # force the whole payload to be decompressed into the buffer.
    bomb = bz2.compress(b'\0' * int(2e6), compresslevel=9)
    self.assertLess(len(bomb), _compression.BUFFER_SIZE)
    decomp = BZ2File(BytesIO(bomb))
    self.assertEqual(decomp.read(1), b'\0')
    max_decomp = 1 + DEFAULT_BUFFER_SIZE
    self.assertLessEqual(decomp._buffer.raw.tell(), max_decomp,
                         "Excessive amount of data was decompressed")
# Tests for a BZ2File wrapping another file object:
def testReadBytesIO(self):
    """Reading through a wrapped BytesIO; closing BZ2File leaves it open."""
    with BytesIO(self.DATA) as bio:
        with BZ2File(bio) as bz2f:
            self.assertRaises(TypeError, bz2f.read, float())
            self.assertEqual(bz2f.read(), self.TEXT)
        self.assertFalse(bio.closed)
def testPeekBytesIO(self):
    """peek() returns a non-empty prefix without advancing the position."""
    with BytesIO(self.DATA) as bio:
        with BZ2File(bio) as bz2f:
            pdata = bz2f.peek()
            self.assertNotEqual(len(pdata), 0)
            self.assertTrue(self.TEXT.startswith(pdata))
            self.assertEqual(bz2f.read(), self.TEXT)
def testWriteBytesIO(self):
    """Writing through a wrapped BytesIO; closing BZ2File leaves it open."""
    with BytesIO() as bio:
        with BZ2File(bio, "w") as bz2f:
            self.assertRaises(TypeError, bz2f.write)
            bz2f.write(self.TEXT)
        self.assertEqual(ext_decompress(bio.getvalue()), self.TEXT)
        self.assertFalse(bio.closed)
def testSeekForwardBytesIO(self):
    """Absolute forward seek works over a wrapped BytesIO source."""
    with BytesIO(self.DATA) as bio:
        with BZ2File(bio) as bz2f:
            self.assertRaises(TypeError, bz2f.seek)
            bz2f.seek(150)
            self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwardsBytesIO(self):
    """Relative backwards seek works over a wrapped BytesIO source."""
    with BytesIO(self.DATA) as bio:
        with BZ2File(bio) as bz2f:
            bz2f.read(500)
            bz2f.seek(-150, 1)
            self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def test_read_truncated(self):
    """Reading truncated data raises EOFError once the valid prefix is consumed."""
    # Drop the eos_magic field (6 bytes) and CRC (4 bytes).
    truncated = self.DATA[:-10]
    with BZ2File(BytesIO(truncated)) as f:
        self.assertRaises(EOFError, f.read)
    with BZ2File(BytesIO(truncated)) as f:
        self.assertEqual(f.read(len(self.TEXT)), self.TEXT)
        self.assertRaises(EOFError, f.read, 1)
    # Incomplete 4-byte file header, and block header of at least 146 bits.
    for i in range(22):
        with BZ2File(BytesIO(truncated[:i])) as f:
            self.assertRaises(EOFError, f.read, 1)
def test_issue44439(self):
    """write() of a multi-byte-item buffer returns byte count, not item count (bpo-44439)."""
    q = array.array('Q', [1, 2, 3, 4, 5])
    LENGTH = len(q) * q.itemsize
    with BZ2File(BytesIO(), 'w') as f:
        self.assertEqual(f.write(q), LENGTH)
        self.assertEqual(f.tell(), LENGTH)
class BZ2CompressorTest(BaseTest):
    """Tests for the incremental BZ2Compressor object."""

    def testCompress(self):
        """compress() + flush() round-trips TEXT through an external decompressor."""
        bz2c = BZ2Compressor()
        self.assertRaises(TypeError, bz2c.compress)
        data = bz2c.compress(self.TEXT)
        data += bz2c.flush()
        self.assertEqual(ext_decompress(data), self.TEXT)

    def testCompressEmptyString(self):
        """Compressing b'' yields the canonical empty-stream payload."""
        bz2c = BZ2Compressor()
        data = bz2c.compress(b'')
        data += bz2c.flush()
        self.assertEqual(data, self.EMPTY_DATA)

    def testCompressChunks10(self):
        """Feeding the input in 10-byte chunks produces an equivalent stream."""
        bz2c = BZ2Compressor()
        n = 0
        data = b''
        while True:
            # Renamed from `str`, which shadowed the builtin.
            chunk = self.TEXT[n*10:(n+1)*10]
            if not chunk:
                break
            data += bz2c.compress(chunk)
            n += 1
        data += bz2c.flush()
        self.assertEqual(ext_decompress(data), self.TEXT)

    @support.skip_if_pgo_task
    @bigmemtest(size=_4G + 100, memuse=2)
    def testCompress4G(self, size):
        # "Test BZ2Compressor.compress()/flush() with >4GiB input"
        bz2c = BZ2Compressor()
        data = b"x" * size
        try:
            compressed = bz2c.compress(data)
            compressed += bz2c.flush()
        finally:
            data = None  # Release memory
        data = bz2.decompress(compressed)
        try:
            self.assertEqual(len(data), size)
            self.assertEqual(len(data.strip(b"x")), 0)
        finally:
            data = None

    def testPickle(self):
        """Compressor objects hold C-level state and must refuse to pickle."""
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.assertRaises(TypeError):
                pickle.dumps(BZ2Compressor(), proto)
class BZ2DecompressorTest(BaseTest):
    """Tests for the incremental BZ2Decompressor object."""

    def test_Constructor(self):
        """The constructor takes no positional arguments."""
        self.assertRaises(TypeError, BZ2Decompressor, 42)

    def testDecompress(self):
        """Decompressing the whole payload at once yields TEXT."""
        bz2d = BZ2Decompressor()
        self.assertRaises(TypeError, bz2d.decompress)
        text = bz2d.decompress(self.DATA)
        self.assertEqual(text, self.TEXT)

    def testDecompressChunks10(self):
        """Feeding the payload in 10-byte chunks yields the same output."""
        bz2d = BZ2Decompressor()
        text = b''
        n = 0
        while True:
            # Renamed from `str`, which shadowed the builtin.
            chunk = self.DATA[n*10:(n+1)*10]
            if not chunk:
                break
            text += bz2d.decompress(chunk)
            n += 1
        self.assertEqual(text, self.TEXT)

    def testDecompressUnusedData(self):
        """Bytes past the end of the stream are exposed via unused_data."""
        bz2d = BZ2Decompressor()
        unused_data = b"this is unused data"
        text = bz2d.decompress(self.DATA+unused_data)
        self.assertEqual(text, self.TEXT)
        self.assertEqual(bz2d.unused_data, unused_data)

    def testEOFError(self):
        """Feeding data after end-of-stream raises EOFError."""
        bz2d = BZ2Decompressor()
        text = bz2d.decompress(self.DATA)
        self.assertRaises(EOFError, bz2d.decompress, b"anything")
        self.assertRaises(EOFError, bz2d.decompress, b"")

    @support.skip_if_pgo_task
    @bigmemtest(size=_4G + 100, memuse=3.3)
    def testDecompress4G(self, size):
        # "Test BZ2Decompressor.decompress() with >4GiB input"
        blocksize = 10 * 1024 * 1024
        block = random.randbytes(blocksize)
        try:
            data = block * (size // blocksize + 1)
            compressed = bz2.compress(data)
            bz2d = BZ2Decompressor()
            decompressed = bz2d.decompress(compressed)
            self.assertTrue(decompressed == data)
        finally:
            data = None
            compressed = None
            decompressed = None

    def testPickle(self):
        """Decompressor objects hold C-level state and must refuse to pickle."""
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.assertRaises(TypeError):
                pickle.dumps(BZ2Decompressor(), proto)

    def testDecompressorChunksMaxsize(self):
        """max_length bounds each decompress() result; needs_input tracks the buffer."""
        bzd = BZ2Decompressor()
        max_length = 100
        out = []
        # Feed some input
        len_ = len(self.BIG_DATA) - 64
        out.append(bzd.decompress(self.BIG_DATA[:len_],
                                  max_length=max_length))
        self.assertFalse(bzd.needs_input)
        self.assertEqual(len(out[-1]), max_length)
        # Retrieve more data without providing more input
        out.append(bzd.decompress(b'', max_length=max_length))
        self.assertFalse(bzd.needs_input)
        self.assertEqual(len(out[-1]), max_length)
        # Retrieve more data while providing more input
        out.append(bzd.decompress(self.BIG_DATA[len_:],
                                  max_length=max_length))
        self.assertLessEqual(len(out[-1]), max_length)
        # Retrieve remaining uncompressed data
        while not bzd.eof:
            out.append(bzd.decompress(b'', max_length=max_length))
            self.assertLessEqual(len(out[-1]), max_length)
        out = b"".join(out)
        self.assertEqual(out, self.BIG_TEXT)
        self.assertEqual(bzd.unused_data, b"")

    def test_decompressor_inputbuf_1(self):
        # Test reusing input buffer after moving existing
        # contents to beginning
        bzd = BZ2Decompressor()
        out = []
        # Create input buffer and fill it
        self.assertEqual(bzd.decompress(self.DATA[:100],
                                        max_length=0), b'')
        # Retrieve some results, freeing capacity at beginning
        # of input buffer
        out.append(bzd.decompress(b'', 2))
        # Add more data that fits into input buffer after
        # moving existing data to beginning
        out.append(bzd.decompress(self.DATA[100:105], 15))
        # Decompress rest of data
        out.append(bzd.decompress(self.DATA[105:]))
        self.assertEqual(b''.join(out), self.TEXT)

    def test_decompressor_inputbuf_2(self):
        # Test reusing input buffer by appending data at the
        # end right away
        bzd = BZ2Decompressor()
        out = []
        # Create input buffer and empty it
        self.assertEqual(bzd.decompress(self.DATA[:200],
                                        max_length=0), b'')
        out.append(bzd.decompress(b''))
        # Fill buffer with new data
        out.append(bzd.decompress(self.DATA[200:280], 2))
        # Append some more data, not enough to require resize
        out.append(bzd.decompress(self.DATA[280:300], 2))
        # Decompress rest of data
        out.append(bzd.decompress(self.DATA[300:]))
        self.assertEqual(b''.join(out), self.TEXT)

    def test_decompressor_inputbuf_3(self):
        # Test reusing input buffer after extending it
        bzd = BZ2Decompressor()
        out = []
        # Create almost full input buffer
        out.append(bzd.decompress(self.DATA[:200], 5))
        # Add even more data to it, requiring resize
        out.append(bzd.decompress(self.DATA[200:300], 5))
        # Decompress rest of data
        out.append(bzd.decompress(self.DATA[300:]))
        self.assertEqual(b''.join(out), self.TEXT)

    def test_failure(self):
        """Corrupt input fails cleanly, and the object stays usable to fail again."""
        bzd = BZ2Decompressor()
        self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)
        # Previously, a second call could crash due to internal inconsistency
        self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)

    @support.refcount_test
    def test_refleaks_in___init__(self):
        """Re-running __init__ repeatedly must not leak references."""
        gettotalrefcount = support.get_attribute(sys, 'gettotalrefcount')
        bzd = BZ2Decompressor()
        refs_before = gettotalrefcount()
        for i in range(100):
            bzd.__init__()
        self.assertAlmostEqual(gettotalrefcount() - refs_before, 0, delta=10)
class CompressDecompressTest(BaseTest):
    """Tests for the one-shot module-level bz2.compress()/bz2.decompress()."""

    def testCompress(self):
        """One-shot compress round-trips through an external decompressor."""
        data = bz2.compress(self.TEXT)
        self.assertEqual(ext_decompress(data), self.TEXT)

    def testCompressEmptyString(self):
        """Compressing b'' yields the canonical empty-stream payload."""
        text = bz2.compress(b'')
        self.assertEqual(text, self.EMPTY_DATA)

    def testDecompress(self):
        """One-shot decompress of a single stream."""
        text = bz2.decompress(self.DATA)
        self.assertEqual(text, self.TEXT)

    def testDecompressEmpty(self):
        """Decompressing empty input yields empty output."""
        text = bz2.decompress(b"")
        self.assertEqual(text, b"")

    def testDecompressToEmptyString(self):
        """Decompressing the empty-stream payload yields b''."""
        text = bz2.decompress(self.EMPTY_DATA)
        self.assertEqual(text, b'')

    def testDecompressIncomplete(self):
        """Truncated input raises ValueError."""
        self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])

    def testDecompressBadData(self):
        """Corrupt input raises OSError."""
        self.assertRaises(OSError, bz2.decompress, self.BAD_DATA)

    def testDecompressMultiStream(self):
        """All concatenated streams are decompressed."""
        text = bz2.decompress(self.DATA * 5)
        self.assertEqual(text, self.TEXT * 5)

    def testDecompressTrailingJunk(self):
        """Junk after a valid stream is ignored."""
        text = bz2.decompress(self.DATA + self.BAD_DATA)
        self.assertEqual(text, self.TEXT)

    def testDecompressMultiStreamTrailingJunk(self):
        """Junk after multiple valid streams is ignored."""
        text = bz2.decompress(self.DATA * 5 + self.BAD_DATA)
        self.assertEqual(text, self.TEXT * 5)
class OpenTest(BaseTest):
    "Test the open function."

    def open(self, *args, **kwargs):
        # Indirection point so subclasses can test other open() variants.
        return bz2.open(*args, **kwargs)

    def test_binary_modes(self):
        """Explicit binary modes: wb/xb write, rb reads, ab appends."""
        for mode in ("wb", "xb"):
            if mode == "xb":
                unlink(self.filename)
            with self.open(self.filename, mode) as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read())
                self.assertEqual(file_data, self.TEXT)
            with self.open(self.filename, "rb") as f:
                self.assertEqual(f.read(), self.TEXT)
            with self.open(self.filename, "ab") as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read())
                self.assertEqual(file_data, self.TEXT * 2)

    def test_implicit_binary_modes(self):
        # Test implicit binary modes (no "b" or "t" in mode string).
        for mode in ("w", "x"):
            if mode == "x":
                unlink(self.filename)
            with self.open(self.filename, mode) as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read())
                self.assertEqual(file_data, self.TEXT)
            with self.open(self.filename, "r") as f:
                self.assertEqual(f.read(), self.TEXT)
            with self.open(self.filename, "a") as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read())
                self.assertEqual(file_data, self.TEXT * 2)

    def test_text_modes(self):
        """Text modes translate newlines to os.linesep on the wire."""
        text = self.TEXT.decode("ascii")
        text_native_eol = text.replace("\n", os.linesep)
        for mode in ("wt", "xt"):
            if mode == "xt":
                unlink(self.filename)
            with self.open(self.filename, mode) as f:
                f.write(text)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read()).decode("ascii")
                self.assertEqual(file_data, text_native_eol)
            with self.open(self.filename, "rt") as f:
                self.assertEqual(f.read(), text)
            with self.open(self.filename, "at") as f:
                f.write(text)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read()).decode("ascii")
                self.assertEqual(file_data, text_native_eol * 2)

    def test_x_mode(self):
        """Exclusive-creation modes fail with FileExistsError on a second open."""
        for mode in ("x", "xb", "xt"):
            unlink(self.filename)
            with self.open(self.filename, mode) as f:
                pass
            with self.assertRaises(FileExistsError):
                with self.open(self.filename, mode) as f:
                    pass

    def test_fileobj(self):
        """open() accepts an existing file object in r/rb/rt modes."""
        with self.open(BytesIO(self.DATA), "r") as f:
            self.assertEqual(f.read(), self.TEXT)
        with self.open(BytesIO(self.DATA), "rb") as f:
            self.assertEqual(f.read(), self.TEXT)
        text = self.TEXT.decode("ascii")
        with self.open(BytesIO(self.DATA), "rt") as f:
            self.assertEqual(f.read(), text)

    def test_bad_params(self):
        # Test invalid parameter combinations.
        self.assertRaises(ValueError,
                          self.open, self.filename, "wbt")
        self.assertRaises(ValueError,
                          self.open, self.filename, "xbt")
        self.assertRaises(ValueError,
                          self.open, self.filename, "rb", encoding="utf-8")
        self.assertRaises(ValueError,
                          self.open, self.filename, "rb", errors="ignore")
        self.assertRaises(ValueError,
                          self.open, self.filename, "rb", newline="\n")

    def test_encoding(self):
        # Test non-default encoding.
        text = self.TEXT.decode("ascii")
        text_native_eol = text.replace("\n", os.linesep)
        with self.open(self.filename, "wt", encoding="utf-16-le") as f:
            f.write(text)
        with open(self.filename, "rb") as f:
            file_data = ext_decompress(f.read()).decode("utf-16-le")
            self.assertEqual(file_data, text_native_eol)
        with self.open(self.filename, "rt", encoding="utf-16-le") as f:
            self.assertEqual(f.read(), text)

    def test_encoding_error_handler(self):
        # Test with non-default encoding error handler.
        with self.open(self.filename, "wb") as f:
            f.write(b"foo\xffbar")
        with self.open(self.filename, "rt", encoding="ascii", errors="ignore") as f:
            self.assertEqual(f.read(), "foobar")

    def test_newline(self):
        # Test with explicit newline (universal newline mode disabled).
        text = self.TEXT.decode("ascii")
        with self.open(self.filename, "wt", newline="\n") as f:
            f.write(text)
        with self.open(self.filename, "rt", newline="\r") as f:
            # "\r" never occurs in the data, so everything is one "line".
            self.assertEqual(f.readlines(), [text])
def test_main():
    """Run all test classes in this module and reap any spawned children."""
    # NOTE(review): support.run_unittest is the legacy test driver; modern
    # CPython test files use unittest.main()/load_tests instead — confirm
    # which regrtest API this tree targets before migrating.
    support.run_unittest(
        BZ2FileTest,
        BZ2CompressorTest,
        BZ2DecompressorTest,
        CompressDecompressTest,
        OpenTest,
    )
    support.reap_children()

# Script entry point.
if __name__ == '__main__':
    test_main()
|
test_lib_test.py | #!/usr/bin/env python
import threading
import time
import unittest
from grr.lib import rdfvalue
from grr.test_lib import test_lib
class FakeTimelineTest(unittest.TestCase):
    """Tests for test_lib.FakeTimeline, a controller that advances simulated
    time for a thread whose time.sleep() calls are intercepted."""

    def testRunSingleSleep(self):
        """A single repeated sleep(10) fires once per elapsed 10 simulated seconds."""
        log = []

        def foo():
            while True:
                log.append("foo")
                time.sleep(10)

        with test_lib.FakeTimeline(threading.Thread(target=foo)) as foo_timeline:
            log.append("start")
            foo_timeline.Run(duration=rdfvalue.Duration("5s"))
            log.append("5 seconds have passed")
            foo_timeline.Run(duration=rdfvalue.Duration("3s"))
            log.append("3 seconds have passed")
            foo_timeline.Run(duration=rdfvalue.Duration("4s"))
            log.append("4 seconds have passed")
            foo_timeline.Run(duration=rdfvalue.Duration("22s"))
            log.append("22 seconds have passed")

        self.assertEqual(log, [
            "start",
            "foo",
            "5 seconds have passed",
            "3 seconds have passed",
            "foo",
            "4 seconds have passed",
            "foo",
            "foo",
            "22 seconds have passed",
        ])

    def testRunMultiSleep(self):
        """Alternating sleeps of different lengths interleave with Run() windows."""
        log = []

        def barz():
            while True:
                time.sleep(10)
                log.append("bar")
                time.sleep(5)
                log.append("baz")

        with test_lib.FakeTimeline(threading.Thread(target=barz)) as barz_timeline:
            log.append("start")
            barz_timeline.Run(duration=rdfvalue.Duration("5s"))
            log.append("5 seconds have passed")
            barz_timeline.Run(duration=rdfvalue.Duration("7s"))
            log.append("7 seconds have passed")
            barz_timeline.Run(duration=rdfvalue.Duration("1s"))
            log.append("1 second has passed")
            barz_timeline.Run(duration=rdfvalue.Duration("3s"))
            log.append("3 seconds have passed")
            barz_timeline.Run(duration=rdfvalue.Duration("20s"))
            log.append("20 seconds have passed")

        self.assertEqual(log, [
            "start",
            "5 seconds have passed",
            "bar",
            "7 seconds have passed",
            "1 second has passed",
            "baz",
            "3 seconds have passed",
            "bar",
            "baz",
            "20 seconds have passed",
        ])

    def testRunSleepZero(self):
        """sleep(0) does not consume simulated time; the thread runs to completion."""
        log = []

        def norf():
            time.sleep(0)
            log.append("norf")
            time.sleep(0)
            log.append("norf")
            time.sleep(0)
            log.append("norf")

        with test_lib.FakeTimeline(threading.Thread(target=norf)) as norf_timeline:
            log.append("start")
            norf_timeline.Run(duration=rdfvalue.Duration("0s"))
            log.append("rest")
            norf_timeline.Run(duration=rdfvalue.Duration("0s"))
            log.append("stop")

        self.assertEqual(log, [
            "start",
            "norf",
            "norf",
            "norf",
            "rest",
            "stop",
        ])

    def testRunException(self):
        """An exception in the thread surfaces from the Run() that reaches it."""
        log = []

        def quux():
            time.sleep(10)
            log.append("foo")
            time.sleep(10)
            raise Exception("bar")

        with test_lib.FakeTimeline(threading.Thread(target=quux)) as quux_timeline:
            log.append("start")
            quux_timeline.Run(duration=rdfvalue.Duration("6s"))
            log.append("6 seconds have passed")
            quux_timeline.Run(duration=rdfvalue.Duration("5s"))
            log.append("5 seconds have passed")
            quux_timeline.Run(duration=rdfvalue.Duration("7s"))
            log.append("7 seconds have passed")

            self.assertEqual(log, [
                "start",
                "6 seconds have passed",
                "foo",
                "5 seconds have passed",
                "7 seconds have passed",
            ])

            # NOTE(review): assertRaisesRegexp is the deprecated Python 2
            # spelling of assertRaisesRegex.
            with self.assertRaisesRegexp(Exception, "bar"):
                quux_timeline.Run(duration=rdfvalue.Duration("10s"))

    def testNoRuns(self):
        """Without any Run() calls the thread never executes."""
        log = []

        def thud():
            log.append("thud")

        with test_lib.FakeTimeline(threading.Thread(target=thud)):
            pass

        self.assertEqual(log, [])

    def testRunAfterFinish(self):
        """Run() after the thread finished is a harmless no-op."""
        log = []

        def moof():
            log.append("moof")

        with test_lib.FakeTimeline(threading.Thread(target=moof)) as moof_timeline:
            moof_timeline.Run(duration=rdfvalue.Duration("10s"))
            moof_timeline.Run(duration=rdfvalue.Duration("20s"))
            moof_timeline.Run(duration=rdfvalue.Duration("30s"))

        self.assertEqual(log, ["moof"])

    def testRunWithoutContext(self):
        """Run() outside the context manager is an assertion error."""
        weez_timeline = test_lib.FakeTimeline(threading.Thread(target=lambda: None))
        with self.assertRaisesRegexp(AssertionError, "called without context"):
            weez_timeline.Run(duration=rdfvalue.Duration("10s"))

    def testReuse(self):
        """A FakeTimeline cannot be entered a second time."""
        log = []

        def blargh():
            log.append("blargh")

        blargh_timeline = test_lib.FakeTimeline(threading.Thread(target=blargh))
        with blargh_timeline:
            blargh_timeline.Run(duration=rdfvalue.Duration("5s"))
        self.assertEqual(log, ["blargh"])
        with self.assertRaisesRegexp(AssertionError, "cannot be reused"):
            with blargh_timeline:
                blargh_timeline.Run(duration=rdfvalue.Duration("10s"))

    def testTimePassage(self):
        """Simulated wall-clock time (RDFDatetime.Now) advances with sleeps."""
        log = []

        def fhesh():
            log.append(rdfvalue.RDFDatetime.Now().Format("%Y-%m-%d"))
            time.sleep(rdfvalue.Duration("2d").seconds)
            log.append(rdfvalue.RDFDatetime.Now().Format("%Y-%m-%d"))
            time.sleep(rdfvalue.Duration("15s").seconds)
            log.append(rdfvalue.RDFDatetime.Now().Format("%Y-%m-%d %H:%M:%S"))
            time.sleep(rdfvalue.Duration("20m").seconds)
            log.append(rdfvalue.RDFDatetime.Now().Format("%Y-%m-%d %H:%M:%S"))

        fhesh_timeline = test_lib.FakeTimeline(
            thread=threading.Thread(target=fhesh),
            now=rdfvalue.RDFDatetime.FromHumanReadable("2077-01-01"))

        with fhesh_timeline:
            log.append("beep (0)")
            fhesh_timeline.Run(duration=rdfvalue.Duration("10s"))
            log.append("beep (1)")
            fhesh_timeline.Run(duration=rdfvalue.Duration("10s"))
            log.append("beep (2)")
            fhesh_timeline.Run(duration=rdfvalue.Duration("2d"))
            log.append("beep (3)")
            fhesh_timeline.Run(duration=rdfvalue.Duration("10s"))
            log.append("beep (4)")
            fhesh_timeline.Run(duration=rdfvalue.Duration("30m"))
            log.append("beep (5)")

        self.assertEqual(log, [
            "beep (0)",
            "2077-01-01",
            "beep (1)",
            "beep (2)",
            "2077-01-03",
            "2077-01-03 00:00:15",
            "beep (3)",
            "beep (4)",
            "2077-01-03 00:20:15",
            "beep (5)",
        ])
# Script entry point.
if __name__ == "__main__":
    unittest.main()
|
2as.py | # -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import time,random,sys,json,codecs,threading,glob,re,os,subprocess
# Log in three LINE client instances via QR code: `cl` is the primary
# account, `ki` and `ki2` are helper bots.  (Python 2 script.)
cl = LINETCR.LINE()
cl.login(qr=True)
cl.loginResult()
ki = LINETCR.LINE()
ki.login(qr=True)
ki.loginResult()
ki2 = LINETCR.LINE()
ki2.login(qr=True)
ki2.loginResult()
print "login success plak"
# Python 2 only: force UTF-8 as the default encoding for the emoji-heavy
# message strings below.
reload(sys)
sys.setdefaultencoding('utf-8')
# Decorated help menu sent to users on request, and the group-settings banner.
helpMessage ="""╔═════════════
║𖤓≛≛≛≛≛≛≛≛≛≛≛≛≛𖤓
║ ✰Amii ら૯ՆԲც૦੮✰
║𖤓≛≛≛≛≛≛≛≛≛≛≛≛≛𖤓
║╔════════════
║╠[1]Status
║╠[2]Bot?
║╠[3]Respon
║╠[4]Cctv→Ciduk
║╠[5]Tagall
║╠[6]Banlist
║╠[7]Me
║╠[8]Info group
║╠[9]Cancel
║╠[10]Open/Close Qr
║╠[11]Gurl
║╠[12]Gn
║╠[13]Mid @
║╠[14]Nk @
║╠[15]Qr on/off
║╠[16]Cancel on/off
║╠[17]Join on/off
║╠[18]Share on/off
║╠[19]Bot Add @
║╠[20]Bc
║╠[21]Spam
║╠[22]Bot1/2 rename
║╠[23]Allbio:
║╠[24]Copy←→Backup
║╠[25]List group
║╠[26]SpamInvite
║╠[27]Ban all
║╠[28]Clear ban
║╠[29]Masuk
║╠[30]Keluar
║║★And More★
║╚════════════
║𖤓≛≛≛≛≛≛≛≛≛≛≛≛≛𖤓
║ Amii ら૯ՆԲც૦੮
║𖤓≛≛≛≛≛≛≛≛≛≛≛≛≛𖤓
╚═════════════"""
Setgroup ="""
Amii ら૯ՆԲც૦੮"""
# All logged-in client instances, their account mids, and the access-control
# lists (owner/admin mids are exempt from the protection kicks below).
KAC=[cl,ki,ki2]
mid = cl.getProfile().mid
kimid = ki.getProfile().mid
ki2mid = ki2.getProfile().mid
Bots=[mid,kimid,ki2mid]
owner =["ub5ae780d74acdd2c05b750ef7fb4ae31"]
admin = ["ub5ae780d74acdd2c05b750ef7fb4ae31"]
# Runtime feature flags and state toggled by chat commands.
# NOTE(review): both "Protectcancl" and "Protectcancel" exist and gate
# different op handlers (invite-cancel vs. cancel-kick) — confirm the near-
# duplicate names are intentional before renaming either.
wait = {
    'contact':False,
    'autoJoin':True,
    'autoCancel':{"on":True,"members":1},
    'leaveRoom':True,
    'timeline':True,
    'autoAdd':True,
    'message':"""тerima Kasih Sudah Menambahkan Aku Jadi Teman
≫ Aku Ga Jawab PM Karna aq Cuma Bot ≪
≫ ૦Ո૯ ƿɿ૯८૯ ら૯ՆԲც૦੮ ≪
Ready:
≫ Bot Protect ≪
≫ SelfBot ≪
ṡȗƿƿȏяṭєԀ ɞʏ:
☆ ૦Ո૯ ƿɿ૯८૯ ੮૯คɱ ☆""",
    "lang":"JP",
    "comment":"Thanks for add me",
    "commentOn":False,
    "commentBlack":{},
    "wblack":False,
    "dblack":False,
    "clock":False,
    "cName":"",
    "blacklist":{},
    "wblacklist":False,
    "dblacklist":False,
    "Protectgr":True,
    "Protectjoin":False,
    "Protectcancl":True,
    "Protectcancel":True,
    "protectionOn":True,
    "atjointicket":True
}
# Secondary state: read-receipt tracking, copy-profile mode and targets.
wait2 = {
    'readPoint':{},
    'readMember':{},
    'setTime':{},
    'ROM':{},
    'copy':False,
    'target':{},
    'midsTarget':{}
}
# Alias the read-receipt time table, and snapshot the primary account's
# profile so it can be restored after a profile-copy command.
setTime = {}
setTime = wait2['setTime']
contact = cl.getProfile()
backup = cl.getProfile()
# Fixed typo: was `backup.dispalyName`, which stored the name under a junk
# attribute and left the real displayName field unset on restore.
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
def sendMessage(to, text, contentMetadata={}, contentType=0):
    """Build a LINE Message addressed to *to* and bump the per-chat counter.

    NOTE(review): this function looks broken/incomplete as written:
    - `profile` and `messageReq` are not defined anywhere in this file's
      visible scope, so calling it would raise NameError;
    - the constructed `mes` is never actually sent or returned;
    - the mutable default `contentMetadata={}` is shared across calls.
    The transport call it should end with lives outside this view — confirm
    against the LINETCR client API before fixing.
    """
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
    """Return True if *string* exactly equals one of *commands*, else None.

    The original scanned *commands* once per entry of a 13-element prefix
    list whose loop variable shadowed the list and was never used — dead
    code that made this accidentally O(13*n).  The prefix list was
    presumably intended for prefix matching (e.g. "/cmd"), but the code
    never implemented it; behavior is preserved exactly (True on match,
    implicit None otherwise).
    """
    if string in commands:
        return True
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
#------Protect Group Kick start------#
if op.type == 11:
if wait["Protectgr"] == True:
if cl.getGroup(op.param1).preventJoinByTicket == False:
if op.param2 in Bots:
pass
elif op.param2 in owner:
pass
elif op.param2 in admin:
pass
else:
try:
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "Jangan Buka Kode QR Kk")
cl.kickoutFromGroup(op.param1,[op.param2])
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n" + "Kami Masukin Kedalam Blacklis Boss")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
random.choice(KAC).sendText(op.param1,random.choice(KAC).getContact(op.param2).displayName + "Jangan Buka Kode QR Njiiir")
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = True
random.choice(KAC).updateGroup(X)
random.choice(KAC).sendText(op.param1,random.choice(KAC).getContact(op.param2).displayName + "\n" + "Kami Masukin Kedalam Blacklis Boss")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
#------Protect Group Kick finish-----#
#------Cancel Invite User start------#
if op.type == 13:
if wait["Protectcancl"] == True:
group = cl.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
if op.param2 not in Bots:
if op.param2 in Bots:
pass
else:
try:
cl.cancelGroupInvitation(op.param1, gMembMids)
cl.sendText(op.param1, "Mau Invite Siapa Plak ??? \nJangan Sok Jadi Jagoan Deh Lu Njir.\nAdmin Bukan,Owner Juga Bukan\Kick Ah 😛")
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
random.choice(KAC).cancelGroupInvitation(op.param1, gMembMids)
random.choice(KAC).sendText(op.param1, "Mau Invite Siapa Plak ??? \nJangan Sok Jadi Jagoan Deh Lu Njir.\nAdmin Bukan,Owner Juga Bukan\Kick Ah 😛")
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
#------Cancel Invite User Finish------#
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots:
cl.acceptGroupInvitation(op.param1)
else:
cl.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if kimid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots:
ki.acceptGroupInvitation(op.param1)
else:
ki.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if ki2mid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots:
ki2.acceptGroupInvitation(op.param1)
else:
ki2.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
#------Joined User Kick start------#
if op.type == 17:
if wait["Protectjoin"] == True:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif op.param2 in admin:
pass
elif op.param2 in owner:
pass
else:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
cl.sendText(op.param1, "Protect Join nya On Boss\nMatiin dulu kalo mau Ada yang Gabung\nJoinn on/off")
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
cl.sendText(op.param1, "Protect Join nya On Boss\nMatiin dulu kalo mau Ada yang Gabung\nJoinn on/off")
#------Joined User Kick start------#
if op.type == 32: #Yang Cancel Invitan langsung ke kick
if wait["Protectcancel"] == True:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif op.param2 in admin:
pass
elif op.param2 in owner:
pass
else:
random.choice(KAC).sendText(op.param1, "Jangan Sok Jadi Jagoan Deh Lu Njir.\nAdmin Bukan,Owner Juga Bukan\Kick Ah 😛")
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.type == 19:
if op.param2 not in Bots:
if op.param3 in mid:
if op.param2 not in Bots:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.001)
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
wait["blacklist"][op.param2] = True
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = False
random.choice(KAC).updateGroup(G)
Ticket = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.001)
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
wait["blacklist"][op.param2] = True
if op.param3 in kimid:
if op.param2 not in Bots:
try:
G = cl.getGroup(op.param1)
cl.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.001)
G.preventJoinByTicket = True
cl.updateGroup(G)
wait["blacklist"][op.param2] = True
except:
G = random.choice(KAC).getGroup(op.param1) #Sanji Bertindak
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
random.choice(KAC).updateGroup(G)
Ticket = random.choice(KAC).reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
wait["blacklist"][op.param2] = True
if op.param3 in ki2mid:
if op.param2 not in Bots:
try:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
ki.updateGroup(G)
wait["blacklist"][op.param2] = True
except:
G = random.choice(KAC).getGroup(op.param1) #Sanji Bertindak
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
random.choice(KAC).updateGroup(G)
Ticket = random.choice(KAC).reissueGroupTicket(op.param1)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
wait["blacklist"][op.param2] = True
#--------------------------------
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 25:
msg = op.message
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata("line://home/post?userMid="+mid+"&postId="+"new_post")
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
ki2.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
ki2.sendText(msg.to,"It is not in the black list")
#ki3.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already")
ki.sendText(msg.to,"already")
ki2.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"aded")
ki.sendText(msg.to,"aded")
ki2.sendText(msg.to,"aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
ki2.sendText(msg.to,"deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
ki2.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Key","help","Help"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Admin menu"]:
#if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,Setgroup)
else:
cl.sendText(msg.to,Sett)
elif ("Gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif ("Bot1 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Cv1 gn ","")
ki.updateGroup(X)
else:
ki.sendText(msg.to,"It can't be used besides the group.")
elif ("Bot2 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Cv2 gn ","")
ki2.updateGroup(X)
else:
ki2.sendText(msg.to,"It can't be used besides the group.")
elif "Kick " in msg.text:
midd = msg.text.replace("Kick ","")
random.choice(KAC).kickoutFromGroup(msg.to,[midd])
elif "Bot1 kick " in msg.text:
midd = msg.text.replace("Bot1 kick ","")
ki.kickoutFromGroup(msg.to,[midd])
elif "Bot2 kick " in msg.text:
#if msg.from_ in admin:
midd = msg.text.replace("Bot2 kick ","")
ki2.kickoutFromGroup(msg.to,[midd])
elif "Invite " in msg.text:
#if msg.from_ in admin:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif "Bot1 invite " in msg.text:
#if msg.from_ in admin:
midd = msg.text.replace("Bot1 invite ","")
ki.findAndAddContactsByMid(midd)
ki.inviteIntoGroup(msg.to,[midd])
elif "Bot2 invite " in msg.text:
#if msg.from_ in admin:
midd = msg.text.replace("Bot2 invite ","")
ki2.findAndAddContactsByMid(midd)
ki2.inviteIntoGroup(msg.to,[midd])
elif "Bot invite " in msg.text:
#if msg.from_ in admin:
midd = msg.text.replace("Bot invite ","")
random.choice(KAC).findAndAddContactsByMid(midd)
random.choice(KAC).inviteIntoGroup(msg.to,[midd])
#--------------- SC Add Admin ---------
elif "Admin add @" in msg.text:
print "[Command]Staff add executing"
_name = msg.text.replace("Admin add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"Admin Ditambahkan")
except:
pass
#print "[Command]Staff add executed"
#else:
#cl.sendText(msg.to,"Command denied.")
#cl.sendText(msg.to,"Admin permission required.")
elif "Admin remove @" in msg.text:
print "[Command]Staff remove executing"
_name = msg.text.replace("Admin remove @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
#gs = k1.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Admin Dihapus")
except:
pass
#print "[Command]Staff remove executed"
#else:
#cl.sendText(msg.to,"Command denied.")
#cl.sendText(msg.to,"Admin permission required.")
elif msg.text in ["Adminlist","adminlist"]:
if admin == []:
cl.sendText(msg.to,"The stafflist is empty")
else:
cl.sendText(msg.to,"Tunggu...")
mc = "||Admin ૦Ո૯ ƿɿ૯८૯ ら૯ՆԲც૦੮||\n=====================\n"
for mi_d in admin:
mc += "••>" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#--------------------------------------
#-------------- Add Friends ------------
elif "Bot Add @" in msg.text:
if msg.toType == 2:
print "[Command]Add executing"
_name = msg.text.replace("Bot Add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
ki.findAndAddContactsByMid(target)
ki2.findAndAddContactsByMid(target)
except:
cl.sendText(msg.to,"Error")
#else:
#cl.sendText(msg.to,"Perintah Ditolak")
#cl.sendText(msg.to,"Perintah ini Hana Untuk Owner Kami")
#-------------=SC AllBio=----------------
elif "Allbio:" in msg.text:
#if msg.from_ in admin:
string = msg.text.replace("Allbio:","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki2.getProfile()
profile.statusMessage = string
ki2.updateProfile(profile)
#--------------=Finish=----------------
#--------------= SC Ganti nama Owner=--------------
elif "MyName:" in msg.text:
#if msg.from_ in admin:
string = msg.text.replace("MyName:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Name Menjadi : " + string + "")
#-------------- copy profile----------
elif "Spam " in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
#@reno.a.w
if txt[1] == "on":
if jmlh <= 500:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Kelebihan batas:v")
elif txt[1] == "off":
if jmlh <= 900:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Kelebihan batas :v")
#-----------------=Selesai=------------------
elif msg.text in ["All mid"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ki2.sendMessage(msg)
elif msg.text in ["Me"]:
##if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
cl.sendMessage(msg)
elif msg.text in ["Bot1"]:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
elif msg.text in ["愛�プレゼント","Gift"]:
#if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
random.choice(KAC).sendMessage(msg)
elif msg.text in ["愛�プレゼント","All gift"]:
##if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
cl.sendMessage(msg)
ki.sendMessage(msg)
ki2.sendMessage(msg)
elif msg.text in ["Cancel","cancel"]:
##if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["bot cancel","Bot cancel"]:
##if msg.from_ in admin:
if msg.toType == 2:
G = ki.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
ki.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"No one is inviting")
else:
ki.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["Buka qr","Open qr","Ourl","Open","Buka"]:
##if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"QR Sudah Dibuka")
else:
cl.sendText(msg.to,"Sudah Terbuka Boss")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Luffy buka qr","Luffy open qr"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = False
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done Plak")
else:
ki.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Zorro buka qr","Zorro open qr"]:
if msg.toType == 2:
X = ki2.getGroup(msg.to)
X.preventJoinByTicket = False
ki2.updateGroup(X)
if wait["lang"] == "JP":
ki2.sendText(msg.to,"Done Plak")
else:
ki2.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
ki2.sendText(msg.to,"Can not be used outside the group")
else:
ki2.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Tutup qr","Close qr","Curl","Close","Tutup"]:
#if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kode QR Sudah Di Tutup")
else:
cl.sendText(msg.to,"Sudah Tertutup Boss")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif "jointicket " in msg.text.lower():
rplace=msg.text.lower().replace("jointicket ")
if rplace == "on":
wait["atjointicket"]=True
elif rplace == "off":
wait["atjointicket"]=False
cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"]))
elif '/ti/g/' in msg.text.lower():
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(msg.text)
n_links=[]
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
if wait["atjointicket"] == True:
group=cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.mid,ticket_id)
cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name))
elif "Ginfo" == msg.text:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
QR = "Close"
else:
QR = "Open"
cl.sendText(msg.to,"[Group Name]\n" + "[•]" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + "[•]" + gCreator + "\n\n[Group Status]\n" + "[•]Status QR =>" + QR + "\n\n[Group Picture]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "\nPending:" + sinvitee)
else:
cl.sendText(msg.to,"[Group Name]\n" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\n[Group Status]\nGroup Picture:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#elif "My mid" == msg.text:
#cl.sendText(msg.to, msg.from_)
elif "Mid bot" == msg.text:
##if msg.from_ in admin:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,kimid)
ki2.sendText(msg.to,ki2mid)
elif msg.text in ["Wkwkwk","Wkwk","Wk","wkwkwk","wkwk","wk"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
ki2.sendMessage(msg)
ki3.sendMessage(msg)
elif msg.text in ["Hehehe","Hehe","He","hehehe","hehe","he"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
ki2.sendMessage(msg)
elif msg.text in ["Galau"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "9",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
ki2.sendMessage(msg)
elif msg.text in ["You"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
ki2.sendMessage(msg)
elif msg.text in ["Hadeuh"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
ki2.sendMessage(msg)
elif msg.text in ["Please"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
ki2.sendMessage(msg)
elif msg.text in ["Haaa"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
ki2.sendMessage(msg)
elif msg.text in ["Lol"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
ki2.sendMessage(msg)
elif msg.text in ["Hmmm","Hmm","Hm","hmmm","hmm","hm"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Welcome"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
ki.sendMessage(msg)
ki2.sendMessage(msg)
elif msg.text in ["TL: "]:
##if msg.from_ in admin:
tl_text = msg.text.replace("TL: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif msg.text in ["Bot1 rename "]:
#if msg.from_ in admin:
string = msg.text.replace("Bot1 rename ","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Bot2 rename "]:
#if msg.from_ in admin:
string = msg.text.replace("Bot2 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = ki2.getProfile()
profile_B.displayName = string
ki2.updateProfile(profile_B)
ki2.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Mc "]:
#if msg.from_ in admin:
mmid = msg.text.replace("Mc ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
#-------------------- Protect Mode ------------
elif msg.text in ["Allprotect on","Mode on"]:
if wait["Protectjoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"Kick Joined Group On")
else:
wait["Protectjoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Udah On")
else:
cl.sendText(msg.to,"Udah On")
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Invit On")
else:
cl.sendText(msg.to,"Invit on")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Invit On")
if wait["Protectcancel"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Cancel On")
else:
cl.sendText(msg.to,"Cancel on")
else:
wait["Protectcancel"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Cancel On")
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect On")
else:
cl.sendText(msg.to,"Done")
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect On")
else:
cl.sendText(msg.to,"Done")
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Link On")
else:
cl.sendText(msg.to,"Link On")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Link On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Allprotect off","Mode Off"]:
if wait["Protectjoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group Off")
else:
cl.sendText(msg.to,"Kick Joined Gtoup Off�")
else:
wait["Protectjoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Udah Mati Gblk")
else:
cl.sendText(msg.to,"Udah Mati Gblk")
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Invite Off")
else:
cl.sendText(msg.to,"Invite OFF")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Invite Off")
if wait["Protectcancel"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Cancel Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Cancel Off")
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Block Off")
else:
cl.sendText(msg.to,"done")
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Block Off")
else:
cl.sendText(msg.to,"done")
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
#----------------------------------------------
elif msg.text in ["Protect on","protect on"]:
#if msg.from_ in admin:
if wait["Protectjoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"Done")
else:
wait["Protectjoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Protect off","protect off"]:
#if msg.from_ in admin:
if wait["Protectjoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectjoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel on","cancel on"]:
#if msg.from_ in admin:
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel off","cancel off"]:
#if msg.from_ in admin:
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Invite on","invite on"]:
#if msg.from_ in admin:
if wait["Protectcancel"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Kami Kick")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Kami Kick")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel off","cancel off"]:
#if msg.from_ in admin:
if wait["Protectcancel"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Tidak Kami Kick")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Tidak Kami Kick")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr on","qr on"]:
#if msg.from_ in admin:
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr off","qr off"]:
#if msg.from_ in admin:
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact On","Contact on","contact on"]:
#if msg.from_ in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak On")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact Off","Contact off","contact off"]:
#if msg.from_ in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak Off")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オン","Join on","Auto join on","自動å�ƒåŠ ï¼šé–‹"]:
#if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オフ","Join off","Auto join off","自動å�ƒåŠ ï¼šé—œ"]:
#if msg.from_ in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Gcancel:"]:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒�。�时开请指定人数��")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的�组用自动邀请拒�")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["強制自動退出:オン","Leave on","Auto leave:on","強制自動退出:開"]:
#if msg.from_ in admin:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["強制自動退出:オフ","Leave off","Auto leave:off","強制自動退出:關"]:
#if msg.from_ in admin:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["共有:オン","Share on","Share on"]:
#if msg.from_ in admin:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["共有:オフ","Share off","Share off"]:
#if msg.from_ in admin:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["Status","Set","Set view","Cek"]:
#if msg.from_ in admin:
md = "⭐Status Proteksi⭐\n*============*\n"
if wait["Protectcancel"] == True: md+="[•]Protect Cancel [On]\n"
else: md+="[•]Protect Cancel [Off]\n"
if wait["Protectjoin"] == True: md+="[•]Protect Group [On]\n"
else: md+="[•]Protect Group [Off]\n"
if wait["Protectgr"] == True: md+="[•]Protect QR [On]\n"
else: md+="[•]Protect QR [Off]\n"
if wait["Protectcancl"] == True: md+="[•]Protect Invite [On]\n"
else: md+="[•]Protect Invite [Off]\n"
if wait["contact"] == True: md+="[•]Contact [On]\n"
else: md+="[•]Contact [Off]\n"
if wait["autoJoin"] == True: md+="[•]Auto Join [On]\n"
else: md +="[•]Auto Join [Off]\n"
if wait["autoCancel"]["on"] == True:md+="[•]Group Cancel " + str(wait["autoCancel"]["members"]) + "\n"
else: md+= "[•]Group Cancel [Off]\n"
if wait["leaveRoom"] == True: md+="[•]Auto Leave [On]\n"
else: md+=" Auto Leave [Off]\n"
if wait["timeline"] == True: md+="[•]Share [On]\n"
else:md+="[•]Share [Off]\n"
if wait["autoAdd"] == True: md+="[•]Auto Add [On]\n"
else:md+="[•]Auto Add [Off]\n"
if wait["commentOn"] == True: md+="[•]Comment [On]\n"
else:md+="[•]Comment [Off]\n*============*\n✰૦Ո૯ ƿɿ૯८૯ ら૯ՆԲც૦੮✰\n*============*"
cl.sendText(msg.to,md)
elif msg.text in ["Group id","Ginfo"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:\n%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif msg.text in ["Cancelall"]:
#if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"拒�了全部的邀请。")
elif "album removeat’" in msg.text:
gid = msg.text.replace("album removeat’","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Albums deleted")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オン","Add on","Auto add:on","è‡ªå‹•è¿½åŠ ï¼šé–‹"]:
#if msg.from_ in admin:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"Done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オフ","Add off","Auto add:off","è‡ªå‹•è¿½åŠ ï¼šé—œ"]:
#if msg.from_ in admin:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif "Message change: " in msg.text:
wait["message"] = msg.text.replace("Message change: ","")
cl.sendText(msg.to,"message changed")
elif "Message add: " in msg.text:
wait["message"] = msg.text.replace("Message add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed")
else:
cl.sendText(msg.to,"done。")
elif msg.text in ["Message","è‡ªå‹•è¿½åŠ å•�候語確èª�"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"message changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
#---------------------Sc invite owner ke group------
elif "/invitemeto: " in msg.text:
#if msg.from_ in admin:
gid = msg.text.replace("/invitemeto: ","")
if gid == "":
ki.sendText(msg.to,"Invalid group id")
else:
try:
ki.findAndAddContactsByMid(msg.from_)
ki.inviteIntoGroup(gid,[msg.from_])
except:
ki.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
#--------===---====--------------
elif msg.text in ["コメント:オン","Comment on","Comment:on","自動首é �留言:開"]:
#if msg.from_ in admin:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["コメント:オフ","Comment off","comment off","自動首é �留言:關"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["Comment","留言確�"]:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv1 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(msg.to)
ki.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv2 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki2.updateGroup(x)
gurl = ki2.reissueGroupTicket(msg.to)
ki2.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Fungsi Jam on/off Start-------------------#
elif msg.text in ["Jam on"]:
#if msg.from_ in admin:
if wait["clock"] == True:
ki.sendText(msg.to,"Bot 1 jam on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = ki.getProfile()
profile.displayName = wait["cName4"] + nowT
ki.updateProfile(profile)
ki.sendText(msg.to,"Jam Selalu On")
elif msg.text in ["Jam off"]:
#if msg.from_ in admin:
if wait["clock"] == False:
ki.sendText(msg.to,"Bot 4 jam off")
else:
wait["clock"] = False
ki.sendText(msg.to,"Jam Sedang Off")
#-------------Fungsi Jam on/off Finish-------------------#
#-------------Fungsi Change Clock Start------------------#
elif msg.text in ["Change clock"]:
n = msg.text.replace("Change clock","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
#-------------Fungsi Change Clock Finish-----------------#
#-------------Fungsi Jam Update Start---------------------#
elif msg.text in ["Jam Update"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = ki.getProfile()
profile.displayName = wait["cName4"] + nowT
ki.updateProfile(profile)
ki.sendText(msg.to,"Sukses update")
else:
ki.sendText(msg.to,"Aktifkan jam terlebih dulu")
#-------------Fungsi Jam Update Finish-------------------#
elif msg.text == "Cctv":
#if msg.from_ in admin:
cl.sendText(msg.to, "Cek CCTV")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,"%H:%M")
wait2['ROM'][msg.to] = {}
#print wait2
elif msg.text == "Ciduk":
#if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
#print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "||Di Read Oleh||%s\n||By : ✰૦Ո૯ ƿɿ૯८૯ ら૯ՆԲც૦੮✰||\n\n>Pelaku CCTV<\n%s-=CCTV=-\n•Bintitan\n•Panuan\n•Kurapan\n•Kudisan\n\nAmiin Ya Allah\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "Ketik Cctv dulu Koplak\nBaru Ketik Ciduk\nDASAR PIKUN ♪")
#-----------------------------------------------
#-----------------------------------------------
#----------------Fungsi Join Group Start-----------------------#
elif msg.text in ["Masuk","Join all"]:
#if msg.from_ in owner or admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.001)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.001)
H = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
H.preventJoinByTicket = True
cl.updateGroup(H)
print "Semua Sudah Lengkap"
#----------------------Fungsi Join Group Finish---------------#
#-------------Fungsi Leave Group Start---------------#
elif msg.text in ["Cabut","Keluar","Kabur"]:
#if msg.from_ in admin:
if msg.toType == 2:
cl.getGroup(msg.to)
ki.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
#-------------Fungsi Leave Group Finish---------------#
#-------------Fungsi Tag All Start---------------#
elif msg.text in ["Tag all","Tagall"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
cb = ""
cb2 = ""
strt = int(0)
akh = int(0)
for md in nama:
akh = akh + int(6)
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
strt = strt + int(7)
akh = akh + 1
cb2 += "@nrik \n"
cb = (cb[:int(len(cb)-1)])
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
#-------------Fungsi Tag All Finish---------------#
#-------------Tag All Test------------------------#
#-------------------------------------------------#
elif msg.text in ["Bot Like", "Bot like"]:
#if msg.from_ in admin:
print "[Command]Like executed"
cl.sendText(msg.to,"Kami Siap Like Status Owner")
try:
likePost()
except:
pass
elif msg.text in ["Like temen", "Bot like temen"]:
#if msg.from_ in admin:
print "[Command]Like executed"
cl.sendText(msg.to,"Kami Siap Like Status Teman Boss")
try:
autolike()
except:
pass
#----------------Fungsi Banned Kick Target Start-----------------------#
elif msg.text in ["Kill "]:
#if msg.from_ in admin:
if msg.toType == 2:
group = random.choice(KAC).getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"Selamat tinggal")
random.choice(KAC).sendText(msg.to,"Jangan masuk lagidevil smile")
return
for jj in matched_list:
try:
klist=[ki,ki2]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#----------------Fungsi Banned Kick Target Finish----------------------#
#------------ Copy & Backup -------------#
elif msg.text in ["Backup","backup"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to,"Backup done")
except Exception as e:
cl.sendText(msg.to, str (e))
elif "Copy @" in msg.text:
if msg.toType == 2:
print"[Copy]"
_name = msg.text.replace("Copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets=[]
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not Found")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
cl.sendText(msg.to,"Success Copy")
except Exception as e:
print e
#-----------------------------------------
elif "Ready op" in msg.text:
if msg.from_ in owner:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Ready op","")
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
cl.sendText(msg.to,"Hello Kk")
cl.sendText(msg.to,"One Piece Team Mau Bersih² Group Sampah Nih")
cl.sendText(msg.to,"Karna Ini Group Sampah Jadi Mau Di Bersihin Dulu Yah\n★Jangan Baper...\n★Jangan Nangis\n★Jangan Cengeng\nBawa Enjoy Aja Kawan♪")
msg.contentType = 13
msg.contentMetadata = {'mid': 'uc9363b5a4bfacd981c3e3c082bc4d5ef'}
cl.sendMessage(msg)
cl.sendText(msg.to,"This My Team")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in Bots or owner:
if target in owner:
pass
elif target in admin:
pass
elif target in Bots:
pass
else:
try:
klist=[cl,ki,ki2]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
#----------------Fungsi Kick User Target Start----------------------#
elif "Nk " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
#----------------Fungsi Kick User Target Finish----------------------#
elif "Blacklist @ " in msg.text:
#if msg.from_ in admin:
_name = msg.text.replace("Blacklist @ ","")
_kicktarget = _name.rstrip(' ')
cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Plak")
except:
cl.sendText(msg.to,"error")
#----------------Fungsi Banned User Target Start-----------------------#
elif "Banned @" in msg.text:
#if msg.from_ in admin:
if msg.toType == 2:
print "[Banned] Sukses"
_name = msg.text.replace("Banned @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Dilarang Banned Bot")
ki.sendText(msg.to,"Dilarang Banned Bot")
ki2.sendText(msg.to,"Dilarang Banned Bot")
#ki2.sendText(msg.to,"Dilarang Banned Bot")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Akun telah sukses di banned")
except:
cl.sendText(msg.to,"Error")
#----------------Fungsi Banned User Target Finish-----------------------#
#----------------Mid via Tag--------------
elif "Mid @" in msg.text:
#if msg.from_ in admin:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
#-----------------------------------------
#----------------Fungsi Unbanned User Target Start-----------------------#
elif "Unban @" in msg.text:
#if msg.from_ in admin:
if msg.toType == 2:
print "[Unban] Sukses"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Tidak Ditemukan.....")
ki.sendText(msg.to,"Tidak Ditemukan.....")
ki2.sendText(msg.to,"Tidak Ditemukan.....")
#ki3.sendText(msg.to,"Tidak Ditemukan.....")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Akun Bersih Kembali")
except:
cl.sendText(msg.to,"Error")
#----------------Fungsi Unbanned User Target Finish-----------------------#
#-------------Fungsi Spam Start---------------------#
elif msg.text in ["Up","up","Up Chat","Up chat","up chat","Upchat","upchat"]:
#if msg.from_ in admin:
ki.sendText(msg.to,"P squared up!")
ki2.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
ki2.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
ki2.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
ki2.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
ki2.sendText(msg.to,"P squared up!")
#-------------Fungsi Spam Finish---------------------#
#-------------Fungsi Broadcast Start------------#
elif "Bc " in msg.text:
#if msg.from_ in admin:
bctxt = msg.text.replace("Bc ","")
a = cl.getGroupIdsJoined()
for taf in a:
cl.sendText(taf, (bctxt))
#--------------Fungsi Broadcast Finish-----------#
elif msg.text in ["LG"]:
#if msg.from_ in admin:
gids = cl.getGroupIdsJoined()
h = ""
for i in gids:
#####gn = cl.getGroup(i).name
h += "[•]%s Member\n" % (cl.getGroup(i).name +"👉"+str(len(cl.getGroup(i).members)))
cl.sendText(msg.to,"=======[List Group]======\n"+ h +"Total Group :"+str(len(gids)))
elif msg.text in ["LG2"]:
#if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
#--------------List Group------------
#------------ Keluar Dari Semua Group------
elif msg.text in ["Bot out","Op bye"]:
#if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
gid = ki.getGroupIdsJoined()
gid = ki2.getGroupIdsJoined()
for i in gid:
ki.leaveGroup(i)
ki2.leaveGroup(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Semua Sukses Keluar Boss")
else:
cl.sendText(msg.to,"He declined all invitations")
#------------------------End---------------------
#-----------------End-----------
elif msg.text in ["Op katakan hi"]:
ki.sendText(msg.to,"Hi buddy Har Har")
ki2.sendText(msg.to,"Hi buddy Har Har")
#ki3.sendText(msg.to,"Hi buddy Har Har")
#-----------------------------------------------
elif msg.text in ["Cv say hinata pekok"]:
ki.sendText(msg.to,"Hinata pekok Har Har")
ki2.sendText(msg.to,"Hinata pekok Har Har")
#ki3.sendText(msg.to,"Hinata pekok Har Har")
elif msg.text in ["Cv say didik pekok"]:
ki.sendText(msg.to,"Didik pekok Har Har")
ki2.sendText(msg.to,"Didik pekok Har Har")
#ki3.sendText(msg.to,"Didik pekok Har Har")
elif msg.text in ["Cv say bobo ah","Bobo dulu ah"]:
ki.sendText(msg.to,"Have a nice dream Cv Har Har")
ki2.sendText(msg.to,"Have a nice dream Cv Har Har")
#ki3.sendText(msg.to,"Have a nice dream Cv Har Har")
elif msg.text in ["Cv say chomel pekok"]:
ki.sendText(msg.to,"Chomel pekok Har Har")
ki2.sendText(msg.to,"Chomel pekok Har Har")
#ki3.sendText(msg.to,"Chomel pekok Har Har")
elif msg.text in ["Welcome"]:
ki.sendText(msg.to,"Selamat datang di Group Kami")
ki.sendText(msg.to,"Jangan nakal ok!")
#-----------------------------------------------
elif msg.text in ["PING","Ping","ping"]:
ki.sendText(msg.to,"PONG double thumbs upHar Har")
ki2.sendText(msg.to,"PONG double thumbs upHar Har")
#ki3.sendText(msg.to,"PONG double thumbs upHar Har")
#-----------------------------------------------
#-------------Fungsi Respon Start---------------------#
elif msg.text in ["Absen","Absen bot","Absen dulu","Respon"]:
#if msg.from_ in admin:
cl.sendText(msg.to,"Tukang Sayur On")
ki.sendText(msg.to,"Tukang Jengkol On")
ki2.sendText(msg.to,"Tukang Kibul On")
cl.sendText(msg.to,"Semua Udah Hadir Boss\nSiap Protect Group\nAman Gak Aman Yang Penting Anu")
#-------------Fungsi Respon Finish---------------------#
#-------------Fungsi Balesan Respon Start---------------------#
elif msg.text in ["Ini Apa","ini apa","Apaan Ini","apaan ini"]:
ki.sendText(msg.to,"Ya gitu deh intinya mah questioning")
#-------------Fungsi Balesan Respon Finish---------------------#
#-------------Fungsi Speedbot Start---------------------#
elif msg.text in ["Speed","Sp"]:
#if msg.from_ in admin and owner:
start = time.time()
cl.sendText(msg.to, "Menghitung Kecepatan...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sDetik" % (elapsed_time))
#-------------Fungsi Speedbot Finish---------------------#
#-------------Fungsi Banned Send Contact Start------------------#
elif msg.text in ["Ban"]:
#if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"Kirim contact")
#ki.sendText(msg.to,"Kirim contact")
#kk.sendText(msg.to,"Kirim contact")
#kc.sendText(msg.to,"Kirim contact")
elif msg.text in ["Unban"]:
#if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"Kirim contact")
#ki.sendText(msg.to,"Kirim contact")
#kk.sendText(msg.to,"Kirim contact")
#kc.sendText(msg.to,"Kirim contact")
#-------------Fungsi Banned Send Contact Finish------------------#
elif msg.text in ["Creator"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendText(msg.to,"======================")
cl.sendMessage(msg)
cl.sendText(msg.to,"======================")
cl.sendText(msg.to,"Itu Creator Kami")
#-------------Fungsi Chat ----------------
elif msg.text in ["Woy","woy","Woi","woi","bot","Bot"]:
quote = ['Istri yang baik itu Istri yang Mengizinkan Suaminya untuk Poligami 😂😂😂.','Kunci Untuk Bikin Suami Bahagia itu cuma satu..\nIzinkan Suamimu Untuk Selingkuh Coyyy ','Ah Kupret Lu','Muka Lu Kaya Jamban','Ada Orang kah disini?','Sange Euy','Ada Perawan Nganggur ga Coy?']
psn = random.choice(quote)
cl.sendText(msg.to,psn)
#-------------Fungsi Bannlist Start------------------#
elif msg.text in ["Banlist"]:
#if msg.from_ in admin:
if wait["blacklist"] == {}:
random.choice(KAC).sendText(msg.to,"Tidak Ada Akun Terbanned")
else:
random.choice(KAC).sendText(msg.to,"Blacklist user")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Fungsi Bannlist Finish------------------#
elif msg.text in ["Cek ban"]:
#if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif msg.text in ["Kill ban"]:
#if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
cl.kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
cl.sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
elif msg.text in ["Clear"]:
#if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif "random: " in msg.text:
#if msg.from_ in admin:
if msg.toType == 2:
strnum = msg.text.replace("random: ","")
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.01)
group.name = name
cl.updateGroup(group)
except:
cl.sendText(msg.to,"Error")
elif "albumat'" in msg.text:
try:
albumtags = msg.text.replace("albumat'","")
gid = albumtags[:6]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "created an album")
except:
cl.sendText(msg.to,"Error")
elif "fakecat'" in msg.text:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
anu = msg.text.replace("fakecat'","")
cl.sendText(msg.to,str(cl.channel.createAlbum(msg.to,name,anu)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
#---------CCTV-----------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n[•]" + Name
wait2['ROM'][op.param1][op.param2] = "[•]" + Name
else:
cl.sendText
except:
pass
#---------------------
if op.type == 17:
if op.param2 in Bots:
return
ginfo = cl.getGroup(op.param1)
cl.sendText(op.param1, "Selamat Datang Di Grup " + ">>>" + str(ginfo.name) + "<<<" + "\n" + "Founder Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName + "\n\n" + "Budayakan Baca Note !!! yah Ka 😊\nSemoga Betah Kk 😘")
#cl.sendText(op.param1, "Founder Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName)
#cl.choice(KAC).sendText(op.param1,"Budayakan Baca Note !!! yah Ka 😊\nSemoga Betah Kk 😘")
#print "MEMBER HAS JOIN THE GROUP"
#------------------------
if op.type == 59:
print op
except Exception as error:
print error
def a2():
    """Return True unless the current minute is a multiple of ten.

    Acts as a simple time gate: returns False on minutes 00, 10, 20, 30,
    40 and 50, and True on every other minute.
    """
    now = datetime.now()
    minute = datetime.strftime(now, "%M")  # two-digit minute, e.g. "07"
    # BUG FIX: the original tested nowT[14:], which is always "" for a
    # two-digit string, so the membership test never matched and the
    # function unconditionally returned True.
    return minute not in ["10", "20", "30", "40", "50", "00"]
def autolike():
for zx in range(0,200):
hasil = cl.activity(limit=200)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉Auto Like by ⭐⭐Amii⭐⭐👈\n\n™By ✰Amii ら૯ՆԲც૦੮✰")
ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
ki.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
ki2.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
ki2.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
print "Like"
except:
pass
else:
print "Already Liked"
time.sleep(0.01)
#thread3 = threading.Thread(target=autolike)
#thread3.daemon = True
#thread3.start()
#--------------------
def likePost():
for zx in range(0,200):
hasil = cl.activity(limit=200)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
if hasil['result']['posts'][zx]['userInfo']['mid'] in owner:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
ki2.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto like by ^Amii ら૯ՆԲც૦੮^")
ki.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId']," ✰Amii ら૯ՆԲც૦੮✰")
print "Like"
except:
pass
else:
print "Status Sudah di Like Plak"
def nameUpdate():
    """Background worker: refresh the bot's display name every 10 minutes.

    Only acts while wait["clock"] is truthy.  All exceptions are swallowed
    so the daemon thread never dies.
    """
    while True:
        try:
            if wait["clock"] == True:
                # NOTE(review): the "Jam on"/"Jam Update" commands append a
                # (%H:%M) timestamp to the name, but this worker writes only
                # the base name; the original computed the timestamp and
                # discarded it (dead code removed here) — confirm intent.
                profile = cl.getProfile()
                profile.displayName = wait["cName"]
                cl.updateProfile(profile)
            # BUG FIX: sleep unconditionally.  The original slept only
            # inside the `if`, so the loop busy-spun at 100% CPU whenever
            # the clock feature was off.
            time.sleep(600)
        except:
            pass
# Start the display-name refresher as a daemon so it dies with the process.
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
# Main event loop: long-poll LINE for operations and dispatch each to bot().
while True:
    try:
        Ops = cl.fetchOps(cl.Poll.rev, 5)
    except EOFError:
        raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            # Advance the poll revision so the same op is not fetched again.
            cl.Poll.rev = max(cl.Poll.rev, Op.revision)
            bot(Op)
|
dds.py | # -*-coding:utf-8-*-
# Copyright (c) 2020 DJI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import collections
import threading
from queue import Queue
from abc import abstractmethod
from . import logger
from . import module
from . import protocol
from concurrent.futures import ThreadPoolExecutor
# Range of subject ids the SDK may hand out for DDS subscriptions.
SDK_FIRST_DDS_ID = 20
SDK_LAST_DDS_ID = 255
# Names of the subscribable data subjects (RoboMaster EP/S1).
DDS_BATTERY = "battery"
DDS_GIMBAL_BASE = "gimbal_base"
DDS_VELOCITY = "velocity"
DDS_ESC = "esc"
DDS_ATTITUDE = "attitude"
DDS_IMU = "imu"
DDS_POSITION = "position"
DDS_SA_STATUS = "sa_status"
DDS_CHASSIS_MODE = "chassis_mode"
DDS_SBUS = "sbus"
DDS_SERVO = "servo"
DDS_ARM = "arm"
DDS_GRIPPER = "gripper"
DDS_GIMBAL_POS = "gimbal_pos"
DDS_STICK = "stick"
DDS_MOVE_MODE = "move_mode"
DDS_TOF = "tof"
DDS_PINBOARD = "pinboard"
# Names of the Tello drone data subjects.
DDS_TELLO_ATTITUDE = "tello_attitude"
DDS_TELLO_BATTERY = "tello_battery"
DDS_TELLO_TEMP = "tello_temperature"
DDS_TELLO_IMU = "tello_imu"
DDS_TELLO_TOF = "tello_tof"
DDS_TELLO_DRONE = "tello_drone"
DDS_TELLO_ALL = "tello_all"
# Tick-counter wrap value used by TelloSubscriber._dispatch_task.
TELLO_DDS_TIME_MAX = 666
# Subject name -> 64-bit uid sent to the robot when subscribing.
SUB_UID_MAP = {
    DDS_BATTERY: 0x000200096862229f,
    DDS_GIMBAL_BASE: 0x00020009f5882874,
    DDS_VELOCITY: 0x0002000949a4009c,
    DDS_ESC: 0x00020009c14cb7c5,
    DDS_ATTITUDE: 0x000200096b986306,
    DDS_IMU: 0x00020009a7985b8d,
    DDS_POSITION: 0x00020009eeb7cece,
    DDS_SA_STATUS: 0x000200094a2c6d55,
    DDS_CHASSIS_MODE: 0x000200094fcb1146,
    DDS_SBUS: 0x0002000988223568,
    DDS_SERVO: 0x000200095f0059e7,
    DDS_ARM: 0x0002000926abd64d,
    DDS_GRIPPER: 0x00020009124d156a,
    DDS_GIMBAL_POS: 0x00020009f79b3c97,
    DDS_STICK: 0x0002000955e9a0fa,
    DDS_MOVE_MODE: 0x00020009784c7bfd,
    DDS_TOF: 0x0002000986e4c05a,
    DDS_PINBOARD: 0x00020009eebb9ffc,
}
# Subscription kinds: event-driven vs. fixed-frequency push.
DDS_SUB_TYPE_EVENT = 1
DDS_SUB_TYPE_PERIOD = 0
# Registry of Subject subclasses, filled by the _AutoRegisterSubject metaclass.
registered_subjects = {}
# (cmd_set, cmd_id) pairs the Subscriber's message handler lets through.
dds_cmd_filter = {(0x48, 0x08)}
class _AutoRegisterSubject(type):
    """Metaclass that auto-registers every Subject subclass by class name.

    Each concrete subclass of ``Subject`` is recorded in the module-level
    ``registered_subjects`` dict at class-definition time; the ``Subject``
    base class itself is skipped.

    Raises:
        ValueError: if two Subject subclasses share the same class name.
    """

    def __new__(mcs, name, bases, attrs, **kw):
        return super().__new__(mcs, name, bases, attrs, **kw)

    def __init__(cls, name, bases, attrs, **kw):
        super().__init__(name, bases, attrs, **kw)
        if name == 'Subject':
            return
        # Idiom fix: test membership on the dict directly, not .keys().
        if name in registered_subjects:
            raise ValueError("Duplicate Subject class {0}".format(name))
        registered_subjects[name] = cls
class Subject(metaclass=_AutoRegisterSubject):
    """Base type for every DDS subscription subject.

    Concrete subjects override the class attributes below, implement
    :meth:`data_info`, and are registered automatically by the metaclass.
    """

    # Subclass-overridable subscription descriptors.
    name = "Subject"
    _push_proto_cls = protocol.ProtoPushPeriodMsg
    type = DDS_SUB_TYPE_PERIOD
    uid = 0
    freq = 1

    def __init__(self):
        # Pending thread-pool task for the user callback, if any.
        self._task = None
        self._subject_id = 1
        # User callback plus the positional/keyword args to invoke it with.
        self._callback, self._cb_args, self._cb_kw = None, None, None

    def __repr__(self):
        return "dds subject, name:{0}".format(self.name)

    def set_callback(self, callback, args, kw):
        """Remember the user callback and the arguments it will be called with."""
        self._callback, self._cb_args, self._cb_kw = callback, args, kw

    @abstractmethod
    def data_info(self):
        """Return the decoded data for this subject (subclass hook)."""
        return None

    def exec(self):
        """Invoke the stored callback with the latest decoded data."""
        info = self.data_info()
        self._callback(info, *self._cb_args, **self._cb_kw)
# Lightweight immutable record tying a subscriber object to a subject and
# the callback registered for it.
class SubHandler(collections.namedtuple("SubHandler", ["obj", "subject", "f"])):
    __slots__ = ()
class Subscriber(module.Module):
    """Routes DDS push messages from the robot to registered subjects.

    Messages matching the module-level ``dds_cmd_filter`` are queued by the
    client handler and drained by a dedicated dispatcher thread, which
    decodes each proto and runs the subject's callback on a thread pool.
    """

    _host = protocol.host2byte(9, 0)
    # Rolling id handed out to new subscriptions; class-level, so shared
    # across Subscriber instances.
    _sub_msg_id = SDK_FIRST_DDS_ID

    def __init__(self, robot):
        super().__init__(robot)
        self._robot = robot
        self.msg_sub_dict = {}
        # subject name -> SubHandler for that subscription.
        self._publisher = collections.defaultdict(list)
        self._msg_queue = Queue()
        self._dispatcher_running = False
        self._dispatcher_thread = None
        # NOTE(review): "excutor" (sic) is part of the public attribute
        # surface; renaming it would break external users.
        self.excutor = ThreadPoolExecutor(max_workers=15)

    def __del__(self):
        # Best-effort shutdown if the owner never called stop().
        self.stop()

    def get_next_subject_id(self):
        """Return the next subscription id, wrapping to SDK_FIRST_DDS_ID
        after SDK_LAST_DDS_ID is exceeded."""
        if self._sub_msg_id > SDK_LAST_DDS_ID:
            self._sub_msg_id = SDK_FIRST_DDS_ID
        else:
            self._sub_msg_id += 1
        return self._sub_msg_id

    def start(self):
        """Register the message handler and start the dispatcher thread."""
        self._dds_mutex = threading.Lock()
        self._client.add_handler(self, "Subscriber", self._msg_recv)
        self._dispatcher_thread = threading.Thread(target=self._dispatch_task)
        self._dispatcher_thread.start()

    def stop(self):
        """Stop the dispatcher thread and shut the callback pool down."""
        self._dispatcher_running = False
        if self._dispatcher_thread:
            # A None sentinel wakes the dispatcher so join() cannot hang.
            self._msg_queue.put(None)
            self._dispatcher_thread.join()
            self._dispatcher_thread = None
        self.excutor.shutdown(wait=False)

    @classmethod
    def _msg_recv(cls, self, msg):
        # Client-side handler: enqueue only messages whose command ids
        # pass the module-level dds_cmd_filter.
        for cmd_set, cmd_id in list(dds_cmd_filter):
            if msg.cmdset == cmd_set and msg.cmdid == cmd_id:
                self._msg_queue.put(msg)

    def _dispatch_task(self):
        """Dispatcher loop: decode queued messages and fire subject callbacks."""
        self._dispatcher_running = True
        logger.info("Subscriber: dispatcher_task is running...")
        while self._dispatcher_running:
            # NOTE(review): the positional 1 is Queue.get's `block` flag
            # (truthy), not a timeout — confirm intent.
            msg = self._msg_queue.get(1)
            if msg is None:
                # Sentinel from stop(); exit only if shutdown was requested.
                if not self._dispatcher_running:
                    break
                continue
            self._dds_mutex.acquire()
            for name in self._publisher:
                handler = self._publisher[name]
                logger.debug("Subscriber: msg: {0}".format(msg))
                proto = msg.get_proto()
                if proto is None:
                    logger.warning("Subscriber: _publish, msg.get_proto None, msg:{0}".format(msg))
                    continue
                # Periodic-push subjects are matched by subscription id.
                if handler.subject.type == DDS_SUB_TYPE_PERIOD and\
                        msg.cmdset == 0x48 and msg.cmdid == 0x08:
                    logger.debug("Subscriber: _publish: msg_id:{0}, subject_id:{1}".format(proto._msg_id,
                                 handler.subject._subject_id))
                    if proto._msg_id == handler.subject._subject_id:
                        handler.subject.decode(proto._data_buf)
                        # Submit the callback only when no task is pending
                        # or the previous one has finished.
                        if handler.subject._task is None:
                            handler.subject._task = self.excutor.submit(handler.subject.exec)
                        if handler.subject._task.done() is True:
                            handler.subject._task = self.excutor.submit(handler.subject.exec)
                # Event subjects are matched by their own (cmdset, cmdid).
                elif handler.subject.type == DDS_SUB_TYPE_EVENT:
                    if handler.subject.cmdset == msg.cmdset and handler.subject.cmdid == msg.cmdid:
                        handler.subject.decode(proto._data_buf)
                        if handler.subject._task is None:
                            handler.subject._task = self.excutor.submit(handler.subject.exec)
                        if handler.subject._task.done() is True:
                            handler.subject._task = self.excutor.submit(handler.subject.exec)
            self._dds_mutex.release()
            logger.info("Subscriber: _publish, msg is {0}".format(msg))

    def add_cmd_filter(self, cmd_set, cmd_id):
        """Allow messages with this (cmd_set, cmd_id) through to the queue."""
        dds_cmd_filter.add((cmd_set, cmd_id))

    def del_cmd_filter(self, cmd_set, cmd_id):
        """Stop accepting messages with this (cmd_set, cmd_id)."""
        dds_cmd_filter.remove((cmd_set, cmd_id))

    def add_subject_event_info(self, subject, callback=None, *args):
        """ Add an event-type subscription.

        :param subject: subject describing the event subscription
        :param callback: function used to handle the decoded event data
        """
        # Adding an event subscription only installs a command filter;
        # no request is sent to the robot.
        subject.set_callback(callback, args[0], args[1])
        handler = SubHandler(self, subject, callback)
        subject._task = None
        self._dds_mutex.acquire()
        self._publisher[subject.name] = handler
        self._dds_mutex.release()
        self.add_cmd_filter(subject.cmdset, subject.cmdid)
        return True

    def del_subject_event_info(self, subject):
        """ Remove an event-type subscription.

        :param subject: subject of the subscription to remove
        :return: bool: call result
        """
        # Removing an event subscription only removes its command filter;
        # a still-running callback task is cancelled first.
        if self._publisher[subject.name].subject._task.done() is False:
            self._publisher[subject.name].subject._task.cancel()
        self.del_cmd_filter(subject.cmdset, subject.cmdid)
        return True

    def add_subject_info(self, subject, callback=None, *args):
        """ Low-level interface for requesting a periodic data subscription.

        :param subject: subject describing the data subscription
        :param callback: function used to handle the decoded data
        :return: bool: call result
        """
        # add handler to publisher.
        subject.set_callback(callback, args[0], args[1])
        handler = SubHandler(self, subject, callback)
        self._dds_mutex.acquire()
        self._publisher[subject.name] = handler
        self._dds_mutex.release()
        # Ask the robot to start pushing this subject's data.
        proto = protocol.ProtoAddSubMsg()
        proto._node_id = self.client.hostbyte
        proto._sub_freq = subject.freq
        proto._sub_data_num = 1
        proto._msg_id = self.get_next_subject_id()
        subject._subject_id = proto._msg_id
        subject._task = None
        proto._sub_uid_list.append(subject.uid)
        return self._send_sync_proto(proto, protocol.host2byte(9, 0))

    def del_subject_info(self, subject_name):
        """ Remove a data subscription.

        :param subject_name: name of the subject to unsubscribe
        :return: bool: result of the unsubscribe request
        """
        logger.debug("Subscriber: del_subject_info: name:{0}, self._publisher:{1}".format(subject_name,
                     self._publisher))
        if subject_name in self._publisher:
            subject_id = self._publisher[subject_name].subject._subject_id
            if self._publisher[subject_name].subject._task.done() is False:
                self._publisher[subject_name].subject._task.cancel()
            self._dds_mutex.acquire()
            del self._publisher[subject_name]
            self._dds_mutex.release()
            proto = protocol.ProtoDelMsg()
            proto._msg_id = subject_id
            proto._node_id = self.client.hostbyte
            return self._send_sync_proto(proto, protocol.host2byte(9, 0))
        else:
            # NOTE(review): subject_name is passed as a %-style lazy arg but
            # the format string has no placeholder, so it is never rendered.
            logger.warning("Subscriber: fail to del_subject_info", subject_name)
class TelloSubscriber(object):
    """Dispatches Tello text-protocol DDS pushes to registered subjects.

    Unlike Subscriber, this polls the most recently received message at a
    fixed frequency instead of draining a queue.
    """

    def __init__(self, robot):
        self._robot = robot
        # subject name -> SubHandler for that subscription.
        self._publisher = collections.defaultdict(list)
        self._dispatcher_running = False
        self._dispatcher_thread = None
        self._client = self._robot.client
        # Most recent DDS message; overwritten by _msg_recv.
        self._msg = None
        self._freq = protocol.TelloDdsProto.DDS_FREQ

    def __del__(self):
        # Best-effort shutdown if the owner never called stop().
        self.stop()

    def start(self):
        """Register the message handler and start the dispatcher thread."""
        self._client.add_handler(self, "TelloSubscriber", self._msg_recv)
        self._dispatcher_thread = threading.Thread(target=self._dispatch_task)
        self._dispatcher_thread.start()

    def stop(self):
        """Ask the dispatcher thread to exit and wait for it."""
        self._dispatcher_running = False
        if self._dispatcher_thread:
            self._dispatcher_thread.join()
            self._dispatcher_thread = None

    @classmethod
    def _msg_recv(cls, self, msg):
        # Keep only messages flagged as DDS pushes.
        if protocol.TextMsg.IS_DDS_FLAG in msg.get_proto().resp:
            self._msg = msg

    def _dispatch_task(self):
        """Poll the latest message at DDS_FREQ and fire subject callbacks."""
        self._dispatcher_running = True
        logger.info("TelloSubscriber: dispatcher_task is running...")
        interval = 1 / protocol.TelloDdsProto.DDS_FREQ
        time_count = 0
        while self._dispatcher_running:
            msg = self._msg
            if msg is None:
                # NOTE(review): this branch skips the sleep below, so the
                # loop busy-spins until the first message arrives — confirm
                # whether a short sleep was intended here.
                if not self._dispatcher_running:
                    break
                continue
            proto = msg.get_proto()
            if proto is None:
                logger.warning("TelloSubscirber: _publist, msg.get_proto None, msg: {0}".format(msg))
                continue
            for name in self._publisher:
                handler = self._publisher[name]
                # Fire each subject only every DDS_FREQ/subject.freq ticks,
                # so subjects can run slower than the base poll rate.
                need_time = protocol.TelloDdsProto.DDS_FREQ / handler.subject.freq
                if time_count % need_time == 0:
                    if handler.subject.decode(proto.resp):
                        handler.subject.exec()
            # Wrap the tick counter so it never grows without bound.
            if time_count > TELLO_DDS_TIME_MAX:
                time_count = 0
            else:
                time_count += 1
            time.sleep(interval)

    def add_subject_info(self, subject, callback=None, *args):
        """ Low-level interface for requesting a data subscription.

        :param subject: subject describing the data subscription
        :param callback: function used to handle the decoded data
        :return: bool: subscription result
        """
        # add handler to publisher.
        subject.set_callback(callback, args[0], args[1])
        handler = SubHandler(self, subject, callback)
        self._publisher[subject.name] = handler
        logger.debug("TelloSubscriber: add_subject_info, add sub sucessfully")

    def del_subject_info(self, subject_name):
        """ Remove a data subscription.

        :param subject_name: name of the subject to unsubscribe
        :return: bool: removal result
        """
        logger.debug("TelloSubscriber: del_subject_info: name:{0}, self._publisher:{1}".format(subject_name,
                     self._publisher))
        if subject_name in self._publisher:
            del self._publisher[subject_name]
            logger.debug("TelloSubscriber: del_subject_info, del sub sucessfully")
            return True
        else:
            # NOTE(review): subject_name is passed as a %-style lazy arg but
            # the format string has no placeholder, so it is never rendered.
            logger.warning("Subscriber: fail to del_subject_info", subject_name)
            return False

    @property
    def freq(self):
        # Target dispatch frequency; clamped to [0, DDS_FREQ] by the setter.
        return self._freq

    @freq.setter
    def freq(self, in_freq):
        if in_freq <= 0:
            self._freq = 0
        elif in_freq > protocol.TelloDdsProto.DDS_FREQ:
            self._freq = protocol.TelloDdsProto.DDS_FREQ
        else:
            self._freq = in_freq
|
_fixtures.py | import collections
import itertools
import json
import random
from threading import Lock
from threading import Thread
import time
from unittest import TestCase
import uuid
import pytest
from dogpile.cache import CacheRegion
from dogpile.cache import register_backend
from dogpile.cache.api import CacheBackend
from dogpile.cache.api import CacheMutex
from dogpile.cache.api import NO_VALUE
from dogpile.cache.region import _backend_loader
from . import assert_raises_message
from . import eq_
class _GenericBackendFixture(object):
    """Shared fixture that builds a CacheRegion and/or raw backend for the
    generic backend test suites, tracking every created key so tearDown
    can remove it from a possibly persistent/external backend.
    """

    @classmethod
    def setup_class(cls):
        # Instantiate the backend once per class; skip the whole class if
        # the backend's driver is not installed.
        backend_cls = _backend_loader.load(cls.backend)
        try:
            arguments = cls.config_args.get("arguments", {})
            backend = backend_cls(arguments)
        except ImportError:
            pytest.skip("Backend %s not installed" % cls.backend)
        cls._check_backend_available(backend)

    def tearDown(self):
        # Delete every key this test created so state does not leak
        # between tests run against a shared backend.
        if self._region_inst:
            for key in self._keys:
                self._region_inst.delete(key)
            self._keys.clear()
        elif self._backend_inst:
            self._backend_inst.delete("some_key")

    @classmethod
    def _check_backend_available(cls, backend):
        # Hook for subclasses to skip when the backend service is down.
        pass

    region_args = {}
    config_args = {}
    extra_arguments = {}
    _region_inst = None
    _backend_inst = None
    # NOTE(review): class-level set shared by all instances; tearDown
    # clears it, so tests must not run concurrently within one class.
    _keys = set()

    def _region(self, backend=None, region_args=None, config_args=None):
        # BUGFIX(idiom): the original used mutable default arguments
        # (region_args={}, config_args={}); they were never mutated, but
        # None sentinels remove the shared-default hazard.
        region_args = {} if region_args is None else region_args
        config_args = {} if config_args is None else config_args
        _region_args = {}
        # TODO: maybe we use a class-level naming convention instead
        # of a dict here so that arguments merge naturally
        for cls in reversed(self.__class__.__mro__):
            if "region_args" in cls.__dict__:
                _region_args.update(cls.__dict__["region_args"])
        _region_args.update(**region_args)
        _config_args = self.config_args.copy()
        _config_args.update(config_args)

        def _store_keys(key):
            # Wrap any existing key mangler so every mangled key is
            # recorded for cleanup in tearDown.
            if existing_key_mangler:
                key = existing_key_mangler(key)
            self._keys.add(key)
            return key

        self._region_inst = reg = CacheRegion(**_region_args)
        existing_key_mangler = self._region_inst.key_mangler
        self._region_inst.key_mangler = _store_keys
        self._region_inst._user_defined_key_mangler = _store_keys
        reg.configure(backend or self.backend, **_config_args)
        return reg

    def _backend(self):
        # Build a raw backend (no region layer) from the class config
        # merged with any per-class extra_arguments.
        backend_cls = _backend_loader.load(self.backend)
        _config_args = self.config_args.copy()
        arguments = _config_args.get("arguments", {})
        arguments = {**arguments, **self.extra_arguments}
        self._backend_inst = backend_cls(arguments)
        return self._backend_inst
class _GenericBackendTest(_GenericBackendFixture, TestCase):
    """Backend-agnostic behavioral tests for CacheBackend / CacheRegion,
    run against whichever backend the concrete subclass configures."""
    def test_backend_get_nothing(self):
        # A missing key yields the NO_VALUE sentinel, never None.
        backend = self._backend()
        eq_(backend.get_serialized("some_key"), NO_VALUE)
    def test_backend_delete_nothing(self):
        # Deleting an absent key must be a silent no-op.
        backend = self._backend()
        backend.delete("some_key")
    def test_backend_set_get_value(self):
        backend = self._backend()
        backend.set_serialized("some_key", b"some value")
        eq_(backend.get_serialized("some_key"), b"some value")
    def test_backend_delete(self):
        backend = self._backend()
        backend.set_serialized("some_key", b"some value")
        backend.delete("some_key")
        eq_(backend.get_serialized("some_key"), NO_VALUE)
    def test_region_is_key_locked(self):
        reg = self._region()
        random_key = str(uuid.uuid1())
        assert not reg.get(random_key)
        eq_(reg.key_is_locked(random_key), False)
        # ensures that calling key_is_locked doesn't acquire the lock
        eq_(reg.key_is_locked(random_key), False)
        # Backends without a distributed mutex return None here.
        mutex = reg.backend.get_mutex(random_key)
        if mutex:
            mutex.acquire()
            eq_(reg.key_is_locked(random_key), True)
            mutex.release()
            eq_(reg.key_is_locked(random_key), False)
    def test_region_set_get_value(self):
        reg = self._region()
        reg.set("some key", "some value")
        eq_(reg.get("some key"), "some value")
    def test_region_set_multiple_values(self):
        reg = self._region()
        values = {"key1": "value1", "key2": "value2", "key3": "value3"}
        reg.set_multi(values)
        eq_(values["key1"], reg.get("key1"))
        eq_(values["key2"], reg.get("key2"))
        eq_(values["key3"], reg.get("key3"))
    def test_region_get_zero_multiple_values(self):
        reg = self._region()
        eq_(reg.get_multi([]), [])
    def test_region_set_zero_multiple_values(self):
        reg = self._region()
        reg.set_multi({})
    def test_region_set_zero_multiple_values_w_decorator(self):
        reg = self._region()
        values = reg.get_or_create_multi([], lambda: 0)
        eq_(values, [])
    def test_region_get_or_create_multi_w_should_cache_none(self):
        # should_cache_fn rejects None for caching, but the created
        # values are still returned to the caller.
        reg = self._region()
        values = reg.get_or_create_multi(
            ["key1", "key2", "key3"],
            lambda *k: [None, None, None],
            should_cache_fn=lambda v: v is not None,
        )
        eq_(values, [None, None, None])
    def test_region_get_multiple_values(self):
        reg = self._region()
        key1 = "value1"
        key2 = "value2"
        key3 = "value3"
        reg.set("key1", key1)
        reg.set("key2", key2)
        reg.set("key3", key3)
        values = reg.get_multi(["key1", "key2", "key3"])
        eq_([key1, key2, key3], values)
    def test_region_get_nothing_multiple(self):
        # get_multi preserves request order and fills gaps with NO_VALUE.
        reg = self._region()
        reg.delete_multi(["key1", "key2", "key3", "key4", "key5"])
        values = {"key1": "value1", "key3": "value3", "key5": "value5"}
        reg.set_multi(values)
        reg_values = reg.get_multi(
            ["key1", "key2", "key3", "key4", "key5", "key6"]
        )
        eq_(
            reg_values,
            ["value1", NO_VALUE, "value3", NO_VALUE, "value5", NO_VALUE],
        )
    def test_region_get_empty_multiple(self):
        reg = self._region()
        reg_values = reg.get_multi([])
        eq_(reg_values, [])
    def test_region_delete_multiple(self):
        # delete_multi must tolerate keys that were never set ("key10").
        reg = self._region()
        values = {"key1": "value1", "key2": "value2", "key3": "value3"}
        reg.set_multi(values)
        reg.delete_multi(["key2", "key10"])
        eq_(values["key1"], reg.get("key1"))
        eq_(NO_VALUE, reg.get("key2"))
        eq_(values["key3"], reg.get("key3"))
        eq_(NO_VALUE, reg.get("key10"))
    def test_region_set_get_nothing(self):
        reg = self._region()
        reg.delete_multi(["some key"])
        eq_(reg.get("some key"), NO_VALUE)
    def test_region_creator(self):
        reg = self._region()
        def creator():
            return "some value"
        eq_(reg.get_or_create("some key", creator), "some value")
    @pytest.mark.time_intensive
    def test_threaded_dogpile(self):
        # run a basic dogpile concurrency test.
        # note the concurrency of dogpile itself
        # is intensively tested as part of dogpile.
        reg = self._region(config_args={"expiration_time": 0.25})
        lock = Lock()
        canary = []
        def creator():
            # The sentinel Lock records whether two creators ever
            # overlapped: a False in canary means contention occurred.
            ack = lock.acquire(False)
            canary.append(ack)
            time.sleep(0.25)
            if ack:
                lock.release()
            return "some value"
        def f():
            for x in range(5):
                reg.get_or_create("some key", creator)
                time.sleep(0.5)
        threads = [Thread(target=f) for i in range(10)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        assert len(canary) > 2
        # Backends with lock timeouts may legitimately let a second
        # creator in after the timeout expires.
        if not reg.backend.has_lock_timeout():
            assert False not in canary
    @pytest.mark.time_intensive
    def test_threaded_get_multi(self):
        """This test is testing that when we get inside the "creator" for
        a certain key, there are no other "creators" running at all for
        that key.
        With "distributed" locks, this is not 100% the case.
        """
        reg = self._region(config_args={"expiration_time": 0.25})
        backend_mutex = reg.backend.get_mutex("some_key")
        is_custom_mutex = backend_mutex is not None
        locks = dict((str(i), Lock()) for i in range(11))
        canary = collections.defaultdict(list)
        def creator(*keys):
            assert keys
            # One sentinel Lock per key; a False append means another
            # creator held that key's lock at the same time.
            ack = [locks[key].acquire(False) for key in keys]
            # print(
            #     ("%s " % thread.get_ident()) + \
            #     ", ".join(sorted("%s=%s" % (key, acq)
            #     for acq, key in zip(ack, keys)))
            # )
            for acq, key in zip(ack, keys):
                canary[key].append(acq)
            time.sleep(0.5)
            for acq, key in zip(ack, keys):
                if acq:
                    locks[key].release()
            return ["some value %s" % k for k in keys]
        def f():
            for x in range(5):
                reg.get_or_create_multi(
                    [
                        str(random.randint(1, 10))
                        for i in range(random.randint(1, 5))
                    ],
                    creator,
                )
                time.sleep(0.5)
        # Warm the cache once on this thread before spawning workers.
        f()
        threads = [Thread(target=f) for i in range(5)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        assert sum([len(v) for v in canary.values()]) > 10
        # for non-custom mutex, check that we never had two creators
        # running at once
        if not is_custom_mutex:
            for l in canary.values():
                assert False not in l
    def test_region_delete(self):
        # Double-delete must be a no-op, not an error.
        reg = self._region()
        reg.set("some key", "some value")
        reg.delete("some key")
        reg.delete("some key")
        eq_(reg.get("some key"), NO_VALUE)
    @pytest.mark.time_intensive
    def test_region_expire(self):
        # TODO: ideally tests like these would not be using actual
        # time(); instead, an artificial function where the increment
        # can be controlled would be preferred. this way tests need not
        # have any delay in running and additionally there is no issue
        # with very slow processing missing a timeout, as is often the
        # case with this particular test
        expire_time = 1.00
        reg = self._region(config_args={"expiration_time": expire_time})
        counter = itertools.count(1)
        def creator():
            return "some value %d" % next(counter)
        eq_(reg.get_or_create("some key", creator), "some value 1")
        time.sleep(expire_time + (0.2 * expire_time))
        # expiration is definitely hit
        post_expiration = reg.get("some key", ignore_expiration=True)
        if post_expiration is not NO_VALUE:
            eq_(post_expiration, "some value 1")
        eq_(reg.get_or_create("some key", creator), "some value 2")
        # this line needs to run less the expire_time sec before the previous
        # two or it hits the expiration
        eq_(reg.get("some key"), "some value 2")
    def test_decorated_fn_functionality(self):
        # test for any quirks in the fn decoration that interact
        # with the backend.
        reg = self._region()
        counter = itertools.count(1)
        @reg.cache_on_arguments()
        def my_function(x, y):
            return next(counter) + x + y
        # Start with a clean slate
        my_function.invalidate(3, 4)
        my_function.invalidate(5, 6)
        my_function.invalidate(4, 3)
        eq_(my_function(3, 4), 8)
        eq_(my_function(5, 6), 13)
        eq_(my_function(3, 4), 8)
        eq_(my_function(4, 3), 10)
        my_function.invalidate(4, 3)
        eq_(my_function(4, 3), 11)
    def test_exploding_value_fn(self):
        # Exceptions raised in the creator must propagate to the caller.
        reg = self._region()
        def boom():
            raise Exception("boom")
        assert_raises_message(
            Exception, "boom", reg.get_or_create, "some_key", boom
        )
class _GenericSerializerTest(TestCase):
    """Verifies that region-level serializer/deserializer hooks are applied
    to values as they pass through the backend. Relies on the _region()
    helper from a fixture mixin supplied by the concrete subclass.
    """
    # Inheriting from this class will make test cases
    # use these serialization arguments
    region_args = {
        "serializer": lambda v: json.dumps(v).encode("ascii"),
        "deserializer": json.loads,
    }
    def test_uses_serializer(self):
        region = self._region()
        backend = region.backend
        value = {"foo": ["bar", 1, False, None]}
        region.set("k", value)
        raw = backend.get_serialized("k")
        assert isinstance(raw, bytes)
        # Stored form is "metadata|payload": the bytes after the first
        # pipe are exactly the serializer's output for the value.
        pipe = raw.find(b"|")
        payload = raw[pipe + 1 :]
        eq_(payload, self.region_args["serializer"](value))
        eq_(region._parse_serialized_from_backend(raw).payload, value)
    def test_uses_deserializer(self):
        # Round-trip through set/get must reproduce the original value.
        region = self._region()
        value = {"foo": ["bar", 1, False, None]}
        region.set("k", value)
        asserted = region.get("k")
        eq_(asserted, value)
    # TODO: test set_multi, get_multi
class _GenericMutexTest(_GenericBackendFixture, TestCase):
    """Behavioral tests for the mutex objects returned by a backend's
    get_mutex(), covering basic acquire/release, threaded contention and
    independence across keys."""
    def test_mutex(self):
        # acquire/locked/release life cycle on a single key.
        backend = self._backend()
        mutex = backend.get_mutex("foo")
        assert not mutex.locked()
        ac = mutex.acquire()
        assert ac
        # Non-blocking re-acquire while held must fail.
        ac2 = mutex.acquire(False)
        assert mutex.locked()
        assert not ac2
        mutex.release()
        assert not mutex.locked()
        ac3 = mutex.acquire()
        assert ac3
        mutex.release()
    def test_subclass_match(self):
        # Backends must return CacheMutex implementations.
        backend = self._backend()
        mutex = backend.get_mutex("foo")
        assert isinstance(mutex, CacheMutex)
    @pytest.mark.time_intensive
    def test_mutex_threaded(self):
        backend = self._backend()
        backend.get_mutex("foo")
        lock = Lock()
        canary = []
        def f():
            for x in range(5):
                mutex = backend.get_mutex("foo")
                mutex.acquire()
                for y in range(5):
                    # The sentinel Lock is only ever contended if the
                    # backend mutex failed to serialize the threads.
                    ack = lock.acquire(False)
                    canary.append(ack)
                    time.sleep(0.002)
                    if ack:
                        lock.release()
                mutex.release()
                time.sleep(0.02)
        threads = [Thread(target=f) for i in range(5)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        assert False not in canary
    def test_mutex_reentrant_across_keys(self):
        backend = self._backend()
        for x in range(3):
            m1 = backend.get_mutex("foo")
            m2 = backend.get_mutex("bar")
            try:
                # Holding "foo" must not block acquiring "bar".
                m1.acquire()
                assert m2.acquire(False)
                assert not m2.acquire(False)
                m2.release()
                assert m2.acquire(False)
                assert not m2.acquire(False)
                m2.release()
            finally:
                m1.release()
    def test_reentrant_dogpile(self):
        # Creating "foo" re-enters the region to create "bar"; the
        # dogpile locks must not deadlock on the nested call.
        reg = self._region()
        def create_foo():
            return "foo" + reg.get_or_create("bar", create_bar)
        def create_bar():
            return "bar"
        eq_(reg.get_or_create("foo", create_foo), "foobar")
        eq_(reg.get_or_create("foo", create_foo), "foobar")
class MockMutex(object):
    """Trivial mutex stand-in for MockBackend: acquisition always
    succeeds and no real locking state is kept."""

    def __init__(self, key):
        self.key = key

    def acquire(self, blocking=True):
        # Succeeds unconditionally, regardless of *blocking*.
        return True

    def release(self):
        # Nothing is ever held, so there is nothing to release.
        pass

    def locked(self):
        # Never reports as held.
        return False
class MockBackend(CacheBackend):
    """In-memory, dict-backed CacheBackend used for the "mock" backend
    registration; mutexes are non-locking MockMutex instances."""

    def __init__(self, arguments):
        self.arguments = arguments
        self._cache = {}

    def get_mutex(self, key):
        return MockMutex(key)

    def get(self, key):
        # Missing keys map to the NO_VALUE sentinel.
        return self._cache.get(key, NO_VALUE)

    def get_multi(self, keys):
        return [self.get(k) for k in keys]

    def set(self, key, value):
        self._cache[key] = value

    def set_multi(self, mapping):
        for k in mapping:
            self.set(k, mapping[k])

    def delete(self, key):
        # Silently ignore missing keys.
        self._cache.pop(key, None)

    def delete_multi(self, keys):
        for k in keys:
            self.delete(k)
# Make the in-memory MockBackend loadable under the name "mock".
register_backend("mock", __name__, "MockBackend")
|
test_subprocess.py | import unittest
from test import script_helper
from test import support
import subprocess
import sys
import signal
import io
import locale
import os
import errno
import tempfile
import time
import re
import selectors
import sysconfig
import warnings
import select
import shutil
import gc
import textwrap
try:
import threading
except ImportError:
threading = None
mswindows = (sys.platform == "win32")  # True on native Windows builds
#
# Depends on the following external programs: Python
#
if mswindows:
    # Code snippet prepended to child programs on Windows: puts the
    # child's stdout into binary mode so tests can compare raw bytes
    # without \r\n translation.
    SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
                    'os.O_BINARY);')
else:
    SETBINARY = ''
try:
    mkstemp = tempfile.mkstemp
except AttributeError:
    # tempfile.mkstemp is not available
    def mkstemp():
        """Replacement for mkstemp, calling mktemp."""
        # NOTE: mktemp is racy (name may be reused between the call and
        # the open); acceptable only as a test-suite fallback.
        fname = tempfile.mktemp()
        return os.open(fname, os.O_RDWR|os.O_CREAT), fname
class BaseTestCase(unittest.TestCase):
    """Common setup/teardown for the subprocess tests: reaps stray child
    processes before each test and asserts no Popen instances leak."""
    def setUp(self):
        # Try to minimize the number of children we have so this test
        # doesn't crash on some buildbots (Alphas in particular).
        support.reap_children()
    def tearDown(self):
        # Wait on any processes still tracked by the subprocess module,
        # then verify this test left none behind.
        for inst in subprocess._active:
            inst.wait()
        subprocess._cleanup()
        self.assertFalse(subprocess._active, "subprocess._active not empty")
    def assertStderrEqual(self, stderr, expected, msg=None):
        """Compare child stderr to *expected*, ignoring interpreter noise.

        In a debug build, stuff like "[6580 refs]" is printed to stderr at
        shutdown time. That frustrates tests trying to check stderr produced
        from a spawned Python process.
        """
        actual = support.strip_python_stderr(stderr)
        # strip_python_stderr also strips whitespace, so we do too.
        expected = expected.strip()
        self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
    """Marker exception deliberately raised from a Popen subclass's
    _execute_child to exercise pipe-cleanup error paths."""
    pass
class PopenExecuteChildRaises(subprocess.Popen):
    """Popen subclass for testing cleanup of subprocess.PIPE filehandles when
    _execute_child fails.
    """
    def _execute_child(self, *args, **kwargs):
        # Fail unconditionally so tests can verify that Popen closes the
        # pipe file handles it created before the failure.
        raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(0)"])
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(FileNotFoundError, self._assert_python, pre_args,
executable="doesnotexist")
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
original_cwd = os.getcwd()
os.chdir(cwd)
cwd = os.getcwd()
os.chdir(original_cwd)
return cwd
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"sys.stdout.write(os.getcwd()); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode("utf-8")))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with script_helper.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
self.addCleanup(p.stderr.close)
self.assertStderrEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"strawberry")
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.addCleanup(p.stdout.close)
self.assertStderrEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
    def test_env(self):
        """A caller-supplied env mapping must be visible to the child."""
        newenv = os.environ.copy()
        newenv["FRUIT"] = "orange"
        with subprocess.Popen([sys.executable, "-c",
                               'import sys,os;'
                               'sys.stdout.write(os.getenv("FRUIT"))'],
                              stdout=subprocess.PIPE,
                              env=newenv) as p:
            stdout, stderr = p.communicate()
            self.assertEqual(stdout, b"orange")

    # Windows requires at least the SYSTEMROOT environment variable to start
    # Python
    @unittest.skipIf(sys.platform == 'win32',
                     'cannot test an empty env on Windows')
    @unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') is not None,
                     'the python library cannot be loaded '
                     'with an empty environment')
    def test_empty_env(self):
        """env={} should start the child with a (nearly) empty environment."""
        with subprocess.Popen([sys.executable, "-c",
                               'import os; '
                               'print(list(os.environ.keys()))'],
                              stdout=subprocess.PIPE,
                              env={}) as p:
            stdout, stderr = p.communicate()
            self.assertIn(stdout.strip(),
                          (b"[]",
                           # Mac OS X adds __CF_USER_TEXT_ENCODING variable to an empty
                           # environment
                           b"['__CF_USER_TEXT_ENCODING']"))
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, b"pineapple")
    def test_communicate(self):
        """communicate() feeds stdin and collects stdout and stderr."""
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;'
                              'sys.stderr.write("pineapple");'
                              'sys.stdout.write(sys.stdin.read())'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        (stdout, stderr) = p.communicate(b"banana")
        self.assertEqual(stdout, b"banana")
        self.assertStderrEqual(stderr, b"pineapple")

    def test_communicate_timeout(self):
        # communicate(timeout=...) raises TimeoutExpired but must not lose
        # data: a follow-up communicate() still returns the whole output.
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os,time;'
                              'sys.stderr.write("pineapple\\n");'
                              'time.sleep(1);'
                              'sys.stderr.write("pear\\n");'
                              'sys.stdout.write(sys.stdin.read())'],
                             universal_newlines=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
                          timeout=0.3)
        # Make sure we can keep waiting for it, and that we get the whole output
        # after it completes.
        (stdout, stderr) = p.communicate()
        self.assertEqual(stdout, "banana")
        self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n")

    def test_communicate_timeout_large_ouput(self):
        # Test an expiring timeout while the child is outputting lots of data.
        # (The "ouput" typo in the name is kept: renaming a test changes its id.)
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os,time;'
                              'sys.stdout.write("a" * (64 * 1024));'
                              'time.sleep(0.2);'
                              'sys.stdout.write("a" * (64 * 1024));'
                              'time.sleep(0.2);'
                              'sys.stdout.write("a" * (64 * 1024));'
                              'time.sleep(0.2);'
                              'sys.stdout.write("a" * (64 * 1024));'],
                             stdout=subprocess.PIPE)
        self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
        # All four 64 KiB chunks must still be retrievable after the timeout.
        (stdout, _) = p.communicate()
        self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen((sys.executable, "-c", "pass"), **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
    def test_communicate_pipe_buf(self):
        # communicate() with writes larger than pipe_buf
        # This test will probably deadlock rather than fail, if
        # communicate() does not work properly.
        # NOTE(review): the pipe created and immediately closed below appears
        # to only recycle two low fd numbers before the Popen -- confirm.
        x, y = os.pipe()
        os.close(x)
        os.close(y)
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;'
                              'sys.stdout.write(sys.stdin.read(47));'
                              'sys.stderr.write("x" * %d);'
                              'sys.stdout.write(sys.stdin.read())' %
                              support.PIPE_MAX_SIZE],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        string_to_write = b"a" * support.PIPE_MAX_SIZE
        (stdout, stderr) = p.communicate(string_to_write)
        self.assertEqual(stdout, string_to_write)

    def test_writes_before_communicate(self):
        # stdin.write before communicate()
        # Data written directly must be prepended to what communicate() sends.
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;'
                              'sys.stdout.write(sys.stdin.read())'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        p.stdin.write(b"banana")
        (stdout, stderr) = p.communicate(b"split")
        self.assertEqual(stdout, b"bananasplit")
        self.assertStderrEqual(stderr, b"")
    def test_universal_newlines(self):
        # universal_newlines=1 puts the parent-side pipes in text mode:
        # the child emits \n, \r\n and bare \r endings, all of which must
        # read back as plain \n.
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;' + SETBINARY +
                              'buf = sys.stdout.buffer;'
                              'buf.write(sys.stdin.readline().encode());'
                              'buf.flush();'
                              'buf.write(b"line2\\n");'
                              'buf.flush();'
                              'buf.write(sys.stdin.read().encode());'
                              'buf.flush();'
                              'buf.write(b"line4\\n");'
                              'buf.flush();'
                              'buf.write(b"line5\\r\\n");'
                              'buf.flush();'
                              'buf.write(b"line6\\r");'
                              'buf.flush();'
                              'buf.write(b"\\nline7");'
                              'buf.flush();'
                              'buf.write(b"\\nline8");'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             universal_newlines=1)
        p.stdin.write("line1\n")
        p.stdin.flush()
        self.assertEqual(p.stdout.readline(), "line1\n")
        p.stdin.write("line3\n")
        p.stdin.close()
        self.addCleanup(p.stdout.close)
        self.assertEqual(p.stdout.readline(),
                         "line2\n")
        self.assertEqual(p.stdout.read(6),
                         "line3\n")
        self.assertEqual(p.stdout.read(),
                         "line4\nline5\nline6\nline7\nline8")

    def test_universal_newlines_communicate(self):
        # universal newlines through communicate()
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;' + SETBINARY +
                              'buf = sys.stdout.buffer;'
                              'buf.write(b"line2\\n");'
                              'buf.flush();'
                              'buf.write(b"line4\\n");'
                              'buf.flush();'
                              'buf.write(b"line5\\r\\n");'
                              'buf.flush();'
                              'buf.write(b"line6\\r");'
                              'buf.flush();'
                              'buf.write(b"\\nline7");'
                              'buf.flush();'
                              'buf.write(b"\\nline8");'],
                             stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             universal_newlines=1)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        (stdout, stderr) = p.communicate()
        self.assertEqual(stdout,
                         "line2\nline4\nline5\nline6\nline7\nline8")

    def test_universal_newlines_communicate_stdin(self):
        # universal newlines through communicate(), with only stdin
        # (the child asserts on its own input; returncode 0 means it saw
        # the expected translated lines)
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;' + SETBINARY + textwrap.dedent('''
                               s = sys.stdin.readline()
                               assert s == "line1\\n", repr(s)
                               s = sys.stdin.read()
                               assert s == "line3\\n", repr(s)
                               ''')],
                             stdin=subprocess.PIPE,
                             universal_newlines=1)
        (stdout, stderr) = p.communicate("line1\nline3\n")
        self.assertEqual(p.returncode, 0)

    def test_universal_newlines_communicate_input_none(self):
        # Test communicate(input=None) with universal newlines.
        #
        # We set stdout to PIPE because, as of this writing, a different
        # code path is tested when the number of pipes is zero or one.
        p = subprocess.Popen([sys.executable, "-c", "pass"],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             universal_newlines=True)
        p.communicate()
        self.assertEqual(p.returncode, 0)
    def test_universal_newlines_communicate_stdin_stdout_stderr(self):
        # universal newlines through communicate(), with stdin, stdout, stderr
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;' + SETBINARY + textwrap.dedent('''
                               s = sys.stdin.buffer.readline()
                               sys.stdout.buffer.write(s)
                               sys.stdout.buffer.write(b"line2\\r")
                               sys.stderr.buffer.write(b"eline2\\n")
                               s = sys.stdin.buffer.read()
                               sys.stdout.buffer.write(s)
                               sys.stdout.buffer.write(b"line4\\n")
                               sys.stdout.buffer.write(b"line5\\r\\n")
                               sys.stderr.buffer.write(b"eline6\\r")
                               sys.stderr.buffer.write(b"eline7\\r\\nz")
                               ''')],
                             stdin=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             universal_newlines=True)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        (stdout, stderr) = p.communicate("line1\nline3\n")
        self.assertEqual(p.returncode, 0)
        self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
        # Python debug build push something like "[42442 refs]\n"
        # to stderr at exit of subprocess.
        # Don't use assertStderrEqual because it strips CR and LF from output.
        self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))

    def test_universal_newlines_communicate_encodings(self):
        # Check that universal newlines mode works for various encodings,
        # in particular for encodings in the UTF-16 and UTF-32 families.
        # See issue #15595.
        #
        # UTF-16 and UTF-32-BE are sufficient to check both with BOM and
        # without, and UTF-16 and UTF-32.
        # The preferred-encoding hook is monkeypatched per iteration and
        # always restored in the finally block.
        import _bootlocale
        for encoding in ['utf-16', 'utf-32-be']:
            old_getpreferredencoding = _bootlocale.getpreferredencoding
            # Indirectly via io.TextIOWrapper, Popen() defaults to
            # locale.getpreferredencoding(False) and earlier in Python 3.2 to
            # locale.getpreferredencoding().
            def getpreferredencoding(do_setlocale=True):
                return encoding
            code = ("import sys; "
                    r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
                    encoding)
            args = [sys.executable, '-c', code]
            try:
                _bootlocale.getpreferredencoding = getpreferredencoding
                # We set stdin to be non-None because, as of this writing,
                # a different code path is used when the number of pipes is
                # zero or one.
                popen = subprocess.Popen(args, universal_newlines=True,
                                         stdin=subprocess.PIPE,
                                         stdout=subprocess.PIPE)
                stdout, stderr = popen.communicate(input='')
            finally:
                _bootlocale.getpreferredencoding = old_getpreferredencoding
            self.assertEqual(stdout, '1\n2\n3\n4')
    def test_no_leaking(self):
        # Make sure we leak no resources
        # Strategy: open fds until the EMFILE limit, free a handful, then
        # repeatedly spawn subprocesses; any fd leak makes a later Popen fail.
        if not mswindows:
            max_handles = 1026 # too much for most UNIX systems
        else:
            max_handles = 2050 # too much for (at least some) Windows setups
        handles = []
        tmpdir = tempfile.mkdtemp()
        try:
            for i in range(max_handles):
                try:
                    tmpfile = os.path.join(tmpdir, support.TESTFN)
                    handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
                except OSError as e:
                    if e.errno != errno.EMFILE:
                        raise
                    break
            else:
                self.skipTest("failed to reach the file descriptor limit "
                              "(tried %d)" % max_handles)
            # Close a couple of them (should be enough for a subprocess)
            for i in range(10):
                os.close(handles.pop())
            # Loop creating some subprocesses. If one of them leaks some fds,
            # the next loop iteration will fail by reaching the max fd limit.
            for i in range(15):
                p = subprocess.Popen([sys.executable, "-c",
                                      "import sys;"
                                      "sys.stdout.write(sys.stdin.read())"],
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
                data = p.communicate(b"lime")[0]
                self.assertEqual(data, b"lime")
        finally:
            for h in handles:
                os.close(h)
            shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
    def test_poll(self):
        """poll() returns None while running, then the exit status forever."""
        p = subprocess.Popen([sys.executable, "-c",
                              "import os; os.read(0, 1)"],
                             stdin=subprocess.PIPE)
        self.addCleanup(p.stdin.close)
        self.assertIsNone(p.poll())
        os.write(p.stdin.fileno(), b'A')  # unblock the child's os.read()
        p.wait()
        # Subsequent invocations should just return the returncode
        self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen([sys.executable, "-c", "pass"])
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
# Some heavily loaded buildbots (sparc Debian 3.x) require this much
# time to start.
self.assertEqual(p.wait(timeout=3), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen([sys.executable, "-c", "pass"], "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen([sys.executable, "-c", "pass"], None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None)
self.assertEqual(p.wait(), 0)
    def test_leaking_fds_on_error(self):
        # see bug #5179: Popen leaks file descriptors to PIPEs if
        # the child fails to execute; this will eventually exhaust
        # the maximum number of open fds. 1024 seems a very common
        # value for that limit, but Windows has 2048, so we loop
        # 1024 times (each call leaked two fds).
        for i in range(1024):
            with self.assertRaises(OSError) as c:
                subprocess.Popen(['nonexisting_i_hope'],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            # ignore errors that indicate the command was not found
            if c.exception.errno not in (errno.ENOENT, errno.EACCES):
                raise c.exception

    @unittest.skipIf(threading is None, "threading required")
    def test_double_close_on_error(self):
        # Issue #18851
        # While another thread steadily creates pipe fds, a failing Popen()
        # must not close any fd it does not own; afterwards every fd the
        # thread created must still be open exactly once.
        fds = []
        def open_fds():
            for i in range(20):
                fds.extend(os.pipe())
                time.sleep(0.001)
        t = threading.Thread(target=open_fds)
        t.start()
        try:
            with self.assertRaises(EnvironmentError):
                subprocess.Popen(['nonexisting_i_hope'],
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        finally:
            t.join()
            exc = None
            for fd in fds:
                # If a double close occurred, some of those fds will
                # already have been closed by mistake, and os.close()
                # here will raise.
                try:
                    os.close(fd)
                except OSError as e:
                    exc = e
            if exc is not None:
                raise exc
    @unittest.skipIf(threading is None, "threading required")
    def test_threadsafe_wait(self):
        """Issue21291: Popen.wait() needs to be threadsafe for returncode."""
        proc = subprocess.Popen([sys.executable, '-c',
                                 'import time; time.sleep(12)'])
        self.assertEqual(proc.returncode, None)
        results = []

        def kill_proc_timer_thread():
            results.append(('thread-start-poll-result', proc.poll()))
            # terminate it from the thread and wait for the result.
            proc.kill()
            proc.wait()
            results.append(('thread-after-kill-and-wait', proc.returncode))
            # this wait should be a no-op given the above.
            proc.wait()
            results.append(('thread-after-second-wait', proc.returncode))

        # This is a timing sensitive test, the failure mode is
        # triggered when both the main thread and this thread are in
        # the wait() call at once.  The delay here is to allow the
        # main thread to most likely be blocked in its wait() call.
        t = threading.Timer(0.2, kill_proc_timer_thread)
        t.start()

        if mswindows:
            expected_errorcode = 1
        else:
            # Should be -9 because of the proc.kill() from the thread.
            expected_errorcode = -9

        # Wait for the process to finish; the thread should kill it
        # long before it finishes on its own.  Supplying a timeout
        # triggers a different code path for better coverage.
        proc.wait(timeout=20)
        self.assertEqual(proc.returncode, expected_errorcode,
                         msg="unexpected result in wait from main thread")

        # This should be a no-op with no change in returncode.
        proc.wait()
        self.assertEqual(proc.returncode, expected_errorcode,
                         msg="unexpected result in second main wait.")

        t.join()
        # Ensure that all of the thread results are as expected.
        # When a race condition occurs in wait(), the returncode could
        # be set by the wrong thread that doesn't actually have it
        # leading to an incorrect value.
        self.assertEqual([('thread-start-poll-result', None),
                          ('thread-after-kill-and-wait', expected_errorcode),
                          ('thread-after-second-wait', expected_errorcode)],
                         results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
    def test_handles_closed_on_exception(self):
        # If CreateProcess exits with an error, ensure the
        # duplicate output handles are released
        # ("*" is not an executable program, so Popen raises OSError;
        # afterwards all three temp files must be closable and removable)
        ifhandle, ifname = mkstemp()
        ofhandle, ofname = mkstemp()
        efhandle, efname = mkstemp()
        try:
            subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
                              stderr=efhandle)
        except OSError:
            os.close(ifhandle)
            os.remove(ifname)
            os.close(ofhandle)
            os.remove(ofname)
            os.close(efhandle)
            os.remove(efname)
        self.assertFalse(os.path.exists(ifname))
        self.assertFalse(os.path.exists(ofname))
        self.assertFalse(os.path.exists(efname))

    def test_communicate_epipe(self):
        # Issue 10963: communicate() should hide EPIPE
        # (the child exits immediately, so writing 1 MiB to its stdin
        # pipe triggers EPIPE, which communicate() must swallow)
        p = subprocess.Popen([sys.executable, "-c", 'pass'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        p.communicate(b"x" * 2**20)

    def test_communicate_epipe_only_stdin(self):
        # Issue 10963: communicate() should hide EPIPE
        p = subprocess.Popen([sys.executable, "-c", 'pass'],
                             stdin=subprocess.PIPE)
        self.addCleanup(p.stdin.close)
        p.wait()
        p.communicate(b"x" * 2**20)
    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
                         "Requires signal.SIGUSR1")
    @unittest.skipUnless(hasattr(os, 'kill'),
                         "Requires os.kill")
    @unittest.skipUnless(hasattr(os, 'getppid'),
                         "Requires os.getppid")
    def test_communicate_eintr(self):
        # Issue #12493: communicate() should handle EINTR
        # (the child signals this process, interrupting the blocking read)
        def handler(signum, frame):
            pass
        old_handler = signal.signal(signal.SIGUSR1, handler)
        self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
        args = [sys.executable, "-c",
                'import os, signal;'
                'os.kill(os.getppid(), signal.SIGUSR1)']
        for stream in ('stdout', 'stderr'):
            kw = {stream: subprocess.PIPE}
            with subprocess.Popen(args, **kw) as process:
                # communicate() will be interrupted by SIGUSR1
                process.communicate()

    # This test is Linux-ish specific for simplicity to at least have
    # some coverage. It is not a platform specific bug.
    @unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
                         "Linux specific")
    def test_failed_child_execute_fd_leak(self):
        """Test for the fork() failure fd leak reported in issue16327."""
        # Compare /proc/<pid>/fd before and after a Popen whose
        # _execute_child raises; the fd set must be unchanged.
        fd_directory = '/proc/%d/fd' % os.getpid()
        fds_before_popen = os.listdir(fd_directory)
        with self.assertRaises(PopenTestException):
            PopenExecuteChildRaises(
                [sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # NOTE: This test doesn't verify that the real _execute_child
        # does not close the file descriptors itself on the way out
        # during an exception. Code inspection has confirmed that.
        fds_after_exception = os.listdir(fd_directory)
        self.assertEqual(fds_before_popen, fds_after_exception)
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
    def setUp(self):
        super().setUp()
        # A path guaranteed not to exist, used by the chdir/exec error tests.
        self._nonexistent_dir = "/_this/pa.th/does/not/exist"
    def _get_chdir_exception(self):
        """Return the real OSError produced by chdir() into the missing dir."""
        try:
            os.chdir(self._nonexistent_dir)
        except OSError as e:
            # This avoids hard coding the errno value or the OS perror()
            # string and instead capture the exception that we want to see
            # below for comparison.
            desired_exception = e
            desired_exception.strerror += ': ' + repr(self._nonexistent_dir)
        else:
            self.fail("chdir to nonexistant directory %s succeeded." %
                      self._nonexistent_dir)
        return desired_exception
    def test_exception_cwd(self):
        """Test error in the child raised in the parent for a bad cwd."""
        desired_exception = self._get_chdir_exception()
        try:
            p = subprocess.Popen([sys.executable, "-c", ""],
                                 cwd=self._nonexistent_dir)
        except OSError as e:
            # Test that the child process chdir failure actually makes
            # it up to the parent process as the correct exception.
            self.assertEqual(desired_exception.errno, e.errno)
            self.assertEqual(desired_exception.strerror, e.strerror)
        else:
            self.fail("Expected OSError: %s" % desired_exception)

    def test_exception_bad_executable(self):
        """Test error in the child raised in the parent for a bad executable."""
        desired_exception = self._get_chdir_exception()
        try:
            p = subprocess.Popen([sys.executable, "-c", ""],
                                 executable=self._nonexistent_dir)
        except OSError as e:
            # Test that the child process exec failure actually makes
            # it up to the parent process as the correct exception.
            self.assertEqual(desired_exception.errno, e.errno)
            self.assertEqual(desired_exception.strerror, e.strerror)
        else:
            self.fail("Expected OSError: %s" % desired_exception)

    def test_exception_bad_args_0(self):
        """Test error in the child raised in the parent for a bad args[0]."""
        desired_exception = self._get_chdir_exception()
        try:
            p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
        except OSError as e:
            # Test that the child process exec failure actually makes
            # it up to the parent process as the correct exception.
            self.assertEqual(desired_exception.errno, e.errno)
            self.assertEqual(desired_exception.strerror, e.strerror)
        else:
            self.fail("Expected OSError: %s" % desired_exception)
    def test_restore_signals(self):
        # Code coverage for both values of restore_signals to make sure it
        # at least does not blow up.
        # A test for behavior would be complex. Contributions welcome.
        subprocess.call([sys.executable, "-c", ""], restore_signals=True)
        subprocess.call([sys.executable, "-c", ""], restore_signals=False)

    def test_start_new_session(self):
        # For code coverage of calling setsid(). We don't care if we get an
        # EPERM error from it depending on the test execution environment, that
        # still indicates that it was called.
        try:
            output = subprocess.check_output(
                    [sys.executable, "-c",
                     "import os; print(os.getpgid(os.getpid()))"],
                    start_new_session=True)
        except OSError as e:
            if e.errno != errno.EPERM:
                raise
        else:
            # A new session implies a new process group distinct from ours.
            parent_pgid = os.getpgid(os.getpid())
            child_pgid = int(output)
            self.assertNotEqual(parent_pgid, child_pgid)

    def test_run_abort(self):
        # returncode handles signal termination
        # (negative returncode is the signal number on POSIX)
        with support.SuppressCrashReport():
            p = subprocess.Popen([sys.executable, "-c",
                                  'import os; os.abort()'])
            p.wait()
        self.assertEqual(-p.returncode, signal.SIGABRT)
    def test_preexec(self):
        # DISCLAIMER: Setting environment variables is *not* a good use
        # of a preexec_fn. This is merely a test.
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;'
                              'sys.stdout.write(os.getenv("FRUIT"))'],
                             stdout=subprocess.PIPE,
                             preexec_fn=lambda: os.putenv("FRUIT", "apple"))
        self.addCleanup(p.stdout.close)
        self.assertEqual(p.stdout.read(), b"apple")

    def test_preexec_exception(self):
        # An exception raised inside preexec_fn must surface in the parent.
        def raise_it():
            raise ValueError("What if two swallows carried a coconut?")
        try:
            p = subprocess.Popen([sys.executable, "-c", ""],
                                 preexec_fn=raise_it)
        except subprocess.SubprocessError as e:
            # The C _posixsubprocess path wraps the error in SubprocessError.
            self.assertTrue(
                    subprocess._posixsubprocess,
                    "Expected a ValueError from the preexec_fn")
        except ValueError as e:
            # The pure Python fallback re-raises the original ValueError.
            self.assertIn("coconut", e.args[0])
        else:
            self.fail("Exception raised by preexec_fn did not make it "
                      "to the parent process.")
    class _TestExecuteChildPopen(subprocess.Popen):
        """Used to test behavior at the end of _execute_child."""
        def __init__(self, testcase, *args, **kwargs):
            # Keep the TestCase around so _execute_child can assert on it.
            self._testcase = testcase
            subprocess.Popen.__init__(self, *args, **kwargs)

        def _execute_child(self, *args, **kwargs):
            try:
                subprocess.Popen._execute_child(self, *args, **kwargs)
            finally:
                # Open a bunch of file descriptors and verify that
                # none of them are the same as the ones the Popen
                # instance is using for stdin/stdout/stderr.
                devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
                               for _ in range(8)]
                try:
                    for fd in devzero_fds:
                        self._testcase.assertNotIn(
                                fd, (self.stdin.fileno(), self.stdout.fileno(),
                                     self.stderr.fileno()),
                                msg="At least one fd was closed early.")
                finally:
                    for fd in devzero_fds:
                        os.close(fd)

    @unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
    def test_preexec_errpipe_does_not_double_close_pipes(self):
        """Issue16140: Don't double close pipes on preexec error."""
        def raise_it():
            raise subprocess.SubprocessError(
                    "force the _execute_child() errpipe_data path.")
        with self.assertRaises(subprocess.SubprocessError):
            self._TestExecuteChildPopen(
                        self, [sys.executable, "-c", "pass"],
                        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE, preexec_fn=raise_it)
    def test_preexec_gc_module_failure(self):
        # This tests the code that disables garbage collection if the child
        # process will execute any Python.
        # gc.disable/gc.isenabled are monkeypatched below; the finally block
        # restores them and the original enabled state.
        def raise_runtime_error():
            raise RuntimeError("this shouldn't escape")
        enabled = gc.isenabled()
        orig_gc_disable = gc.disable
        orig_gc_isenabled = gc.isenabled
        try:
            gc.disable()
            self.assertFalse(gc.isenabled())
            subprocess.call([sys.executable, '-c', ''],
                            preexec_fn=lambda: None)
            self.assertFalse(gc.isenabled(),
                             "Popen enabled gc when it shouldn't.")
            gc.enable()
            self.assertTrue(gc.isenabled())
            subprocess.call([sys.executable, '-c', ''],
                            preexec_fn=lambda: None)
            self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
            gc.disable = raise_runtime_error
            self.assertRaises(RuntimeError, subprocess.Popen,
                              [sys.executable, '-c', ''],
                              preexec_fn=lambda: None)
            del gc.isenabled  # force an AttributeError
            self.assertRaises(AttributeError, subprocess.Popen,
                              [sys.executable, '-c', ''],
                              preexec_fn=lambda: None)
        finally:
            gc.disable = orig_gc_disable
            gc.isenabled = orig_gc_isenabled
            if not enabled:
                gc.disable()
    def test_args_string(self):
        # args is a string
        # (on POSIX a string arg names one program to exec; here that is a
        # generated shell script that re-execs this interpreter)
        fd, fname = mkstemp()
        # reopen in text mode
        with open(fd, "w", errors="surrogateescape") as fobj:
            fobj.write("#!/bin/sh\n")
            fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
                       sys.executable)
        os.chmod(fname, 0o700)
        p = subprocess.Popen(fname)
        p.wait()
        os.remove(fname)
        self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
    def test_shell_sequence(self):
        # Run command through the shell (sequence)
        # ($FRUIT expands only if the command really goes through a shell)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "apple"
        p = subprocess.Popen(["echo $FRUIT"], shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        self.addCleanup(p.stdout.close)
        self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")

    def test_shell_string(self):
        # Run command through the shell (string)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "apple"
        p = subprocess.Popen("echo $FRUIT", shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        self.addCleanup(p.stdout.close)
        self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")

    def test_call_string(self):
        # call() function with string argument on UNIX
        fd, fname = mkstemp()
        # reopen in text mode
        with open(fd, "w", errors="surrogateescape") as fobj:
            fobj.write("#!/bin/sh\n")
            fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
                       sys.executable)
        os.chmod(fname, 0o700)
        rc = subprocess.call(fname)
        os.remove(fname)
        self.assertEqual(rc, 47)
    def test_specific_shell(self):
        # Issue #9265: Incorrect name passed as arg[0].
        # With executable=sh, $0 inside the shell must be the shell's path.
        shells = []
        for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
            for name in ['bash', 'ksh']:
                sh = os.path.join(prefix, name)
                if os.path.isfile(sh):
                    shells.append(sh)
        if not shells: # Will probably work for any shell but csh.
            self.skipTest("bash or ksh required for this test")
        sh = '/bin/sh'
        if os.path.isfile(sh) and not os.path.islink(sh):
            # Test will fail if /bin/sh is a symlink to csh.
            shells.append(sh)
        for sh in shells:
            p = subprocess.Popen("echo $0", executable=sh, shell=True,
                                 stdout=subprocess.PIPE)
            self.addCleanup(p.stdout.close)
            self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
    def _kill_process(self, method, *args):
        """Start a long-sleeping child, invoke p.<method>(*args), return p."""
        # Do not inherit file handles from the parent.
        # It should fix failures on some platforms.
        # Also set the SIGINT handler to the default to make sure it's not
        # being ignored (some tests rely on that.)
        old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
        try:
            p = subprocess.Popen([sys.executable, "-c", """if 1:
                                 import sys, time
                                 sys.stdout.write('x\\n')
                                 sys.stdout.flush()
                                 time.sleep(30)
                                 """],
                                 close_fds=True,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        finally:
            signal.signal(signal.SIGINT, old_handler)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        getattr(p, method)(*args)
        return p

    @unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
                     "Due to known OS bug (issue #16762)")
    def _kill_dead_process(self, method, *args):
        # NOTE(review): a skip decorator on this non-test helper seems to rely
        # on the wrapped callable raising SkipTest when invoked from a test --
        # confirm that is the intended mechanism.
        # Do not inherit file handles from the parent.
        # It should fix failures on some platforms.
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             """],
                             close_fds=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        # The process should end after this
        time.sleep(1)
        # This shouldn't raise even though the child is now dead
        getattr(p, method)(*args)
        p.communicate()
    def test_send_signal(self):
        # SIGINT with the default handler makes the child die on
        # KeyboardInterrupt with a nonzero status.
        p = self._kill_process('send_signal', signal.SIGINT)
        _, stderr = p.communicate()
        self.assertIn(b'KeyboardInterrupt', stderr)
        self.assertNotEqual(p.wait(), 0)

    def test_kill(self):
        p = self._kill_process('kill')
        _, stderr = p.communicate()
        self.assertStderrEqual(stderr, b'')
        self.assertEqual(p.wait(), -signal.SIGKILL)

    def test_terminate(self):
        p = self._kill_process('terminate')
        _, stderr = p.communicate()
        self.assertStderrEqual(stderr, b'')
        self.assertEqual(p.wait(), -signal.SIGTERM)

    def test_send_signal_dead(self):
        # Sending a signal to a dead process
        self._kill_dead_process('send_signal', signal.SIGINT)

    def test_kill_dead(self):
        # Killing a dead process
        self._kill_dead_process('kill')

    def test_terminate_dead(self):
        # Terminating a dead process
        self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
    def _restore_fds(self, fds):
        """Restore fds saved by _save_fds() and close the duplicates."""
        for fd, saved, inheritable in fds:
            os.dup2(saved, fd, inheritable=inheritable)
            os.close(saved)
    def check_close_std_fds(self, fds):
        # Issue #9905: test that subprocess pipes still work properly with
        # some standard fds closed
        stdin = 0
        # Save the listed fds first; if fd 0 is among them, point the child's
        # stdin at the saved duplicate instead of the soon-closed original.
        saved_fds = self._save_fds(fds)
        for fd, saved, inheritable in saved_fds:
            if fd == 0:
                stdin = saved
                break
        try:
            for fd in fds:
                os.close(fd)
            out, err = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.stdout.write("apple");'
                              'sys.stdout.flush();'
                              'sys.stderr.write("orange")'],
                       stdin=stdin,
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE).communicate()
            err = support.strip_python_stderr(err)
            self.assertEqual((out, err), (b'apple', b'orange'))
        finally:
            self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
    # Pipes must work with stdout and stderr closed.
    self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
    # Issue #10806: test that subprocess pipes still work properly with
    # all standard fds closed.
    self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
    """Issue #15798: Popen should work when stdio fds are available."""
    # Duplicate fds 0/1 first so they can be restored once the test is done.
    new_stdin = os.dup(0)
    new_stdout = os.dup(1)
    try:
        os.close(0)
        os.close(1)
        # Side test: if errpipe_write fails to have its CLOEXEC
        # flag set this should cause the parent to think the exec
        # failed.  Extremely unlikely: everyone supports CLOEXEC.
        subprocess.Popen([
                sys.executable, "-c",
                "print('AssertionError:0:CLOEXEC failure.')"]).wait()
    finally:
        # Restore original stdin and stdout
        os.dup2(new_stdin, 0)
        os.dup2(new_stdout, 1)
        os.close(new_stdin)
        os.close(new_stdout)
def test_remapping_std_fds(self):
    """The child must correctly rearrange std fds handed over in the
    "wrong" order (stdin from fd 1, stdout to fd 2, stderr to fd 0)."""
    # open up some temporary files
    temps = [mkstemp() for i in range(3)]
    try:
        temp_fds = [fd for fd, fname in temps]

        # unlink the files -- we won't need to reopen them
        for fd, fname in temps:
            os.unlink(fname)

        # write some data to what will become stdin, and rewind
        os.write(temp_fds[1], b"STDIN")
        os.lseek(temp_fds[1], 0, 0)

        # move the standard file descriptors out of the way
        saved_fds = self._save_fds(range(3))
        try:
            # duplicate the file objects over the standard fd's
            for fd, temp_fd in enumerate(temp_fds):
                os.dup2(temp_fd, fd)

            # now use those files in the "wrong" order, so that subprocess
            # has to rearrange them in the child
            p = subprocess.Popen([sys.executable, "-c",
                'import sys; got = sys.stdin.read();'
                'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                stdin=temp_fds[1],
                stdout=temp_fds[2],
                stderr=temp_fds[0])
            p.wait()
        finally:
            self._restore_fds(saved_fds)

        # Rewind and read back what the child wrote through each fd.
        for fd in temp_fds:
            os.lseek(fd, 0, 0)

        out = os.read(temp_fds[2], 1024)
        err = support.strip_python_stderr(os.read(temp_fds[0], 1024))
        self.assertEqual(out, b"got STDIN")
        self.assertEqual(err, b"err")
    finally:
        for fd in temp_fds:
            os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
    """Run a child with std streams mapped to the given permutation of
    fds 0/1/2 and verify stdin data and stdout/stderr output survive."""
    # open up some temporary files
    temps = [mkstemp() for i in range(3)]
    temp_fds = [fd for fd, fname in temps]
    try:
        # unlink the files -- we won't need to reopen them
        for fd, fname in temps:
            os.unlink(fname)

        # save a copy of the standard file descriptors
        saved_fds = self._save_fds(range(3))
        try:
            # duplicate the temp files over the standard fd's 0, 1, 2
            for fd, temp_fd in enumerate(temp_fds):
                os.dup2(temp_fd, fd)

            # write some data to what will become stdin, and rewind
            os.write(stdin_no, b"STDIN")
            os.lseek(stdin_no, 0, 0)

            # now use those files in the given order, so that subprocess
            # has to rearrange them in the child
            p = subprocess.Popen([sys.executable, "-c",
                'import sys; got = sys.stdin.read();'
                'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                stdin=stdin_no,
                stdout=stdout_no,
                stderr=stderr_no)
            p.wait()

            # Read results back while the temp fds are still mapped.
            for fd in temp_fds:
                os.lseek(fd, 0, 0)

            out = os.read(stdout_no, 1024)
            err = support.strip_python_stderr(os.read(stderr_no, 1024))
        finally:
            self._restore_fds(saved_fds)

        self.assertEqual(out, b"got STDIN")
        self.assertEqual(err, b"err")
    finally:
        for fd in temp_fds:
            os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
    """Exercise check_swap_fds() with every permutation of fds 0, 1, 2."""
    for stdin_no, stdout_no, stderr_no in (
            (0, 1, 2), (0, 2, 1), (1, 0, 2),
            (1, 2, 0), (2, 0, 1), (2, 1, 0)):
        self.check_swap_fds(stdin_no, stdout_no, stderr_no)
def test_surrogates_error_message(self):
    """A preexec_fn raising ValueError with lone surrogates is reported:
    the pure Python path keeps the original message, while the C
    _posixsubprocess path substitutes a generic one."""
    def prepare():
        raise ValueError("surrogate:\uDCff")

    try:
        subprocess.call(
            [sys.executable, "-c", "pass"],
            preexec_fn=prepare)
    except ValueError as err:
        # Pure Python implementations keeps the message
        self.assertIsNone(subprocess._posixsubprocess)
        self.assertEqual(str(err), "surrogate:\uDCff")
    except subprocess.SubprocessError as err:
        # _posixsubprocess uses a default message
        self.assertIsNotNone(subprocess._posixsubprocess)
        self.assertEqual(str(err), "Exception occurred in preexec_fn.")
    else:
        self.fail("Expected ValueError or subprocess.SubprocessError")
def test_undecodable_env(self):
    """Environment variables containing surrogate-escaped bytes must
    round-trip to the child, both as str (os.getenv) and as raw bytes
    via os.environb / os.getenvb."""
    for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
        encoded_value = value.encode("ascii", "surrogateescape")

        # test str with surrogates
        script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
        env = os.environ.copy()
        env[key] = value
        # Use C locale to get ASCII for the locale encoding to force
        # surrogate-escaping of \xFF in the child process; otherwise it can
        # be decoded as-is if the default locale is latin-1.
        env['LC_ALL'] = 'C'
        if sys.platform.startswith("aix"):
            # On AIX, the C locale uses the Latin1 encoding
            decoded_value = encoded_value.decode("latin1", "surrogateescape")
        else:
            # On other UNIXes, the C locale uses the ASCII encoding
            decoded_value = value
        stdout = subprocess.check_output(
            [sys.executable, "-c", script],
            env=env)
        stdout = stdout.rstrip(b'\n\r')
        self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))

        # test bytes
        key = key.encode("ascii", "surrogateescape")
        script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
        env = os.environ.copy()
        env[key] = encoded_value
        stdout = subprocess.check_output(
            [sys.executable, "-c", script],
            env=env)
        stdout = stdout.rstrip(b'\n\r')
        self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
    """A bytes program name must work: as an absolute path, through the
    shell, and via PATH lookup with both str and bytes environments."""
    abs_program = os.fsencode(sys.executable)
    path, program = os.path.split(sys.executable)
    program = os.fsencode(program)

    # absolute bytes path
    exitcode = subprocess.call([abs_program, "-c", "pass"])
    self.assertEqual(exitcode, 0)

    # absolute bytes path as a string
    cmd = b"'" + abs_program + b"' -c pass"
    exitcode = subprocess.call(cmd, shell=True)
    self.assertEqual(exitcode, 0)

    # bytes program, unicode PATH
    env = os.environ.copy()
    env["PATH"] = path
    exitcode = subprocess.call([program, "-c", "pass"], env=env)
    self.assertEqual(exitcode, 0)

    # bytes program, bytes PATH
    envb = os.environb.copy()
    envb[b"PATH"] = os.fsencode(path)
    exitcode = subprocess.call([program, "-c", "pass"], env=envb)
    self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
    """Even with close_fds=False, one child's Popen pipe fds must not leak
    into a sibling child (pipe fds are expected to be non-inheritable)."""
    sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    p1 = subprocess.Popen([sys.executable, sleeper],
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE, close_fds=False)

    self.addCleanup(p1.communicate, b'')

    # fd_status reports which fds are open in the second child.
    p2 = subprocess.Popen([sys.executable, fd_status],
                          stdout=subprocess.PIPE, close_fds=False)

    output, error = p2.communicate()
    result_fds = set(map(int, output.split(b',')))
    unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
                        p1.stderr.fileno()])

    self.assertFalse(result_fds & unwanted_fds,
                     "Expected no fds from %r to be open in child, "
                     "found %r" %
                     (unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
    """A two-stage pipeline (qcat | qgrep) must not hang: the write end of
    p1's stdout pipe must not leak into p2, or p2 would never see EOF."""
    qcat = support.findfile("qcat.py", subdir="subprocessdata")
    qgrep = support.findfile("qgrep.py", subdir="subprocessdata")

    subdata = b'zxcvbn'
    data = subdata * 4 + b'\n'

    p1 = subprocess.Popen([sys.executable, qcat],
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                          close_fds=False)

    p2 = subprocess.Popen([sys.executable, qgrep, subdata],
                          stdin=p1.stdout, stdout=subprocess.PIPE,
                          close_fds=False)

    self.addCleanup(p1.wait)
    self.addCleanup(p2.wait)

    # Terminate leftover children; ProcessLookupError means already gone.
    def kill_p1():
        try:
            p1.terminate()
        except ProcessLookupError:
            pass

    def kill_p2():
        try:
            p2.terminate()
        except ProcessLookupError:
            pass

    self.addCleanup(kill_p1)
    self.addCleanup(kill_p2)

    p1.stdin.write(data)
    p1.stdin.close()

    # Give the pipeline up to 10 seconds to produce output.
    readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)

    self.assertTrue(readfiles, "The child hung")
    self.assertEqual(p2.stdout.read(), data)

    p1.stdout.close()
    p2.stdout.close()
def test_close_fds(self):
    """close_fds must close inherited fds; pass_fds must keep its fds open.

    Creates a pool of inheritable fds and runs the fd_status helper three
    ways: close_fds=False keeps them all, close_fds=True closes them all,
    and close_fds=True with pass_fds keeps exactly the listed ones.
    """
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    fds = os.pipe()
    self.addCleanup(os.close, fds[0])
    self.addCleanup(os.close, fds[1])
    open_fds = set(fds)
    # add a bunch more fds
    for _ in range(9):
        fd = os.open("/dev/null", os.O_RDONLY)
        self.addCleanup(os.close, fd)
        open_fds.add(fd)
    for fd in open_fds:
        os.set_inheritable(fd, True)

    p = subprocess.Popen([sys.executable, fd_status],
                         stdout=subprocess.PIPE, close_fds=False)
    output, ignored = p.communicate()
    remaining_fds = set(map(int, output.split(b',')))

    self.assertEqual(remaining_fds & open_fds, open_fds,
                     "Some fds were closed")

    p = subprocess.Popen([sys.executable, fd_status],
                         stdout=subprocess.PIPE, close_fds=True)
    output, ignored = p.communicate()
    remaining_fds = set(map(int, output.split(b',')))

    self.assertFalse(remaining_fds & open_fds,
                     "Some fds were left open")
    self.assertIn(1, remaining_fds, "Subprocess failed")

    # Keep some of the fd's we opened open in the subprocess.
    # This tests _posixsubprocess.c's proper handling of fds_to_keep.
    fds_to_keep = set(open_fds.pop() for _ in range(8))
    # BUG FIX: the original passed pass_fds=() and asserted on
    # ``remaining_fds & fds_to_keep & open_fds``, which is always empty
    # because the kept fds were just popped out of open_fds -- the check
    # was vacuous and pass_fds handling was never exercised.  Actually
    # pass fds_to_keep and verify both directions.
    p = subprocess.Popen([sys.executable, fd_status],
                         stdout=subprocess.PIPE, close_fds=True,
                         pass_fds=fds_to_keep)
    output, ignored = p.communicate()
    remaining_fds = set(map(int, output.split(b',')))

    self.assertEqual(remaining_fds & fds_to_keep, fds_to_keep,
                     "Some fds in pass_fds were not left open")
    self.assertFalse(remaining_fds & open_fds,
                     "Some fds not in pass_fds were left open")
    self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
                 os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
                 "Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
    """Confirm that issue21618 is fixed (may fail under valgrind)."""
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    # This launches the meat of the test in a child process to
    # avoid messing with the larger unittest processes maximum
    # number of file descriptors.
    #  This process launches:
    #  +--> Process that lowers its RLIMIT_NOFILE aftr setting up
    #    a bunch of high open fds above the new lower rlimit.
    #    Those are reported via stdout before launching a new
    #    process with close_fds=False to run the actual test:
    #    +--> The TEST: This one launches a fd_status.py
    #      subprocess with close_fds=True so we can find out if
    #      any of the fds above the lowered rlimit are still open.
    p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
    '''
    import os, resource, subprocess, sys, textwrap
    open_fds = set()
    # Add a bunch more fds to pass down.
    for _ in range(40):
        fd = os.open("/dev/null", os.O_RDONLY)
        open_fds.add(fd)

    # Leave a two pairs of low ones available for use by the
    # internal child error pipe and the stdout pipe.
    # We also leave 10 more open as some Python buildbots run into
    # "too many open files" errors during the test if we do not.
    for fd in sorted(open_fds)[:14]:
        os.close(fd)
        open_fds.remove(fd)

    for fd in open_fds:
        #self.addCleanup(os.close, fd)
        os.set_inheritable(fd, True)

    max_fd_open = max(open_fds)

    # Communicate the open_fds to the parent unittest.TestCase process.
    print(','.join(map(str, sorted(open_fds))))
    sys.stdout.flush()

    rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
    try:
        # 29 is lower than the highest fds we are leaving open.
        resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
        # Launch a new Python interpreter with our low fd rlim_cur that
        # inherits open fds above that limit.  It then uses subprocess
        # with close_fds=True to get a report of open fds in the child.
        # An explicit list of fds to check is passed to fd_status.py as
        # letting fd_status rely on its default logic would miss the
        # fds above rlim_cur as it normally only checks up to that limit.
        subprocess.Popen(
            [sys.executable, '-c',
             textwrap.dedent("""
                 import subprocess, sys
                 subprocess.Popen([sys.executable, %r] +
                                  [str(x) for x in range({max_fd})],
                                  close_fds=True).wait()
                 """.format(max_fd=max_fd_open+1))],
            close_fds=False).wait()
    finally:
        resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
    ''' % fd_status)], stdout=subprocess.PIPE)

    output, unused_stderr = p.communicate()
    output_lines = output.splitlines()
    self.assertEqual(len(output_lines), 2,
                     msg="expected exactly two lines of output:\n%r" % output)
    # First line: fds the grandparent opened; second line: fds still open
    # in the deepest child.  The two sets must be disjoint.
    opened_fds = set(map(int, output_lines[0].strip().split(b',')))
    remaining_fds = set(map(int, output_lines[1].strip().split(b',')))

    self.assertFalse(remaining_fds & opened_fds,
                     msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
    """pass_fds must keep exactly the listed fd open in the child and,
    when combined with close_fds=False, emit a RuntimeWarning."""
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    open_fds = set()

    # Create five inheritable pipe pairs to choose from.
    for x in range(5):
        fds = os.pipe()
        self.addCleanup(os.close, fds[0])
        self.addCleanup(os.close, fds[1])
        os.set_inheritable(fds[0], True)
        os.set_inheritable(fds[1], True)
        open_fds.update(fds)

    for fd in open_fds:
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True,
                             pass_fds=(fd, ))
        output, ignored = p.communicate()

        remaining_fds = set(map(int, output.split(b',')))
        to_be_closed = open_fds - {fd}

        self.assertIn(fd, remaining_fds, "fd to be passed not passed")
        self.assertFalse(remaining_fds & to_be_closed,
                         "fd to be closed passed")

        # pass_fds overrides close_fds with a warning.
        with self.assertWarns(RuntimeWarning) as context:
            self.assertFalse(subprocess.call(
                    [sys.executable, "-c", "import sys; sys.exit(0)"],
                    close_fds=False, pass_fds=(fd, )))
        self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
    """pass_fds must make even a non-inheritable fd visible in the child,
    without changing the inheritable flag in the parent."""
    script = support.findfile("fd_status.py", subdir="subprocessdata")

    inheritable, non_inheritable = os.pipe()
    self.addCleanup(os.close, inheritable)
    self.addCleanup(os.close, non_inheritable)
    os.set_inheritable(inheritable, True)
    os.set_inheritable(non_inheritable, False)
    pass_fds = (inheritable, non_inheritable)
    args = [sys.executable, script]
    args += list(map(str, pass_fds))

    p = subprocess.Popen(args,
                         stdout=subprocess.PIPE, close_fds=True,
                         pass_fds=pass_fds)
    output, ignored = p.communicate()
    fds = set(map(int, output.split(b',')))

    # the inheritable file descriptor must be inherited, so its inheritable
    # flag must be set in the child process after fork() and before exec()
    self.assertEqual(fds, set(pass_fds), "output=%a" % output)

    # inheritable flag must not be changed in the parent process
    self.assertEqual(os.get_inheritable(inheritable), True)
    self.assertEqual(os.get_inheritable(non_inheritable), False)
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
    """Popen.wait() must work even when the child sets SIGCHLD to SIG_IGN."""
    # NOTE: sigchild_ignore.py may not be an effective test on all OSes.
    sigchild_ignore = support.findfile("sigchild_ignore.py",
                                       subdir="subprocessdata")
    p = subprocess.Popen([sys.executable, sigchild_ignore],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
                     " non-zero with this error:\n%s" %
                     stderr.decode('utf-8'))
def test_select_unbuffered(self):
    # Issue #11459: bufsize=0 should really set the pipes as
    # unbuffered (and therefore let select() work properly).
    select = support.import_module("select")
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys;'
                          'sys.stdout.write("apple")'],
                         stdout=subprocess.PIPE,
                         bufsize=0)
    f = p.stdout
    self.addCleanup(f.close)
    try:
        # After reading 4 of the 5 bytes, select() must still report the
        # remaining byte as readable -- proof the pipe is unbuffered.
        self.assertEqual(f.read(4), b"appl")
        self.assertIn(f, select.select([f], [], [], 0.0)[0])
    finally:
        p.wait()
def test_zombie_fast_process_del(self):
    # Issue #12650: on Unix, if Popen.__del__() was called before the
    # process exited, it wouldn't be added to subprocess._active, and would
    # remain a zombie.
    # spawn a Popen, and delete its reference before it exits
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys, time;'
                          'time.sleep(0.2)'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    # Remember identity/pid, then drop the only reference so __del__ runs
    # while the child is still alive.
    ident = id(p)
    pid = p.pid
    del p
    # check that p is in the active processes list
    self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
    # Issue #12650: on Unix, if Popen.__del__() was called before the
    # process exited, and the process got killed by a signal, it would never
    # be removed from subprocess._active, which triggered a FD and memory
    # leak.
    # spawn a Popen, delete its reference and kill it
    p = subprocess.Popen([sys.executable, "-c",
                          'import time;'
                          'time.sleep(3)'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    ident = id(p)
    pid = p.pid
    del p
    os.kill(pid, signal.SIGKILL)
    # check that p is in the active processes list
    self.assertIn(ident, [id(o) for o in subprocess._active])

    # let some time for the process to exit, and create a new Popen: this
    # should trigger the wait() of p
    time.sleep(0.2)
    with self.assertRaises(OSError) as c:
        with subprocess.Popen(['nonexisting_i_hope'],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as proc:
            pass
    # p should have been wait()ed on, and removed from the _active list
    self.assertRaises(OSError, os.waitpid, pid, 0)
    self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
    """Fds duplicated inside preexec_fn must still be closed by close_fds."""
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    # this FD is used as dup2() target by preexec_fn, and should be closed
    # in the child process
    fd = os.dup(1)
    self.addCleanup(os.close, fd)

    p = subprocess.Popen([sys.executable, fd_status],
                         stdout=subprocess.PIPE, close_fds=True,
                         preexec_fn=lambda: os.dup2(1, fd))
    output, ignored = p.communicate()

    remaining_fds = set(map(int, output.split(b',')))

    self.assertNotIn(fd, remaining_fds)
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
    """Windows-only Popen behaviour: startupinfo/creationflags, argument
    validation, shell handling, and terminate/kill semantics."""

    def test_startupinfo(self):
        # startupinfo argument
        # We uses hardcoded constants, because we do not want to
        # depend on win32all.
        STARTF_USESHOWWINDOW = 1
        SW_MAXIMIZE = 3
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags = STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = SW_MAXIMIZE
        # Since Python is a console process, it won't be affected
        # by wShowWindow, but the argument should be silently
        # ignored
        subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
                        startupinfo=startupinfo)

    def test_creationflags(self):
        # creationflags argument
        CREATE_NEW_CONSOLE = 16
        sys.stderr.write("    a DOS box should flash briefly ...\n")
        subprocess.call(sys.executable +
                        ' -c "import time; time.sleep(0.25)"',
                        creationflags=CREATE_NEW_CONSOLE)

    def test_invalid_args(self):
        # invalid arguments should raise ValueError
        # (preexec_fn and close_fds+redirection are POSIX-only features)
        self.assertRaises(ValueError, subprocess.call,
                          [sys.executable, "-c",
                           "import sys; sys.exit(47)"],
                          preexec_fn=lambda: 1)
        self.assertRaises(ValueError, subprocess.call,
                          [sys.executable, "-c",
                           "import sys; sys.exit(47)"],
                          stdout=subprocess.PIPE,
                          close_fds=True)

    def test_close_fds(self):
        # close file descriptors
        rc = subprocess.call([sys.executable, "-c",
                              "import sys; sys.exit(47)"],
                             close_fds=True)
        self.assertEqual(rc, 47)

    def test_shell_sequence(self):
        # Run command through the shell (sequence)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "physalis"
        p = subprocess.Popen(["set"], shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        self.addCleanup(p.stdout.close)
        self.assertIn(b"physalis", p.stdout.read())

    def test_shell_string(self):
        # Run command through the shell (string)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "physalis"
        p = subprocess.Popen("set", shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        self.addCleanup(p.stdout.close)
        self.assertIn(b"physalis", p.stdout.read())

    def test_call_string(self):
        # call() function with string argument on Windows
        rc = subprocess.call(sys.executable +
                             ' -c "import sys; sys.exit(47)"')
        self.assertEqual(rc, 47)

    def _kill_process(self, method, *args):
        """Start a long-lived child and invoke p.<method>(*args) on it."""
        # Some win32 buildbot raises EOFError if stdin is inherited
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             time.sleep(30)
                             """],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        getattr(p, method)(*args)
        _, stderr = p.communicate()
        self.assertStderrEqual(stderr, b'')
        returncode = p.wait()
        self.assertNotEqual(returncode, 0)

    def _kill_dead_process(self, method, *args):
        """Start a child that exits immediately, then call p.<method>()
        after it is dead -- the call must not raise."""
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             sys.exit(42)
                             """],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        # The process should end after this
        time.sleep(1)
        # This shouldn't raise even though the child is now dead
        getattr(p, method)(*args)
        _, stderr = p.communicate()
        self.assertStderrEqual(stderr, b'')
        rc = p.wait()
        self.assertEqual(rc, 42)

    def test_send_signal(self):
        self._kill_process('send_signal', signal.SIGTERM)

    def test_kill(self):
        self._kill_process('kill')

    def test_terminate(self):
        self._kill_process('terminate')

    def test_send_signal_dead(self):
        self._kill_dead_process('send_signal', signal.SIGTERM)

    def test_kill_dead(self):
        self._kill_dead_process('kill')

    def test_terminate_dead(self):
        self._kill_dead_process('terminate')
class CommandTests(unittest.TestCase):
    """Tests for the commands-module replacements getoutput/getstatusoutput."""

    def test_getoutput(self):
        """getoutput/getstatusoutput capture shell output and exit status."""
        self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
        self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
                         (0, 'xyzzy'))

        # we use mkdtemp in the next line to create an empty directory
        # under our exclusive control; from that, we can invent a pathname
        # that we _know_ won't exist.  This is guaranteed to fail.
        scratch_dir = None
        try:
            scratch_dir = tempfile.mkdtemp()
            missing = os.path.join(scratch_dir, "foo")
            status, output = subprocess.getstatusoutput(
                ("type " if mswindows else "cat ") + missing)
            self.assertNotEqual(status, 0)
        finally:
            if scratch_dir is not None:
                os.rmdir(scratch_dir)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
                     "Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
    """Re-run the whole ProcessTestCase suite with the select()-based
    selector forced, to cover the non-poll code path in subprocess."""

    def setUp(self):
        self.orig_selector = subprocess._PopenSelector
        subprocess._PopenSelector = selectors.SelectSelector
        ProcessTestCase.setUp(self)

    def tearDown(self):
        # Restore the original selector so later tests are unaffected.
        subprocess._PopenSelector = self.orig_selector
        ProcessTestCase.tearDown(self)
class HelperFunctionTests(unittest.TestCase):
    """Tests for private helper functions of the subprocess module."""

    @unittest.skipIf(mswindows, "errno and EINTR make no sense on windows")
    def test_eintr_retry_call(self):
        """_eintr_retry_call retries exactly once after a fake EINTR."""
        calls = []

        def flaky_syscall(*args):
            # Record every invocation; raise EINTR on the second call only,
            # so the retry loop is exercised exactly once.
            calls.append(args)
            if len(calls) == 2:
                raise OSError(errno.EINTR, "fake interrupted system call")
            return tuple(reversed(args))

        self.assertEqual((999, 256),
                         subprocess._eintr_retry_call(flaky_syscall, 256, 999))
        self.assertEqual([(256, 999)], calls)

        # This time there will be an EINTR so it will loop once.
        self.assertEqual((666,),
                         subprocess._eintr_retry_call(flaky_syscall, 666))
        self.assertEqual([(256, 999), (666,), (666,)], calls)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
    """Windows: program paths containing spaces must be handled both with
    and without the shell, for string and sequence arguments."""

    def setUp(self):
        super().setUp()
        # Create a throwaway script whose filename contains a space; it
        # echoes argc and the (lowercased) argv it received.
        f, fname = mkstemp(".py", "te st")
        self.fname = fname.lower ()
        os.write(f, b"import sys;"
                    b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
        )
        os.close(f)

    def tearDown(self):
        os.remove(self.fname)
        super().tearDown()

    def with_spaces(self, *args, **kwargs):
        """Run the helper script via Popen(*args) and check argv arrived
        intact as exactly two arguments."""
        kwargs['stdout'] = subprocess.PIPE
        p = subprocess.Popen(*args, **kwargs)
        self.addCleanup(p.stdout.close)
        self.assertEqual(
          p.stdout.read ().decode("mbcs"),
          "2 [%r, 'ab cd']" % self.fname
        )

    def test_shell_string_with_spaces(self):
        # call() function with string argument with spaces on Windows
        self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
                                             "ab cd"), shell=1)

    def test_shell_sequence_with_spaces(self):
        # call() function with sequence argument with spaces on Windows
        self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)

    def test_noshell_string_with_spaces(self):
        # call() function with string argument with spaces on Windows
        self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
                                             "ab cd"))

    def test_noshell_sequence_with_spaces(self):
        # call() function with sequence argument with spaces on Windows
        self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
    """Popen as a context manager: pipes are closed and wait() is called
    when the with-block exits."""

    def test_pipe(self):
        with subprocess.Popen([sys.executable, "-c",
                               "import sys;"
                               "sys.stdout.write('stdout');"
                               "sys.stderr.write('stderr');"],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as proc:
            self.assertEqual(proc.stdout.read(), b"stdout")
            self.assertStderrEqual(proc.stderr.read(), b"stderr")

        # Leaving the with-block must close both pipe file objects.
        self.assertTrue(proc.stdout.closed)
        self.assertTrue(proc.stderr.closed)

    def test_returncode(self):
        with subprocess.Popen([sys.executable, "-c",
                               "import sys; sys.exit(100)"]) as proc:
            pass
        # __exit__ calls wait(), so the returncode should be set
        self.assertEqual(proc.returncode, 100)

    def test_communicate_stdin(self):
        # The child exits with bool('context' == stdin) == 1.
        with subprocess.Popen([sys.executable, "-c",
                              "import sys;"
                              "sys.exit(sys.stdin.read() == 'context')"],
                             stdin=subprocess.PIPE) as proc:
            proc.communicate(b"context")
            self.assertEqual(proc.returncode, 1)

    def test_invalid_args(self):
        # A missing executable must raise before the with-body runs.
        with self.assertRaises(FileNotFoundError) as c:
            with subprocess.Popen(['nonexisting_i_hope'],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE) as proc:
                pass
def test_main():
    """Run every test case in this module, then reap leaked children."""
    support.run_unittest(
        ProcessTestCase,
        POSIXProcessTestCase,
        Win32ProcessTestCase,
        CommandTests,
        ProcessTestCaseNoPoll,
        HelperFunctionTests,
        CommandsWithSpaces,
        ContextManagerTests,
    )
    support.reap_children()
# Allow running this test module directly; unittest.main() discovers the
# TestCase classes defined above.
if __name__ == "__main__":
    unittest.main()
|
recognition_picture.py | #-*-coding: utf-8-*-
import sys
import os
from random import randint
from collections import OrderedDict
import time
import multiprocessing
import threading
import zmq
import params
#from zmq.utils.monitor import recv_monitor_message
#print(os.getcwd())
sys.path.append(os.getcwd())
import torch
#from torch.autograd import Variable
import utils
import dataset
from PIL import Image
import models.crnn as crnn
import alphabets
from io import BytesIO
# Character set the CRNN was trained on (defines the output class set).
str1 = alphabets.alphabet

# Paranoid-Pirate-style heartbeat / worker-pool parameters.
HEARTBEAT_LIVENESS = 5      # missed heartbeats before a worker is presumed dead
HEARTBEAT_INTERVAL = 1.0    # seconds between heartbeats / poll timeout
INTERVAL_INIT = 1
INTERVAL_MAX = 32
PPP_READY = b"\x01"         # worker "ready" handshake frame
NBR_WORKERS = 10            # number of OCR worker threads to spawn

# Pre-trained CRNN checkpoint -- NOTE(review): path is user-specific,
# confirm it exists in the deployment environment.
crnn_model_path = '~/crnn_scene_recognition_kinds_36/expr/rnn_no_IO_7_37000_0.993125.pth'
FRONTEND_HOST = "tcp://*:5678"  # clients connect here (ROUTER)
BACKEND_HOST= "tcp://*:5679"    # workers connect here (ROUTER)
alphabet = str1
nclass = len(alphabet)+1        # +1 presumably for the CTC blank class -- TODO confirm
model = None                    # CRNN model instance, loaded in __main__
# CRNN text recognition
#EVENT_MAP = {}
#print("Event names:")
#for name in dir(zmq):
# if name.startswith('EVENT_'):
# value = getattr(zmq, name)
# print("%21s : %4i" % (name, value))
# EVENT_MAP[value] = name
#
#
#def event_monitor(monitor):
# while monitor.poll():
# evt = recv_monitor_message(monitor)
# evt.update({'description': EVENT_MAP[evt['event']]})
# print("Event: {}".format(evt))
# if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
# break
# monitor.close()
# print()
# print("event monitor thread done!")
def crnn_recognition(cropped_image, model):
    """Run CRNN text recognition on a PIL image and return the decoded text.

    The image is converted to grayscale, rescaled to the model's expected
    input width (fixed height 32), run through the network, and greedily
    decoded with the CTC label converter.
    """
    converter = utils.strLabelConverter(alphabet)

    # Grayscale + proportional width rescale relative to the 280-pixel
    # reference width used at training time.
    grayscale = cropped_image.convert('L')
    scaled_w = int(grayscale.size[0] / (280 * 1.0 / params.imgW))
    normalize = dataset.resizeNormalize((scaled_w, 32))
    tensor = normalize(grayscale)          # CHW tensor
    tensor = tensor.view(1, *tensor.size())  # add batch dim -> NCHW

    model.eval()
    preds = model(tensor)

    # Greedy decoding: most likely class per time step; the converter
    # collapses repeats and blanks.
    _, preds = preds.max(2)
    preds = preds.transpose(1, 0).contiguous().view(-1)
    preds_size = torch.IntTensor([preds.size(0)])
    sim_pred = converter.decode(preds.data, preds_size.data, raw=False)

    print('result:{0}'.format(sim_pred))
    return ('{0}'.format(sim_pred))
class Worker(object):
    """A known backend worker, with the deadline after which it is
    considered dead if no further activity is seen."""

    def __init__(self, address):
        # ZMQ identity frame of the worker socket.
        self.address = address
        # Expire after HEARTBEAT_LIVENESS missed heartbeat intervals.
        self.expiry = time.time() + HEARTBEAT_INTERVAL * HEARTBEAT_LIVENESS
class WorkerQueue(object):
    """Least-recently-used queue of ready workers, keyed by ZMQ address."""

    def __init__(self):
        self.queue = OrderedDict()

    def ready(self, worker):
        """(Re-)register *worker*, moving it to the most-recent end."""
        self.queue.pop(worker.address, None)
        self.queue[worker.address] = worker

    def purge(self):
        """Look for & kill expired workers."""
        now = time.time()
        expired = [address for address, worker in self.queue.items()
                   if now > worker.expiry]
        for address in expired:
            print ("expired worker: %s" % address)
            self.queue.pop(address, None)

    def next(self):
        """Pop and return the address of the least-recently ready worker."""
        address, _worker = self.queue.popitem(False)
        return address
def worker_task(worker_url):
    """Worker thread: receive image frames from the broker, run CRNN OCR
    on them, and send the recognised text back to the requesting client.

    Expected request layout: [client_address, b'', image_bytes].
    """
    context = zmq.Context(1)
    worker = context.socket(zmq.DEALER)
    # Random printable identity so the broker can address this worker.
    worker.identity = u"{}-{}".format(randint(0, 0x10000), randint(0, 0x10000)).encode("ascii")
    worker.connect(worker_url)

    # Tell broker we're ready for work
    worker.send(PPP_READY)

    poll_worker = zmq.Poller()
    poll_worker.register(worker, zmq.POLLIN)
    while True:
        # Wake up at least once per heartbeat interval even when idle.
        socks = dict(poll_worker.poll(HEARTBEAT_INTERVAL * 1000))
        print("{}".format(worker.identity.decode("ascii")))
        if socks.get(worker) == zmq.POLLIN:
            frames = worker.recv_multipart()
            if not frames:
                break  # Interrupted
            if len(frames) == 3:
                print ("I: Normal reply")
                t1 = time.time()
                # frames[2] carries the raw image bytes.
                image = Image.open(BytesIO(frames[2]))
                # NOTE(review): lstrip drops any leading '-', ' ' and
                # uppercase letters from the OCR result -- presumably a
                # domain-specific prefix strip; confirm intended behaviour.
                result = crnn_recognition(image, model).lstrip('- ABCDEFGHJKLMNPQRSTUVWXYZ')
                print("crnn_recognition:", result)
                t2 = time.time()-t1
                print("recogition time:", t2)
                # Echo the client envelope back so the broker can route it.
                worker.send_multipart([frames[0], b"", result.encode("ascii")])
            else:
                print ("E: Invalid message: %s" % frames)
def main():
    """Load balancer main loop.

    ROUTER/ROUTER broker: clients connect to the frontend, worker threads
    to the backend.  Requests are forwarded to the least-recently-used
    ready worker; replies are routed back to the originating client.
    """
    # Prepare context and sockets
    url_worker = "tcp://localhost:5679"
    context = zmq.Context(1)
    frontend = context.socket(zmq.ROUTER)
    backend = context.socket(zmq.ROUTER)

    frontend.bind(FRONTEND_HOST)
    backend.bind(BACKEND_HOST)

    # Start background tasks
    def start(task, *args):
        # Threads (not processes) so every worker shares the loaded model;
        # with multiprocessing each process would need its own zmq context.
        process = threading.Thread(target=task, args=args)
        process.daemon = True
        process.start()

    for i in range(NBR_WORKERS):
        start(worker_task, url_worker)

    # Initialize main loop state
    workers = WorkerQueue()
    poller = zmq.Poller()
    # Only poll for requests from backend until workers are available
    poll_workers = zmq.Poller()
    poll_workers.register(backend, zmq.POLLIN)
    poll_both = zmq.Poller()
    poll_both.register(frontend, zmq.POLLIN)
    poll_both.register(backend, zmq.POLLIN)

    while True:
        # Accept client traffic only while at least one worker is ready.
        if len(workers.queue) > 0:
            poller = poll_both
        else:
            poller = poll_workers
        sockets = dict(poller.poll(HEARTBEAT_INTERVAL * 1000))

        if backend in sockets:
            # Handle worker activity on the backend
            frames = backend.recv_multipart()
            if not frames:
                break  # Interrupted
            address = frames[0]
            workers.ready(Worker(address))

            msg = frames[1:]
            if len(msg) == 1:
                # A single frame must be the READY handshake.
                # BUG FIX: the original tested ``msg[0] not in (PPP_READY)``.
                # ``(PPP_READY)`` is just the bytes object, so that was a
                # *substring* test, not tuple membership -- use a 1-tuple.
                if msg[0] not in (PPP_READY,):
                    print("E: Invalid message from worker: %s" % msg)
            else:
                # [client_address, b'', reply] -- route back to the client.
                frontend.send_multipart(msg)

        if frontend in sockets:
            frames = frontend.recv_multipart()
            if not frames:
                break
            # Prepend the LRU worker's address and forward to the backend.
            frames.insert(0, workers.next())
            backend.send_multipart(frames)

    # Clean up
    backend.close()
    frontend.close()
    context.term()
if __name__ == '__main__':
    # Build the CRNN recognition network: 32px input height, 1 input
    # channel, `nclass` output classes, 256 hidden units.
    model = crnn.CRNN(32, 1, nclass, 256)
    model_path = os.path.expanduser(crnn_model_path)
    print('loading pretrained model from {0}'.format(model_path))
    # Load the pre-trained CRNN weights (CPU only; the CUDA path was
    # deliberately disabled).
    model.load_state_dict(torch.load(model_path,map_location=torch.device('cpu')))
    main()
|
resource.py | # -*- coding: utf-8 -*-
import logging
from logging import handlers
import pickle
from threading import Thread
from bson.json_util import dumps
from flask_classful import FlaskView, route
from katana.shared_utils.mongoUtils import mongoUtils
# Logging Parameters
logger = logging.getLogger(__name__)
file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
file_handler.setFormatter(formatter)
stream_handler.setFormatter(stream_formatter)
logger.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
def get_vims(filter_data=None):
    """
    Return the list of available VIMs.

    Each entry is a summary dict with the name, id, location, type,
    tenants and resources fields of the stored VIM record.
    """
    summary_keys = ("name", "id", "location", "type", "tenants", "resources")
    return [
        {key: vim[key] for key in summary_keys}
        for vim in mongoUtils.find_all("vim", data=filter_data)
    ]
def get_func(filter_data=None):
    """
    Return the list of available Network Functions.

    `filter_data` is an optional mongo query dict; {"type": 1} is added
    before the lookup (a caller-supplied dict is mutated, matching the
    original behaviour).
    """
    # BUG FIX: the original used a mutable default argument (`{}`) and
    # mutated it, so filter state leaked between calls.
    if filter_data is None:
        filter_data = {}
    filter_data["type"] = 1
    functions = []
    for iserv in mongoUtils.find_all("func", data=filter_data):
        functions.append(
            dict(
                DB_ID=iserv["_id"],
                # Generation and functionality are stored as numeric codes.
                gen="4G" if iserv["gen"] == 4 else "5G",
                functionality="Core" if iserv["func"] == 0 else "Radio",
                pnf_list=iserv["pnf_list"],
                function_id=iserv["id"],
                location=iserv["location"],
                tenants=iserv["tenants"],
                shared=iserv["shared"],
                created_at=iserv["created_at"],
            )
        )
    return functions
def vim_update():
    """
    Gets the resources of the stored VIMs.

    Only OpenStack VIMs are refreshed: their pickled driver object is
    loaded from the "vim_obj" collection, queried for fresh resources,
    and the VIM record is written back.
    """
    for vim in mongoUtils.find_all("vim"):
        if vim["type"] == "openstack":
            # NOTE: pickle.loads on a DB-stored blob — safe only while the
            # database contents are fully trusted.
            vim_obj = pickle.loads(mongoUtils.get("vim_obj", vim["_id"])["obj"])
            resources = vim_obj.get_resources()
            vim["resources"] = resources
            mongoUtils.update("vim", vim["_id"], vim)
        else:
            # NOTE(review): this value is never used — non-OpenStack VIMs
            # are left untouched in the database. Confirm this is intended.
            resources = "N/A"
class ResourcesView(FlaskView):
    """REST view exposing platform VIM and Network Function resources."""
    route_prefix = "/api/"

    def index(self):
        """
        Returns the available resources on platform,
        used by: `katana resource ls`
        """
        payload = {"VIMs": get_vims(), "Functions": get_func()}
        return dumps(payload), 200

    def get(self, uuid):
        """
        Returns the available resources on platform,
        used by: `katana resource location <location>`
        """
        location_filter = {"location": uuid}
        payload = {
            "VIMs": get_vims(location_filter),
            "Functions": get_func(location_filter),
        }
        return dumps(payload), 200

    @route("/update", methods=["GET", "POST"])
    def update(self):
        """
        Update the resource database for the stored VIMs
        """
        # Kick off the refresh in the background and return immediately.
        refresh_thread = Thread(target=vim_update)
        refresh_thread.start()
        return "Updating resource database", 200
|
spawn_a_process.py | import multiprocessing, time
def foo(i):
    """Report which worker is running, then idle for two seconds."""
    print("called function in process: %s" % i)
    time.sleep(2)
if __name__ == "__main__":
    # Start five worker processes, printing each child's pid.
    Process_jobs = []
    for i in range(5):
        p = multiprocessing.Process(target=foo, args=(i,))
        Process_jobs.append(p)
        p.start()
        print(p.pid)
    # BUG FIX: the original called join() once after the loop, waiting
    # only for the last process created; join every worker instead.
    for p in Process_jobs:
        p.join()
threadlocal_pass.py | '''
Passing parameters to connect two functions
1. create two threads
2. define a function(A) that calls two functions(1st:add ' allow';2nd:add ' pass')
3. point the threads to the functionA
4. start the threads
'''
import threading
def stamp(name):
    """Run `name` through both approval checks and print the result."""
    print(check_school(check_family(name)))
def check_family(s):
    """Append the family approval stamp to `s`."""
    return s + '-allow'
def check_school(s):
    """Append the school approval stamp to `s`."""
    return s + '-pass'
# Launch two threads that stamp different names concurrently; each thread
# gets its own `name` argument, demonstrating per-thread data flow.
# (args is conventionally a tuple, though a list works too.)
t1=threading.Thread(target=stamp,args=['orris'])
t1.start()
t2=threading.Thread(target=stamp,args=['violet'])
t2.start()
|
menu.py | import os
import threading
from oled.device import ssd1306
from oled.render import canvas
from PIL import ImageFont, ImageDraw
class Menu:
    """Scrollable text menu rendered on an SSD1306 OLED display."""

    def __init__(self, options=None):
        # BUG FIX: `options=[]` was a shared mutable default argument.
        self.options = options if options is not None else []
        self.highlightOption = None
        self.rowCount = 6  # number of text rows visible at once
        self.oled = ssd1306(port=3, address=0x3C)
        self.font = ImageFont.truetype('fonts/mobitec.ttf', 8)
        self.renderThread = None

    def set_options(self, options):
        """Replace the option list and clear the highlight."""
        self.options = options
        self.highlightOption = None

    def set_highlight(self, highlight):
        """Set the highlighted index, clamped to the valid range (or None)."""
        if highlight is None:
            self.highlightOption = None
        elif highlight < 0:
            self.highlightOption = 0
        elif highlight >= len(self.options):
            self.highlightOption = len(self.options) - 1
        else:
            self.highlightOption = highlight

    def change_highlight(self, by):
        """Move the highlight by a relative offset (starting at 0 if unset)."""
        self.set_highlight(0 if self.highlightOption is None else self.highlightOption + by)

    def blank(self, draw=False):
        """Clear the entire display."""
        with canvas(self.oled) as draw:
            draw.rectangle((-1, -1, self.oled.width+1, self.oled.height+1), outline=0, fill=0)

    def render(self):
        """Redraw the menu on a background thread (skip if one is running)."""
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive()
        # is the long-standing spelling.
        if self.renderThread is None or not self.renderThread.is_alive():
            self.renderThread = threading.Thread(target=self.__render)
            self.renderThread.start()

    def __render(self):
        # Clear first, then draw the current window of options.
        self.blank()
        self.__build()

    def __build(self):
        """Draw the visible window of options, highlighting the selection."""
        # adjust the start/end positions of the range
        if (self.highlightOption is None) or (self.highlightOption < self.rowCount):
            start = 0
            # BUG FIX: clamp so menus shorter than rowCount don't index
            # past the end of self.options.
            end = min(self.rowCount, len(self.options))
        elif self.highlightOption >= (len(self.options) - self.rowCount):
            end = len(self.options)
            start = end - self.rowCount
        else:
            start = self.highlightOption
            end = start + self.rowCount
        # draw the menu options
        with canvas(self.oled) as draw:
            top = 0
            for x in range(start, end):
                fill = 1
                if self.highlightOption is not None and self.highlightOption == x:
                    # BUG FIX: was self.draw.rectangle — the instance has no
                    # `draw` attribute; the drawing context is the local.
                    draw.rectangle([0, top, self.oled.width, top + 11], outline=0, fill=1)
                    fill = 0
                draw.text((3, top + 1), self.options[x], font=self.font, fill=fill)
                top += 10
|
test_models.py | import fcntl
from multiprocessing import Process
from pathlib import Path
import shutil
import mock
from django.core import mail
from django.utils.encoding import force_bytes, force_text
from django_celery_beat.models import PeriodicTask
from mayan.apps.common.serialization import yaml_dump
from mayan.apps.documents.models import Document
from mayan.apps.documents.tests.base import GenericDocumentTestCase
from mayan.apps.documents.tests.literals import (
TEST_COMPRESSED_DOCUMENT_PATH, TEST_NON_ASCII_DOCUMENT_FILENAME,
TEST_NON_ASCII_DOCUMENT_PATH, TEST_NON_ASCII_COMPRESSED_DOCUMENT_PATH,
TEST_SMALL_DOCUMENT_FILENAME, TEST_SMALL_DOCUMENT_PATH
)
from mayan.apps.metadata.models import MetadataType
from mayan.apps.storage.utils import mkdtemp
from ..literals import SOURCE_UNCOMPRESS_CHOICE_Y
from ..models.email_sources import EmailBaseModel, IMAPEmail, POP3Email
from ..models.scanner_sources import SaneScanner
from .literals import (
TEST_EMAIL_ATTACHMENT_AND_INLINE, TEST_EMAIL_BASE64_FILENAME,
TEST_EMAIL_BASE64_FILENAME_FROM, TEST_EMAIL_BASE64_FILENAME_SUBJECT,
TEST_EMAIL_INLINE_IMAGE, TEST_EMAIL_NO_CONTENT_TYPE,
TEST_EMAIL_NO_CONTENT_TYPE_STRING, TEST_EMAIL_ZERO_LENGTH_ATTACHMENT,
TEST_WATCHFOLDER_SUBFOLDER
)
from .mixins import SourceTestMixin, WatchFolderTestMixin
from .mocks import MockIMAPServer, MockPOP3Mailbox
class CompressedUploadsTestCase(SourceTestMixin, GenericDocumentTestCase):
    """Uploading an archive with uncompress enabled must expand it into
    one document per archive member."""
    auto_upload_test_document = False

    def test_upload_compressed_file(self):
        self.test_source.uncompress = SOURCE_UNCOMPRESS_CHOICE_Y
        self.test_source.save()
        with open(file=TEST_COMPRESSED_DOCUMENT_PATH, mode='rb') as file_object:
            self.test_source.handle_upload(
                document_type=self.test_document_type,
                file_object=file_object,
                expand=(
                    self.test_source.uncompress == SOURCE_UNCOMPRESS_CHOICE_Y
                )
            )
        # Two members expected, each becoming its own document.
        self.assertEqual(Document.objects.count(), 2)
        self.assertTrue(
            'first document.pdf' in Document.objects.values_list(
                'label', flat=True
            )
        )
        self.assertTrue(
            'second document.pdf' in Document.objects.values_list(
                'label', flat=True
            )
        )
class EmailBaseTestCase(GenericDocumentTestCase):
    """
    Tests for EmailBaseModel.process_message: attachment decoding, body
    storage and from/subject metadata capture.
    """
    auto_upload_test_document = False

    def _create_email_source(self):
        # Minimal in-memory source; store_body=True saves the message
        # body as its own document.
        self.source = EmailBaseModel(
            document_type=self.test_document_type,
            host='', username='', password='', store_body=True
        )

    def test_decode_email_base64_encoded_filename(self):
        """
        Test decoding of base64 encoded e-mail attachment filename.
        """
        self._create_email_source()
        EmailBaseModel.process_message(
            source=self.source, message_text=TEST_EMAIL_BASE64_FILENAME
        )
        self.assertEqual(
            Document.objects.first().label, 'Ampelm\xe4nnchen.txt'
        )

    def test_decode_email_no_content_type(self):
        self._create_email_source()
        EmailBaseModel.process_message(
            source=self.source, message_text=TEST_EMAIL_NO_CONTENT_TYPE
        )
        self.assertTrue(
            TEST_EMAIL_NO_CONTENT_TYPE_STRING in Document.objects.first().open().read()
        )

    def test_decode_email_zero_length_attachment(self):
        # An empty attachment (with body storage off) must create nothing.
        self._create_email_source()
        self.source.store_body = False
        self.source.save()
        EmailBaseModel.process_message(
            source=self.source, message_text=TEST_EMAIL_ZERO_LENGTH_ATTACHMENT
        )
        self.assertEqual(Document.objects.count(), 0)

    def test_decode_email_with_inline_image(self):
        # Silence expected errors in other apps
        self._silence_logger(name='mayan.apps.converter.backends')
        self._create_email_source()
        EmailBaseModel.process_message(
            source=self.source, message_text=TEST_EMAIL_INLINE_IMAGE
        )
        # BUG FIX: was assertTrue(count, 2) — the 2 was only the failure
        # message, so the assertion could never fail; assertEqual does
        # the comparison the assertQuerysetEqual below relies on.
        self.assertEqual(Document.objects.count(), 2)
        self.assertQuerysetEqual(
            ordered=False, qs=Document.objects.all(), values=(
                '<Document: test-01.png>', '<Document: email_body.html>'
            ),
        )

    def test_decode_email_with_attachment_and_inline_image(self):
        # Silence expected errors in other apps
        self._silence_logger(name='mayan.apps.converter.backends')
        self._create_email_source()
        EmailBaseModel.process_message(
            source=self.source, message_text=TEST_EMAIL_ATTACHMENT_AND_INLINE
        )
        # BUG FIX: same assertTrue misuse as above.
        self.assertEqual(Document.objects.count(), 2)
        self.assertQuerysetEqual(
            ordered=False, qs=Document.objects.all(), values=(
                '<Document: test-01.png>', '<Document: email_body.html>',
            ),
        )

    def test_decode_email_and_store_from_and_subject_as_metadata(self):
        metadata_from = MetadataType.objects.create(name='from')
        metadata_subject = MetadataType.objects.create(name='subject')
        self.test_document_type.metadata.create(metadata_type=metadata_from)
        self.test_document_type.metadata.create(metadata_type=metadata_subject)
        self._create_email_source()
        self.source.from_metadata_type = metadata_from
        self.source.subject_metadata_type = metadata_subject
        self.source.save()
        EmailBaseModel.process_message(
            source=self.source, message_text=TEST_EMAIL_BASE64_FILENAME
        )
        document = Document.objects.first()
        self.assertEqual(
            document.label, 'Ampelm\xe4nnchen.txt'
        )
        self.assertEqual(
            document.metadata.get(metadata_type=metadata_from).value,
            TEST_EMAIL_BASE64_FILENAME_FROM
        )
        self.assertEqual(
            document.metadata.get(metadata_type=metadata_subject).value,
            TEST_EMAIL_BASE64_FILENAME_SUBJECT
        )

    def test_document_upload_no_body(self):
        # Silence expected errors in other apps
        self._silence_logger(name='mayan.apps.converter.backends')
        self._create_email_source()
        self.source.store_body = False
        self.source.save()
        EmailBaseModel.process_message(
            source=self.source, message_text=TEST_EMAIL_ATTACHMENT_AND_INLINE
        )
        # Attachment document only — the body is not stored.  (The
        # original comment claimed "two attachments", contradicting the
        # asserted count of 1.)
        self.assertEqual(1, Document.objects.count())

    def test_document_upload_with_body(self):
        # Silence expected errors in other apps
        self._silence_logger(name='mayan.apps.converter.backends')
        self._create_email_source()
        EmailBaseModel.process_message(
            source=self.source, message_text=TEST_EMAIL_ATTACHMENT_AND_INLINE
        )
        # Attachment plus the stored body document.
        self.assertEqual(2, Document.objects.count())

    def test_metadata_yaml_attachment(self):
        # A metadata.yaml attachment should populate the metadata of
        # every document created from the same message.
        TEST_METADATA_VALUE_1 = 'test value 1'
        TEST_METADATA_VALUE_2 = 'test value 2'
        test_metadata_type_1 = MetadataType.objects.create(
            name='test_metadata_type_1'
        )
        test_metadata_type_2 = MetadataType.objects.create(
            name='test_metadata_type_2'
        )
        self.test_document_type.metadata.create(
            metadata_type=test_metadata_type_1
        )
        self.test_document_type.metadata.create(
            metadata_type=test_metadata_type_2
        )
        test_metadata_yaml = yaml_dump(
            data={
                test_metadata_type_1.name: TEST_METADATA_VALUE_1,
                test_metadata_type_2.name: TEST_METADATA_VALUE_2,
            }
        )
        # Create email with a test attachment first, then the metadata.yaml
        # attachment
        with mail.get_connection(
            backend='django.core.mail.backends.locmem.EmailBackend'
        ) as connection:
            email_message = mail.EmailMultiAlternatives(
                body='test email body', connection=connection,
                subject='test email subject', to=['test@example.com'],
            )
            email_message.attach(
                filename='test_attachment',
                content='test_content',
            )
            email_message.attach(
                filename='metadata.yaml',
                content=test_metadata_yaml,
            )
            email_message.send()
        self._create_email_source()
        self.source.store_body = True
        self.source.save()
        EmailBaseModel.process_message(
            source=self.source, message_text=mail.outbox[0].message()
        )
        self.assertEqual(Document.objects.count(), 2)
        for document in Document.objects.all():
            self.assertEqual(
                document.metadata.get(metadata_type=test_metadata_type_1).value,
                TEST_METADATA_VALUE_1
            )
            self.assertEqual(
                document.metadata.get(metadata_type=test_metadata_type_2).value,
                TEST_METADATA_VALUE_2
            )
class IMAPSourceTestCase(GenericDocumentTestCase):
    """An IMAP source must download messages via a (mocked) IMAP4_SSL."""
    auto_upload_test_document = False

    @mock.patch('imaplib.IMAP4_SSL', autospec=True)
    def test_download_document(self, mock_imaplib):
        # Replace the IMAP library with an in-memory fake server.
        mock_imaplib.return_value = MockIMAPServer()
        self.source = IMAPEmail.objects.create(
            document_type=self.test_document_type, label='', host='',
            password='', username=''
        )
        self.source.check_source()
        self.assertEqual(
            Document.objects.first().label, 'Ampelm\xe4nnchen.txt'
        )
class IntervalSourceTestCase(WatchFolderTestMixin, GenericDocumentTestCase):
    """The periodic task driving an interval source must follow the
    watch folder's lifecycle."""
    auto_upload_test_document = False

    def test_periodic_task_create(self):
        # Creating a watch folder registers a new periodic task.
        periodic_task_count = PeriodicTask.objects.count()
        self._create_test_watchfolder()
        self.assertTrue(PeriodicTask.objects.count() > periodic_task_count)

    def test_periodic_task_delete(self):
        # Deleting the document type must tear the task down again.
        self._create_test_watchfolder()
        periodic_task_count = PeriodicTask.objects.count()
        self.test_document_type.delete()
        self.assertTrue(PeriodicTask.objects.count() < periodic_task_count)
class POP3SourceTestCase(GenericDocumentTestCase):
    """A POP3 source must download messages via a (mocked) POP3_SSL."""
    auto_upload_test_document = False

    @mock.patch('poplib.POP3_SSL', autospec=True)
    def test_download_document(self, mock_poplib):
        # Replace the POP3 library with an in-memory fake mailbox.
        mock_poplib.return_value = MockPOP3Mailbox()
        self.source = POP3Email.objects.create(
            document_type=self.test_document_type, label='', host='',
            password='', username=''
        )
        self.source.check_source()
        self.assertEqual(
            Document.objects.first().label, 'Ampelm\xe4nnchen.txt'
        )
class SANESourceTestCase(GenericDocumentTestCase):
    """Exercise the SANE scanner source end to end."""
    auto_upload_test_document = False

    def _create_test_scanner_source(self):
        # NOTE(review): 'test' is presumably SANE's built-in virtual test
        # device — confirm the test host has the SANE backends installed.
        self.test_source = SaneScanner.objects.create(
            label='', device_name='test'
        )

    def test_command(self):
        # '-V' asks the scan command for its version; the output should
        # mention 'sane'.
        self._create_test_scanner_source()
        file_object = self.test_source.execute_command(arguments=('-V',))
        self.assertTrue(force_bytes('sane') in file_object.read())

    def test_scan(self):
        # A scan from the test device must yield a non-empty file.
        self._create_test_scanner_source()
        file_object = self.test_source.get_upload_file_object(
            form_data={'document_type': self.test_document_type.pk}
        )
        self.assertTrue(file_object.size > 0)
class WatchFolderTestCase(WatchFolderTestMixin, GenericDocumentTestCase):
    """Watch folder behaviour: subfolder handling, non-ASCII filenames
    and file-lock detection."""
    auto_upload_test_document = False

    def test_subfolder_support_disabled(self):
        # Files inside a subfolder are ignored unless the option is on.
        self._create_test_watchfolder()
        test_path = Path(self.temporary_directory)
        test_subfolder = test_path.joinpath(TEST_WATCHFOLDER_SUBFOLDER)
        test_subfolder.mkdir()
        shutil.copy(TEST_SMALL_DOCUMENT_PATH, force_text(test_subfolder))
        self.test_watch_folder.check_source()
        self.assertEqual(Document.objects.count(), 0)

    def test_subfolder_support_enabled(self):
        # With include_subdirectories on, the same file is picked up.
        self._create_test_watchfolder()
        self.test_watch_folder.include_subdirectories = True
        self.test_watch_folder.save()
        test_path = Path(self.temporary_directory)
        test_subfolder = test_path.joinpath(TEST_WATCHFOLDER_SUBFOLDER)
        test_subfolder.mkdir()
        shutil.copy(TEST_SMALL_DOCUMENT_PATH, force_text(test_subfolder))
        self.test_watch_folder.check_source()
        self.assertEqual(Document.objects.count(), 1)
        document = Document.objects.first()
        self.assertEqual(document.exists(), True)
        self.assertEqual(document.size, 17436)
        self.assertEqual(document.file_mimetype, 'image/png')
        self.assertEqual(document.file_mime_encoding, 'binary')
        self.assertEqual(document.label, TEST_SMALL_DOCUMENT_FILENAME)
        self.assertEqual(document.page_count, 1)

    def test_issue_gh_163(self):
        """
        Non-ASCII chars in document name failing in upload via watch folder
        gh-issue #163 https://github.com/mayan-edms/mayan-edms/issues/163
        """
        self._create_test_watchfolder()
        shutil.copy(TEST_NON_ASCII_DOCUMENT_PATH, self.temporary_directory)
        self.test_watch_folder.check_source()
        self.assertEqual(Document.objects.count(), 1)
        document = Document.objects.first()
        self.assertEqual(document.exists(), True)
        self.assertEqual(document.size, 17436)
        self.assertEqual(document.file_mimetype, 'image/png')
        self.assertEqual(document.file_mime_encoding, 'binary')
        self.assertEqual(document.label, TEST_NON_ASCII_DOCUMENT_FILENAME)
        self.assertEqual(document.page_count, 1)

    def test_issue_gh_163_expanded(self):
        """
        Test Non-ASCII named documents inside Non-ASCII named compressed file
        """
        self._create_test_watchfolder()
        shutil.copy(
            TEST_NON_ASCII_COMPRESSED_DOCUMENT_PATH, self.temporary_directory
        )
        self.test_watch_folder.check_source()
        self.assertEqual(Document.objects.count(), 1)
        document = Document.objects.first()
        self.assertEqual(document.exists(), True)
        self.assertEqual(document.size, 17436)
        self.assertEqual(document.file_mimetype, 'image/png')
        self.assertEqual(document.file_mime_encoding, 'binary')
        self.assertEqual(document.label, TEST_NON_ASCII_DOCUMENT_FILENAME)
        self.assertEqual(document.page_count, 1)

    def test_locking_support(self):
        # A file held under an exclusive fcntl lock must be skipped.
        # check_source runs in a child process so the parent keeps the lock.
        self._create_test_watchfolder()
        shutil.copy(
            TEST_SMALL_DOCUMENT_PATH, self.temporary_directory
        )
        path_test_file = Path(
            self.temporary_directory, TEST_SMALL_DOCUMENT_FILENAME
        )
        with path_test_file.open(mode='rb+') as file_object:
            fcntl.lockf(file_object, fcntl.LOCK_EX | fcntl.LOCK_NB)
            process = Process(target=self.test_watch_folder.check_source)
            process.start()
            process.join()
            self.assertEqual(Document.objects.count(), 0)
|
enviroplus_exporter.py | #!/usr/bin/env python3
import os
import random
import requests
import time
import logging
import argparse
import subprocess
from threading import Thread
from prometheus_client import start_http_server, Gauge, Histogram
from bme280 import BME280
from enviroplus import gas
from pms5003 import PMS5003
from pms5003 import ReadTimeoutError as pmsReadTimeoutError
from pms5003 import SerialTimeoutError as pmsSerialTimeoutError
from pms5003 import ChecksumMismatchError as pmsChecksumMismatchError
from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS
try:
from smbus2 import SMBus
except ImportError:
from smbus import SMBus
try:
# Transitional fix for breaking change in LTR559
from ltr559 import LTR559
ltr559 = LTR559()
except ImportError:
import ltr559
logging.basicConfig(
format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
logging.info("""enviroplus_exporter.py - Expose readings from the Enviro+ sensor by Pimoroni in Prometheus format
Press Ctrl+C to exit!
""")
DEBUG = os.getenv('DEBUG', 'false') == 'true'
bus = SMBus(1)
bme280 = BME280(i2c_dev=bus)
pms5003 = PMS5003()
TEMPERATURE = Gauge('temperature','Temperature measured (*C)')
PRESSURE = Gauge('pressure','Pressure measured (hPa)')
HUMIDITY = Gauge('humidity','Relative humidity measured (%)')
OXIDISING = Gauge('oxidising','Mostly nitrogen dioxide but could include NO and Hydrogen (Ohms)')
REDUCING = Gauge('reducing', 'Mostly carbon monoxide but could include H2S, Ammonia, Ethanol, Hydrogen, Methane, Propane, Iso-butane (Ohms)')
NH3 = Gauge('NH3', 'mostly Ammonia but could also include Hydrogen, Ethanol, Propane, Iso-butane (Ohms)')
LUX = Gauge('lux', 'current ambient light level (lux)')
PROXIMITY = Gauge('proximity', 'proximity, with larger numbers being closer proximity and vice versa')
PM1 = Gauge('PM1', 'Particulate Matter of diameter less than 1 micron. Measured in micrograms per cubic metre (ug/m3)')
PM25 = Gauge('PM25', 'Particulate Matter of diameter less than 2.5 microns. Measured in micrograms per cubic metre (ug/m3)')
PM10 = Gauge('PM10', 'Particulate Matter of diameter less than 10 microns. Measured in micrograms per cubic metre (ug/m3)')
OXIDISING_HIST = Histogram('oxidising_measurements', 'Histogram of oxidising measurements', buckets=(0, 10000, 15000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 55000, 60000, 65000, 70000, 75000, 80000, 85000, 90000, 100000))
REDUCING_HIST = Histogram('reducing_measurements', 'Histogram of reducing measurements', buckets=(0, 100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000, 900000, 1000000, 1100000, 1200000, 1300000, 1400000, 1500000))
NH3_HIST = Histogram('nh3_measurements', 'Histogram of nh3 measurements', buckets=(0, 10000, 110000, 210000, 310000, 410000, 510000, 610000, 710000, 810000, 910000, 1010000, 1110000, 1210000, 1310000, 1410000, 1510000, 1610000, 1710000, 1810000, 1910000, 2000000))
PM1_HIST = Histogram('pm1_measurements', 'Histogram of Particulate Matter of diameter less than 1 micron measurements', buckets=(0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100))
PM25_HIST = Histogram('pm25_measurements', 'Histogram of Particulate Matter of diameter less than 2.5 micron measurements', buckets=(0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100))
PM10_HIST = Histogram('pm10_measurements', 'Histogram of Particulate Matter of diameter less than 10 micron measurements', buckets=(0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100))
# Setup InfluxDB
# You can generate an InfluxDB Token from the Tokens Tab in the InfluxDB Cloud UI
INFLUXDB_URL = os.getenv('INFLUXDB_URL', '')
INFLUXDB_TOKEN = os.getenv('INFLUXDB_TOKEN', '')
INFLUXDB_ORG_ID = os.getenv('INFLUXDB_ORG_ID', '')
INFLUXDB_BUCKET = os.getenv('INFLUXDB_BUCKET', '')
INFLUXDB_SENSOR_LOCATION = os.getenv('INFLUXDB_SENSOR_LOCATION', 'Adelaide')
INFLUXDB_TIME_BETWEEN_POSTS = int(os.getenv('INFLUXDB_TIME_BETWEEN_POSTS', '5'))
influxdb_client = InfluxDBClient(url=INFLUXDB_URL, token=INFLUXDB_TOKEN, org=INFLUXDB_ORG_ID)
influxdb_api = influxdb_client.write_api(write_options=SYNCHRONOUS)
# Setup Luftdaten
LUFTDATEN_TIME_BETWEEN_POSTS = int(os.getenv('LUFTDATEN_TIME_BETWEEN_POSTS', '30'))
# Sometimes the sensors can't be read. Resetting the i2c
def reset_i2c():
    """Scan the i2c bus to nudge unresponsive sensors, then let them settle."""
    scan_command = ['i2cdetect', '-y', '1']
    subprocess.run(scan_command)
    time.sleep(2)
# Get the temperature of the CPU for compensation
def get_cpu_temperature():
    """Return the CPU temperature in degrees C (used for heat compensation)."""
    # The kernel exposes the value in millidegrees.
    with open("/sys/class/thermal/thermal_zone0/temp", "r") as f:
        millidegrees = f.read()
    return int(millidegrees) / 1000.0
def get_temperature(factor):
    """Get temperature from the weather sensor.

    `factor`: optional compensation divisor for heat leaking from the
    CPU into the sensor; falsy values disable compensation.
    """
    # Tuning factor for compensation. Decrease this number to adjust the
    # temperature down, and increase to adjust up
    raw_temp = bme280.get_temperature()
    if factor:
        # NOTE(review): the history list is rebuilt from 5 identical
        # samples on every call, so the "smoothing" below has almost no
        # effect across calls — presumably cpu_temps was meant to persist
        # between calls. Confirm before relying on the averaging.
        cpu_temps = [get_cpu_temperature()] * 5
        cpu_temp = get_cpu_temperature()
        # Smooth out with some averaging to decrease jitter
        cpu_temps = cpu_temps[1:] + [cpu_temp]
        avg_cpu_temp = sum(cpu_temps) / float(len(cpu_temps))
        temperature = raw_temp - ((avg_cpu_temp - raw_temp) / factor)
    else:
        temperature = raw_temp
    TEMPERATURE.set(temperature)  # Set to a given value
def get_pressure():
    """Read pressure from the weather sensor and publish it to the gauge."""
    try:
        PRESSURE.set(bme280.get_pressure())
    except IOError:
        logging.error("Could not get pressure readings. Resetting i2c.")
        reset_i2c()
def get_humidity():
    """Read relative humidity from the weather sensor and publish it."""
    try:
        HUMIDITY.set(bme280.get_humidity())
    except IOError:
        logging.error("Could not get humidity readings. Resetting i2c.")
        reset_i2c()
def get_gas():
    """Read all gas channels and publish both gauges and histograms."""
    try:
        readings = gas.read_all()
        for gauge, histogram, value in (
                (OXIDISING, OXIDISING_HIST, readings.oxidising),
                (REDUCING, REDUCING_HIST, readings.reducing),
                (NH3, NH3_HIST, readings.nh3),
        ):
            gauge.set(value)
            histogram.observe(value)
    except IOError:
        logging.error("Could not get gas readings. Resetting i2c.")
        reset_i2c()
def get_light():
    """Read ambient light level and proximity and publish them."""
    try:
        lux_reading = ltr559.get_lux()
        proximity_reading = ltr559.get_proximity()
        LUX.set(lux_reading)
        PROXIMITY.set(proximity_reading)
    except IOError:
        logging.error("Could not get lux and proximity readings. Resetting i2c.")
        reset_i2c()
def get_particulates():
    """Get the particulate matter readings from the PMS5003."""
    try:
        pms_data = pms5003.read()
    except (pmsReadTimeoutError, pmsSerialTimeoutError, pmsChecksumMismatchError):
        # Transient serial faults: warn and keep the previous values.
        logging.warning("Failed to read PMS5003")
    except IOError:
        logging.error("Could not get particulate matter readings. Resetting i2c.")
        reset_i2c()
    else:
        # Gauges publish the cumulative "diameter less than X micron" values.
        PM1.set(pms_data.pm_ug_per_m3(1.0))
        PM25.set(pms_data.pm_ug_per_m3(2.5))
        PM10.set(pms_data.pm_ug_per_m3(10))
        # Histograms record size bands by subtracting the next smaller
        # cumulative reading (PM1 band is the raw PM1 value).
        PM1_HIST.observe(pms_data.pm_ug_per_m3(1.0))
        PM25_HIST.observe(pms_data.pm_ug_per_m3(2.5) - pms_data.pm_ug_per_m3(1.0))
        PM10_HIST.observe(pms_data.pm_ug_per_m3(10) - pms_data.pm_ug_per_m3(2.5))
def collect_all_data():
    """Snapshot the current value of every sensor gauge into a plain dict."""
    gauges = {
        'temperature': TEMPERATURE,
        'humidity': HUMIDITY,
        'pressure': PRESSURE,
        'oxidising': OXIDISING,
        'reducing': REDUCING,
        'nh3': NH3,
        'lux': LUX,
        'proximity': PROXIMITY,
        'pm1': PM1,
        'pm25': PM25,
        'pm10': PM10,
    }
    # Each Gauge exposes a single sample holding its current value.
    return {
        name: gauge.collect()[0].samples[0].value
        for name, gauge in gauges.items()
    }
def post_to_influxdb():
    """Post all sensor data to InfluxDB every INFLUXDB_TIME_BETWEEN_POSTS
    seconds.

    Runs forever; intended to be started on a background thread.
    """
    # Removed unused locals from the original (`name`, `tag`,
    # `epoch_time_now`) — none of them were referenced.
    while True:
        time.sleep(INFLUXDB_TIME_BETWEEN_POSTS)
        sensor_data = collect_all_data()
        # One point per metric, all tagged with the sensor location.
        data_points = [
            Point('enviroplus').tag('location', INFLUXDB_SENSOR_LOCATION).field(field_name, value)
            for field_name, value in sensor_data.items()
        ]
        try:
            influxdb_api.write(bucket=INFLUXDB_BUCKET, record=data_points)
            if DEBUG:
                logging.info('InfluxDB response: OK')
        except Exception as exception:
            logging.warning('Exception sending to InfluxDB: {}'.format(exception))
def post_to_luftdaten():
    """Post relevant sensor data to luftdaten.info.

    Code from: https://github.com/sepulworld/balena-environ-plus
    Runs forever; intended to be started on a background thread.
    """
    LUFTDATEN_SENSOR_UID = 'raspi-' + get_serial_number()
    while True:
        time.sleep(LUFTDATEN_TIME_BETWEEN_POSTS)
        sensor_data = collect_all_data()
        values = {}
        values["P2"] = sensor_data['pm25']
        values["P1"] = sensor_data['pm10']
        values["temperature"] = "{:.2f}".format(sensor_data['temperature'])
        # Gauge stores hPa; the *100 converts to Pa for the API.
        values["pressure"] = "{:.2f}".format(sensor_data['pressure'] * 100)
        values["humidity"] = "{:.2f}".format(sensor_data['humidity'])
        # Particulates (keys starting with 'P') and climate values are
        # posted as two separate requests with different X-PIN headers.
        pm_values = dict(i for i in values.items() if i[0].startswith('P'))
        temperature_values = dict(i for i in values.items() if not i[0].startswith('P'))
        try:
            response_pin_1 = requests.post('https://api.luftdaten.info/v1/push-sensor-data/',
                json={
                    "software_version": "enviro-plus 0.0.1",
                    "sensordatavalues": [{"value_type": key, "value": val} for
                                         key, val in pm_values.items()]
                },
                headers={
                    "X-PIN": "1",
                    "X-Sensor": LUFTDATEN_SENSOR_UID,
                    "Content-Type": "application/json",
                    "cache-control": "no-cache"
                }
            )
            response_pin_11 = requests.post('https://api.luftdaten.info/v1/push-sensor-data/',
                json={
                    "software_version": "enviro-plus 0.0.1",
                    "sensordatavalues": [{"value_type": key, "value": val} for
                                         key, val in temperature_values.items()]
                },
                headers={
                    "X-PIN": "11",
                    "X-Sensor": LUFTDATEN_SENSOR_UID,
                    "Content-Type": "application/json",
                    "cache-control": "no-cache"
                }
            )
            if response_pin_1.ok and response_pin_11.ok:
                if DEBUG:
                    logging.info('Luftdaten response: OK')
            else:
                logging.warning('Luftdaten response: Failed')
        except Exception as exception:
            logging.warning('Exception sending to Luftdaten: {}'.format(exception))
def get_serial_number():
    """Get Raspberry Pi serial number to use as LUFTDATEN_SENSOR_UID"""
    with open('/proc/cpuinfo', 'r') as cpuinfo:
        for line in cpuinfo:
            if line.startswith('Serial'):
                return str(line.split(":")[1].strip())
def str_to_bool(value):
    """Parse a yes/no style string into a bool; raise ValueError otherwise."""
    lowered = value.lower()
    if lowered in {'true', 't', '1', 'yes', 'y'}:
        return True
    if lowered in {'false', 'f', '0', 'no', 'n'}:
        return False
    raise ValueError('{} is not a valid boolean value'.format(value))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", "--bind", metavar='ADDRESS', default='0.0.0.0', help="Specify alternate bind address [default: 0.0.0.0]")
    parser.add_argument("-p", "--port", metavar='PORT', default=8000, type=int, help="Specify alternate port [default: 8000]")
    parser.add_argument("-f", "--factor", metavar='FACTOR', type=float, help="The compensation factor to get better temperature results when the Enviro+ pHAT is too close to the Raspberry Pi board")
    parser.add_argument("-e", "--enviro", metavar='ENVIRO', type=str_to_bool, help="Device is an Enviro (not Enviro+) so don't fetch data from gas and particulate sensors as they don't exist")
    parser.add_argument("-d", "--debug", metavar='DEBUG', type=str_to_bool, help="Turns on more verbose logging, showing sensor output and post responses [default: false]")
    parser.add_argument("-i", "--influxdb", metavar='INFLUXDB', type=str_to_bool, default='false', help="Post sensor data to InfluxDB [default: false]")
    parser.add_argument("-l", "--luftdaten", metavar='LUFTDATEN', type=str_to_bool, default='false', help="Post sensor data to Luftdaten [default: false]")
    args = parser.parse_args()
    # Start up the server to expose the metrics.
    start_http_server(addr=args.bind, port=args.port)
    # The -d flag overrides the DEBUG environment variable.
    if args.debug:
        DEBUG = True
    if args.factor:
        logging.info("Using compensating algorithm (factor={}) to account for heat leakage from Raspberry Pi board".format(args.factor))
    if args.influxdb:
        # Post to InfluxDB in another thread
        logging.info("Sensor data will be posted to InfluxDB every {} seconds".format(INFLUXDB_TIME_BETWEEN_POSTS))
        influx_thread = Thread(target=post_to_influxdb)
        influx_thread.start()
    if args.luftdaten:
        # Post to Luftdaten in another thread
        LUFTDATEN_SENSOR_UID = 'raspi-' + get_serial_number()
        logging.info("Sensor data will be posted to Luftdaten every {} seconds for the UID {}".format(LUFTDATEN_TIME_BETWEEN_POSTS, LUFTDATEN_SENSOR_UID))
        luftdaten_thread = Thread(target=post_to_luftdaten)
        luftdaten_thread.start()
    logging.info("Listening on http://{}:{}".format(args.bind, args.port))
    # Main loop: refresh every Prometheus gauge as fast as the sensors
    # allow; gas/particulates are skipped on plain Enviro boards.
    while True:
        get_temperature(args.factor)
        get_pressure()
        get_humidity()
        get_light()
        if not args.enviro:
            get_gas()
            get_particulates()
        if DEBUG:
            logging.info('Sensor data: {}'.format(collect_all_data()))
|
__init__.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Implements context management so that nested/scoped contexts and threaded
contexts work properly and as expected.
"""
import threading, collections, string, logging
from ..timeout import Timeout
class _defaultdict(dict):
"""
Dictionary which loads missing keys from another dictionary.
This is neccesary because the ``default_factory`` method of
:class:`collections.defaultdict` does not provide the key.
Examples:
>>> a = {'foo': 'bar'}
>>> b = pwnlib.context._defaultdict(a)
>>> b['foo']
'bar'
>>> 'foo' in b
False
>>> b['foo'] = 'baz'
>>> b['foo']
'baz'
>>> del b['foo']
>>> b['foo']
'bar'
>>> a = {'foo': 'bar'}
>>> b = pwnlib.context._defaultdict(a)
>>> b['baz'] #doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: 'baz'
"""
def __init__(self, default=None):
super(_defaultdict, self).__init__()
if default is None:
default = {}
self.default = default
def __missing__(self, key):
return self.default[key]
class _DictStack(object):
    """
    Wraps a dictionary-like object, adding :func:`push`/:func:`pop` so that
    its state can be saved to, and later restored from, a stack.

    The object passed as ``default`` must implement ``copy``, ``clear``
    and ``update``.

    Examples:
        >>> t = pwnlib.context._DictStack(default={})
        >>> t['key'] = 'value'
        >>> t
        {'key': 'value'}
        >>> t.push()
        >>> t['key'] = 'value2'
        >>> t.pop()
        >>> t
        {'key': 'value'}
    """
    def __init__(self, default):
        # NOTE: _current is accessed directly by ContextType/_validator,
        # so its name is part of this class's interface.
        self._current = _defaultdict(default)
        self.__saved = []

    def push(self):
        # Snapshot the live state onto the stack.
        self.__saved.append(self._current.copy())

    def pop(self):
        # Discard the live state and restore the most recent snapshot.
        self._current.clear()
        self._current.update(self.__saved.pop())

    def copy(self):
        return self._current.copy()

    # Container protocol: everything is forwarded to the wrapped dict.
    def __len__(self):
        return len(self._current)

    def __delitem__(self, key):
        del self._current[key]

    def __getitem__(self, key):
        return self._current[key]

    def __setitem__(self, key, value):
        self._current[key] = value

    def __contains__(self, key):
        return key in self._current

    def __iter__(self):
        return iter(self._current)

    def __repr__(self):
        return repr(self._current)

    def __eq__(self, other):
        # Forwarded verbatim so NotImplemented propagates for non-dicts.
        return self._current.__eq__(other)

    # keys/values/items are required for the ** keyword-expansion operator.
    def keys(self):
        return self._current.keys()

    def values(self):
        return self._current.values()

    def items(self):
        return self._current.items()
class _Tls_DictStack(threading.local, _DictStack):
    """
    Per-thread implementation of :class:`_DictStack`.

    Inheriting from :class:`threading.local` first means every thread gets
    its own independent ``_DictStack`` state; a new thread therefore sees a
    fresh (empty) stack, as the example below shows.

    Examples:
        >>> t = pwnlib.context._Tls_DictStack({})
        >>> t['key'] = 'value'
        >>> print t
        {'key': 'value'}
        >>> def p(): print t
        >>> thread = threading.Thread(target=p)
        >>> _ = (thread.start(), thread.join())
        {}
    """
    pass
def _validator(validator):
"""
Validator that tis tightly coupled to the implementation
of the classes here.
This expects that the object has a ._tls property which
is of type _DictStack.
"""
name = validator.__name__
doc = validator.__doc__
def fget(self):
return self._tls[name]
def fset(self, val):
self._tls[name] = validator(self, val)
def fdel(self):
self._tls._current.pop(name,None)
return property(fget, fset, fdel, doc)
class Thread(threading.Thread):
    """
    Instantiates a context-aware thread, which inherit its context when it is
    instantiated. The class can be accessed both on the context module as
    `pwnlib.context.Thread` and on the context singleton object inside the
    context module as `pwnlib.context.context.Thread`.
    Threads created by using the native :class`threading`.Thread` will have a
    clean (default) context.
    Regardless of the mechanism used to create any thread, the context
    is de-coupled from the parent thread, so changes do not cascade
    to child or parent.
    Saves a copy of the context when instantiated (at ``__init__``)
    and updates the new thread's context before passing control
    to the user code via ``run`` or ``target=``.
    Examples:
        >>> context.clear()
        >>> context.update(arch='arm')
        >>> def p():
        ...     print context.arch
        ...     context.arch = 'mips'
        ...     print context.arch
        >>> # Note that a normal Thread starts with a clean context
        >>> # (i386 is the default architecture)
        >>> t = threading.Thread(target=p)
        >>> _=(t.start(), t.join())
        i386
        mips
        >>> # Note that the main Thread's context is unchanged
        >>> print context.arch
        arm
        >>> # Note that a context-aware Thread receives a copy of the context
        >>> t = pwnlib.context.Thread(target=p)
        >>> _=(t.start(), t.join())
        arm
        mips
        >>> # Again, the main thread is unchanged
        >>> print context.arch
        arm
    Implementation Details:
        This class implemented by hooking the private function
        :func:`threading.Thread._Thread__bootstrap`, which is called before
        passing control to :func:`threading.Thread.run`.
        This could be done by overriding ``run`` itself, but we would have to
        ensure that all uses of the class would only ever use the keyword
        ``target=`` for ``__init__``, or that all subclasses invoke
        ``super(Subclass.self).set_up_context()`` or similar.
    """
    def __init__(self, *args, **kwargs):
        super(Thread, self).__init__(*args, **kwargs)
        # Snapshot the creating thread's context; it is replayed inside the
        # child thread by __bootstrap before user code runs.
        self.old = context.copy()

    def __bootstrap(self):
        """
        Implementation Details:
            This only works because the class is named ``Thread``.
            If its name is changed, we have to implement this hook
            differently.
        """
        # Name mangling resolves this to _Thread__bootstrap, overriding the
        # private startup hook Python 2's threading.Thread calls before run().
        context.update(**self.old)
        super(Thread, self).__bootstrap()
def _longest(d):
"""
Returns an OrderedDict with the contents of the input dictionary ``d``
sorted by the length of the keys, in descending order.
This is useful for performing substring matching via ``str.startswith``,
as it ensures the most complete match will be found.
>>> data = {'a': 1, 'bb': 2, 'ccc': 3}
>>> _longest(data) == data
True
>>> for i in _longest(data): print i
ccc
bb
a
"""
return collections.OrderedDict((k,d[k]) for k in sorted(d, key=len, reverse=True))
class TlsProperty(object):
    """
    Descriptor that exposes the owner's thread-local storage object.

    Reading the attribute returns the instance's ``_tls`` attribute
    directly.

    Bug fixed: this was previously declared with ``def`` instead of
    ``class``, making ``TlsProperty`` a function whose nested ``__get__``
    was dead code — instantiating it returned ``None`` rather than a
    working descriptor.
    """
    def __get__(self, obj, objtype=None):
        return obj._tls
class ContextType(object):
    r"""
    Class for specifying information about the target machine.
    Intended for use as a pseudo-singleton through the global
    variable ``pwnlib.context.context``, available via
    ``from pwn import *`` as ``context``.
    The context is usually specified at the top of the Python file for clarity. ::
        #!/usr/bin/env python
        context.update(arch='i386', os='linux')
    Currently supported properties and their defaults are listed below.
    The defaults are inherited from :data:`pwnlib.context.ContextType.defaults`.
    Additionally, the context is thread-aware when using
    :class:`pwnlib.context.Thread` instead of :class:`threading.Thread`
    (all internal ``pwntools`` threads use the former).
    The context is also scope-aware by using the ``with`` keyword.
    Examples:
        >>> context.clear()
        >>> context.update(os='linux') # doctest: +ELLIPSIS
        >>> context.os == 'linux'
        True
        >>> context.arch = 'arm'
        >>> vars(context) == {'arch': 'arm', 'bits': 32, 'endian': 'little', 'os': 'linux'}
        True
        >>> context.endian
        'little'
        >>> context.bits
        32
        >>> def nop():
        ...     print pwnlib.asm.asm('nop').encode('hex')
        >>> nop()
        00f020e3
        >>> with context.local(arch = 'i386'):
        ...     nop()
        90
        >>> from pwnlib.context import Thread as PwnThread
        >>> from threading import Thread as NormalThread
        >>> with context.local(arch = 'mips'):
        ...     pwnthread = PwnThread(target=nop)
        ...     thread = NormalThread(target=nop)
        >>> # Normal thread uses the default value for arch, 'i386'
        >>> _=(thread.start(), thread.join())
        90
        >>> # Pwnthread uses the correct context from creation-time
        >>> _=(pwnthread.start(), pwnthread.join())
        00000000
        >>> nop()
        00f020e3
    """
    #
    # Use of 'slots' is a heavy-handed way to prevent accidents
    # like 'context.architecture=' instead of 'context.arch='.
    #
    # Setting any properties on a ContextType object will throw an
    # exception.
    #
    __slots__ = '_tls',

    #: Default values for :class:`pwnlib.context.ContextType`
    defaults = {
        'arch': 'i386',
        'bits': 32,
        'endian': 'little',
        'log_level': logging.INFO,
        'newline': '\n',
        'os': 'linux',
        'signed': False,
        'timeout': Timeout.maximum,
    }

    #: Valid values for :meth:`pwnlib.context.ContextType.os`
    oses = sorted(('linux','freebsd','windows'))

    # Shorthand endian/word-size bundles used by `architectures` below.
    big_32 = {'endian': 'big', 'bits': 32}
    big_64 = {'endian': 'big', 'bits': 64}
    little_8 = {'endian': 'little', 'bits': 8}
    little_16 = {'endian': 'little', 'bits': 16}
    little_32 = {'endian': 'little', 'bits': 32}
    little_64 = {'endian': 'little', 'bits': 64}

    #: Keys are valid values for :meth:`pwnlib.context.ContextType.arch`.
    #
    #: Values are defaults which are set when
    #: :attr:`pwnlib.context.ContextType.arch` is set
    architectures = _longest({
        'aarch64': little_64,
        'alpha': little_64,
        'avr': little_8,
        'amd64': little_64,
        'arm': little_32,
        'cris': little_32,
        'i386': little_32,
        'ia64': big_64,
        'm68k': big_32,
        'mips': little_32,
        'mips64': little_64,
        'msp430': little_16,
        'powerpc': big_32,
        'powerpc64': big_64,
        's390': big_32,
        'sparc': big_32,
        'sparc64': big_64,
        'thumb': little_32,
        'vax': little_32,
    })

    #: Valid values for :attr:`endian`
    endiannesses = _longest({
        'be': 'big',
        'eb': 'big',
        'big': 'big',
        'le': 'little',
        'el': 'little',
        'little': 'little'
    })

    #: Valid string values for :attr:`signed`
    signednesses = {
        'unsigned': False,
        'no': False,
        'yes': True,
        'signed': True
    }

    valid_signed = sorted(signednesses)

    def __init__(self, **kwargs):
        """
        Initialize the ContextType structure.
        All keyword arguments are passed to :func:`update`.
        """
        # Thread-local stack of settings; reads of unset keys fall through
        # to ContextType.defaults via _defaultdict.
        self._tls = _Tls_DictStack(_defaultdict(ContextType.defaults))
        self.update(**kwargs)

    def copy(self):
        """copy() -> dict
        Returns a copy of the current context as a dictionary.
        Examples:
            >>> context.clear()
            >>> context.os = 'linux'
            >>> vars(context) == {'os': 'linux'}
            True
        """
        return self._tls.copy()

    @property
    def __dict__(self):
        # Makes vars(context) show only the explicitly-set values.
        return self.copy()

    def update(self, *args, **kwargs):
        """
        Convenience function, which is shorthand for setting multiple
        variables at once.
        It is a simple shorthand such that::
            context.update(os = 'linux', arch = 'arm', ...)
        is equivalent to::
            context.os = 'linux'
            context.arch = 'arm'
            ...
        The following syntax is also valid::
            context.update({'os': 'linux', 'arch': 'arm'})
        Arguments:
            kwargs: Variables to be assigned in the environment.
        Examples:
            >>> context.clear()
            >>> context.update(arch = 'i386', os = 'linux')
            >>> context.arch, context.os
            ('i386', 'linux')
        """
        # Positional dicts are expanded recursively into keyword form.
        for arg in args:
            self.update(**arg)
        # setattr routes every key through its @_validator property.
        for k,v in kwargs.items():
            setattr(self,k,v)

    def __repr__(self):
        v = sorted("%s = %r" % (k,v) for k,v in self._tls._current.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(v))

    def local(self, **kwargs):
        """local(**kwargs) -> context manager
        Create a context manager for use with the ``with`` statement.
        For more information, see the example below or PEP 343.
        Arguments:
            kwargs: Variables to be assigned in the new environment.
        Returns:
            ContextType manager for managing the old and new environment.
        Examples:
            >>> context.clear()
            >>> context.timeout = 1
            >>> context.timeout == 1
            True
            >>> with context.local(timeout = 2):
            ...     print context.timeout
            ...     context.timeout = 3
            ...     print context.timeout
            2.0
            3.0
            >>> print context.timeout
            1.0
        """
        class LocalContext(object):
            # 'a' is the LocalContext instance; 'self' and 'kwargs' are
            # closed over from the enclosing ContextType.local() call.
            def __enter__(a):
                self._tls.push()
                # None-valued kwargs are ignored so callers can pass through
                # optional settings without clobbering the context.
                self.update(**{k:v for k,v in kwargs.items() if v is not None})
                return self
            def __exit__(a, *b, **c):
                self._tls.pop()
        return LocalContext()

    def clear(self):
        """
        Clears the contents of the context.
        All values are set to their defaults.
        Examples:
            >>> # Default value
            >>> context.arch == 'i386'
            True
            >>> context.arch = 'arm'
            >>> context.arch == 'i386'
            False
            >>> context.clear()
            >>> context.arch == 'i386'
            True
        """
        # Only the overrides are removed; defaults stay visible through the
        # _defaultdict fallback.
        self._tls._current.clear()

    @_validator
    def arch(self, arch):
        """
        Target machine architecture.
        Allowed values are listed in :attr:`pwnlib.context.ContextType.architectures`.
        Side Effects:
            If an architecture is specified which also implies additional
            attributes (e.g. 'amd64' implies 64-bit words, 'powerpc' implies
            big-endian), these attributes will be set on the context if a
            user has not already set a value.
            The following properties may be modified.
            - :attr:`bits`
            - :attr:`endian`
        Raises:
            AttributeError: An invalid architecture was specified
        Examples:
            >>> context.clear()
            >>> context.arch == 'i386' # Default architecture
            True
            >>> context.arch = 'mips'
            >>> context.arch == 'mips'
            True
            >>> context.arch = 'doge' #doctest: +ELLIPSIS
            Traceback (most recent call last):
            ...
            AttributeError: arch must be one of ['aarch64', ..., 'thumb']
            >>> context.arch = 'ppc'
            >>> context.arch == 'powerpc' # Aliased architecture
            True
            >>> context.clear()
            >>> context.bits == 32 # Default value
            True
            >>> context.arch = 'amd64'
            >>> context.bits == 64 # New value
            True
            Note that expressly setting :attr:`bits` means that we use
            that value instead of the default
            >>> context.clear()
            >>> context.bits = 32
            >>> context.arch = 'amd64'
            >>> context.bits == 32
            True
            Setting the architecture can override the defaults for
            both :attr:`endian` and :attr:`bits`
            >>> context.clear()
            >>> context.arch = 'powerpc64'
            >>> vars(context) == {'arch': 'powerpc64', 'bits': 64, 'endian': 'big'}
            True
        """
        # Lowercase, remove everything non-alphanumeric
        arch = arch.lower()
        # NOTE(review): str.replace substitutes the *exact substring*
        # string.punctuation, not each punctuation character, so this line
        # is effectively a no-op for realistic inputs.  Left unchanged
        # because a per-character strip would break the 'x86_64' alias
        # below — confirm intended behavior.
        arch = arch.replace(string.punctuation, '')
        # Attempt to perform convenience and legacy compatibility
        # transformations.
        # NOTE(review): dict iteration order is arbitrary here (Python 2),
        # so 'x86_64' may be rewritten by the 'x86' rule before the
        # 'x86_64' rule is tried — verify aliases resolve as intended.
        transform = {'x86':'i386', 'ppc': 'powerpc', 'x86_64': 'amd64'}
        for k, v in transform.items():
            if arch.startswith(k):
                arch = arch.replace(k,v,1)
        # Bug fixed: the message previously began with a duplicated
        # 'AttributeError:' prefix, contradicting the doctest above.
        if arch not in ContextType.architectures:
            raise AttributeError('arch must be one of %r' % sorted(ContextType.architectures))
        # Apply the architecture's implied defaults (bits/endian) only
        # where the user has not already set an explicit value.
        for k,v in ContextType.architectures[arch].items():
            if k not in self._tls:
                self._tls[k] = v
        return arch

    @_validator
    def bits(self, bits):
        """
        Target machine word size, in bits (i.e. the size of general purpose registers).
        The default value is ``32``, but changes according to :attr:`arch`.
        Examples:
            >>> context.clear()
            >>> context.bits == 32
            True
            >>> context.bits = 64
            >>> context.bits == 64
            True
            >>> context.bits = -1 #doctest: +ELLIPSIS
            Traceback (most recent call last):
            ...
            AttributeError: bits must be >= 0 (-1)
        """
        bits = int(bits)
        # NOTE(review): zero is rejected too, although the message reads
        # ">= 0"; the doctests pin this exact message, so it is kept as-is.
        if bits <= 0:
            raise AttributeError("bits must be >= 0 (%r)" % bits)
        return bits

    @property
    def bytes(self):
        """
        Target machine word size, in bytes (i.e. the size of general purpose registers).
        This is a convenience wrapper around ``bits / 8``.
        Examples:
            >>> context.bytes = 1
            >>> context.bits == 8
            True
            >>> context.bytes = 0 #doctest: +ELLIPSIS
            Traceback (most recent call last):
            ...
            AttributeError: bits must be >= 0 (0)
        """
        # Python 2 integer division; under Python 3 this would be a float.
        return self.bits/8

    @bytes.setter
    def bytes(self, value):
        self.bits = value*8

    @_validator
    def endian(self, endianness):
        """
        Endianness of the target machine.
        The default value is ``'little'``, but changes according to :attr:`arch`.
        Raises:
            AttributeError: An invalid endianness was provided
        Examples:
            >>> context.clear()
            >>> context.endian == 'little'
            True
            >>> context.endian = 'big'
            >>> context.endian
            'big'
            >>> context.endian = 'be'
            >>> context.endian == 'big'
            True
            >>> context.endian = 'foobar' #doctest: +ELLIPSIS
            Traceback (most recent call last):
            ...
            AttributeError: endian must be one of ['be', 'big', 'eb', 'el', 'le', 'little']
        """
        endian = endianness.lower()
        if endian not in ContextType.endiannesses:
            raise AttributeError("endian must be one of %r" % sorted(ContextType.endiannesses))
        # Aliases ('be', 'le', ...) are normalized to 'big'/'little'.
        return ContextType.endiannesses[endian]

    @_validator
    def log_level(self, value):
        """
        Sets the verbosity of ``pwntools`` logging mechanism.
        More specifically it controls the filtering of messages that happens
        inside the handler for logging to the screen. So if you want e.g. log
        all messages to a file, then this attribute makes no difference to you.
        Valid values are specified by the standard Python ``logging`` module.
        Default value is set to ``INFO``.
        Examples:
            >>> context.log_level = 'error'
            >>> context.log_level == logging.ERROR
            True
            >>> context.log_level = 10
            >>> context.log_level = 'foobar' #doctest: +ELLIPSIS
            Traceback (most recent call last):
            ...
            AttributeError: log_level must be an integer or one of ['CRITICAL', 'DEBUG', 'ERROR', 'INFO', 'NOTSET', 'WARN', 'WARNING']
        """
        # If it can be converted into an int, success
        try: return int(value)
        except ValueError: pass
        # If it is defined in the logging module, success
        try: return getattr(logging, value.upper())
        except AttributeError: pass
        # Otherwise, fail
        # NOTE(review): logging._levelNames exists only on Python 2; this
        # module targets python2 (see shebang), so it is kept.
        level_names = filter(lambda x: isinstance(x,str), logging._levelNames)
        permitted = sorted(level_names)
        raise AttributeError('log_level must be an integer or one of %r' % permitted)

    @_validator
    def os(self, os):
        """
        Operating system of the target machine.
        The default value is ``linux``.
        Allowed values are listed in :attr:`pwnlib.context.ContextType.oses`.
        Examples:
            >>> context.os = 'linux'
            >>> context.os = 'foobar' #doctest: +ELLIPSIS
            Traceback (most recent call last):
            ...
            AttributeError: os must be one of ['freebsd', 'linux', 'windows']
        """
        os = os.lower()
        if os not in ContextType.oses:
            raise AttributeError("os must be one of %r" % sorted(ContextType.oses))
        return os

    @_validator
    def signed(self, signed):
        """
        Signed-ness for packing operation when it's not explicitly set.
        Can be set to any non-string truthy value, or the specific string
        values ``'signed'`` or ``'unsigned'`` which are converted into
        ``True`` and ``False`` correspondingly.
        Examples:
            >>> context.signed
            False
            >>> context.signed = 1
            >>> context.signed
            True
            >>> context.signed = 'signed'
            >>> context.signed
            True
            >>> context.signed = 'unsigned'
            >>> context.signed
            False
            >>> context.signed = 'foobar' #doctest: +ELLIPSIS
            Traceback (most recent call last):
            ...
            AttributeError: signed must be one of ['no', 'signed', 'unsigned', 'yes'] or a non-string truthy value
        """
        # Recognized strings map to booleans; unknown strings are rejected
        # below so that e.g. 'foobar' does not silently become True.
        try: signed = ContextType.signednesses[signed]
        except KeyError: pass
        if isinstance(signed, str):
            raise AttributeError('signed must be one of %r or a non-string truthy value' % sorted(ContextType.signednesses))
        return bool(signed)

    @_validator
    def timeout(self, value=Timeout.default):
        """
        Default amount of time to wait for a blocking operation before it times out,
        specified in seconds.
        The default value is to have an infinite timeout.
        See :class:`pwnlib.timeout.Timeout` for additional information on
        valid values.
        """
        # Timeout() normalizes/validates the value (e.g. floats, infinity).
        return Timeout(value).timeout

    #*************************************************************************
    #                               ALIASES
    #*************************************************************************
    #
    # These fields are aliases for fields defined above, either for
    # convenience or compatibility.
    #
    #*************************************************************************
    def __call__(self, **kwargs):
        """
        Alias for :meth:`pwnlib.context.ContextType.update`
        """
        return self.update(**kwargs)

    def reset_local(self):
        """
        Deprecated.  Use :meth:`clear`.
        """
        self.clear()

    @property
    def endianness(self):
        """
        Legacy alias for :attr:`endian`.
        Examples:
            >>> context.endian == context.endianness
            True
        """
        return self.endian

    @endianness.setter
    def endianness(self, value):
        self.endian = value

    @property
    def sign(self):
        """
        Alias for :attr:`signed`
        """
        return self.signed

    @sign.setter
    def sign(self, value):
        self.signed = value

    @property
    def signedness(self):
        """
        Alias for :attr:`signed`
        """
        return self.signed

    @signedness.setter
    def signedness(self, value):
        self.signed = value

    @property
    def word_size(self):
        """
        Alias for :attr:`bits`
        """
        return self.bits

    @word_size.setter
    def word_size(self, value):
        self.bits = value

    # Expose the context-aware Thread class on the singleton for convenience.
    Thread = Thread
#: Global ``context`` object, used to store commonly-used pwntools settings.
#: In most cases, the context is used to infer default variables values.
#: For example, :meth:`pwnlib.asm.asm` can take an ``os`` parameter as a
#: keyword argument. If it is not supplied, the ``os`` specified by
#: ``context`` is used instead.
#: Consider it a shorthand to passing ``os=`` and ``arch=`` to every single
#: function call.
context = ContextType()
|
__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Han Xiao <artex.xh@gmail.com> <https://hanxiao.github.io>
import sys
import threading
import time
import uuid
import warnings
from collections import namedtuple
from functools import wraps
import numpy as np
import zmq
from zmq.utils import jsonapi
__all__ = ['__version__', 'BertClient', 'ConcurrentBertClient']
# in the future client version must match with server version
__version__ = '1.8.7'
if sys.version_info >= (3, 0):
from ._py3_var import *
else:
from ._py2_var import *
_Response = namedtuple('_Response', ['id', 'content'])
Response = namedtuple('Response', ['id', 'embedding', 'tokens'])
class BertClient(object):
def __init__(self, ip='localhost', port=5555, port_out=5556,
output_fmt='ndarray', show_server_config=False,
identity=None, check_version=True, check_length=True,
check_token_info=True, ignore_all_checks=False,
timeout=-1):
""" A client object connected to a BertServer
Create a BertClient that connects to a BertServer.
Note, server must be ready at the moment you are calling this function.
If you are not sure whether the server is ready, then please set `ignore_all_checks=True`
You can also use it as a context manager:
.. highlight:: python
.. code-block:: python
with BertClient() as bc:
bc.encode(...)
# bc is automatically closed out of the context
:type timeout: int
:type check_version: bool
:type check_length: bool
:type check_token_info: bool
:type ignore_all_checks: bool
:type identity: str
:type show_server_config: bool
:type output_fmt: str
:type port_out: int
:type port: int
:type ip: str
:param ip: the ip address of the server
:param port: port for pushing data from client to server, must be consistent with the server side config
:param port_out: port for publishing results from server to client, must be consistent with the server side config
:param output_fmt: the output format of the sentence encodes, either in numpy array or python List[List[float]] (ndarray/list)
:param show_server_config: whether to show server configs when first connected
:param identity: the UUID of this client
:param check_version: check if server has the same version as client, raise AttributeError if not the same
:param check_length: check if server `max_seq_len` is less than the sentence length before sent
:param check_token_info: check if server can return tokenization
:param ignore_all_checks: ignore all checks, set it to True if you are not sure whether the server is ready when constructing BertClient()
:param timeout: set the timeout (milliseconds) for receive operation on the client, -1 means no timeout and wait until result returns
"""
self.context = zmq.Context()
self.sender = self.context.socket(zmq.PUSH)
self.sender.setsockopt(zmq.LINGER, 0)
self.identity = identity or str(uuid.uuid4()).encode('ascii')
self.sender.connect('tcp://%s:%d' % (ip, port))
self.receiver = self.context.socket(zmq.SUB)
self.receiver.setsockopt(zmq.LINGER, 0)
self.receiver.setsockopt(zmq.SUBSCRIBE, self.identity)
self.receiver.connect('tcp://%s:%d' % (ip, port_out))
self.request_id = 0
self.timeout = timeout
self.pending_request = set()
self.pending_response = {}
if output_fmt == 'ndarray':
self.formatter = lambda x: x
elif output_fmt == 'list':
self.formatter = lambda x: x.tolist()
else:
raise AttributeError('"output_fmt" must be "ndarray" or "list"')
self.output_fmt = output_fmt
self.port = port
self.port_out = port_out
self.ip = ip
self.length_limit = 0
self.token_info_available = False
if not ignore_all_checks and (check_version or show_server_config or check_length or check_token_info):
s_status = self.server_status
if check_version and s_status['server_version'] != self.status['client_version']:
raise AttributeError('version mismatch! server version is %s but client version is %s!\n'
'consider "pip install -U bert-serving-server bert-serving-client"\n'
'or disable version-check by "BertClient(check_version=False)"' % (
s_status['server_version'], self.status['client_version']))
if check_length:
if s_status['max_seq_len'] is not None:
self.length_limit = int(s_status['max_seq_len'])
else:
self.length_limit = None
if check_token_info:
self.token_info_available = bool(s_status['show_tokens_to_client'])
if show_server_config:
self._print_dict(s_status, 'server config:')
def close(self):
"""
Gently close all connections of the client. If you are using BertClient as context manager,
then this is not necessary.
"""
self.sender.close()
self.receiver.close()
self.context.term()
def _send(self, msg, msg_len=0):
self.request_id += 1
self.sender.send_multipart([self.identity, msg, b'%d' % self.request_id, b'%d' % msg_len])
self.pending_request.add(self.request_id)
return self.request_id
def _recv(self, wait_for_req_id=None):
try:
while True:
# a request has been returned and found in pending_response
if wait_for_req_id in self.pending_response:
response = self.pending_response.pop(wait_for_req_id)
return _Response(wait_for_req_id, response)
# receive a response
response = self.receiver.recv_multipart()
request_id = int(response[-1])
# if not wait for particular response then simply return
if not wait_for_req_id or (wait_for_req_id == request_id):
self.pending_request.remove(request_id)
return _Response(request_id, response)
elif wait_for_req_id != request_id:
self.pending_response[request_id] = response
# wait for the next response
except Exception as e:
raise e
finally:
if wait_for_req_id in self.pending_request:
self.pending_request.remove(wait_for_req_id)
def _recv_ndarray(self, wait_for_req_id=None):
request_id, response = self._recv(wait_for_req_id)
arr_info, arr_val = jsonapi.loads(response[1]), response[2]
X = np.frombuffer(_buffer(arr_val), dtype=str(arr_info['dtype']))
return Response(request_id, self.formatter(X.reshape(arr_info['shape'])), arr_info.get('tokens', ''))
@property
def status(self):
"""
Get the status of this BertClient instance
:rtype: dict[str, str]
:return: a dictionary contains the status of this BertClient instance
"""
return {
'identity': self.identity,
'num_request': self.request_id,
'num_pending_request': len(self.pending_request),
'pending_request': self.pending_request,
'output_fmt': self.output_fmt,
'port': self.port,
'port_out': self.port_out,
'server_ip': self.ip,
'client_version': __version__,
'timeout': self.timeout
}
def _timeout(func):
@wraps(func)
def arg_wrapper(self, *args, **kwargs):
if 'blocking' in kwargs and not kwargs['blocking']:
# override client timeout setting if `func` is called in non-blocking way
self.receiver.setsockopt(zmq.RCVTIMEO, -1)
else:
self.receiver.setsockopt(zmq.RCVTIMEO, self.timeout)
try:
return func(self, *args, **kwargs)
except zmq.error.Again as _e:
t_e = TimeoutError(
'no response from the server (with "timeout"=%d ms), please check the following:'
'is the server still online? is the network broken? are "port" and "port_out" correct? '
'are you encoding a huge amount of data whereas the timeout is too small for that?' % self.timeout)
if _py2:
raise t_e
else:
_raise(t_e, _e)
finally:
self.receiver.setsockopt(zmq.RCVTIMEO, -1)
return arg_wrapper
@property
@_timeout
def server_status(self):
"""
Get the current status of the server connected to this client
:return: a dictionary contains the current status of the server connected to this client
:rtype: dict[str, str]
"""
req_id = self._send(b'SHOW_CONFIG')
return jsonapi.loads(self._recv(req_id).content[1])
@_timeout
def encode(self, texts, blocking=True, is_tokenized=False, show_tokens=False):
""" Encode a list of strings to a list of vectors
`texts` should be a list of strings, each of which represents a sentence.
If `is_tokenized` is set to True, then `texts` should be list[list[str]],
outer list represents sentence and inner list represent tokens in the sentence.
Note that if `blocking` is set to False, then you need to fetch the result manually afterwards.
.. highlight:: python
.. code-block:: python
with BertClient() as bc:
# encode untokenized sentences
bc.encode(['First do it',
'then do it right',
'then do it better'])
# encode tokenized sentences
bc.encode([['First', 'do', 'it'],
['then', 'do', 'it', 'right'],
['then', 'do', 'it', 'better']], is_tokenized=True)
:type is_tokenized: bool
:type show_tokens: bool
:type blocking: bool
:type timeout: bool
:type texts: list[str] or list[list[str]]
:param is_tokenized: whether the input texts is already tokenized
:param show_tokens: whether to include tokenization result from the server. If true, the return of the function will be a tuple
:param texts: list of sentence to be encoded. Larger list for better efficiency.
:param blocking: wait until the encoded result is returned from the server. If false, will immediately return.
:param timeout: throw a timeout error when the encoding takes longer than the predefined timeout.
:return: encoded sentence/token-level embeddings, rows correspond to sentences
:rtype: numpy.ndarray or list[list[float]]
"""
if is_tokenized:
self._check_input_lst_lst_str(texts)
else:
self._check_input_lst_str(texts)
if self.length_limit is None:
warnings.warn('server does not put a restriction on "max_seq_len", '
'it will determine "max_seq_len" dynamically according to the sequences in the batch. '
'you can restrict the sequence length on the client side for better efficiency')
elif self.length_limit and not self._check_length(texts, self.length_limit, is_tokenized):
warnings.warn('some of your sentences have more tokens than "max_seq_len=%d" set on the server, '
'as consequence you may get less-accurate or truncated embeddings.\n'
'here is what you can do:\n'
'- disable the length-check by create a new "BertClient(check_length=False)" '
'when you do not want to display this warning\n'
'- or, start a new server with a larger "max_seq_len"' % self.length_limit)
req_id = self._send(jsonapi.dumps(texts), len(texts))
if not blocking:
return None
r = self._recv_ndarray(req_id)
if self.token_info_available and show_tokens:
return r.embedding, r.tokens
elif not self.token_info_available and show_tokens:
warnings.warn('"show_tokens=True", but the server does not support showing tokenization info to clients.\n'
'here is what you can do:\n'
'- start a new server with "bert-serving-start -show_tokens_to_client ..."\n'
'- or, use "encode(show_tokens=False)"')
return r.embedding
def fetch(self, delay=.0):
""" Fetch the encoded vectors from server, use it with `encode(blocking=False)`
Use it after `encode(texts, blocking=False)`. If there is no pending requests, will return None.
Note that `fetch()` does not preserve the order of the requests! Say you have two non-blocking requests,
R1 and R2, where R1 with 256 samples, R2 with 1 samples. It could be that R2 returns first.
To fetch all results in the original sending order, please use `fetch_all(sort=True)`
:type delay: float
:param delay: delay in seconds and then run fetcher
:return: a generator that yields request id and encoded vector in a tuple, where the request id can be used to determine the order
:rtype: Iterator[tuple(int, numpy.ndarray)]
"""
time.sleep(delay)
while self.pending_request:
yield self._recv_ndarray()
def fetch_all(self, sort=True, concat=False):
""" Fetch all encoded vectors from server, use it with `encode(blocking=False)`
Use it `encode(texts, blocking=False)`. If there is no pending requests, it will return None.
:type sort: bool
:type concat: bool
:param sort: sort results by their request ids. It should be True if you want to preserve the sending order
:param concat: concatenate all results into one ndarray
:return: encoded sentence/token-level embeddings in sending order
:rtype: numpy.ndarray or list[list[float]]
"""
if self.pending_request:
tmp = list(self.fetch())
if sort:
tmp = sorted(tmp, key=lambda v: v.id)
tmp = [v.embedding for v in tmp]
if concat:
if self.output_fmt == 'ndarray':
tmp = np.concatenate(tmp, axis=0)
elif self.output_fmt == 'list':
tmp = [vv for v in tmp for vv in v]
return tmp
def encode_async(self, batch_generator, max_num_batch=None, delay=0.1, **kwargs):
    """Encode batches produced by a generator on a background thread.

    :param batch_generator: yields list[str] (or list[list[str]] when
        `is_tokenized=True`) on each iteration
    :param max_num_batch: stop after submitting this many batches (None = all)
    :param delay: seconds the fetcher waits before draining results
    :param `**kwargs`: forwarded to `encode()`
    :return: a generator yielding encoded vectors; request ids determine the order
    :rtype: Iterator[tuple(int, numpy.ndarray)]
    """
    def _submit_batches():
        # Producer: push every batch as a non-blocking encode request.
        for num_sent, batch in enumerate(batch_generator, start=1):
            self.encode(batch, blocking=False, **kwargs)
            if max_num_batch and num_sent == max_num_batch:
                break
    producer = threading.Thread(target=_submit_batches)
    producer.start()
    return self.fetch(delay)
@staticmethod
def _check_length(texts, len_limit, tokenized):
if tokenized:
# texts is already tokenized as list of str
return all(len(t) <= len_limit for t in texts)
else:
# do a simple whitespace tokenizer
return all(len(t.split()) <= len_limit for t in texts)
@staticmethod
def _check_input_lst_str(texts):
    """Validate that `texts` is a non-empty list of non-blank strings.

    Raises TypeError for wrong container/element types and ValueError for
    empty input.  Under Python 2 elements are converted to unicode in place.
    """
    if not isinstance(texts, list):
        raise TypeError('"%s" must be %s, but received %s' % (texts, type([]), type(texts)))
    if not texts:
        raise ValueError(
            '"%s" must be a non-empty list, but received %s with %d elements' % (texts, type(texts), len(texts)))
    for idx, item in enumerate(texts):
        if not isinstance(item, _str):
            raise TypeError('all elements in the list must be %s, but element %d is %s' % (type(''), idx, type(item)))
        if not item.strip():
            raise ValueError(
                'all elements in the list must be non-empty string, but element %d is %s' % (idx, repr(item)))
        if _py2:
            # In-place normalization so downstream code only sees unicode.
            texts[idx] = _unicode(texts[idx])
@staticmethod
def _check_input_lst_lst_str(texts):
    """Validate a non-empty list whose elements are each valid lists of strings."""
    if not isinstance(texts, list):
        raise TypeError('"texts" must be %s, but received %s' % (type([]), type(texts)))
    if not texts:
        raise ValueError(
            '"texts" must be a non-empty list, but received %s with %d elements' % (type(texts), len(texts)))
    for inner in texts:
        # Delegate per-element validation to the single-list checker.
        BertClient._check_input_lst_str(inner)
@staticmethod
def _print_dict(x, title=None):
if title:
print(title)
for k, v in x.items():
print('%30s\t=\t%-30s' % (k, v))
def __enter__(self):
    """Support `with BertClient(...) as bc:` usage; yields the client itself."""
    return self
def __exit__(self, exc_type, exc_val, exc_tb):
    """Close the client's connections when leaving a `with` block."""
    self.close()
class BCManager():
    """Context manager that borrows a BertClient from a shared pool.

    On entry it pops a client from `available_bc` (raising IndexError when
    the pool is exhausted) and on exit it returns the client to the pool.
    """
    def __init__(self, available_bc):
        # Shared list of idle clients; list.pop()/append() are atomic under
        # the GIL, which is what makes this simple pool thread-safe.
        self.available_bc = available_bc
        self.bc = None  # client currently checked out by this manager
    def __enter__(self):
        self.bc = self.available_bc.pop()
        return self.bc
    def __exit__(self, *args):
        self.available_bc.append(self.bc)
class ConcurrentBertClient(BertClient):
    def __init__(self, max_concurrency=10, **kwargs):
        """ A thread-safe client object connected to a BertServer
        Create a BertClient that connects to a BertServer.
        Note, server must be ready at the moment you are calling this function.
        If you are not sure whether the server is ready, then please set `check_version=False` and `check_length=False`
        :type max_concurrency: int
        :param max_concurrency: the maximum number of concurrent connections allowed
        """
        try:
            from bert_serving.client import BertClient
        except ImportError:
            raise ImportError('BertClient module is not available, it is required for serving HTTP requests.'
                              'Please use "pip install -U bert-serving-client" to install it.'
                              'If you do not want to use it as an HTTP server, '
                              'then remove "-http_port" from the command line.')
        # NOTE(review): deliberately does not call super().__init__(); the pool
        # below holds the real connections, one BertClient per concurrency slot.
        self.available_bc = [BertClient(**kwargs) for _ in range(max_concurrency)]
        self.max_concurrency = max_concurrency
    def close(self):
        # Close every pooled client, not just one.
        for bc in self.available_bc:
            bc.close()
    def _concurrent(func):
        # Decorator: run the wrapped call on a client checked out of the pool.
        @wraps(func)
        def arg_wrapper(self, *args, **kwargs):
            try:
                with BCManager(self.available_bc) as bc:
                    f = getattr(bc, func.__name__)
                    # Properties (server_status/status) resolve to plain dicts
                    # rather than bound methods, so only call when callable.
                    r = f if isinstance(f, dict) else f(*args, **kwargs)
                    return r
            except IndexError:
                raise RuntimeError('Too many concurrent connections!'
                                   'Try to increase the value of "max_concurrency", '
                                   'currently =%d' % self.max_concurrency)
        return arg_wrapper
    @_concurrent
    def encode(self, **kwargs):
        # Body intentionally empty: the decorator forwards to a pooled client.
        pass
    @property
    @_concurrent
    def server_status(self):
        pass
    @property
    @_concurrent
    def status(self):
        pass
    def fetch(self, **kwargs):
        raise NotImplementedError('Async encoding of "ConcurrentBertClient" is not implemented yet')
    def fetch_all(self, **kwargs):
        raise NotImplementedError('Async encoding of "ConcurrentBertClient" is not implemented yet')
    def encode_async(self, **kwargs):
        raise NotImplementedError('Async encoding of "ConcurrentBertClient" is not implemented yet')
|
sync.py | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import netrc
from optparse import SUPPRESS_HELP
import os
import re
import shutil
import socket
import subprocess
import sys
import time
from pyversion import is_python3
if is_python3():
import urllib.parse
import xmlrpc.client
else:
import imp
import urlparse
import xmlrpclib
urllib = imp.new_module('urllib')
urllib.parse = urlparse
xmlrpc = imp.new_module('xmlrpc')
xmlrpc.client = xmlrpclib
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
import resource
def _rlimit_nofile():
return resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
def _rlimit_nofile():
return (256, 256)
try:
import multiprocessing
except ImportError:
multiprocessing = None
from git_command import GIT, git_require
from git_refs import R_HEADS, HEAD
from project import Project
from project import RemoteSpec
from command import Command, MirrorSafeCommand
from error import RepoChangedException, GitError, ManifestParseError
from project import SyncBuffer
from progress import Progress
from wrapper import Wrapper
_ONE_DAY_S = 24 * 60 * 60
# Raised (and swallowed by _FetchHelper) to abort one project's fetch cleanly.
class _FetchError(Exception):
  """Internal error thrown in _FetchHelper() when we don't want stack trace."""
  pass
class Sync(Command, MirrorSafeCommand):
  # Default number of parallel fetch jobs; may be overridden by the
  # manifest's sync-j attribute or the -j/--jobs flag (see _Options/Execute).
  jobs = 1
  common = True
  helpSummary = "Update working tree to the latest revision"
  helpUsage = """
%prog [<project>...]
"""
  helpDescription = """
The '%prog' command synchronizes local project directories
with the remote repositories specified in the manifest. If a local
project does not yet exist, it will clone a new local directory from
the remote repository and set up tracking branches as specified in
the manifest. If the local project already exists, '%prog'
will update the remote branches and rebase any new local changes
on top of the new remote changes.
'%prog' will synchronize all projects listed at the command
line. Projects can be specified either by name, or by a relative
or absolute path to the project's local directory. If no projects
are specified, '%prog' will synchronize all projects listed in
the manifest.
The -d/--detach option can be used to switch specified projects
back to the manifest revision. This option is especially helpful
if the project is currently on a topic branch, but the manifest
revision is temporarily needed.
The -s/--smart-sync option can be used to sync to a known good
build as specified by the manifest-server element in the current
manifest. The -t/--smart-tag option is similar and allows you to
specify a custom tag/label.
The -u/--manifest-server-username and -p/--manifest-server-password
options can be used to specify a username and password to authenticate
with the manifest server when using the -s or -t option.
If -u and -p are not specified when using the -s or -t option, '%prog'
will attempt to read authentication credentials for the manifest server
from the user's .netrc file.
'%prog' will not use authentication credentials from -u/-p or .netrc
if the manifest server specified in the manifest file already includes
credentials.
The -f/--force-broken option can be used to proceed with syncing
other projects if a project sync fails.
The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumeable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.
The --fetch-submodules option enables fetching Git submodules
of a project from server.
SSH Connections
---------------
If at least one project remote URL uses an SSH connection (ssh://,
git+ssh://, or user@host:path syntax) repo will automatically
enable the SSH ControlMaster option when connecting to that host.
This feature permits other projects in the same '%prog' session to
reuse the same SSH tunnel, saving connection setup overheads.
To disable this behavior on UNIX platforms, set the GIT_SSH
environment variable to 'ssh'. For example:
export GIT_SSH=ssh
%prog
Compatibility
~~~~~~~~~~~~~
This feature is automatically disabled on Windows, due to the lack
of UNIX domain socket support.
This feature is not compatible with url.insteadof rewrites in the
user's ~/.gitconfig. '%prog' is currently not able to perform the
rewrite early enough to establish the ControlMaster tunnel.
If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or
later is required to fix a server side protocol bug.
"""
def _Options(self, p, show_smart=True):
  """Register `repo sync` command-line options on parser `p`.

  `show_smart` controls whether the -s/--smart-sync and -t/--smart-tag
  options are exposed.
  """
  # Seed the default job count from the manifest's sync-j attribute when the
  # manifest parses; fall back to a single job otherwise.
  try:
    self.jobs = self.manifest.default.sync_j
  except ManifestParseError:
    self.jobs = 1
  p.add_option('-f', '--force-broken',
               dest='force_broken', action='store_true',
               help="continue sync even if a project fails to sync")
  p.add_option('-l', '--local-only',
               dest='local_only', action='store_true',
               help="only update working tree, don't fetch")
  p.add_option('-n', '--network-only',
               dest='network_only', action='store_true',
               help="fetch only, don't update working tree")
  p.add_option('-d', '--detach',
               dest='detach_head', action='store_true',
               help='detach projects back to manifest revision')
  p.add_option('-c', '--current-branch',
               dest='current_branch_only', action='store_true',
               help='fetch only current branch from server')
  p.add_option('-q', '--quiet',
               dest='quiet', action='store_true',
               help='be more quiet')
  p.add_option('-j', '--jobs',
               dest='jobs', action='store', type='int',
               help="projects to fetch simultaneously (default %d)" % self.jobs)
  p.add_option('-m', '--manifest-name',
               dest='manifest_name',
               help='temporary manifest to use for this sync', metavar='NAME.xml')
  p.add_option('--no-clone-bundle',
               dest='no_clone_bundle', action='store_true',
               help='disable use of /clone.bundle on HTTP/HTTPS')
  p.add_option('-u', '--manifest-server-username', action='store',
               dest='manifest_server_username',
               help='username to authenticate with the manifest server')
  p.add_option('-p', '--manifest-server-password', action='store',
               dest='manifest_server_password',
               help='password to authenticate with the manifest server')
  p.add_option('--fetch-submodules',
               dest='fetch_submodules', action='store_true',
               help='fetch submodules from server')
  p.add_option('--no-tags',
               dest='no_tags', action='store_true',
               help="don't fetch tags")
  if show_smart:
    p.add_option('-s', '--smart-sync',
                 dest='smart_sync', action='store_true',
                 help='smart sync using manifest from a known good build')
    p.add_option('-t', '--smart-tag',
                 dest='smart_tag', action='store',
                 help='smart sync using manifest from a known tag')
  g = p.add_option_group('repo Version options')
  g.add_option('--no-repo-verify',
               dest='no_repo_verify', action='store_true',
               help='do not verify repo source code')
  g.add_option('--repo-upgraded',
               dest='repo_upgraded', action='store_true',
               help=SUPPRESS_HELP)
def _FetchProjectList(self, opt, projects, *args, **kwargs):
  """Main function of the fetch threads when jobs are > 1.
  Delegates most of the work to _FetchHelper.
  Args:
    opt: Program options returned from optparse. See _Options().
    projects: Projects to fetch.
    *args, **kwargs: Remaining arguments to pass to _FetchHelper. See the
        _FetchHelper docstring for details.
  """
  for project in projects:
    success = self._FetchHelper(opt, project, *args, **kwargs)
    # Stop fetching this worker's remaining projects on failure unless the
    # user asked to push through broken projects (-f/--force-broken).
    if not success and not opt.force_broken:
      break
def _FetchHelper(self, opt, project, lock, fetched, pm, sem, err_event):
  """Fetch git objects for a single project.
  Args:
    opt: Program options returned from optparse. See _Options().
    project: Project object for the project to fetch.
    lock: Lock for accessing objects that are shared amongst multiple
        _FetchHelper() threads.
    fetched: set object that we will add project.gitdir to when we're done
        (with our lock held).
    pm: Instance of a Progress object. We will call pm.update() (with our
        lock held).
    sem: We'll release() this semaphore when we exit so that another thread
        can be started up.
    err_event: We'll set this event in the case of an error (after printing
        out info about the error).
  Returns:
    Whether the fetch was successful.
  """
  # We'll set to true once we've locked the lock.
  did_lock = False
  #if not opt.quiet:
  #  print('Fetching project %s' % project.name)
  # Encapsulate everything in a try/except/finally so that:
  # - We always set err_event in the case of an exception.
  # - We always make sure we call sem.release().
  # - We always make sure we unlock the lock if we locked it.
  try:
    try:
      start = time.time()
      success = project.Sync_NetworkHalf(
        quiet=opt.quiet,
        current_branch_only=opt.current_branch_only,
        clone_bundle=not opt.no_clone_bundle,
        no_tags=opt.no_tags, archive=self.manifest.IsArchive)
      # Record how long this fetch took so future syncs can start the
      # slowest projects first (see the sort in Execute()).
      self._fetch_times.Set(project, time.time() - start)
      # Lock around all the rest of the code, since printing, updating a set
      # and Progress.update() are not thread safe.
      lock.acquire()
      did_lock = True
      if not success:
        print('error: Cannot fetch %s' % project.name, file=sys.stderr)
        if opt.force_broken:
          print('warn: --force-broken, continuing to sync',
                file=sys.stderr)
        else:
          raise _FetchError()
      fetched.add(project.gitdir)
      pm.update()
    except _FetchError:
      # Deliberate abort: flag the failure without showing a traceback.
      err_event.set()
    except:
      err_event.set()
      raise
  finally:
    if did_lock:
      lock.release()
    sem.release()
  return success
def _Fetch(self, projects, opt):
  """Fetch git objects for `projects`, in parallel when self.jobs > 1.

  Returns the set of gitdirs that were fetched successfully.  Exits the
  process with status 1 if any fetch failed.
  """
  fetched = set()
  lock = _threading.Lock()
  pm = Progress('Fetching projects', len(projects))
  # Group projects sharing an object directory so a single worker fetches
  # them sequentially (they cannot safely be fetched concurrently).
  objdir_project_map = dict()
  for project in projects:
    objdir_project_map.setdefault(project.objdir, []).append(project)
  threads = set()
  sem = _threading.Semaphore(self.jobs)
  err_event = _threading.Event()
  for project_list in objdir_project_map.values():
    # Check for any errors before running any more tasks.
    # ...we'll let existing threads finish, though.
    if err_event.isSet() and not opt.force_broken:
      break
    sem.acquire()
    kwargs = dict(opt=opt,
                  projects=project_list,
                  lock=lock,
                  fetched=fetched,
                  pm=pm,
                  sem=sem,
                  err_event=err_event)
    if self.jobs > 1:
      t = _threading.Thread(target = self._FetchProjectList,
                            kwargs = kwargs)
      # Ensure that Ctrl-C will not freeze the repo process.
      t.daemon = True
      threads.add(t)
      t.start()
    else:
      # Single-job mode: run inline on this thread.
      self._FetchProjectList(**kwargs)
  for t in threads:
    t.join()
  # If we saw an error, exit with code 1 so that other scripts can check.
  if err_event.isSet():
    print('\nerror: Exited sync due to fetch errors', file=sys.stderr)
    sys.exit(1)
  pm.end()
  self._fetch_times.Save()
  if not self.manifest.IsArchive:
    self._GCProjects(projects)
  return fetched
def _GCProjects(self, projects):
  """Run `git gc --auto` over every fetched project's bare repository.

  Runs collections in parallel (one per job) when git supports
  per-invocation config (`git -c`, >= 1.7.2) and more than one CPU is
  available; otherwise collects sequentially.  Exits with status 1 if any
  gc failed.
  """
  # De-duplicate by gitdir: projects sharing an object directory map to the
  # same gitdir and must only be collected once.
  gitdirs = {}
  for project in projects:
    gitdirs[project.gitdir] = project.bare_git
  has_dash_c = git_require((1, 7, 2))
  if multiprocessing and has_dash_c:
    cpu_count = multiprocessing.cpu_count()
  else:
    cpu_count = 1
  jobs = min(self.jobs, cpu_count)
  if jobs < 2:
    for bare_git in gitdirs.values():
      bare_git.gc('--auto')
    return
  # Spread the available cores across the concurrent gc processes.  Use
  # floor division: under Python 3 true division would yield a float
  # (e.g. "2.0"), which is not a valid value for pack.threads.
  config = {'pack.threads': cpu_count // jobs if cpu_count > jobs else 1}
  threads = set()
  sem = _threading.Semaphore(jobs)
  err_event = _threading.Event()
  def GC(bare_git):
    try:
      try:
        bare_git.gc('--auto', config=config)
      except GitError:
        err_event.set()
      except:
        err_event.set()
        raise
    finally:
      sem.release()
  for bare_git in gitdirs.values():
    if err_event.isSet():
      # A previous gc failed; don't start new ones, but let running
      # collections finish.
      break
    sem.acquire()
    t = _threading.Thread(target=GC, args=(bare_git,))
    t.daemon = True
    threads.add(t)
    t.start()
  for t in threads:
    t.join()
  if err_event.isSet():
    print('\nerror: Exited sync due to gc errors', file=sys.stderr)
    sys.exit(1)
def _ReloadManifest(self, manifest_name=None):
  """Force the manifest to be re-read, optionally switching to `manifest_name`."""
  if not manifest_name:
    # Drop the cached parse so the next access re-reads the manifest.
    self.manifest._Unload()
  else:
    # Override calls _Unload already
    self.manifest.Override(manifest_name)
def UpdateProjectList(self):
  """Reconcile on-disk project paths with the current manifest.

  Deletes working trees of projects that dropped out of the manifest
  (refusing when uncommitted changes are present) and rewrites the cached
  project.list file.  Returns 0 on success, -1 on failure.
  """
  new_project_paths = []
  for project in self.GetProjects(None, missing_ok=True):
    if project.relpath:
      new_project_paths.append(project.relpath)
  file_name = 'project.list'
  file_path = os.path.join(self.manifest.repodir, file_name)
  old_project_paths = []
  if os.path.exists(file_path):
    fd = open(file_path, 'r')
    try:
      old_project_paths = fd.read().split('\n')
    finally:
      fd.close()
  for path in old_project_paths:
    if not path:
      continue
    if path not in new_project_paths:
      # If the path has already been deleted, we don't need to do it
      if os.path.exists(self.manifest.topdir + '/' + path):
        gitdir = os.path.join(self.manifest.topdir, path, '.git')
        # Build a throwaway Project so we can reuse its dirty-check and
        # worktree helpers for the obsolete path.
        project = Project(
                       manifest = self.manifest,
                       name = path,
                       remote = RemoteSpec('origin'),
                       gitdir = gitdir,
                       objdir = gitdir,
                       worktree = os.path.join(self.manifest.topdir, path),
                       relpath = path,
                       revisionExpr = 'HEAD',
                       revisionId = None,
                       groups = None)
        if project.IsDirty():
          print('error: Cannot remove project "%s": uncommitted changes '
                'are present' % project.relpath, file=sys.stderr)
          print(' commit changes, then run sync again',
                file=sys.stderr)
          return -1
        else:
          print('Deleting obsolete path %s' % project.worktree,
                file=sys.stderr)
          shutil.rmtree(project.worktree)
          # Try deleting parent subdirs if they are empty
          project_dir = os.path.dirname(project.worktree)
          while project_dir != self.manifest.topdir:
            try:
              os.rmdir(project_dir)
            except OSError:
              break
            project_dir = os.path.dirname(project_dir)
  new_project_paths.sort()
  fd = open(file_path, 'w')
  try:
    fd.write('\n'.join(new_project_paths))
    fd.write('\n')
  finally:
    fd.close()
  return 0
def Execute(self, opt, args):
  """Run `repo sync`: fetch from the network, then update working trees.

  High-level flow: validate option combinations, optionally download a
  smart-sync manifest from the manifest server, sync the repo/manifest
  projects themselves, fetch all selected projects (iterating to pick up
  nested submodules), prune obsolete project paths, and finally update
  each project's working tree.
  """
  if opt.jobs:
    self.jobs = opt.jobs
  if self.jobs > 1:
    soft_limit, _ = _rlimit_nofile()
    # Cap the job count by the fd soft limit (roughly 3 fds per job, with
    # headroom).  Use floor division: under Python 3 the `/` operator
    # would make self.jobs a float.
    self.jobs = min(self.jobs, (soft_limit - 5) // 3)
  if opt.network_only and opt.detach_head:
    print('error: cannot combine -n and -d', file=sys.stderr)
    sys.exit(1)
  if opt.network_only and opt.local_only:
    print('error: cannot combine -n and -l', file=sys.stderr)
    sys.exit(1)
  if opt.manifest_name and opt.smart_sync:
    print('error: cannot combine -m and -s', file=sys.stderr)
    sys.exit(1)
  if opt.manifest_name and opt.smart_tag:
    print('error: cannot combine -m and -t', file=sys.stderr)
    sys.exit(1)
  if opt.manifest_server_username or opt.manifest_server_password:
    if not (opt.smart_sync or opt.smart_tag):
      print('error: -u and -p may only be combined with -s or -t',
            file=sys.stderr)
      sys.exit(1)
    if None in [opt.manifest_server_username, opt.manifest_server_password]:
      print('error: both -u and -p must be given', file=sys.stderr)
      sys.exit(1)
  if opt.manifest_name:
    self.manifest.Override(opt.manifest_name)
  manifest_name = opt.manifest_name
  if opt.smart_sync or opt.smart_tag:
    if not self.manifest.manifest_server:
      print('error: cannot smart sync: no manifest server defined in '
            'manifest', file=sys.stderr)
      sys.exit(1)
    manifest_server = self.manifest.manifest_server
    if not opt.quiet:
      print('Using manifest server %s' % manifest_server)
    if not '@' in manifest_server:
      # No credentials embedded in the URL: take them from -u/-p or,
      # failing that, from the user's .netrc file.
      username = None
      password = None
      if opt.manifest_server_username and opt.manifest_server_password:
        username = opt.manifest_server_username
        password = opt.manifest_server_password
      else:
        try:
          info = netrc.netrc()
        except IOError:
          print('.netrc file does not exist or could not be opened',
                file=sys.stderr)
        else:
          try:
            parse_result = urllib.parse.urlparse(manifest_server)
            if parse_result.hostname:
              username, _account, password = \
                info.authenticators(parse_result.hostname)
          except TypeError:
            # TypeError is raised when the given hostname is not present
            # in the .netrc file.
            print('No credentials found for %s in .netrc'
                  % parse_result.hostname, file=sys.stderr)
          except netrc.NetrcParseError as e:
            print('Error parsing .netrc file: %s' % e, file=sys.stderr)
      if (username and password):
        manifest_server = manifest_server.replace('://', '://%s:%s@' %
                                                  (username, password),
                                                  1)
    try:
      server = xmlrpc.client.Server(manifest_server)
      if opt.smart_sync:
        p = self.manifest.manifestProject
        b = p.GetBranch(p.CurrentBranch)
        branch = b.merge
        if branch.startswith(R_HEADS):
          branch = branch[len(R_HEADS):]
        env = os.environ.copy()
        if 'SYNC_TARGET' in env:
          target = env['SYNC_TARGET']
          [success, manifest_str] = server.GetApprovedManifest(branch, target)
        elif 'TARGET_PRODUCT' in env and 'TARGET_BUILD_VARIANT' in env:
          target = '%s-%s' % (env['TARGET_PRODUCT'],
                              env['TARGET_BUILD_VARIANT'])
          [success, manifest_str] = server.GetApprovedManifest(branch, target)
        else:
          [success, manifest_str] = server.GetApprovedManifest(branch)
      else:
        assert(opt.smart_tag)
        [success, manifest_str] = server.GetManifest(opt.smart_tag)
      if success:
        manifest_name = "smart_sync_override.xml"
        manifest_path = os.path.join(self.manifest.manifestProject.worktree,
                                     manifest_name)
        try:
          f = open(manifest_path, 'w')
          try:
            f.write(manifest_str)
          finally:
            f.close()
        except IOError:
          print('error: cannot write manifest to %s' % manifest_path,
                file=sys.stderr)
          sys.exit(1)
        self._ReloadManifest(manifest_name)
      else:
        print('error: manifest server RPC call failed: %s' %
              manifest_str, file=sys.stderr)
        sys.exit(1)
    except (socket.error, IOError, xmlrpc.client.Fault) as e:
      print('error: cannot connect to manifest server %s:\n%s'
            % (self.manifest.manifest_server, e), file=sys.stderr)
      sys.exit(1)
    except xmlrpc.client.ProtocolError as e:
      print('error: cannot connect to manifest server %s:\n%d %s'
            % (self.manifest.manifest_server, e.errcode, e.errmsg),
            file=sys.stderr)
      sys.exit(1)
  rp = self.manifest.repoProject
  rp.PreSync()
  mp = self.manifest.manifestProject
  mp.PreSync()
  if opt.repo_upgraded:
    _PostRepoUpgrade(self.manifest, quiet=opt.quiet)
  if not opt.local_only:
    mp.Sync_NetworkHalf(quiet=opt.quiet,
                        current_branch_only=opt.current_branch_only,
                        no_tags=opt.no_tags)
  if mp.HasChanges:
    # The manifest project itself changed: apply it locally and reload the
    # manifest before deciding what to fetch.
    syncbuf = SyncBuffer(mp.config)
    mp.Sync_LocalHalf(syncbuf)
    if not syncbuf.Finish():
      sys.exit(1)
    self._ReloadManifest(manifest_name)
    if opt.jobs is None:
      self.jobs = self.manifest.default.sync_j
  all_projects = self.GetProjects(args,
                                  missing_ok=True,
                                  submodules_ok=opt.fetch_submodules)
  self._fetch_times = _FetchTimes(self.manifest)
  if not opt.local_only:
    to_fetch = []
    now = time.time()
    if _ONE_DAY_S <= (now - rp.LastFetch):
      to_fetch.append(rp)
    to_fetch.extend(all_projects)
    # Fetch the historically slowest projects first to maximize overlap.
    to_fetch.sort(key=self._fetch_times.Get, reverse=True)
    fetched = self._Fetch(to_fetch, opt)
    _PostRepoFetch(rp, opt.no_repo_verify)
    if opt.network_only:
      # bail out now; the rest touches the working tree
      return
    # Iteratively fetch missing and/or nested unregistered submodules
    previously_missing_set = set()
    while True:
      self._ReloadManifest(manifest_name)
      all_projects = self.GetProjects(args,
                                      missing_ok=True,
                                      submodules_ok=opt.fetch_submodules)
      missing = []
      for project in all_projects:
        if project.gitdir not in fetched:
          missing.append(project)
      if not missing:
        break
      # Stop us from non-stopped fetching actually-missing repos: If set of
      # missing repos has not been changed from last fetch, we break.
      missing_set = set(p.name for p in missing)
      if previously_missing_set == missing_set:
        break
      previously_missing_set = missing_set
      fetched.update(self._Fetch(missing, opt))
  if self.manifest.IsMirror or self.manifest.IsArchive:
    # bail out now, we have no working tree
    return
  if self.UpdateProjectList():
    sys.exit(1)
  syncbuf = SyncBuffer(mp.config,
                       detach_head = opt.detach_head)
  pm = Progress('Syncing work tree', len(all_projects))
  for project in all_projects:
    pm.update()
    if project.worktree:
      project.Sync_LocalHalf(syncbuf)
  pm.end()
  print(file=sys.stderr)
  if not syncbuf.Finish():
    sys.exit(1)
  # If there's a notice that's supposed to print at the end of the sync, print
  # it now...
  if self.manifest.notice:
    print(self.manifest.notice)
def _PostRepoUpgrade(manifest, quiet=False):
  """Finish a repo self-upgrade: ensure GnuPG is set up, notify projects."""
  wrapper = Wrapper()
  if wrapper.NeedSetupGnuPG():
    wrapper.SetupGnuPG(quiet)
  for project in manifest.projects:
    if project.Exists:
      # Give each existing project a chance to react to the new repo version.
      project.PostRepoUpgrade()
def _PostRepoFetch(rp, no_repo_verify=False, verbose=False):
  """After fetching, upgrade repo itself if a newer version arrived.

  Verifies the new version's signed tag (unless `no_repo_verify`) and, on a
  successful local update, restarts repo via RepoChangedException.
  """
  if rp.HasChanges:
    print('info: A new version of repo is available', file=sys.stderr)
    print(file=sys.stderr)
    if no_repo_verify or _VerifyTag(rp):
      syncbuf = SyncBuffer(rp.config)
      rp.Sync_LocalHalf(syncbuf)
      if not syncbuf.Finish():
        sys.exit(1)
      print('info: Restarting repo with latest version', file=sys.stderr)
      # Signal the main loop to re-exec with the upgraded repo source.
      raise RepoChangedException(['--repo-upgraded'])
    else:
      print('warning: Skipped upgrade to unverified version', file=sys.stderr)
  else:
    if verbose:
      print('repo version %s is current' % rp.work_git.describe(HEAD),
            file=sys.stderr)
def _VerifyTag(project):
  """Verify the GnuPG signature on the tag describing `project`'s revision.

  Returns True when the revision is acceptable (signed tag verified, or no
  keyring is available so verification is impossible), False otherwise.
  """
  gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
  if not os.path.exists(gpg_dir):
    # Without a keyring we cannot verify anything; warn and accept.
    # (Dropped a stray '"""' that was accidentally embedded in this
    # user-facing message.)
    print('warning: GnuPG was not available during last "repo init"\n'
          'warning: Cannot automatically authenticate repo.',
          file=sys.stderr)
    return True
  try:
    cur = project.bare_git.describe(project.GetRevisionId())
  except GitError:
    cur = None
  if not cur \
     or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
    # describe added a "-N-g<sha>" suffix, i.e. we are not exactly on a
    # tag, so there is no signature to check.
    rev = project.revisionExpr
    if rev.startswith(R_HEADS):
      rev = rev[len(R_HEADS):]
    print(file=sys.stderr)
    print("warning: project '%s' branch '%s' is not signed"
          % (project.name, rev), file=sys.stderr)
    return False
  env = os.environ.copy()
  # Environment values must be native strings: calling .encode() here broke
  # subprocess under Python 3 (bytes are rejected as env values).
  env['GIT_DIR'] = project.gitdir
  env['GNUPGHOME'] = gpg_dir
  cmd = [GIT, 'tag', '-v', cur]
  proc = subprocess.Popen(cmd,
                          stdout = subprocess.PIPE,
                          stderr = subprocess.PIPE,
                          env = env)
  out = proc.stdout.read()
  proc.stdout.close()
  err = proc.stderr.read()
  proc.stderr.close()
  if proc.wait() != 0:
    print(file=sys.stderr)
    print(out, file=sys.stderr)
    print(err, file=sys.stderr)
    print(file=sys.stderr)
    return False
  return True
class _FetchTimes(object):
_ALPHA = 0.5
def __init__(self, manifest):
self._path = os.path.join(manifest.repodir, '.repo_fetchtimes.json')
self._times = None
self._seen = set()
def Get(self, project):
self._Load()
return self._times.get(project.name, _ONE_DAY_S)
def Set(self, project, t):
self._Load()
name = project.name
old = self._times.get(name, t)
self._seen.add(name)
a = self._ALPHA
self._times[name] = (a*t) + ((1-a) * old)
def _Load(self):
if self._times is None:
try:
f = open(self._path)
try:
self._times = json.load(f)
finally:
f.close()
except (IOError, ValueError):
try:
os.remove(self._path)
except OSError:
pass
self._times = {}
def Save(self):
if self._times is None:
return
to_delete = []
for name in self._times:
if name not in self._seen:
to_delete.append(name)
for name in to_delete:
del self._times[name]
try:
f = open(self._path, 'w')
try:
json.dump(self._times, f, indent=2)
finally:
f.close()
except (IOError, TypeError):
try:
os.remove(self._path)
except OSError:
pass
|
__init__.py | from fastapi import APIRouter, Response
from starlette.status import *
import os, json
from pymongo import MongoClient
from logging import debug, info, warning, error, critical, exception
import time
import requests
import threading
from component_util import post_update
# Service configuration is injected as a JSON blob through the CONFIG
# environment variable (raises KeyError/ValueError when absent or invalid).
conf = json.loads(os.environ['CONFIG'])
# Shared MongoDB connection for this component, built from the config.
mongo_client = MongoClient(
    host=conf['mongodb']['host'],
    port=conf['mongodb']['port'],
    username=conf['mongodb']['username'],
    password=conf['mongodb']['password'],
    tls=conf['mongodb']['tls']
)
# Database handle used by the weather poller and the HTTP endpoint below.
db = mongo_client[conf['mongodb']['database']]
class OpenWeatherWrapper:
    """Background poller for the OpenWeather One Call API.

    Periodically fetches current/daily weather for a fixed location,
    upserts it into MongoDB as the `info` collection's 'weather' record,
    then notifies listeners via post_update().
    """
    def __init__(
        self,
        api,
        latitude,
        longitude,
        units='imperial',
        update=300
    ) -> None:
        """
        :param api: OpenWeather API key
        :param latitude: location latitude in degrees
        :param longitude: location longitude in degrees
        :param units: OpenWeather unit system ('imperial', 'metric', 'standard')
        :param update: polling interval in seconds
        """
        self.api = api
        self.lat = latitude
        self.lon = longitude
        self.units = units
        self.update = update
    def fetchLoop(self):
        """Poll forever: fetch, store, notify, sleep.  Never returns."""
        while True:
            try:
                data = requests.get(
                    'https://api.openweathermap.org/data/2.5/onecall',
                    params={
                        'lat': self.lat,
                        'lon': self.lon,
                        'exclude': 'minutely,hourly,alerts',
                        'appid': self.api,
                        'units': self.units
                    },
                    # Without a timeout a stuck request would hang this
                    # poller thread forever.
                    timeout=30
                ).json()
                data['record'] = 'weather'
                db.info.replace_one({'record': 'weather'}, data, upsert=True)
                post_update('info.weather')
            except Exception:
                # Best-effort loop: log and retry on the next cycle.  Catch
                # Exception (not bare except) so SystemExit/KeyboardInterrupt
                # can still stop the thread.
                exception('Failed to fetch weather data: ')
            time.sleep(self.update)
    def start(self):
        """Launch fetchLoop() on a daemon thread."""
        threading.Thread(name="thread_jumpstart-next_component_info_weather", target=self.fetchLoop, daemon=True).start()
        info('Started info.weather thread')
# Router exposing this component's endpoints under /api/components/info.
InfoComponentRouter = APIRouter(
    prefix='/api/components/info',
    tags=['component', 'info']
)
@InfoComponentRouter.get('/weather')
async def get_weather(r: Response):
    """Return the most recently cached weather record.

    Responds 404 when the background poller has not stored a record yet.
    """
    record = db.info.find_one(filter={'record': 'weather'})
    if not record:
        r.status_code = HTTP_404_NOT_FOUND
        return {'result': 'failed: weather record not generated'}
    del record['_id']  # Mongo ObjectId is not JSON-serializable
    record['units'] = conf['components']['info']['weather']['units']
    return {
        'result': 'success',
        'data': record
    }
data_io.py | ##########################################################
# pytorch-kaldi v.0.1
# Mirco Ravanelli, Titouan Parcollet
# Mila, University of Montreal
# October 2018
##########################################################
import numpy as np
import sys
from utils import compute_cw_max,dict_fea_lab_arch,is_sequential_dict
import os
import configparser
import re, gzip, struct
def load_dataset(fea_scp, fea_opts, lab_folder, lab_opts, left, right, max_sequence_length, output_folder, fea_only=False):
    """Load Kaldi features (and optionally alignments) into flat arrays.

    Sentences longer than `max_sequence_length` are split into chunks; all
    sentences are then sorted by length and concatenated.  Returns
    [sentence names, concatenated features, concatenated labels,
    cumulative end indices per sentence].

    NOTE(review): `left`/`right` are accepted but unused here; the context
    window is applied later in load_chunk() — confirm this is intentional.
    """
    fea = { k:m for k,m in read_mat_ark('ark:copy-feats scp:'+fea_scp+' ark:- |'+fea_opts,output_folder) }
    if not fea_only:
        lab = { k:v for k,v in read_vec_int_ark('gunzip -c '+lab_folder+'/ali*.gz | '+lab_opts+' '+lab_folder+'/final.mdl ark:- ark:-|',output_folder) if k in fea} # Note that I'm copying only the aligments of the loaded fea
        fea = {k: v for k, v in fea.items() if k in lab} # This way I remove all the features without an aligment (see log file in alidir "Did not Succeded")
    end_snt=0
    end_index=[]
    snt_name=[]
    fea_conc=[]
    lab_conc=[]
    tmp=0
    # Iterate sentences sorted by length (inner sort gives a deterministic
    # tie-break on the key name).
    for k in sorted(sorted(fea.keys()), key=lambda k: len(fea[k])):
        #####
        # If the sequence length is above the threshold, we split it with a minimal length max/4
        # If max length = 500, then the split will start at 500 + (500/4) = 625.
        # A seq of length 625 will be splitted in one of 500 and one of 125
        if(len(fea[k]) > max_sequence_length) and max_sequence_length>0:
            fea_chunked = []
            lab_chunked = []
            for i in range((len(fea[k]) + max_sequence_length - 1) // max_sequence_length):
                if(len(fea[k][i * max_sequence_length:]) > max_sequence_length + (max_sequence_length/4)):
                    fea_chunked.append(fea[k][i * max_sequence_length:(i + 1) * max_sequence_length])
                    if not fea_only:
                        lab_chunked.append(lab[k][i * max_sequence_length:(i + 1) * max_sequence_length])
                    else:
                        # No alignments available: use zero labels of matching length.
                        lab_chunked.append(np.zeros((fea[k][i * max_sequence_length:(i + 1) * max_sequence_length].shape[0],)))
                else:
                    # Remainder is short enough: keep it whole and stop splitting.
                    fea_chunked.append(fea[k][i * max_sequence_length:])
                    if not fea_only:
                        lab_chunked.append(lab[k][i * max_sequence_length:])
                    else:
                        lab_chunked.append(np.zeros((fea[k][i * max_sequence_length:].shape[0],)))
                    break
            for j in range(0, len(fea_chunked)):
                fea_conc.append(fea_chunked[j])
                lab_conc.append(lab_chunked[j])
                snt_name.append(k+'_split'+str(j))
        else:
            fea_conc.append(fea[k])
            if not fea_only:
                lab_conc.append(lab[k])
            else:
                lab_conc.append(np.zeros((fea[k].shape[0],)))
            snt_name.append(k)
        tmp+=1
    # Re-sort (including split chunks) by frame count, then concatenate and
    # record the cumulative end index of each sentence.
    fea_zipped = zip(fea_conc,lab_conc)
    fea_sorted = sorted(fea_zipped, key=lambda x: x[0].shape[0])
    fea_conc,lab_conc = zip(*fea_sorted)
    for entry in fea_conc:
        end_snt=end_snt+entry.shape[0]
        end_index.append(end_snt)
    fea_conc=np.concatenate(fea_conc)
    lab_conc=np.concatenate(lab_conc)
    return [snt_name,fea_conc,lab_conc,np.asarray(end_index)]
def context_window_old(fea, left, right):
    """Frame-by-frame context-window expansion.

    Row i of the result is [left context, current frame, right context]
    for original frame i+left; the `left`+`right` border frames are dropped.
    Returns an array of shape (n_rows - left - right, n_feats * (left+right+1)).
    """
    n_rows, n_feats = fea.shape[0], fea.shape[1]
    frames = np.empty((n_rows - left - right, n_feats * (left + right + 1)))
    for idx in range(left, n_rows - right):
        window = np.concatenate([
            fea[idx - left:idx].flatten(),            # left context
            fea[idx],                                 # current frame
            fea[idx + 1:idx + right + 1].flatten(),   # right context
        ])
        frames[idx - left] = window
    return frames
def context_window(fea, left, right):
    """Vectorized context-window expansion via np.roll.

    Builds one shifted copy of `fea` per lag in [-left, +right] side by
    side, then drops the `left`+`right` wrap-around border rows.  Output is
    float64 of shape (n_frames - left - right, n_feats * (left+right+1)).

    NOTE(review): with np.roll a negative lag places *future* frames in the
    leading columns, so the column ordering differs from
    context_window_old() — confirm downstream code expects this layout.
    """
    n_frames, n_feats = fea.shape[0], fea.shape[1]
    out = np.empty((n_frames, n_feats * (left + right + 1)))
    shifted = (np.roll(fea, lag, axis=0) for lag in range(-left, right + 1))
    for pos, block in enumerate(shifted):
        out[:, pos * n_feats:(pos + 1) * n_feats] = block
    return out[left:n_frames - right]
def load_chunk(fea_scp, fea_opts, lab_folder, lab_opts, left, right, max_sequence_length, output_folder, fea_only=False):
    """Load one dataset chunk, apply the context window, normalize the
    features, and stack the labels as the last column.

    Returns [sentence_names, feature_matrix_with_label_column, end_indexes].
    """
    names, feats, labs, end_idx = load_dataset(
        fea_scp, fea_opts, lab_folder, lab_opts, left, right,
        max_sequence_length, output_folder, fea_only)

    # Context-window expansion drops `left` leading and `right` trailing
    # frames, so the sentence boundaries shift accordingly.
    if left != 0 or right != 0:
        feats = context_window(feats, left, right)
        end_idx = end_idx - left
        end_idx[-1] = end_idx[-1] - right

    # Per-dimension mean/variance normalization over the whole chunk.
    feats = (feats - np.mean(feats, axis=0)) / np.std(feats, axis=0)

    # Make label ids start at 0, then trim to align with windowed features.
    labs = labs - labs.min()
    labs = labs[left:-right] if right > 0 else labs[left:]

    feats = np.column_stack((feats, labs))
    return [names, feats, end_idx]
def load_counts(class_counts_file):
    """Read a kaldi-style class-counts file (a single line of numbers,
    optionally wrapped in '[' ']') into a float32 numpy array."""
    with open(class_counts_file) as counts_fd:
        first_line = next(counts_fd).strip().strip('[]').strip()
    return np.array([np.float32(token) for token in first_line.split()])
def read_lab_fea(cfg_file, fea_only, shared_list, output_folder):
    """Load the features and labels described by a chunk-specific cfg file
    and publish the results through `shared_list` (producer side of a
    multiprocessing hand-off).

    Appends to shared_list, in order: data_name (sentence ids),
    data_end_index (cumulative sentence boundaries), fea_dict, lab_dict,
    arch_dict, data_set (feature columns with label columns stacked last).

    Parameters
    ----------
    cfg_file : str -- path to the chunk cfg file (configparser format)
    fea_only : bool -- True in production/forward mode (no labels on disk)
    shared_list : list-like -- receives the six result objects
    output_folder : str -- folder used for kaldi pipe log output
    """
    # Reading chunk-specific cfg file (first argument-mandatory file)
    if not(os.path.exists(cfg_file)):
        sys.stderr.write('ERROR: The config file %s does not exist!\n'%(cfg_file))
        sys.exit(0)
    else:
        config = configparser.ConfigParser()
        config.read(cfg_file)

    # Reading some cfg parameters
    to_do=config['exp']['to_do']

    if to_do=='train':
        max_seq_length=int(config['batches']['max_seq_length_train']) #*(int(info_file[-13:-10])+1) # increasing over the epochs

    if to_do=='valid':
        max_seq_length=int(config['batches']['max_seq_length_valid'])

    if to_do=='forward':
        max_seq_length=-1 # do to break forward sentences

    [fea_dict,lab_dict,arch_dict]=dict_fea_lab_arch(config)
    [cw_left_max,cw_right_max]=compute_cw_max(fea_dict)

    fea_index=0
    cnt_fea=0
    for fea in fea_dict.keys():
        # reading the features
        fea_scp=fea_dict[fea][1]
        fea_opts=fea_dict[fea][2]
        cw_left=int(fea_dict[fea][3])
        cw_right=int(fea_dict[fea][4])

        cnt_lab=0

        # Production case, we don't have labels (lab_name = none)
        if fea_only:
            lab_dict.update({'lab_name':'none'})
        for lab in lab_dict.keys():
            # Production case, we don't have labels (lab_name = none)
            if fea_only:
                lab_folder=None
                lab_opts=None
            else:
                lab_folder=lab_dict[lab][1]
                lab_opts=lab_dict[lab][2]

            [data_name_fea,data_set_fea,data_end_index_fea]=load_chunk(fea_scp,fea_opts,lab_folder,lab_opts,cw_left,cw_right,max_seq_length, output_folder, fea_only)

            # making the same dimenion for all the features (compensating for different context windows)
            labs_fea=data_set_fea[cw_left_max-cw_left:data_set_fea.shape[0]-(cw_right_max-cw_right),-1]
            data_set_fea=data_set_fea[cw_left_max-cw_left:data_set_fea.shape[0]-(cw_right_max-cw_right),0:-1]
            data_end_index_fea=data_end_index_fea-(cw_left_max-cw_left)
            data_end_index_fea[-1]=data_end_index_fea[-1]-(cw_right_max-cw_right)

            if cnt_fea==0 and cnt_lab==0:
                # first feature/label pair initializes the accumulators
                data_set=data_set_fea
                labs=labs_fea
                data_end_index=data_end_index_fea
                data_end_index=data_end_index_fea  # NOTE(review): duplicated assignment in the original; harmless but redundant
                data_name=data_name_fea

                # bookkeeping: append [start_col, end_col, width] to this feature's entry
                fea_dict[fea].append(fea_index)
                fea_index=fea_index+data_set_fea.shape[1]
                fea_dict[fea].append(fea_index)
                fea_dict[fea].append(fea_dict[fea][6]-fea_dict[fea][5])
            else:
                if cnt_fea==0:
                    # same feature stream, additional label stream: stack label column
                    labs=np.column_stack((labs,labs_fea))
                if cnt_lab==0:
                    # additional feature stream: stack columns and record its column range
                    data_set=np.column_stack((data_set,data_set_fea))
                    fea_dict[fea].append(fea_index)
                    fea_index=fea_index+data_set_fea.shape[1]
                    fea_dict[fea].append(fea_index)
                    fea_dict[fea].append(fea_dict[fea][6]-fea_dict[fea][5])

                # Checks if lab_names are the same for all the features
                if not(data_name==data_name_fea):
                    sys.stderr.write('ERROR: different sentence ids are detected for the different features. Plase check again input feature lists"\n')
                    sys.exit(0)

                # Checks if end indexes are the same for all the features
                if not(data_end_index==data_end_index_fea).all():
                    sys.stderr.write('ERROR end_index must be the same for all the sentences"\n')
                    sys.exit(0)

            cnt_lab=cnt_lab+1

        cnt_fea=cnt_fea+1

    cnt_lab=0
    if not fea_only:
        for lab in lab_dict.keys():
            # record, for each label stream, its column index in the final matrix
            lab_dict[lab].append(data_set.shape[1]+cnt_lab)
            cnt_lab=cnt_lab+1

    data_set=np.column_stack((data_set,labs))

    # check automatically if the model is sequential
    seq_model=is_sequential_dict(config,arch_dict)

    # Randomize if the model is not sequential
    if not(seq_model) and to_do!='forward':
        np.random.shuffle(data_set)

    # Split dataset in many part. If the dataset is too big, we can have issues to copy it into the shared memory (due to pickle limits)
    #N_split=10
    #data_set=np.array_split(data_set, N_split)

    # Adding all the elements in the shared list
    shared_list.append(data_name)
    shared_list.append(data_end_index)
    shared_list.append(fea_dict)
    shared_list.append(lab_dict)
    shared_list.append(arch_dict)
    shared_list.append(data_set)
# The following libraries are copied from kaldi-io-for-python project (https://github.com/vesis84/kaldi-io-for-python)
# Copyright 2014-2016 Brno University of Technology (author: Karel Vesely)
# Licensed under the Apache License, Version 2.0 (the "License")
#################################################
# Define all custom exceptions,
# Custom exceptions raised by the kaldi-io helpers below.
class UnsupportedDataType(Exception): pass   # dtype other than float32/float64 on write
class UnknownVectorHeader(Exception): pass   # vector header not 'FV '/'DV '
class UnknownMatrixHeader(Exception): pass   # matrix header not 'FM '/'DM '/'CM*'
class BadSampleSize(Exception): pass         # element size inconsistent with header
class BadInputFormat(Exception): pass        # malformed ascii input (e.g. eof mid-matrix)
class SubprocessFailed(Exception): pass      # piped shell command exited non-zero
#################################################
# Data-type independent helper functions,
def open_or_fd(file, output_folder, mode='rb'):
    """fd = open_or_fd(file, output_folder)

    Open a plain file, gzipped file, or shell pipe -- or pass an already
    opened file descriptor straight through.  A trailing ':offset' in the
    rxfilename triggers a seek after opening.
    """
    offset = None
    try:
        # strip the optional kaldi 'ark:'/'scp:' prefix with rw specifiers
        if re.search('^(ark|scp)(,scp|,b|,t|,n?f|,n?p|,b?o|,n?s|,n?cs)*:', file):
            file = file.split(':', 1)[1]
        # optional byte offset appended to the filename
        if re.search(':[0-9]+$', file):
            file, offset = file.rsplit(':', 1)
        if file[-1] == '|':
            fd = popen(file[:-1], output_folder, 'rb')   # read from a pipe
        elif file[0] == '|':
            fd = popen(file[1:], output_folder, 'wb')    # write to a pipe
        elif file.split('.')[-1] == 'gz':
            fd = gzip.open(file, mode)
        else:
            fd = open(file, mode)
    except TypeError:
        # 'file' was not a string: assume it is an opened file descriptor
        fd = file
    if offset is not None:
        fd.seek(int(offset))
    return fd
# based on '/usr/local/lib/python3.4/os.py'
# based on '/usr/local/lib/python3.4/os.py'
def popen(cmd, output_folder, mode="rb"):
    """Run `cmd` in a shell and return a file object wired to its stdin
    ('w'/'wb') or stdout ('r'/'rb'); text modes wrap the raw stream in an
    io.TextIOWrapper.

    stderr is appended to <output_folder>/log.log.  A watcher thread
    raises SubprocessFailed if the command exits with a positive status.

    Raises:
        TypeError: cmd is not a string.
        ValueError: unknown mode.
    """
    if not isinstance(cmd, str):
        raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
    if mode not in ("r", "w", "rb", "wb"):
        raise ValueError("invalid mode %s" % mode)
    import subprocess, io, threading

    # cleanup function for subprocesses: propagate a shell failure as an
    # exception instead of silently returning truncated data.
    def cleanup(proc, cmd):
        ret = proc.wait()
        if ret > 0:
            raise SubprocessFailed('cmd %s returned %d !' % (cmd, ret))
        return

    err = open(output_folder + '/log.log', "a")
    try:
        # single spawn site instead of the four duplicated branches
        if mode in ("r", "rb"):
            proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=err)
        else:
            proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stderr=err)
    finally:
        # BUG FIX: the original leaked one log-file handle per call.
        # Popen dup()s the stderr fd, so our handle can be closed now.
        err.close()
    threading.Thread(target=cleanup, args=(proc, cmd)).start()  # clean-up thread,

    if mode == "r":
        return io.TextIOWrapper(proc.stdout)
    if mode == "w":
        return io.TextIOWrapper(proc.stdin)
    return proc.stdout if mode == "rb" else proc.stdin
def read_key(fd):
    """Read one space/EOF-terminated utterance key from an opened
    ark/stream descriptor.  Returns None at end of file."""
    chars = []
    while True:
        ch = fd.read(1).decode("latin1")
        if ch == '' or ch == ' ':
            break
        chars.append(ch)
    key = ''.join(chars).strip()
    if key == '':
        return None  # end of file,
    assert(re.match('^\S+$', key) is not None)  # keys contain no whitespace
    return key
#################################################
# Integer vectors (alignments, ...),
def read_ali_ark(file_or_fd,output_folder):
    """ Alias to 'read_vec_int_ark()' -- kaldi alignments are integer vectors. """
    return read_vec_int_ark(file_or_fd,output_folder)
def read_vec_int_ark(file_or_fd, output_folder):
    """generator(key,vec) = read_vec_int_ark(file_or_fd)

    Yield (key, int-vector) tuples read from an ark file/stream.
    file_or_fd : ark, gzipped ark, pipe or opened file descriptor.

    Read a whole ark into a dict:
      d = { u:d for u,d in kaldi_io.read_vec_int_ark(file) }
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        key = read_key(fd)
        while key:
            yield key, read_vec_int(fd, output_folder)
            key = read_key(fd)
    finally:
        # only close streams we opened ourselves
        if fd is not file_or_fd:
            fd.close()
def read_vec_int(file_or_fd,output_folder):
    """ [int-vec] = read_vec_int(file_or_fd)
    Read a kaldi integer vector, ascii or binary input.
    Binary layout: '\\0B' flag, '\\4' int-size byte, int32 dim, then
    `dim` (int8 size, int32 value) pairs.
    """
    fd = open_or_fd(file_or_fd,output_folder)
    binary = fd.read(2).decode()
    if binary == '\0B': # binary flag
        assert(fd.read(1).decode() == '\4'); # int-size
        vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # vector dim
        if vec_size == 0:
            # NOTE(review): early return skips the fd.close() below -- an
            # fd opened here stays open on this path
            return np.array([], dtype='int32')
        # Elements from int32 vector are stored in tuples: (sizeof(int32), value),
        vec = np.frombuffer(fd.read(vec_size*5), dtype=[('size','int8'),('value','int32')], count=vec_size)
        assert(vec[0]['size'] == 4) # int32 size,
        ans = vec[:]['value'] # values are in 2nd column,
    else: # ascii,
        arr = (binary + fd.readline().decode()).strip().split()
        try:
            arr.remove('['); arr.remove(']') # optionally
        except ValueError:
            pass
        ans = np.array(arr, dtype=int)
    if fd is not file_or_fd : fd.close() # cleanup
    return ans
# Writing,
def write_vec_int(file_or_fd, output_folder, v, key=''):
""" write_vec_int(f, v, key='')
Write a binary kaldi integer vector to filename or stream.
Arguments:
file_or_fd : filename or opened file descriptor for writing,
v : the vector to be stored,
key (optional) : used for writing ark-file, the utterance-id gets written before the vector.
Example of writing single vector:
kaldi_io.write_vec_int(filename, vec)
Example of writing arkfile:
with open(ark_file,'w') as f:
for key,vec in dict.iteritems():
kaldi_io.write_vec_flt(f, vec, key=key)
"""
fd = open_or_fd(file_or_fd, output_folder, mode='wb')
if sys.version_info[0] == 3: assert(fd.mode == 'wb')
try:
if key != '' : fd.write((key+' ').encode("latin1")) # ark-files have keys (utterance-id),
fd.write('\0B'.encode()) # we write binary!
# dim,
fd.write('\4'.encode()) # int32 type,
fd.write(struct.pack(np.dtype('int32').char, v.shape[0]))
# data,
for i in range(len(v)):
fd.write('\4'.encode()) # int32 type,
fd.write(struct.pack(np.dtype('int32').char, v[i])) # binary,
finally:
if fd is not file_or_fd : fd.close()
#################################################
# Float vectors (confidences, ivectors, ...),
# Reading,
def read_vec_flt_scp(file_or_fd, output_folder):
    """ generator(key,vec) = read_vec_flt_scp(file_or_fd)
    Returns generator of (key,vector) tuples, read according to kaldi scp.
    file_or_fd : scp, gzipped scp, pipe or opened file descriptor.

    Iterate the scp:
      for key,vec in kaldi_io.read_vec_flt_scp(file):
        ...

    Read scp to a 'dictionary':
      d = { key:vec for key,vec in kaldi_io.read_vec_flt_scp(file) }
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        for line in fd:
            (key, rxfile) = line.decode().split(' ')
            # BUG FIX: read_vec_flt() requires output_folder; the original
            # call omitted it and raised TypeError on every scp entry.
            vec = read_vec_flt(rxfile, output_folder)
            yield key, vec
    finally:
        if fd is not file_or_fd:
            fd.close()
def read_vec_flt_ark(file_or_fd, output_folder):
    """ generator(key,vec) = read_vec_flt_ark(file_or_fd)
    Create generator of (key,vector<float>) tuples, reading from an ark file/stream.
    file_or_fd : ark, gzipped ark, pipe or opened file descriptor.

    Read ark to a 'dictionary':
      d = { u:d for u,d in kaldi_io.read_vec_flt_ark(file) }
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        key = read_key(fd)
        while key:
            # BUG FIX: read_vec_flt() requires output_folder; the original
            # call omitted it and raised TypeError on the first entry.
            ali = read_vec_flt(fd, output_folder)
            yield key, ali
            key = read_key(fd)
    finally:
        if fd is not file_or_fd:
            fd.close()
def read_vec_flt(file_or_fd, output_folder):
    """[flt-vec] = read_vec_flt(file_or_fd)

    Read a single kaldi float vector, ascii or binary input."""
    fd = open_or_fd(file_or_fd, output_folder)
    flag = fd.read(2).decode()
    if flag == '\0B':
        # binary payload; NOTE: fd is intentionally not closed on this
        # path, matching the original early return
        return _read_vec_flt_binary(fd)
    # ascii: remaining tokens on this line, optionally wrapped in '[' ']'
    tokens = (flag + fd.readline().decode()).strip().split()
    try:
        tokens.remove('[')
        tokens.remove(']')
    except ValueError:
        pass
    ans = np.array(tokens, dtype=float)
    if fd is not file_or_fd:
        fd.close()  # cleanup
    return ans
def _read_vec_flt_binary(fd):
header = fd.read(3).decode()
if header == 'FV ' : sample_size = 4 # floats
elif header == 'DV ' : sample_size = 8 # doubles
else : raise UnknownVectorHeader("The header contained '%s'" % header)
assert (sample_size > 0)
# Dimension,
assert (fd.read(1).decode() == '\4'); # int-size
vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # vector dim
if vec_size == 0:
return np.array([], dtype='float32')
# Read whole vector,
buf = fd.read(vec_size * sample_size)
if sample_size == 4 : ans = np.frombuffer(buf, dtype='float32')
elif sample_size == 8 : ans = np.frombuffer(buf, dtype='float64')
else : raise BadSampleSize
return ans
# Writing,
def write_vec_flt(file_or_fd, output_folder, v, key=''):
""" write_vec_flt(f, v, key='')
Write a binary kaldi vector to filename or stream. Supports 32bit and 64bit floats.
Arguments:
file_or_fd : filename or opened file descriptor for writing,
v : the vector to be stored,
key (optional) : used for writing ark-file, the utterance-id gets written before the vector.
Example of writing single vector:
kaldi_io.write_vec_flt(filename, vec)
Example of writing arkfile:
with open(ark_file,'w') as f:
for key,vec in dict.iteritems():
kaldi_io.write_vec_flt(f, vec, key=key)
"""
fd = open_or_fd(file_or_fd,output_folder, mode='wb')
if sys.version_info[0] == 3: assert(fd.mode == 'wb')
try:
if key != '' : fd.write((key+' ').encode("latin1")) # ark-files have keys (utterance-id),
fd.write('\0B'.encode()) # we write binary!
# Data-type,
if v.dtype == 'float32': fd.write('FV '.encode())
elif v.dtype == 'float64': fd.write('DV '.encode())
else: raise UnsupportedDataType("'%s', please use 'float32' or 'float64'" % v.dtype)
# Dim,
fd.write('\04'.encode())
fd.write(struct.pack(np.dtype('uint32').char, v.shape[0])) # dim
# Data,
fd.write(v.tobytes())
finally:
if fd is not file_or_fd : fd.close()
#################################################
# Float matrices (features, transformations, ...),
# Reading,
def read_mat_scp(file_or_fd, output_folder):
    """generator(key,mat) = read_mat_scp(file_or_fd)

    Yield (key, matrix) tuples, read according to a kaldi scp.
    file_or_fd : scp, gzipped scp, pipe or opened file descriptor.

    Read a whole scp into a dict:
      d = { key:mat for key,mat in kaldi_io.read_mat_scp(file) }
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        for line in fd:
            # each scp line is '<key> <rxfilename>'
            key, rxfile = line.decode().split(' ')
            yield key, read_mat(rxfile, output_folder)
    finally:
        if fd is not file_or_fd:
            fd.close()
def read_mat_ark(file_or_fd, output_folder):
    """generator(key,mat) = read_mat_ark(file_or_fd)

    Yield (key, matrix) tuples read from an ark file/stream.
    file_or_fd : ark, gzipped ark, pipe or opened file descriptor.

    Read a whole ark into a dict:
      d = { key:mat for key,mat in kaldi_io.read_mat_ark(file) }
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        key = read_key(fd)
        while key:
            yield key, read_mat(fd, output_folder)
            key = read_key(fd)
    finally:
        if fd is not file_or_fd:
            fd.close()
def read_mat(file_or_fd, output_folder):
    """[mat] = read_mat(file_or_fd)

    Read a single kaldi matrix, ascii or binary.
    file_or_fd : file, gzipped file, pipe or opened file descriptor."""
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        flag = fd.read(2).decode()
        if flag == '\0B':
            mat = _read_mat_binary(fd)
        else:
            # ascii matrices start with ' ['
            assert(flag == ' [')
            mat = _read_mat_ascii(fd)
    finally:
        if fd is not file_or_fd:
            fd.close()
    return mat
def _read_mat_binary(fd):
# Data type
header = fd.read(3).decode()
# 'CM', 'CM2', 'CM3' are possible values,
if header.startswith('CM'): return _read_compressed_mat(fd, header)
elif header == 'FM ': sample_size = 4 # floats
elif header == 'DM ': sample_size = 8 # doubles
else: raise UnknownMatrixHeader("The header contained '%s'" % header)
assert(sample_size > 0)
# Dimensions
s1, rows, s2, cols = np.frombuffer(fd.read(10), dtype='int8,int32,int8,int32', count=1)[0]
# Read whole matrix
buf = fd.read(rows * cols * sample_size)
if sample_size == 4 : vec = np.frombuffer(buf, dtype='float32')
elif sample_size == 8 : vec = np.frombuffer(buf, dtype='float64')
else : raise BadSampleSize
mat = np.reshape(vec,(rows,cols))
return mat
def _read_mat_ascii(fd):
rows = []
while 1:
line = fd.readline().decode()
if (len(line) == 0) : raise BadInputFormat # eof, should not happen!
if len(line.strip()) == 0 : continue # skip empty line
arr = line.strip().split()
if arr[-1] != ']':
rows.append(np.array(arr,dtype='float32')) # not last line
else:
rows.append(np.array(arr[:-1],dtype='float32')) # last line
mat = np.vstack(rows)
return mat
def _read_compressed_mat(fd, format):
    """ Read a compressed matrix,
    see: https://github.com/kaldi-asr/kaldi/blob/master/src/matrix/compressed-matrix.h
    methods: CompressedMatrix::Read(...), CompressedMatrix::CopyToMat(...),
    """
    assert(format == 'CM ') # The formats CM2, CM3 are not supported...

    # Format of header 'struct',
    global_header = np.dtype([('minvalue','float32'),('range','float32'),('num_rows','int32'),('num_cols','int32')]) # member '.format' is not written,
    per_col_header = np.dtype([('percentile_0','uint16'),('percentile_25','uint16'),('percentile_75','uint16'),('percentile_100','uint16')])

    # Read global header,
    globmin, globrange, rows, cols = np.frombuffer(fd.read(16), dtype=global_header, count=1)[0]

    # The data is structed as [Colheader, ... , Colheader, Data, Data , .... ]
    #                         {           cols           }{      size       }
    # The per-column uint16 percentiles are mapped back to floats as
    # value = p * range / 65535 + min   (1.52590218966964e-05 == 1/65535).
    col_headers = np.frombuffer(fd.read(cols*8), dtype=per_col_header, count=cols)
    col_headers = np.array([np.array([x for x in y]) * globrange * 1.52590218966964e-05 + globmin for y in col_headers], dtype=np.float32)
    data = np.reshape(np.frombuffer(fd.read(cols*rows), dtype='uint8', count=cols*rows), newshape=(cols,rows)) # stored as col-major,

    mat = np.zeros((cols,rows), dtype='float32')
    p0 = col_headers[:, 0].reshape(-1, 1)
    p25 = col_headers[:, 1].reshape(-1, 1)
    p75 = col_headers[:, 2].reshape(-1, 1)
    p100 = col_headers[:, 3].reshape(-1, 1)

    # Each uint8 sample selects one of three linear segments between the
    # stored percentiles: 0-64 -> [p0,p25], 65-192 -> [p25,p75], 193-255 -> [p75,p100].
    mask_0_64 = (data <= 64)
    mask_193_255 = (data > 192)
    mask_65_192 = (~(mask_0_64 | mask_193_255))

    mat += (p0 + (p25 - p0) / 64. * data) * mask_0_64.astype(np.float32)
    mat += (p25 + (p75 - p25) / 128. * (data - 64)) * mask_65_192.astype(np.float32)
    mat += (p75 + (p100 - p75) / 63. * (data - 192)) * mask_193_255.astype(np.float32)

    return mat.T # transpose! col-major -> row-major,
# Writing,
def write_mat(output_folder,file_or_fd, m, key=''):
""" write_mat(f, m, key='')
Write a binary kaldi matrix to filename or stream. Supports 32bit and 64bit floats.
Arguments:
file_or_fd : filename of opened file descriptor for writing,
m : the matrix to be stored,
key (optional) : used for writing ark-file, the utterance-id gets written before the matrix.
Example of writing single matrix:
kaldi_io.write_mat(filename, mat)
Example of writing arkfile:
with open(ark_file,'w') as f:
for key,mat in dict.iteritems():
kaldi_io.write_mat(f, mat, key=key)
"""
fd = open_or_fd(file_or_fd, output_folder, mode='wb')
if sys.version_info[0] == 3: assert(fd.mode == 'wb')
try:
if key != '' : fd.write((key+' ').encode("latin1")) # ark-files have keys (utterance-id),
fd.write('\0B'.encode()) # we write binary!
# Data-type,
if m.dtype == 'float32': fd.write('FM '.encode())
elif m.dtype == 'float64': fd.write('DM '.encode())
else: raise UnsupportedDataType("'%s', please use 'float32' or 'float64'" % m.dtype)
# Dims,
fd.write('\04'.encode())
fd.write(struct.pack(np.dtype('uint32').char, m.shape[0])) # rows
fd.write('\04'.encode())
fd.write(struct.pack(np.dtype('uint32').char, m.shape[1])) # cols
# Data,
fd.write(m.tobytes())
finally:
if fd is not file_or_fd : fd.close()
#################################################
# 'Posterior' kaldi type (posteriors, confusion network, nnet1 training targets, ...)
# Corresponds to: vector<vector<tuple<int,float> > >
# - outer vector: time axis
# - inner vector: records at the time
# - tuple: int = index, float = value
#
def read_cnet_ark(file_or_fd,output_folder):
    """ Alias of function 'read_post_ark()', 'cnet' = confusion network """
    return read_post_ark(file_or_fd,output_folder)
def read_post_rxspec(file_, output_folder=None):
    """Adaptor reading posteriors from either 'ark:...' or 'scp:...' rxspecifiers.

    BUG FIX: read_post_ark()/read_post_scp() require an output_folder
    argument which the original forwarding calls omitted (TypeError on
    every call).  An optional output_folder parameter is added and passed
    through; existing single-argument callers keep working.
    """
    if file_.startswith("ark:"):
        return read_post_ark(file_, output_folder)
    elif file_.startswith("scp:"):
        return read_post_scp(file_, output_folder)
    else:
        # also fixes the typos 'intput'/'begint' in the original messages
        print("unsupported input type: %s" % file_)
        print("it should begin with 'ark:' or 'scp:'")
        sys.exit(1)
def read_post_scp(file_or_fd, output_folder):
    """ generator(key,post) = read_post_scp(file_or_fd)
    Returns generator of (key,post) tuples, read according to kaldi scp.
    file_or_fd : scp, gzipped scp, pipe or opened file descriptor.

    Iterate the scp:
      for key,post in kaldi_io.read_post_scp(file):
        ...

    Read scp to a 'dictionary':
      d = { key:post for key,post in kaldi_io.read_post_scp(file) }
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        for line in fd:
            (key, rxfile) = line.decode().split(' ')
            # BUG FIX: read_post() requires output_folder; the original call
            # omitted it and raised TypeError on every scp entry.
            post = read_post(rxfile, output_folder)
            yield key, post
    finally:
        if fd is not file_or_fd:
            fd.close()
def read_post_ark(file_or_fd, output_folder):
    """ generator(key,vec<vec<int,float>>) = read_post_ark(file)
    Returns generator of (key,posterior) tuples, read from ark file.
    file_or_fd : ark, gzipped ark, pipe or opened file descriptor.

    Iterate the ark:
      for key,post in kaldi_io.read_post_ark(file):
        ...

    Read ark to a 'dictionary':
      d = { key:post for key,post in kaldi_io.read_post_ark(file) }
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        key = read_key(fd)
        while key:
            # BUG FIX: read_post() requires output_folder; the original call
            # omitted it and raised TypeError on the first entry.
            post = read_post(fd, output_folder)
            yield key, post
            key = read_key(fd)
    finally:
        if fd is not file_or_fd:
            fd.close()
def read_post(file_or_fd,output_folder):
    """ [post] = read_post(file_or_fd)
    Reads single kaldi 'Posterior' in binary format.

    The 'Posterior' is C++ type 'vector<vector<tuple<int,float> > >',
    the outer-vector is usually time axis, inner-vector are the records
    at given time, and the tuple is composed of an 'index' (integer)
    and a 'float-value'. The 'float-value' can represent a probability
    or any other numeric value.

    Returns vector of vectors of tuples.
    """
    fd = open_or_fd(file_or_fd,output_folder)
    ans=[]
    binary = fd.read(2).decode(); assert(binary == '\0B'); # binary flag
    assert(fd.read(1).decode() == '\4'); # int-size
    outer_vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of frames (or bins)

    # Loop over 'outer-vector',
    for i in range(outer_vec_size):
        assert(fd.read(1).decode() == '\4'); # int-size
        inner_vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of records for frame (or bin)
        # each record is (int8 size, int32 idx, int8 size, float32 post) = 10 bytes,
        data = np.frombuffer(fd.read(inner_vec_size*10), dtype=[('size_idx','int8'),('idx','int32'),('size_post','int8'),('post','float32')], count=inner_vec_size)
        assert(data[0]['size_idx'] == 4)
        assert(data[0]['size_post'] == 4)
        ans.append(data[['idx','post']].tolist())

    if fd is not file_or_fd: fd.close()
    return ans
#################################################
# Kaldi Confusion Network bin begin/end times,
# (kaldi stores CNs time info separately from the Posterior).
#
def read_cntime_ark(file_or_fd, output_folder):
    """ generator(key,vec<tuple<float,float>>) = read_cntime_ark(file_or_fd)
    Returns generator of (key,cntime) tuples, read from ark file.
    file_or_fd : file, gzipped file, pipe or opened file descriptor.

    Iterate the ark:
      for key,time in kaldi_io.read_cntime_ark(file):
        ...

    Read ark to a 'dictionary':
      d = { key:time for key,time in kaldi_io.read_cntime_ark(file) }
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        key = read_key(fd)
        while key:
            # BUG FIX: read_cntime() requires output_folder; the original
            # call omitted it and raised TypeError on the first entry.
            cntime = read_cntime(fd, output_folder)
            yield key, cntime
            key = read_key(fd)
    finally:
        if fd is not file_or_fd:
            fd.close()
def read_cntime(file_or_fd,output_folder):
    """ [cntime] = read_cntime(file_or_fd)
    Reads single kaldi 'Confusion Network time info', in binary format:
    C++ type: vector<tuple<float,float> >.
    (begin/end times of bins at the confusion network).

    Binary layout is '<num-bins> <beg1> <end1> <beg2> <end2> ...'

    file_or_fd : file, gzipped file, pipe or opened file descriptor.

    Returns vector of tuples.
    """
    fd = open_or_fd(file_or_fd,output_folder)
    binary = fd.read(2).decode(); assert(binary == '\0B'); # assuming it's binary

    assert(fd.read(1).decode() == '\4'); # int-size
    vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of frames (or bins)

    # each bin is (int8 size, float32 t_beg, int8 size, float32 t_end) = 10 bytes,
    data = np.frombuffer(fd.read(vec_size*10), dtype=[('size_beg','int8'),('t_beg','float32'),('size_end','int8'),('t_end','float32')], count=vec_size)
    assert(data[0]['size_beg'] == 4)
    assert(data[0]['size_end'] == 4)
    ans = data[['t_beg','t_end']].tolist() # Return vector of tuples (t_beg,t_end),

    if fd is not file_or_fd : fd.close()
    return ans
#################################################
# Segments related,
#
# Segments as 'Bool vectors' can be handy,
# - for 'superposing' the segmentations,
# - for frame-selection in Speaker-ID experiments,
def read_segments_as_bool_vec(segments_file):
    """ [ bool_vec ] = read_segments_as_bool_vec(segments_file)
    using kaldi 'segments' file for 1 wav, format : '<utt> <rec> <t-beg> <t-end>'
    - t-beg, t-end is in seconds,
    - assumed 100 frames/second,
    """
    segs = np.loadtxt(segments_file, dtype='object,object,f,f', ndmin=1)
    # Sanity checks,
    assert(len(segs) > 0)  # empty segmentation is an error,
    assert(len(np.unique([rec[1] for rec in segs])) == 1)  # segments with only 1 wav-file,
    # Convert time to frame-indexes,
    start = np.rint([100 * rec[2] for rec in segs]).astype(int)
    end = np.rint([100 * rec[3] for rec in segs]).astype(int)
    # Paint True over each [start, end) span of an all-False frame vector
    # running up to the end of the last segment.
    frms = np.zeros(end[-1], dtype=bool)
    for seg_beg, seg_end in zip(start, end):
        frms[seg_beg:seg_end] = True
    # total True frames must equal total segment length (catches overlaps),
    assert np.sum(end - start) == np.sum(frms)
    return frms
|
dataprocessor.py |
import os
import nltk
import csv
import pickle
import urllib2
import numpy as np
import ka
from multiprocessing import Process, Lock
dirs = ["data/aclImdb/test/pos", "data/aclImdb/test/neg", "data/aclImdb/train/pos", "data/aclImdb/train/neg"]
url = "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
def run(max_seq_length, max_vocab_size):
    """Prepare the aclImdb sentiment corpus (Python 2 code).

    Downloads/extracts the dataset if absent, builds the vocab mapping if
    absent, then spawns one worker process per review directory to write
    the processed .npy files.  Returns early if data/processed exists.

    max_seq_length : int -- length each review is padded/truncated to
    max_vocab_size : int -- vocabulary cap (plus <UNK>/<PAD> tokens)
    """
    if not os.path.exists("data/"):
        os.makedirs("data/")
    if not os.path.exists("data/checkpoints/"):
        os.makedirs("data/checkpoints")
    if not os.path.isdir("data/aclImdb"):
        print "Data not found, downloading dataset..."
        fileName = downloadFile(url)
        import tarfile
        tfile = tarfile.open(fileName, 'r:gz')
        print "Extracting dataset..."
        tfile.extractall('data/')
        tfile.close()
    if os.path.exists("data/vocab.txt"):
        print "vocab mapping found..."
    else:
        print "no vocab mapping found, running preprocessor..."
        createVocab(dirs, max_vocab_size)
    if not os.path.exists("data/processed"):
        os.makedirs("data/processed/")
        print "No processed data file found, running preprocessor..."
    else:
        # processed data already on disk -- nothing left to do
        return
    import vocabmapping
    vocab = vocabmapping.VocabMapping()
    dirCount = 0
    processes = []
    lock = Lock()
    for d in dirs:
        # one worker process per directory (pos/neg x train/test)
        print "Procesing data with process: " + str(dirCount)
        p = Process(target=createProcessedDataFile, args=(vocab, d, dirCount, max_seq_length, lock))
        p.start()
        processes.append(p)
        dirCount += 1
    for p in processes:
        if p.is_alive():
            p.join()
'''
To speed up the data processing (I probably did it way too inefficiently),
I decided to split the task in n processes, where n is the number of directories
A lock was used to ensure while writing to std.out bad things don't happen.
'''
def createProcessedDataFile(vocab_mapping, directory, pid, max_seq_length, lock):
    """Worker (Python 2): tokenize every review in `directory`, map tokens
    to vocab indices, pad/truncate to max_seq_length, append the label
    (1 = pos, 0 = neg) and the clipped token count, and save the stacked
    matrix as data<pid>.npy.

    vocab_mapping : VocabMapping -- token -> index lookup
    pid : int -- worker id, used for the output filename and log lines
    lock : multiprocessing.Lock -- serializes stdout writes only
    """
    count = 0
    # placeholder first row so np.vstack always has something to stack onto
    data = np.array([i for i in range(max_seq_length + 2)])
    for f in os.listdir(directory):
        count += 1
        if count % 100 == 0:
            lock.acquire()
            print "Processing: " + f + " the " + str(count) + "th file... on process: " + str(pid)
            lock.release()
        with open(os.path.join(directory, f), 'r') as review:
            tokens = tokenize(review.read().lower())
            numTokens = len(tokens)
            # NOTE(review): `score` is parsed from the filename but never used
            score = findBetween(f, "_", ".txt")
            indices = [vocab_mapping.getIndex(j) for j in tokens]
        #pad sequence to max length
        if len(indices) < max_seq_length:
            indices = indices + [vocab_mapping.getIndex("<PAD>") for i in range(max_seq_length - len(indices))]
        else:
            indices = indices[0:max_seq_length]
        # label column: 1 for positive reviews, 0 for negative
        if "pos" in directory:
            indices.append(1)
        else:
            indices.append(0)
        # final column: actual (clipped) sequence length
        indices.append(min(numTokens, max_seq_length))
        assert len(indices) == max_seq_length + 2, str(len(indices))
        # NOTE(review): vstack inside the loop is O(n^2); collecting rows in
        # a list and stacking once at the end would be linear
        data = np.vstack((data, indices))
        indices = []
    #remove first placeholder value
    data = data[1::]
    lock.acquire()
    print "Saving data file{0} to disk...".format(str(pid))
    lock.release()
    saveData(data, pid)
#method from:
#http://stackoverflow.com/questions/22676/how-do-i-download-a-file-over-http-using-python
def downloadFile(url):
    """Download `url` into data/ with a simple progress display (Python 2,
    urllib2).  Returns the local file path.

    method from:
    http://stackoverflow.com/questions/22676/how-do-i-download-a-file-over-http-using-python
    """
    file_name = os.path.join("data/", url.split('/')[-1])
    u = urllib2.urlopen(url)
    f = open(file_name, 'wb')
    meta = u.info()
    file_size = int(meta.getheaders("Content-Length")[0])
    print "Downloading: %s Bytes: %s" % (file_name, file_size)
    file_size_dl = 0
    block_sz = 8192
    while True:
        buffer = u.read(block_sz)
        if not buffer:
            break
        file_size_dl += len(buffer)
        f.write(buffer)
        # progress line; chr(8) backspaces rewrite it in place
        status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
        status = status + chr(8)*(len(status)+1)
        print status,
    f.close()
    return file_name
'''
This function tokenizes sentences
'''
def tokenize(text):
    """Decode raw (byte) review text as UTF-8 and split it into word
    tokens with NLTK (Python 2: str -> unicode via .decode)."""
    text = text.decode('utf-8')
    return nltk.word_tokenize(text)
'''
taken from: http://stackoverflow.com/questions/3368969/find-string-between-two-substrings
finds the string between two substrings
'''
def findBetween(s, first, last):
    """Return the substring of `s` strictly between the first occurrence
    of `first` and the following occurrence of `last`; '' if either is
    missing.  e.g. findBetween('7_10.txt', '_', '.txt') -> '10'

    taken from: http://stackoverflow.com/questions/3368969/find-string-between-two-substrings
    """
    try:
        lo = s.index(first) + len(first)
        hi = s.index(last, lo)
    except ValueError:
        return ""
    return s[lo:hi]
'''
Saves processed data numpy array
'''
def saveData(npArray, index):
    """Persist one worker's processed matrix as data<index>.npy under
    data/processed/ (Python 2 code)."""
    name = "data{0}.npy".format(str(index))
    outfile = os.path.join("data/processed/", name)
    print "numpy array is: {0}x{1}".format(len(npArray), len(npArray[0]))
    np.save(outfile, npArray)
'''
create vocab mapping file
'''
def createVocab(dirs, max_vocab_size):
    """Build the token -> index vocab mapping (Python 2 code).

    Counts token frequencies over every review in `dirs`, keeps the
    max_vocab_size most frequent tokens, appends <UNK> and <PAD>, and
    pickles the mapping to data/vocab.txt.
    """
    print "Creating vocab mapping..."
    dic = {}
    for d in dirs:
        # NOTE(review): `indices` is never used in this function
        indices = []
        for f in os.listdir(d):
            with open(os.path.join(d, f), 'r') as review:
                tokens = tokenize(review.read().lower())
                for t in tokens:
                    if t not in dic:
                        dic[t] = 1
                    else:
                        dic[t] += 1
    # NOTE(review): `d` is reused here, shadowing the loop variable above
    d = {}
    counter = 0
    # most frequent tokens get the lowest indices
    for w in sorted(dic, key=dic.get, reverse=True):
        d[w] = counter
        counter += 1
        #take most frequent 50k tokens
        if counter >= max_vocab_size:
            break
    #add out of vocab token and pad token
    d["<UNK>"] = counter
    counter += 1
    d["<PAD>"] = counter
    with open('data/vocab.txt', 'wb') as handle:
        pickle.dump(d, handle)
|
api.py | """GitLab Webhook receiver.
The goal: receive a webhook request from GitLab, run project tests,
respond with comment of status (make a comment to discussion) and
closing merge requests if tests failed.
"""
import os
import os.path
import re
import subprocess
import json
import logging
import logging.config
import falcon
import yaml
import requests
import tempfile
from multiprocessing import Queue, Process
conf = {}
merge_requests_queue = Queue()
push_queue = Queue()

with open('config.yml') as config_file:
    # SECURITY FIX: yaml.load() without an explicit Loader can construct
    # arbitrary Python objects from YAML tags; safe_load restricts the
    # input to plain data, which is all a config file needs.
    conf.update(yaml.safe_load(config_file))
conf['validate_regex'] = re.compile(conf['validate_regex'])

logging.config.dictConfig(conf['log_settings'])

re_gitlab_url = re.compile(r'https?://[\w.]+/')
re_repo_work_dir = re.compile(r'/([\w.-]+).git$')
install_dir = os.path.dirname(os.path.abspath(__file__))
class AuthMiddleware:
    """Simple auth by token (query parameter 'token')."""

    def process_request(self, req, resp):
        """Reject the request unless its 'token' query parameter matches
        the configured access key."""
        supplied = req.get_param('token', required=True)
        if supplied != conf['access_key']:
            raise falcon.HTTPUnauthorized(
                'Authentication Required',
                'Please provide auth token as part of request.')
class RequireJSON:
    """Middleware enforcing JSON for responses and request bodies."""

    error_msg = falcon.HTTPNotAcceptable(
        'This API only supports responses encoded as JSON.',
        href='http://docs.examples.com/api/json')

    def process_request(self, req, resp):
        """Raise 406 unless the client accepts JSON and sends JSON bodies."""
        if not req.client_accepts_json:
            raise self.error_msg
        has_body = req.method in ('POST', 'PUT')
        if has_body and 'application/json' not in req.content_type:
            raise self.error_msg
class JSONTranslator:
    """Middleware that parses JSON request bodies into req.context."""

    def process_request(self, req, resp):
        """Decode the body as JSON and store it under req.context['payload']."""
        if req.content_length in (None, 0):
            # Nothing to parse for body-less requests.
            return
        body = req.stream.read()
        if not body:
            raise falcon.HTTPBadRequest('Empty request body',
                                        'A valid JSON document is required.')
        try:
            req.context['payload'] = json.loads(body.decode('utf-8'))
        except Exception as er:
            raise falcon.HTTPError(falcon.HTTP_753, 'Malformed JSON', str(er))
def max_body(limit):
    """Build a before-hook rejecting request bodies larger than *limit* bytes."""
    def hook(req, resp, resource, params):
        length = req.content_length
        # Unknown or small enough: let the request through.
        if length is None or length <= limit:
            return
        msg = ('The size of the request is too large. The body must not '
               'exceed ' + str(limit) + ' bytes in length.')
        raise falcon.HTTPRequestEntityTooLarge(
            'Request body is too large', msg)
    return hook
def run_cmd(cmd):
    """Run *cmd* under /bin/bash, returning (success, combined output).

    stderr is folded into stdout. Note: commands come from trusted
    configuration, hence shell=True.
    """
    try:
        output = subprocess.check_output(
            cmd, executable='/bin/bash', shell=True,
            stderr=subprocess.STDOUT, universal_newlines=True)
    except subprocess.CalledProcessError as er:
        return False, er.output
    return True, output
class GitLabWebHookReceiver:
    """Falcon resource receiving GitLab web hook POST requests."""

    def __init__(self):
        """Create a class-specific logger."""
        self.log = logging.getLogger(self.__class__.__name__)

    @falcon.before(max_body(1024 * 1024))
    def on_post(self, req, resp):
        """Dispatch the JSON payload to the queue matching its object kind."""
        try:
            payload = req.context['payload']
        except KeyError:
            raise falcon.HTTPBadRequest(
                'Missing thing',
                'A thing must be submitted in the request body.')
        self.log.debug('received data: %s', payload, extra=req.env)
        kind = payload['object_kind']
        if kind == 'merge_request':
            merge_requests_queue.put_nowait(payload)
        elif kind == 'push':
            push_queue.put_nowait(payload)
        resp.status = falcon.HTTP_201
class GitLabAPI:
    """Minimal GitLab API (v3) client bound to one merge request / push event.

    Besides HTTP helpers it manages a local git working directory used to
    apply merge-request patches and run the configured commands.
    """

    def __init__(self, repo_url, clone_url, project_id, branch, action_type,
                 merge_id=None):
        """Store event context and derive the API root URL.

        :param repo_url: project homepage; its scheme+host become the API
            root (e.g. https://gitlab.example.com/api/v3).
        :param clone_url: URL passed to ``git clone``.
        :param project_id: numeric GitLab project id.
        :param branch: branch the event refers to.
        :param action_type: 'merge_request' or 'push'.
        :param merge_id: merge request id (merge_request events only).
        """
        self.token = conf['gitlab_auth_token']
        self.session = None  # requests.Session, created lazily on first call
        self.repo_url = repo_url
        self.clone_url = clone_url
        self.project_id = project_id
        self.branch = branch
        self.action_type = action_type
        self.merge_id = merge_id
        self.api_url = re_gitlab_url.match(self.repo_url).group(0) + 'api/v3'
        self.log = logging.getLogger(self.__class__.__name__)
        self.workdir = None   # repo directory name derived from clone_url
        self.repo_dir = None  # path of the local clone under git_workdir

    def _prepare_request(self, action):
        """Translate *action* (e.g. 'get_changes') into (HTTP method, URL).

        Only merge_request actions are supported; for any other action
        type this returns None and _make_request logs the failure.
        """
        if self.action_type == 'merge_request':
            method, endpoint = action.split('_')
            url = '{api_url}/projects/{project_id}'
            url += '/merge_request/{merge_id}/' + endpoint
            url = url.format(api_url=self.api_url, project_id=self.project_id,
                             merge_id=self.merge_id)
            return method.upper(), url

    def _make_request(self, action, payload=None):
        """Send *action* to the GitLab server.

        :return: decoded JSON response, or None when the request failed
            (all failures, including non-2xx responses, are logged).
        """
        # Bind url before the try block: the except clause logs it, and the
        # original raised UnboundLocalError when _prepare_request itself
        # failed (e.g. for 'push' events it returns None).
        url = None
        try:
            method, url = self._prepare_request(action)
            req = requests.Request(
                method, url, headers={'PRIVATE-TOKEN': self.token},
                data=payload)
            if self.session is None:
                self.session = requests.Session()
            response = self.session.send(req.prepare())
            if response.status_code > 201:
                # Format explicitly: the original passed printf-style args
                # to Exception, producing a tuple message.
                raise Exception('bad response status code: %d'
                                % response.status_code)
        except Exception as er:
            self.log.error('action: %s, url: %s, error message: %s',
                           action, url, er)
        else:
            return response.json()

    def get_merge_request_changes(self):
        """Get merge request changes."""
        return self._make_request('get_changes')

    def get_merge_request_commits(self):
        """Get merge request commits."""
        return self._make_request('get_commits')

    def post_merge_request_comment(self, msg):
        """Comment merge request."""
        return self._make_request('post_comments', {'note': msg})

    def close_merge_request(self):
        """Close merge request."""
        return self._make_request('put_', {'state_event': 'close'})

    def validate_merge_request_commits(self):
        """Check every commit message against conf['validate_regex'].

        :return: (all_valid, newline-joined offending messages)
        """
        bad_messages = []
        for commit in self.get_merge_request_commits():
            if not conf['validate_regex'].match(commit['message']):
                bad_messages.append(commit['message'])
        return len(bad_messages) == 0, '\n'.join(bad_messages)

    def run_commits_messages_validator(self):
        """Validate commit messages, commenting on the MR when invalid."""
        valid, info = self.validate_merge_request_commits()
        if not valid:
            msg = 'Merge request commits have invalid messages:\n'
            msg += '<pre>%s</pre>' % info
            self.post_merge_request_comment(msg)
        return valid

    def prepare_workdir(self):
        """Clone or refresh the upstream repo; apply the MR patch if needed.

        :return: True when the working directory is ready for commands.
        """
        os.chdir(install_dir)
        if os.path.isdir(conf['git_workdir']):
            found = re_repo_work_dir.search(self.clone_url)
            if found:
                self.workdir = found.group(1)
            else:
                self.log.error(
                    'bogus Git repo URL %s', self.clone_url)
                return False
        else:
            self.log.error(
                'directory %s does not exist', conf['git_workdir'])
            return False
        self.repo_dir = os.path.join(conf['git_workdir'], self.workdir)
        if os.path.isdir(self.repo_dir):
            # clean up git workdir, reset changes, fetch updates
            os.chdir(self.repo_dir)
            ok, output = run_cmd(
                'git checkout -- . && git clean -fd && git pull && '
                'git checkout {branch}'.format(branch=self.branch))
            if not ok:
                self.log.error(output)
        else:
            # initialize new repo dir: clone repo
            os.chdir(conf['git_workdir'])
            cmd = ('git clone -q {clone_url} && cd {workdir} && '
                   'git checkout {branch}').format(
                clone_url=self.clone_url, branch=self.branch,
                workdir=self.workdir)
            ok, output = run_cmd(cmd)
            if not ok:
                self.log.error(
                    'can not clone repo %s into %s <%s>',
                    self.clone_url, self.repo_dir, output)
                return False
        if self.action_type == 'merge_request':
            self.apply_patch()
        return True

    def run_test_cmd(self):
        """Run the configured test command; comment on the MR on failure."""
        ok, output = run_cmd(conf['test_cmd'])
        if not ok:
            msg = 'Test command failed, '
            msg += 'please checkout output of **%s**:\n' % conf['test_cmd']
            msg += '<pre>%s</pre>' % output
            self.log.warning(msg)
            self.post_merge_request_comment(msg)
        return ok

    def run_on_push_cmd(self):
        """Run configured commands whose branch pattern matches this push.

        :return: success of the last executed command, or True when no
            pattern matched at all.
        """
        # Initialize ok: the original raised UnboundLocalError at the final
        # return when no branch pattern matched.
        ok = True
        for targets in conf['run_on_push']:
            for branch_pattern, cmd in targets.items():
                found = re.match(branch_pattern + '$', self.branch)
                if found:
                    ok, output = run_cmd(cmd)
                    if not ok:
                        msg = 'On push command failed, '
                        msg += 'please checkout output of **%s**:\n' % cmd
                        msg += '<pre>%s</pre>' % output
                        self.log.warning(msg)
        return ok

    def apply_patch(self):
        """Fetch the MR diff and apply it to the working directory."""
        diff = ''
        for change in self.get_merge_request_changes()['changes']:
            diff += change['diff']
        with tempfile.NamedTemporaryFile(
                'w', encoding='utf-8', delete=False) as f:
            f.write(diff)
        ok, output = run_cmd('patch -p 1 < {file}'.format(file=f.name))
        if not ok:
            self.log.error(output)
        os.unlink(f.name)
def process_merge_request():
    """Process each merge request.

    Blocking, endless loop: waits for merge request payloads on the
    queue, so this function must run in a separate process.
    """
    while True:
        payload = merge_requests_queue.get(block=True)
        attrs = payload['object_attributes']
        if attrs['state'] not in ('reopened', 'opened'):
            continue
        gitlab_api = GitLabAPI(
            repo_url=payload['repository']['homepage'],
            clone_url=payload['repository']['url'],
            project_id=attrs['target_project_id'],
            branch=attrs['target_branch'],
            merge_id=attrs['id'],
            action_type='merge_request'
        )
        results = [True]
        if conf['validate_commit_messages']:
            results.append(gitlab_api.run_commits_messages_validator())
        if conf['run_tests']:
            # Only run tests when the work dir could be prepared.
            if gitlab_api.prepare_workdir():
                results.append(gitlab_api.run_test_cmd())
        if not all(results):
            gitlab_api.close_merge_request()
def process_push():
    """Process push events.

    Blocking, endless loop: waits for incoming push payloads and handles
    them, so this function must run in a separate process.
    """
    while True:
        payload = push_queue.get(block=True)
        branch = payload['ref'].split('refs/heads/')[1]
        gitlab_api = GitLabAPI(
            repo_url=payload['repository']['homepage'],
            clone_url=payload['repository']['url'],
            project_id=payload['project_id'],
            branch=branch,
            merge_id=None,
            action_type='push'
        )
        if conf['process_push']:
            # Only run the push commands when the work dir could be prepared.
            if gitlab_api.prepare_workdir():
                gitlab_api.run_on_push_cmd()
def run_webapp():
    """Serve the falcon app with the stdlib reference WSGI server."""
    from wsgiref import simple_server
    server = simple_server.make_server(
        conf['listen_address'], conf['listen_port'], api)
    server.serve_forever()
# Wire up the falcon application: auth, content-type and JSON middleware,
# plus the single webhook route.
gitlab_webhook_receiver = GitLabWebHookReceiver()
api = application = falcon.API(middleware=[
    AuthMiddleware(), RequireJSON(), JSONTranslator()])
api.add_route('/gitlab/webhook', gitlab_webhook_receiver)
if __name__ == '__main__':
    # Web server and the two queue consumers each run as a daemon process.
    web_app = Process(target=run_webapp)
    web_app.daemon = True
    web_app.start()
    if conf['validate_commit_messages'] or conf['run_tests']:
        on_merge_request = Process(target=process_merge_request)
        on_merge_request.daemon = True
        on_merge_request.start()
    if conf['run_on_push']:
        on_push = Process(target=process_push)
        on_push.daemon = True
        on_push.start()
    # Block on the web server process; daemon children die with it.
    web_app.join()
|
object_detector.py | import cv2
import numpy as np
import threading
from .communication_utils import *
from .database_wrapper import delete_progress as dbw_delete_progress
from .database_wrapper import *
from .serialization import *
# This file represents the backend Object Detector.
def detect_objects(data: dict) -> (int, dict):
    """
    Run object detection on given clips inside given interval.
    If no time interval is provided full clips will be analyzed.
    :param data: Clip id:s and optional start and end time.
    :return: Status code and progress id.
    """
    try:
        clip_ids = data[CLIP_IDS]
        rate = data[RATE]
    except KeyError:
        return 400, {}  # Bad request
    # Optional interval bounds; None when the parameter is absent.
    start_time = date_str_to_datetime(date_str=data.get(START_TIME))
    end_time = date_str_to_datetime(date_str=data.get(END_TIME))
    # Progress object tracks how many clips have been analyzed so far.
    pid = create_progress(total=len(clip_ids))
    # Detection runs on a background thread so the caller is not blocked.
    detector = ObjectDetector()
    detector_thread = threading.Thread(
        target=detector.run_object_detection,
        args=(clip_ids, pid, rate, start_time, end_time))
    detector_thread.start()
    return 200, {PROGRESS_ID: pid}
def get_progress(data: dict) -> (int, dict):
    """
    Get progress of object detection.
    :param data: Progress id.
    :return: Status code and progress counts.
    """
    if PROGRESS_ID not in data:
        return 400, {}  # Bad request
    progress = get_progress_by_id(pid=data[PROGRESS_ID])
    if progress is None:
        return 204, {}  # No content
    return 200, {TOTAL: progress.total, CURRENT: progress.current}
def delete_progress(data: dict) -> (int, dict):
    """
    Deletes a progress object.
    :param data: Progress id.
    :return: Status code, empty.
    """
    if PROGRESS_ID not in data:
        return 400, {}  # Bad request
    dbw_delete_progress(pid=data[PROGRESS_ID])
    return 200, {}
class ObjectDetector:
    """
    YOLO-based object detector for video clips.

    Modified the following example:
    https://pysource.com/2019/06/27/yolo-object-detection-using-opencv-with-python/
    """
    def __init__(self, yolov: str = 'yolov3-tiny', debug: bool = False): # debug
        """
        Loads YOLO.
        :param yolov: YOLO version (weights and cfg must be in utils).
        :param debug: Display all processed frames for user.
        """
        # Weights and network config are read from backend/utils/<yolov>.*.
        self.net = cv2.dnn.readNet("backend{0}utils{0}{1}.weights".format(os.path.sep, yolov),
                                   "backend{0}utils{0}{1}.cfg".format(os.path.sep, yolov))
        self.classes = []
        # Class labels, one per line (COCO names file).
        with open("backend{0}utils{0}coco.names".format(os.path.sep), "r") as f:
            self.classes = [line.strip() for line in f.readlines()]
        self.layer_names = self.net.getLayerNames()
        # NOTE(review): the i[0] indexing assumes getUnconnectedOutLayers()
        # returns nested arrays (older OpenCV); newer cv2 versions return a
        # flat array — confirm against the pinned cv2 version.
        self.output_layers = [self.layer_names[i[0] - 1] for i in self.net.getUnconnectedOutLayers()]
        self.debug = debug # debug
        if debug:
            # One random BGR color per class, used when drawing boxes.
            self.colors = np.random.uniform(0, 255, size=(len(self.classes), 3)) # debug
    def run_object_detection(self, cids: List[int], pid: int, rate: int, start_time: Optional[timezone.datetime],
                             end_time: Optional[timezone.datetime]) -> None:
        """
        Runs object detection on the given list of clips and saves result to the database.
        :param cids: List of clip id:s.
        :param pid: Progress id.
        :param rate: Seconds between each analyzed frame.
        :param start_time: Start time of object detection.
        :param end_time: End time of object detection.
        """
        # Get all clips
        clips = [get_clip_by_id(cid=cid) for cid in cids]
        for clip in clips:
            # Get path to clip
            file_path = replace_sep(str(clip))
            # Calculate start and end based on given start and end time.
            # start_sec/end_sec are offsets (seconds) into the clip; the
            # detection_*_time values are absolute datetimes clamped to the
            # clip's own interval.
            if start_time is None:
                start_sec = 0
                detection_start_time = clip.start_time
            else:
                start_sec = max(int((start_time - clip.start_time).total_seconds()), 0)
                detection_start_time = max(start_time, clip.start_time)
            if end_time is None:
                end_sec = None
                detection_end_time = clip.end_time
            else:
                if end_time > clip.end_time:
                    end_sec = max(int((clip.end_time - clip.start_time).total_seconds()), start_sec)
                    detection_end_time = clip.end_time
                else:
                    end_sec = max(int((end_time - clip.start_time).total_seconds()), start_sec)
                    detection_end_time = end_time
            # Skip clips whose requested interval is empty.
            if end_sec is None or start_sec < end_sec:
                # Run object detection on clip
                res = self.detect(clip=file_path, rate=rate, start=start_sec, end=end_sec)
                # Add result to database; detection times are offsets from
                # the clamped detection start.
                objects = [(obj_cls, detection_start_time + timezone.timedelta(seconds=time)) for obj_cls, time in res]
                create_object_detection(cid=clip.id, sample_rate=rate, start_time=detection_start_time,
                                        end_time=detection_end_time, objects=objects)
            # Update progress since detection of one clip is finished
            update_progress(pid=pid)
    def detect(self, clip: str, rate: int = 1, start: int = 0, end: int = None, thresh: float = 0.5) -> \
            List[Tuple[str, int]]:
        """
        Detects objects in a clip.
        :param clip: Absolute path to clip.
        :param rate: Seconds between each analyzed frame.
        :param start: Which second in clip to start object detection.
        :param end: Which second in clip to end object detection.
        :param thresh: YOLO confidence threshold.
        :return: List of tuples with object class and time in seconds when it was detected.
        """
        res = []
        # Check if clip exists
        if not os.path.isfile(path=clip):
            raise FileNotFoundError
        # Setup video
        video = cv2.VideoCapture(clip)
        frame_rate = int(video.get(cv2.CAP_PROP_FPS))
        frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        # Convert rate, start and end to frames.
        # NOTE(review): if the container reports an FPS of 0, rate becomes 0
        # and the modulo below raises ZeroDivisionError — confirm inputs.
        rate = rate * frame_rate
        start = int(start * frame_rate)
        if end is None:
            end = frames
        else:
            end = end * frame_rate
        # Analyze clip frame by frame
        for i in range(end):
            success, frame = video.read()
            if not success:
                break  # Video is over.
            # Skip clips according to start and rate.
            if i >= start and i % rate == start % rate:
                # Prepare frame
                frame = cv2.resize(frame, None, fx=0.4, fy=0.4)
                height, width, channels = frame.shape
                # Detect objects
                blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
                self.net.setInput(blob)
                outs = self.net.forward(self.output_layers)
                # Process result of object detection.
                class_ids = []
                confidences = []
                boxes = []
                for out in outs:
                    for detection in out:
                        # First 5 values are box + objectness; the rest are
                        # per-class scores.
                        scores = detection[5:]
                        class_id = np.argmax(scores)
                        confidence = scores[class_id]
                        if confidence > thresh:
                            # Detected object
                            center_x = int(detection[0] * width)
                            center_y = int(detection[1] * height)
                            w = int(detection[2] * width)
                            h = int(detection[3] * height)
                            # Rectangle coordinates
                            x = int(center_x - w / 2)
                            y = int(center_y - h / 2)
                            boxes.append([x, y, w, h])
                            confidences.append(float(confidence))
                            class_ids.append(class_id)
                # Non maximum suppression
                indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
                # Label result
                for j in range(len(boxes)):
                    if j in indexes:
                        # Add detected object to res
                        label = str(self.classes[class_ids[j]])
                        res.append((label, int(i / frame_rate)))
                        if self.debug:
                            x, y, w, h = boxes[j]
                            color = self.colors[j]
                            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                            cv2.putText(frame, label, (x, y + 30), cv2.FONT_HERSHEY_PLAIN, 3, color, 3)
                if self.debug:
                    # Display frame with detection
                    cv2.imshow("Image", frame)
                    cv2.waitKey(0)
                    cv2.destroyAllWindows()
        return res
|
genetic_algorithm.py | from __future__ import print_function
import time
import random
from operator import attrgetter
from gaps import image_helpers
from gaps.selection import roulette_selection
# from gaps.plot import Plot
from gaps.progress_bar import print_progress
from gaps.crowd.crossover import Crossover
from gaps.crowd.individual import Individual
from gaps.crowd.nodes import NodesAndHints
from gaps.crowd.crowd_individual import CrowdIndividual
from gaps.crowd.image_analysis import ImageAnalysis
from gaps.config import Config
from gaps.utils import notify_crowdjigsaw_server
from multiprocessing import Process, Queue
from gaps.crowd.fitness import db_update, dissimilarity_measure
from gaps.crowd.dbaccess import mongo_wrapper
import redis
import json
import numpy as np
redis_cli = redis.Redis(connection_pool=Config.pool)
def worker(pid, start_time, pieces, elite_size):
    """Redis-driven crossover worker loop (runs in a child process).

    Each iteration: load the latest dissimilarity data from redis,
    re-analyze the pieces, read this process's parent batch from redis
    (or, when absent, re-select parents from its own previous children),
    perform crossover, pad to 49 unique children with random
    permutations, and publish the children back to redis.

    :param pid: worker index, used in the redis hash field names.
    :param start_time: GA start time, forwarded to refreshTimeStamp().
    :param pieces: puzzle pieces used to rebuild Individuals from id lists.
    :param elite_size: number of elite children kept during re-selection.
    """
    def calc_rank_fitness(population):
        # Local copy of GeneticAlgorithm.calc_rank_fitness: runs of equal
        # objective in the sorted population share one averaged rank fitness.
        rank1 = 0
        while rank1 < len(population):
            fitness1 = Config.get_rank_fitness(rank1, len(population))
            indiv1 = population[rank1]
            rank2 = rank1 + 1
            for rank2 in range(rank1+1, len(population)):
                indiv2 = population[rank2]
                if abs(indiv1.objective-indiv2.objective) > 1e-6:
                    break
            fitness2 = Config.get_rank_fitness(rank2 - 1, len(population))
            for indiv in population[rank1: rank2]:
                indiv._fitness = (fitness1 + fitness2) / 2.0
            rank1 = rank2
    from gaps.crowd.fitness import db_update
    children = set()
    while True:
        # Busy-wait until the parent process has published dissimilarity data.
        redis_key = 'round:%d:dissimilarity' % Config.round_id
        dissimilarity_json = redis_cli.get(redis_key)
        if dissimilarity_json:
            dissimilarity_measure.measure_dict = json.loads(dissimilarity_json)
        else:
            continue
        refreshTimeStamp(start_time)
        #db_update()
        ImageAnalysis.analyze_image(pieces)
        # Parent batch assigned to this worker by start_evolution().
        redis_key = 'round:%d:parents' % (Config.round_id)
        parents_json = redis_cli.hget(redis_key, 'process:%d' % pid)
        parents = []
        elite = []
        if parents_json:
            parents_data = json.loads(parents_json)
            #print(pid, len(parents_data))
            # 49 is the fixed per-process batch size (matches the check in
            # GeneticAlgorithm.start_evolution).
            if parents_data and len(parents_data) == 49:
                parents = [(Individual([pieces[_] for _ in f], Config.cli_args.rows, Config.cli_args.cols, False),
                            Individual([pieces[_] for _ in s], Config.cli_args.rows, Config.cli_args.cols, False))
                           for (f, s) in parents_data]
                #print('process %d get %d parents from redis' % (pid, len(parents)))
        if not parents:
            # No batch from the parent process: evolve our own previous
            # children instead (skip entirely on the very first iteration).
            if not children:
                continue
            children = list(map(lambda x: [int(_) for _ in x.split(',')], children))
            children = [Individual([pieces[_] for _ in c], Config.cli_args.rows, Config.cli_args.cols, False) for c in children]
            children.sort(key=attrgetter("objective"))
            elite = children[-elite_size:] if elite_size > 0 else []
            calc_rank_fitness(children)
            parents = roulette_selection(children, elites=elite_size)
            #print('process %d get %d parents from itself' % (pid, len(parents)))
        children = set()
        for first_parent, second_parent in parents:
            crossover = Crossover(first_parent, second_parent)
            crossover.run()
            child = crossover.child()
            # Children are stored as comma-joined piece-id strings so the
            # set deduplicates identical arrangements.
            children.add(','.join([str(_) for _ in child.get_pieces_id_list()]))
        #print(len(children))
        while len(children) < 49:
            # Pad with random permutations up to the fixed batch size.
            random_child = [str(i) for i in range(len(pieces))]
            np.random.shuffle(random_child)
            #print(random_child)
            children.add(','.join(random_child))
        #print(len(children))
        #print('process %d put %d children' % (pid, len(children)))
        redis_key = 'round:%d:children' % (Config.round_id)
        children_data = json.dumps(list(children))
        redis_cli.hset(redis_key, 'process:%d' % pid, children_data)
def refreshTimeStamp(start_time):
    """Update Config.timestamp to milliseconds elapsed since *start_time*.

    Offline runs additionally shift the timestamp forward, pretending the
    GA started part-way through the recorded round.
    """
    elapsed_ms = (time.time() - start_time) * 1000
    if not Config.cli_args.online:
        elapsed_ms += mongo_wrapper.get_round_winner_time_milisecs() * Config.offline_start_percent * 1.0
    Config.timestamp = elapsed_ms
def compute_edges_match(individual, columns, edges):
    """Compute ratios of crowd edges that *individual* reproduces.

    Edge keys look like '<x>L-R<y>' (left-right) or '<x>T-B<y>'
    (top-bottom); each value carries crowd vote counts 'wp'/'wn'.

    :return: (match ratio among correct crowd edges,
              ratio among low-confidence edges,
              ratio among high-confidence edges,
              ratio among all edges)
    """
    edges_match = 0.0
    confidence_edges_match = 0.0
    unconfidence_edges_match = 0.0
    correct_edges_match = 0.0
    confidence_edges = 0.0
    unconfidence_edges = 0.0
    correct_edges = 0.0
    for key, edge in edges.items():
        left_part, right_part = key.split('-')
        first_piece_id = int(left_part[:-1])
        second_piece_id = int(right_part[1:])
        # 'L' edges pair horizontal neighbors, others pair vertical ones.
        if left_part[-1] == 'L':
            expected_neighbor = first_piece_id + 1
            direction = 'R'
        else:
            expected_neighbor = first_piece_id + columns
            direction = 'D'
        correct_edge = (second_piece_id == expected_neighbor)
        if correct_edge:
            correct_edges += 1
        edges_matched = (individual.edge(first_piece_id, direction) == second_piece_id)
        if edges_matched:
            edges_match += 1
            if correct_edge:
                correct_edges_match += 1
        wp = float(edge['wp'])
        wn = float(edge['wn'])
        confidence = wp * 1.0 / (wn + wp)
        # Golden-ratio threshold splits high/low-confidence edges.
        if confidence >= 0.618:
            confidence_edges += 1
            if edges_matched:
                confidence_edges_match += 1
        else:
            unconfidence_edges += 1
            if edges_matched:
                unconfidence_edges_match += 1
    # Empty denominators count as 1 so ratios stay well-defined.
    correct_edges = correct_edges or 1.0
    unconfidence_edges = unconfidence_edges or 1.0
    confidence_edges = confidence_edges or 1.0
    total_edges = len(edges) or 1.0
    return (correct_edges_match / correct_edges,
            unconfidence_edges_match / unconfidence_edges,
            confidence_edges_match / confidence_edges,
            edges_match / total_edges)
# Do not create two instances of this class
class GeneticAlgorithm(object):
    """Crowd-assisted genetic solver for jigsaw puzzles.

    Evolves a population of piece arrangements (Individuals), pulling
    crowd-sourced edge dissimilarity data each generation and optionally
    distributing crossover work to worker processes via redis.
    """
    # Not referenced within this class; presumably kept for callers — verify.
    TERMINATION_THRESHOLD = 10

    def __init__(self, image, piece_size, population_size, generations, r, c):
        """Slice *image* into pieces and build the initial random population."""
        self._image = image
        self._piece_size = piece_size
        self._generations = generations
        self._elite_size = Config.elite_size
        pieces, rows, columns = image_helpers.flatten_image(image, piece_size, indexed=True, r=r, c=c)
        self.rows = rows
        self.columns = columns
        self._population = [Individual(pieces, rows, columns) for _ in range(population_size)]
        self._pieces = pieces
        # edge-key -> decaying weight of edges repeatedly seen in elites
        self.common_edges = dict()

    def start_evolution(self, verbose):
        """Run the GA loop for the configured number of generations.

        Writes per-generation stats to result_file_<round>.csv, publishes
        GA edges to redis, and terminates the process (exit(0)) once a
        solution has been confirmed.
        :param verbose: show the fittest individual after each generation.
        """
        # Truncate the stats file and write the CSV header.
        with open('result_file_%d.csv' % Config.round_id , 'w') as f:
            line = "%s,%s,%s,%s,%s,%s,%s\n" % ('time', 'cog_index', 'correct_in_db',
                'total_in_db', 'correct_in_GA', 'total_in_GA', 'precision')
            f.write(line)
        '''
        print("=== Pieces: {}\n".format(len(self._pieces)))
        '''
        if verbose:
            from gaps.plot import Plot
            plot = Plot(self._image)
        #ImageAnalysis.analyze_image(self._pieces)
        start_time = time.time()
        fittest = None
        best_fitness_score = float("-inf")
        solution_found = False
        if Config.multiprocess:
            # Spawn crossover workers and clear any stale redis hand-off keys.
            data_q = Queue()
            res_q = Queue()
            processes = []
            for pid in range(Config.process_num):
                p = Process(target=worker, args=(pid, start_time, self._pieces[:], 0))
                p.start()
                processes.append(p)
                redis_key = 'round:%d:parents' % (Config.round_id)
                redis_cli.hdel(redis_key, 'process:%d' % pid)
                redis_key = 'round:%d:children' % (Config.round_id)
                redis_cli.hdel(redis_key, 'process:%d' % pid)
        old_crowd_edge_count = 1
        for generation in range(self._generations):
            if not Config.cli_args.online and not Config.cli_args.hide_detail:
                print_progress(generation, self._generations - 1, prefix="=== Solving puzzle offline: ", start_time=start_time)
            refreshTimeStamp(start_time)
            ## In crowd-based algorithm, we need to access database to updata fintess measure
            ## at the beginning of each generation.
            # update fitness from database.
            generation_start_time = time.time()
            db_update()
            if not Config.cli_args.hide_detail:
                print("edge_count:{}/edge_prop:{}".format(db_update.crowd_edge_count, db_update.crowd_edge_count/Config.total_edges))
            # Share the fresh dissimilarity data with the worker processes.
            redis_key = 'round:%d:dissimilarity' % Config.round_id
            dissimilarity_json = json.dumps(dissimilarity_measure.measure_dict)
            #print(dissimilarity_json)
            redis_cli.set(redis_key, dissimilarity_json)
            # calculate dissimilarity and best_match_table.
            ImageAnalysis.analyze_image(self._pieces)
            # fitness of all individuals need to be re-calculated.
            for _individual in self._population:
                _individual._objective = None
                _individual._fitness = None
            db_update_time = time.time()
            new_population = []
            # random.shuffle(self._population)
            self._population.sort(key=attrgetter("objective"))
            #print(','.join([str(ind.get_pieces_id_list()) for ind in self._population]))
            # Elitism
            # elite = self._get_elite_individuals(elites=self._elite_size)
            elite = self._population[-self._elite_size:]
            new_population.extend(elite)
            if Config.fitness_func_name == 'rank-based':
                #!!! self._population needs to be sorted first
                # for rank, indiv in enumerate(self._population):
                #     indiv.calc_rank_fitness(rank)
                self.calc_rank_fitness()
            select_elite_time = time.time()
            # solution_found is set at the end of the previous generation;
            # report timing, stop the workers and terminate the process.
            if solution_found:
                print("GA found a solution for round {}!".format(Config.round_id))
                if Config.cli_args.online:
                    GA_time = time.time() - (mongo_wrapper.get_round_start_milisecs() / 1000.0)
                    print("GA time: %.3f" % GA_time)
                else:
                    winner_time = mongo_wrapper.get_round_winner_time_milisecs() / 1000.0
                    GA_time = time.time() - start_time + \
                        mongo_wrapper.get_round_winner_time_milisecs() * Config.offline_start_percent / 1000.0
                    print("solved, winner time: %.3f, GA time: %.3f" % (winner_time, GA_time))
                if Config.multiprocess:
                    for p in processes:
                        p.terminate()
                notify_crowdjigsaw_server()
                exit(0)
            self._get_common_edges(elite[:4])
            selected_parents = roulette_selection(self._population, elites=self._elite_size)
            select_parent_time = time.time()
            result = set()
            if Config.multiprocess:
                # multiprocessing
                worker_args = []
                # assign equal amount of work to process_num-1 processes
                redis_key = 'round:%d:parents' % (Config.round_id)
                redis_data = {}
                for pid in range(Config.process_num):
                    parents_data = json.dumps([(f_parent.get_pieces_id_list(), s_parent.get_pieces_id_list())
                        for (f_parent, s_parent) in selected_parents[(len(selected_parents)//Config.process_num)*pid \
                        : (len(selected_parents)//Config.process_num)*(pid+1)]])
                    redis_data['process:%d' % pid] = parents_data
                redis_cli.hmset(redis_key, redis_data)
                redis_key = 'round:%d:children' % (Config.round_id)
                for pid in range(Config.process_num):
                    # Busy-wait until this worker publishes a full batch (49).
                    while True:
                        children_json = redis_cli.hget(redis_key, 'process:%d' % pid)
                        if children_json:
                            children_data = json.loads(children_json)
                            if children_data:
                                if len(children_data) != 49:
                                    continue
                                redis_key = 'round:%d:parents' % (Config.round_id)
                                redis_cli.hdel(redis_key, 'process:%d' % pid)
                                redis_key = 'round:%d:children' % (Config.round_id)
                                redis_cli.hdel(redis_key, 'process:%d' % pid)
                                result.update(children_data)
                                break
            else:
                # non multiprocessing
                for first_parent, second_parent in selected_parents:
                    crossover = Crossover(first_parent, second_parent)
                    crossover.run()
                    child = crossover.child()
                    # Children are stored as comma-joined piece-id strings so
                    # the set deduplicates identical arrangements.
                    result.add(','.join([str(_) for _ in child.get_pieces_id_list()]))
                # Pad the next generation with random permutations.
                while len(result) < len(selected_parents):
                    random_child = [str(i) for i in range(len(self._pieces))]
                    np.random.shuffle(random_child)
                    result.add(','.join(random_child))
            result = list(map(lambda x: [int(_) for _ in x.split(',')], result))
            result = [Individual([self._pieces[_] for _ in c], Config.cli_args.rows, Config.cli_args.cols, False) for c in result]
            new_population.extend(result)
            for child in new_population:
                if child.is_solution():
                    fittest = child
                    # Publish the solving arrangement's edges for the frontend.
                    redis_key = 'round:' + str(Config.round_id) + ':GA_edges'
                    res = redis_cli.set(redis_key, json.dumps(list(child.edges_set())))
                    solution_found = True
                    break
            crossover_time = time.time()
            if not solution_found:
                fittest = self._best_individual()
                if fittest.fitness > best_fitness_score:
                    best_fitness_score = fittest.fitness
            self._population = new_population
            if verbose:
                from gaps.plot import Plot
                plot.show_fittest(fittest.to_image(), "Generation: {} / {}".format(generation + 1, self._generations))
            # Per-phase wall-clock timings for this generation.
            times = {
                'generation_time': time.time() - generation_start_time,
                'db_update_time': db_update_time - generation_start_time,
                'select_elite_time': select_elite_time - db_update_time,
                'select_parent_time': select_parent_time - select_elite_time,
                'crossover_time': crossover_time - select_parent_time
            }
            print(times)
        return fittest

    def _remove_unconfident_edges(self, edges_set):
        """Drop edges whose crowd confidence is below the 0.618 threshold."""
        old_size = len(edges_set)
        for e in list(edges_set):
            if e in db_update.edges_confidence and db_update.edges_confidence[e] < 0.618:
                edges_set.remove(e)
        new_size = len(edges_set)
        if old_size != new_size:
            print('remove %d edges' % (old_size - new_size))

    def _merge_common_edges(self, old_edges_set, new_edges_set):
        """Merge two edge sets keyed by (orientation, left piece).

        Conflicting links from new_edges_set win, because its entries
        overwrite earlier ones in the per-orientation dicts.
        """
        links = {
            'L-R': {},
            'T-B': {}
        }
        for edges_set in [old_edges_set, new_edges_set]:
            for e in edges_set:
                left, right = e.split('-')
                x, tag, y = left[:-1], 'L-R' if left[-1] == 'L' else 'T-B', right[1:]
                links[tag][x] = y
        merged_set = set()
        for orient in links:
            for x, y in links[orient].items():
                merged_set.add(x + orient + y)
        return merged_set

    def _get_common_edges(self, individuals):
        """Publish edges the elite individuals agree on; log precision stats.

        Combines the union of the individuals' confident edges with the
        intersection of all their edges, decays previously recorded
        common edges, writes the result to redis and appends one row to
        the CSV stats file.
        :return: the intersection edge set.
        """
        confident_edges_sets, edges_sets = [], []
        for individual in individuals:
            edges_set = individual.edges_set()
            confident_edges_set = individual.confident_edges_set()
            edges_sets.append(edges_set)
            confident_edges_sets.append(confident_edges_set)
        # Union of confident edges across individuals...
        confident_edges_set = confident_edges_sets[0]
        for i in range(1, len(confident_edges_sets)):
            confident_edges_set = confident_edges_set | confident_edges_sets[i]
        # ...intersection of all edges across individuals.
        edges_set = edges_sets[0]
        for i in range(1, len(edges_sets)):
            edges_set = edges_set & edges_sets[i]
        correct_links = 0
        #self._remove_unconfident_edges(self.common_edges)
        # Exponentially decay old common-edge weights; drop fully decayed ones.
        old_common_edges = list(self.common_edges.items())
        for k, v in old_common_edges:
            if v < 1:
                del self.common_edges[k]
            else:
                self.common_edges[k] = v / 2
        new_common_edges = self._merge_common_edges(confident_edges_set, edges_set)
        #new_common_edges = self._merge_common_edges(self.common_edges.keys(), new_common_edges)
        for e in new_common_edges:
            self.common_edges[e] = 32
        for e in new_common_edges:
            left, right = e.split('-')
            x = int(left[:-1])
            y = int(right[1:])
            if left[-1] == 'L':
                # NOTE(review): wrap-around test uses Config.cli_args.rows;
                # for non-square puzzles this looks like it should be the
                # column count — confirm.
                if x + 1 == y and y % Config.cli_args.rows != 0:
                    correct_links += 1
            else:
                if x + Config.cli_args.rows == y:
                    correct_links += 1
        with open('result_file_%d.csv' % Config.round_id , 'a') as f:
            line = "%d,%d,%d,%d,%d,%d,%.4f\n" % (Config.timestamp, db_update.cog_index, db_update.crowd_correct_edge,
                db_update.crowd_edge_count, correct_links, len(new_common_edges),
                0 if len(new_common_edges) == 0 else float(correct_links) / float(len(new_common_edges)))
            f.write(line)
        redis_key = 'round:' + str(Config.round_id) + ':GA_edges'
        redis_cli.set(redis_key, json.dumps(list(new_common_edges)))
        print('\ntimestamp:', Config.timestamp, 'cog index:', db_update.cog_index,
            '\ncorrect edges in db:', db_update.crowd_correct_edge, 'total edges in db:', db_update.crowd_edge_count,
            '\ncorrect edges in GA:', correct_links, 'total edges in GA:', len(new_common_edges))
        return edges_set

    '''
    def _get_elite_individuals(self, elites):
        """Returns first 'elite_count' fittest individuals from population"""
        return sorted(self._population, key=attrgetter("fitness"))[-elites:]
    '''

    def _best_individual(self):
        """Returns the fittest individual from population"""
        return max(self._population, key=attrgetter("fitness"))

    def calc_rank_fitness(self):
        """Assign rank-based fitness; equal-objective runs share one value.

        The population must already be sorted by objective.
        """
        rank1 = 0
        while rank1 < len(self._population):
            fitness1 = Config.get_rank_fitness(rank1, len(self._population))
            indiv1 = self._population[rank1]
            rank2 = rank1 + 1
            for rank2 in range(rank1+1, len(self._population)):
                indiv2 = self._population[rank2]
                if abs(indiv1.objective-indiv2.objective) > 1e-6:
                    break
            fitness2 = Config.get_rank_fitness(rank2 - 1, len(self._population))
            for indiv in self._population[rank1: rank2]:
                indiv._fitness = (fitness1 + fitness2) / 2.0
            rank1 = rank2
|
manager.py | import multiprocessing as mp
import queue
def myProcess(ns):
    """Read the shared namespace's x, print it, then set it to 2."""
    current = ns.x
    print(current)
    ns.x = 2
def main():
    """Demonstrate sharing state across processes via a manager Namespace."""
    manager = mp.Manager()
    shared = manager.Namespace()
    shared.x = 1
    print(shared)
    worker = mp.Process(target=myProcess, args=(shared,))
    worker.start()
    worker.join()
    # The child's update to x is visible here through the manager proxy.
    print(shared)
if __name__ == '__main__':
    # Entry-point guard: required so child processes spawned by
    # multiprocessing do not re-run main() on import.
    main()
|
base_test.py | # Copyright 2013-present Barefoot Networks, Inc.
# Copyright 2018-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Antonin Bas (antonin@barefootnetworks.com)
# Carmelo Cascone (carmelo@opennetworking.org)
#
import logging
# https://stackoverflow.com/questions/24812604/hide-scapy-warning-message-ipv6
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
import itertools
import Queue
import sys
import threading
import time
from StringIO import StringIO
from functools import wraps, partial
from unittest import SkipTest
import google.protobuf.text_format
import grpc
import ptf
import scapy.packet
import scapy.utils
from google.protobuf import text_format
from google.rpc import status_pb2, code_pb2
from ipaddress import ip_address
from p4.config.v1 import p4info_pb2
from p4.v1 import p4runtime_pb2, p4runtime_pb2_grpc
from ptf import config
from ptf import testutils as testutils
from ptf.base_tests import BaseTest
from ptf.dataplane import match_exp_pkt
from ptf.packet import IPv6
from scapy.layers.inet6 import *
from scapy.layers.l2 import Ether
from scapy.pton_ntop import inet_pton, inet_ntop
from scapy.utils6 import in6_getnsma, in6_getnsmac
from helper import P4InfoHelper
# Default priority for table entries that require one (e.g. ternary matches).
DEFAULT_PRIORITY = 10

# --- MAC addresses used by the test topology ---
IPV6_MCAST_MAC_1 = "33:33:00:00:00:01"
SWITCH1_MAC = "00:00:00:00:aa:01"
SWITCH2_MAC = "00:00:00:00:aa:02"
SWITCH3_MAC = "00:00:00:00:aa:03"
HOST1_MAC = "00:00:00:00:00:01"
HOST2_MAC = "00:00:00:00:00:02"
MAC_BROADCAST = "FF:FF:FF:FF:FF:FF"
MAC_FULL_MASK = "FF:FF:FF:FF:FF:FF"
MAC_MULTICAST = "33:33:00:00:00:00"
MAC_MULTICAST_MASK = "FF:FF:00:00:00:00"

# --- IPv6 addresses used by the test topology ---
SWITCH1_IPV6 = "2001:0:1::1"
SWITCH2_IPV6 = "2001:0:2::1"
SWITCH3_IPV6 = "2001:0:3::1"
SWITCH4_IPV6 = "2001:0:4::1"
HOST1_IPV6 = "2001:0000:85a3::8a2e:370:1111"
HOST2_IPV6 = "2001:0000:85a3::8a2e:370:2222"
IPV6_MASK_ALL = "FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF"

# --- Protocol constants (EtherTypes, IP protocol numbers, ICMPv6 types) ---
ARP_ETH_TYPE = 0x0806
IPV6_ETH_TYPE = 0x86DD
ICMPV6_IP_PROTO = 58
NS_ICMPV6_TYPE = 135  # Neighbor Solicitation
NA_ICMPV6_TYPE = 136  # Neighbor Advertisement

# FIXME: this should be removed, use generic packet in test
PACKET_IN_INGRESS_PORT_META_ID = 1
def print_inline(text):
    """Write *text* to stdout without a trailing newline and flush at once."""
    stream = sys.stdout
    stream.write(text)
    stream.flush()
# See https://gist.github.com/carymrobbins/8940382
# functools.partialmethod is introduced in Python 3.4
class partialmethod(partial):
    """Backport of functools.partialmethod.

    A partial that, when looked up on an instance, binds that instance as
    the first positional argument (descriptor protocol).
    """
    def __get__(self, instance, owner):
        # Accessed on the class itself: behave like a plain attribute.
        if instance is None:
            return self
        # Accessed on an instance: prepend the instance to the stored args.
        return partial(self.func, instance,
                       *(self.args or ()), **(self.keywords or {}))
# Convert integer (with length) to binary byte string
# Equivalent to Python 3.2 int.to_bytes
# See
# https://stackoverflow.com/questions/16022556/has-python-3-to-bytes-been-back-ported-to-python-2-7
def stringify(n, length):
    """Return *n* as a big-endian binary string of *length* bytes.

    FIX: the original str.decode('hex') only exists on Python 2;
    binascii.unhexlify produces the same bytes on both Python 2 and 3.
    """
    import binascii  # local import so the file's import block stays untouched
    h = '%x' % n
    # Left-pad to an even number of hex digits, then to the requested width.
    h = ('0' * (len(h) % 2) + h).zfill(length * 2)
    return binascii.unhexlify(h)
def ipv4_to_binary(addr):
    """Convert a dotted-quad IPv4 string to a 4-character binary string."""
    octets = addr.split('.')
    return "".join(chr(int(octet, 10)) for octet in octets)
def ipv6_to_binary(addr):
    """Convert an IPv6 address string to its packed 16-byte representation.

    FIX: the original called addr.decode("utf-8") unconditionally, which
    fails on Python 3 where str has no decode(). Accept both bytes
    (Python 2 str) and text: decode only when needed, since ip_address()
    requires a text address.
    """
    if isinstance(addr, bytes):
        addr = addr.decode("utf-8")
    return ip_address(addr).packed
def mac_to_binary(addr):
    """Convert a colon-separated MAC string to a 6-character binary string."""
    return "".join(chr(int(part, 16)) for part in addr.split(':'))
def format_pkt_match(received_pkt, expected_pkt):
    """Return a human-readable dump comparing expected vs. received packets.

    Taken from the PTF dataplane class. The scapy dissection helpers print
    directly to stdout, so stdout is temporarily swapped for a StringIO
    buffer and restored afterwards.

    FIX: Python 2 print statements replaced with print() calls, which
    behave identically on Python 2 for a single argument and also work on
    Python 3.
    """
    stdout_save = sys.stdout
    try:
        # Redirect stdout so scapy's output is captured as a string.
        sys.stdout = StringIO()
        print("========== EXPECTED ==========")
        if isinstance(expected_pkt, scapy.packet.Packet):
            scapy.packet.ls(expected_pkt)
            print('--')
            scapy.utils.hexdump(expected_pkt)
        print("========== RECEIVED ==========")
        if isinstance(received_pkt, scapy.packet.Packet):
            scapy.packet.ls(received_pkt)
            print('--')
            scapy.utils.hexdump(received_pkt)
        print("==============================")
        return sys.stdout.getvalue()
    finally:
        sys.stdout.close()
        sys.stdout = stdout_save  # Restore the original stdout.
def format_pb_msg_match(received_msg, expected_msg):
    """Return a printable side-by-side dump of two protobuf messages."""
    buf = StringIO()
    buf.write("========== EXPECTED PROTO ==========\n")
    buf.write(text_format.MessageToString(expected_msg))
    buf.write("========== RECEIVED PROTO ==========\n")
    buf.write(text_format.MessageToString(received_msg))
    buf.write("==============================\n")
    try:
        return buf.getvalue()
    finally:
        buf.close()
def pkt_mac_swap(pkt):
    """Swap the Ethernet source and destination addresses in place."""
    eth = pkt[Ether]
    eth.dst, eth.src = eth.src, eth.dst
    return pkt
def pkt_route(pkt, mac_dst):
    """Rewrite Ethernet addresses as a router would: old dst becomes src."""
    eth = pkt[Ether]
    eth.src = eth.dst
    eth.dst = mac_dst
    return pkt
def pkt_decrement_ttl(pkt):
    """Decrement the IPv4 TTL or IPv6 hop limit, whichever layer is present."""
    if IP in pkt:
        pkt[IP].ttl = pkt[IP].ttl - 1
    elif IPv6 in pkt:
        pkt[IPv6].hlim = pkt[IPv6].hlim - 1
    return pkt
def genNdpNsPkt(target_ip, src_mac=HOST1_MAC, src_ip=HOST1_IPV6):
    """Build an NDP Neighbor Solicitation packet for *target_ip*.

    The destination MAC/IP are the solicited-node multicast addresses
    derived from the target address. NOTE(review): `socket` is not imported
    explicitly in this file — presumably it reaches scope through the scapy
    wildcard imports at the top; confirm.
    """
    nsma = in6_getnsma(inet_pton(socket.AF_INET6, target_ip))
    d = inet_ntop(socket.AF_INET6, nsma)
    dm = in6_getnsmac(nsma)
    p = Ether(dst=dm) / IPv6(dst=d, src=src_ip, hlim=255)
    p /= ICMPv6ND_NS(tgt=target_ip)
    p /= ICMPv6NDOptSrcLLAddr(lladdr=src_mac)
    return p
def genNdpNaPkt(target_ip, target_mac,
                src_mac=SWITCH1_MAC, dst_mac=IPV6_MCAST_MAC_1,
                src_ip=SWITCH1_IPV6, dst_ip=HOST1_IPV6):
    """Build an NDP Neighbor Advertisement announcing target_ip/target_mac."""
    p = Ether(src=src_mac, dst=dst_mac)
    p /= IPv6(dst=dst_ip, src=src_ip, hlim=255)
    p /= ICMPv6ND_NA(tgt=target_ip)
    p /= ICMPv6NDOptDstLLAddr(lladdr=target_mac)
    return p
class P4RuntimeErrorFormatException(Exception):
    """Used to indicate that the gRPC error Status object returned by the server has
    an incorrect format.
    """
    def __init__(self, message):
        # Plain message-carrying exception; no extra state.
        super(P4RuntimeErrorFormatException, self).__init__(message)
# Used to iterate over the p4.Error messages in a gRPC error Status object
class P4RuntimeErrorIterator:
    """Yields (index, p4.Error) pairs packed in a gRPC error Status.

    Raises P4RuntimeErrorFormatException when the Status trailer is missing
    or malformed. Entries whose canonical code is OK are skipped.
    """
    def __init__(self, grpc_error):
        assert (grpc_error.code() == grpc.StatusCode.UNKNOWN)
        self.grpc_error = grpc_error
        error = None
        # The gRPC Python package does not have a convenient way to access the
        # binary details for the error: they are treated as trailing metadata.
        for meta in itertools.chain(self.grpc_error.initial_metadata(),
                                    self.grpc_error.trailing_metadata()):
            if meta[0] == "grpc-status-details-bin":
                error = status_pb2.Status()
                error.ParseFromString(meta[1])
                break
        if error is None:
            raise P4RuntimeErrorFormatException("No binary details field")
        if len(error.details) == 0:
            raise P4RuntimeErrorFormatException(
                "Binary details field has empty Any details repeated field")
        self.errors = error.details
        self.idx = 0

    def __iter__(self):
        return self

    def next(self):
        """Return the next non-OK (index, p4.Error) pair, or StopIteration."""
        while self.idx < len(self.errors):
            p4_error = p4runtime_pb2.Error()
            one_error_any = self.errors[self.idx]
            if not one_error_any.Unpack(p4_error):
                raise P4RuntimeErrorFormatException(
                    "Cannot convert Any message to p4.Error")
            if p4_error.canonical_code == code_pb2.OK:
                # BUG FIX: advance the index before skipping; the original
                # `continue` left self.idx unchanged and looped forever on
                # the first OK entry.
                self.idx += 1
                continue
            v = self.idx, p4_error
            self.idx += 1
            return v
        raise StopIteration

    # Python 3 iterator protocol alias (the file targets Python 2, but this
    # keeps the class usable on both).
    __next__ = next
# P4Runtime uses a 3-level message in case of an error during the processing of
# a write batch. This means that if we do not wrap the grpc.RpcError inside a
# custom exception, we can end-up with a non-helpful exception message in case
# of failure as only the first level will be printed. In this custom exception
# class, we extract the nested error message (one for each operation included in
# the batch) in order to print error code + user-facing message. See P4 Runtime
# documentation for more details on error-reporting.
class P4RuntimeWriteException(Exception):
    """Wraps the nested per-operation errors of a failed P4Runtime Write.

    Each element of self.errors is an (index, p4.Error) pair extracted from
    the gRPC Status trailer, so __str__ can show one line per failed
    operation in the write batch.
    """
    def __init__(self, grpc_error):
        assert (grpc_error.code() == grpc.StatusCode.UNKNOWN)
        super(P4RuntimeWriteException, self).__init__()
        self.errors = []
        try:
            for error_tuple in P4RuntimeErrorIterator(grpc_error):
                self.errors.append(error_tuple)
        except P4RuntimeErrorFormatException:
            raise  # just propagate exception for now

    def __str__(self):
        lines = ["Error(s) during Write:\n"]
        for idx, p4_error in self.errors:
            code_name = code_pb2._CODE.values_by_number[
                p4_error.canonical_code].name
            lines.append("\t* At index {}: {}, '{}'\n".format(
                idx, code_name, p4_error.message))
        return "".join(lines)
# This code is common to all tests. setUp() is invoked at the beginning of the
# test and tearDown is called at the end, no matter whether the test passed /
# failed / errored.
# noinspection PyUnresolvedReferences
class P4RuntimeTest(BaseTest):
    """Base class for P4Runtime PTF tests.

    setUp() connects to the P4Runtime server, performs stream-channel
    arbitration and loads the P4Info; tearDown() closes the stream. Write
    requests issued through write_request() are recorded in self.reqs so
    they can be replayed as DELETEs (see the autocleanup decorator below).
    """

    def setUp(self):
        BaseTest.setUp(self)
        # Setting up PTF dataplane
        self.dataplane = ptf.dataplane_instance
        self.dataplane.flush()
        self._swports = []
        for device, port, ifname in config["interfaces"]:
            self._swports.append(port)
        self.port1 = self.swports(1)
        self.port2 = self.swports(2)
        self.port3 = self.swports(3)
        grpc_addr = testutils.test_param_get("grpcaddr")
        if grpc_addr is None:
            grpc_addr = 'localhost:50051'
        # BUG FIX: check for a missing parameter *before* converting; the
        # original called int(None) and raised TypeError before its None
        # check could ever run (same for cpu_port below).
        device_id = testutils.test_param_get("device_id")
        if device_id is None:
            self.fail("Device ID is not set")
        self.device_id = int(device_id)
        cpu_port = testutils.test_param_get("cpu_port")
        if cpu_port is None:
            self.fail("CPU port is not set")
        self.cpu_port = int(cpu_port)
        pltfm = testutils.test_param_get("pltfm")
        if pltfm is not None and pltfm == 'hw' and getattr(self, "_skip_on_hw",
                                                           False):
            raise SkipTest("Skipping test in HW")
        self.channel = grpc.insecure_channel(grpc_addr)
        self.stub = p4runtime_pb2_grpc.P4RuntimeStub(self.channel)
        proto_txt_path = testutils.test_param_get("p4info")
        self.p4info = p4info_pb2.P4Info()
        with open(proto_txt_path, "rb") as fin:
            google.protobuf.text_format.Merge(fin.read(), self.p4info)
        self.helper = P4InfoHelper(proto_txt_path)
        # used to store write requests sent to the P4Runtime server, useful for
        # autocleanup of tests (see definition of autocleanup decorator below)
        self.reqs = []
        self.election_id = 1
        self.set_up_stream()

    def set_up_stream(self):
        """Open the bidirectional StreamChannel and start the receive thread."""
        self.stream_out_q = Queue.Queue()
        self.stream_in_q = Queue.Queue()

        def stream_req_iterator():
            while True:
                p = self.stream_out_q.get()
                if p is None:  # sentinel pushed by tear_down_stream()
                    break
                yield p

        def stream_recv(stream):
            for p in stream:
                self.stream_in_q.put(p)

        self.stream = self.stub.StreamChannel(stream_req_iterator())
        self.stream_recv_thread = threading.Thread(
            target=stream_recv, args=(self.stream,))
        self.stream_recv_thread.start()
        self.handshake()

    def handshake(self):
        """Perform master arbitration; fail the test if no reply arrives."""
        req = p4runtime_pb2.StreamMessageRequest()
        arbitration = req.arbitration
        arbitration.device_id = self.device_id
        election_id = arbitration.election_id
        election_id.high = 0
        election_id.low = self.election_id
        self.stream_out_q.put(req)
        rep = self.get_stream_packet("arbitration", timeout=2)
        if rep is None:
            self.fail("Failed to establish handshake")

    def tearDown(self):
        self.tear_down_stream()
        BaseTest.tearDown(self)

    def tear_down_stream(self):
        """Signal the request iterator to stop and join the receive thread."""
        self.stream_out_q.put(None)
        self.stream_recv_thread.join()

    def get_packet_in(self, timeout=2):
        """Return the next PacketIn message, failing the test on timeout."""
        msg = self.get_stream_packet("packet", timeout)
        if msg is None:
            self.fail("PacketIn message not received")
        else:
            return msg.packet

    def verify_packet_in(self, exp_packet_in_msg, timeout=2):
        """Assert the next PacketIn matches the expected payload and metadata."""
        rx_packet_in_msg = self.get_packet_in(timeout=timeout)
        # Check payload first, then metadata
        rx_pkt = Ether(rx_packet_in_msg.payload)
        exp_pkt = exp_packet_in_msg.payload
        if not match_exp_pkt(exp_pkt, rx_pkt):
            self.fail("Received PacketIn.payload is not the expected one\n"
                      + format_pkt_match(rx_pkt, exp_pkt))
        rx_meta_dict = {m.metadata_id: m.value
                        for m in rx_packet_in_msg.metadata}
        exp_meta_dict = {m.metadata_id: m.value
                         for m in exp_packet_in_msg.metadata}
        shared_meta = {mid: rx_meta_dict[mid] for mid in rx_meta_dict
                       if mid in exp_meta_dict
                       and rx_meta_dict[mid] == exp_meta_dict[mid]}
        # BUG FIX: compare the lengths with != instead of `is not` —
        # identity comparison of ints only works by accident for small
        # CPython-cached values.
        if len(rx_meta_dict) != len(exp_meta_dict) \
                or len(shared_meta) != len(exp_meta_dict):
            self.fail("Received PacketIn.metadata is not the expected one\n"
                      + format_pb_msg_match(rx_packet_in_msg,
                                            exp_packet_in_msg))

    def get_stream_packet(self, type_, timeout=1):
        """Return the next stream message having field *type_*, or None."""
        start = time.time()
        try:
            while True:
                remaining = timeout - (time.time() - start)
                if remaining < 0:
                    break
                msg = self.stream_in_q.get(timeout=remaining)
                if not msg.HasField(type_):
                    continue
                return msg
        except Queue.Empty:  # timeout expired (narrowed from a bare except)
            pass
        return None

    def send_packet_out(self, packet):
        """Queue a PacketOut message on the stream channel."""
        packet_out_req = p4runtime_pb2.StreamMessageRequest()
        packet_out_req.packet.CopyFrom(packet)
        self.stream_out_q.put(packet_out_req)

    def swports(self, idx):
        """Map a logical port index to the actual dataplane port number."""
        if idx >= len(self._swports):
            self.fail("Index {} is out-of-bound of port map".format(idx))
        return self._swports[idx]

    def _write(self, req):
        """Issue a Write RPC, wrapping UNKNOWN errors for readable output."""
        try:
            return self.stub.Write(req)
        except grpc.RpcError as e:
            if e.code() != grpc.StatusCode.UNKNOWN:
                raise e
            raise P4RuntimeWriteException(e)

    def write_request(self, req, store=True):
        """Send a WriteRequest; record it for autocleanup unless store=False."""
        rep = self._write(req)
        if store:
            self.reqs.append(req)
        return rep

    def insert(self, entity):
        """INSERT one supported entity (or each member of a list/tuple)."""
        if isinstance(entity, list) or isinstance(entity, tuple):
            for e in entity:
                self.insert(e)
            return
        req = self.get_new_write_request()
        update = req.updates.add()
        update.type = p4runtime_pb2.Update.INSERT
        if isinstance(entity, p4runtime_pb2.TableEntry):
            msg_entity = update.entity.table_entry
        elif isinstance(entity, p4runtime_pb2.ActionProfileGroup):
            msg_entity = update.entity.action_profile_group
        elif isinstance(entity, p4runtime_pb2.ActionProfileMember):
            msg_entity = update.entity.action_profile_member
        else:
            self.fail("Entity %s not supported" % entity.__name__)
        msg_entity.CopyFrom(entity)
        self.write_request(req)

    def get_new_write_request(self):
        """Return a WriteRequest pre-filled with device id and election id."""
        req = p4runtime_pb2.WriteRequest()
        req.device_id = self.device_id
        election_id = req.election_id
        election_id.high = 0
        election_id.low = self.election_id
        return req

    def insert_pre_multicast_group(self, group_id, ports):
        """INSERT a PRE multicast group replicating to *ports*."""
        req = self.get_new_write_request()
        update = req.updates.add()
        update.type = p4runtime_pb2.Update.INSERT
        pre_entry = update.entity.packet_replication_engine_entry
        mg_entry = pre_entry.multicast_group_entry
        mg_entry.multicast_group_id = group_id
        for port in ports:
            replica = mg_entry.replicas.add()
            replica.egress_port = port
            replica.instance = 0
        return req, self.write_request(req)

    def insert_pre_clone_session(self, session_id, ports, cos=0,
                                 packet_length_bytes=0):
        """INSERT a PRE clone session cloning to *ports*."""
        req = self.get_new_write_request()
        update = req.updates.add()
        update.type = p4runtime_pb2.Update.INSERT
        pre_entry = update.entity.packet_replication_engine_entry
        clone_entry = pre_entry.clone_session_entry
        clone_entry.session_id = session_id
        clone_entry.class_of_service = cos
        clone_entry.packet_length_bytes = packet_length_bytes
        for port in ports:
            replica = clone_entry.replicas.add()
            replica.egress_port = port
            replica.instance = 1
        return req, self.write_request(req)

    # iterates over all requests in reverse order; if they are INSERT updates,
    # replay them as DELETE updates; this is a convenient way to clean-up a lot
    # of switch state
    def undo_write_requests(self, reqs):
        updates = []
        for req in reversed(reqs):
            for update in reversed(req.updates):
                if update.type == p4runtime_pb2.Update.INSERT:
                    updates.append(update)
        new_req = self.get_new_write_request()
        for update in updates:
            update.type = p4runtime_pb2.Update.DELETE
            new_req.updates.add().CopyFrom(update)
        self._write(new_req)
# this decorator can be used on the runTest method of P4Runtime PTF tests
# when it is used, the undo_write_requests will be called at the end of the test
# (irrespective of whether the test was a failure, a success, or an exception
# was raised). When this is used, all write requests must be performed through
# one of the send_request_* convenience functions, or by calling write_request;
# do not use stub.Write directly!
# most of the time, it is a great idea to use this decorator, as it makes the
# tests less verbose. In some circumstances, it is difficult to use it, in
# particular when the test itself issues DELETE request to remove some
# objects. In this case you will want to do the cleanup yourself (in the
# tearDown function for example); you can still use undo_write_request which
# should make things easier.
# because the PTF test writer needs to choose whether or not to use autocleanup,
# it seems more appropriate to define a decorator for this rather than do it
# unconditionally in the P4RuntimeTest tearDown method.
def autocleanup(f):
    """Decorator for runTest methods: after the wrapped test finishes —
    pass, fail or error — replay every recorded INSERT as a DELETE."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        test = args[0]
        assert (isinstance(test, P4RuntimeTest))
        try:
            return f(*args, **kwargs)
        finally:
            test.undo_write_requests(test.reqs)
    return wrapper
def skip_on_hw(cls):
    """Class decorator marking a test class to be skipped on hardware."""
    setattr(cls, "_skip_on_hw", True)
    return cls
|
doorbell_pi_start.py | #Made by Hurleyking
#https://github.com/Hurleyking/AI_Smart_PI_Doorbell
import requests
import uuid
#import cv2
from gtts import gTTS
import os
import subprocess
from subprocess import call
import json
import time
from gpiozero import Button
from time import sleep
from datetime import datetime
import paho.mqtt.client as mqttClient
from picamera import PiCamera
from signal import pause
from threading import Thread
import uuid
import base64
#Global Variables
# NOTE(review): `global` at module level is a no-op in Python — these
# declarations only document which names the functions below rebind with
# their own `global` statements.
global sucess_press_button_time
global http_server
global http_password
global http_user
global client_listen
global play_sound_type
global personGroupId
global personId
global faces_list
global face_identify
global faces_on_photo
#Variables
# Local HTTP server that serves the JSON credential/settings blob.
http_server = '192.168.1.84:8008'
http_password = 'hurley'
http_user = 'hurley'
#Function enable/disable
# Stored as the *strings* 'True'/'False'; compared as strings elsewhere.
Call_voip_if_ring = 'False'
face_recognition_state = 'True'
#Fix Variables
# Timestamp of the last accepted button press (drives the 30 s debounce).
sucess_press_button_time = datetime.now()
def get_credential_and_settings():
    """Fetch the JSON settings blob from the local HTTP config server.

    Stores the parsed dict in the module-global `subscription_key` (the
    name is historical: it holds all settings, not only the API key).
    """
    response = requests.get('http://'+http_server, auth=(http_user, http_password))
    global subscription_key
    # FIX: use requests' built-in JSON decoding. The original
    # response.text.decode('utf-8') is fragile on Python 2 (.text is
    # already unicode) and raises AttributeError on Python 3.
    subscription_key = response.json()
    print('get_credential_and_settings Finish.')
    return subscription_key
get_credential_and_settings()
assert subscription_key
def detect():
    """Run Azure Face 'detect' on the last doorbell snapshot.

    Caches the API response in the global faces_on_photo and returns it.
    """
    global faces_on_photo
    with open('/var/www/html/last_ring.jpg', 'rb') as img:
        binary = img.read()
    face_api_url = subscription_key['API_Endpoint_azure']+'/detect'
    headers = {
        'Ocp-Apim-Subscription-Key': subscription_key['API_key_azure'],
        "Content-Type": 'application/octet-stream',
    }
    params = {
        'returnFaceId': 'true',
        'returnFaceLandmarks': 'false',
        'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,' +
            'emotion,hair,makeup,occlusion,accessories,blur,exposure,noise'
    }
    response = requests.post(face_api_url, params=params, headers=headers, data=binary)
    faces_on_photo = response.json()
    return faces_on_photo
def person_group_create_v2():
    """Create a new Azure person group named DOORBELL with a random ID.

    Stores the new ID in the global personGroupId.
    """
    global personGroupId
    name = 'DOORBELL'
    personGroupId = str(uuid.uuid4())
    endpoint = subscription_key['API_Endpoint_azure']+'/persongroups/{}'.format(personGroupId)
    headers = {"Ocp-Apim-Subscription-Key": subscription_key['API_key_azure']}
    # FIX: use a distinct name for the request body; the original local
    # `json = {...}` shadowed the imported json module inside this function.
    payload = { 'name': name }
    r = requests.put(endpoint, json=payload, headers=headers)
    if r.status_code != 200:
        print('error:' + r.text)
    else:
        print('Person group created with ID: '+personGroupId)
def get_persongroup_list():
    """Fetch the person groups and cache the first group's ID globally."""
    global personGroupId
    endpoint = subscription_key['API_Endpoint_azure'] + '/persongroups'
    headers = {"Ocp-Apim-Subscription-Key": subscription_key['API_key_azure']}
    r = requests.get(endpoint, headers=headers)
    if r.status_code != 200:
        print('error:' + r.text)
    else:
        groups = r.json()
        personGroupId = str(groups[0]['personGroupId'])
    return personGroupId
def person_create(personName):
    """Create a person in the current group; cache and return its ID."""
    global personGroupId
    global personId
    endpoint = subscription_key['API_Endpoint_azure']+'/persongroups/{}'.format(personGroupId)+'/persons'
    headers = {"Ocp-Apim-Subscription-Key": subscription_key['API_key_azure']}
    # FIX: don't shadow the imported json module with a local dict.
    payload = {
        'name': personName
    }
    r = requests.post(endpoint, json=payload, headers=headers)
    if r.status_code != 200:
        print('error:' + r.text)
    else:
        res = r.json()
        personId = res['personId']
        # FIX: added the missing space before "with ID" in the log message.
        print('Person created: ' + personName + ' with ID: ' + personId)
    return personId
def person_addface(path_face):
    """Attach the face image at *path_face* to the cached person.

    Returns the persistedFaceId on success, None otherwise.
    """
    global personGroupId
    global personId
    endpoint = subscription_key['API_Endpoint_azure']+'/persongroups/{}/persons/{}/persistedFaces'.format(personGroupId, personId)
    headers = {"Ocp-Apim-Subscription-Key": subscription_key['API_key_azure'], "Content-Type": "application/octet-stream"}
    with open(path_face, 'rb') as img:
        binaryImage = img.read()
    r = requests.post(endpoint, headers=headers, data=binaryImage)
    if r.status_code != 200:
        print('error:' + r.text)
        return
    res = r.json()
    if res.get('persistedFaceId'):
        persistedFaceId = res['persistedFaceId']
        print('A Face Added to the person with ID: '+personId)
        print(persistedFaceId)
        return persistedFaceId
    print('no persistedfaceid found')
def identify():
    """Match the first detected face against the known-persons list.

    Sets the global face_identify to the matched person's name, or
    'Stranger' when no match is found or any API/parse step fails.
    """
    global personGroupId
    global face_identify
    global faces_on_photo
    face_api_url = subscription_key['API_Endpoint_azure']+'/identify'
    headers = {'Ocp-Apim-Subscription-Key': subscription_key['API_key_azure']}
    face_identify = 'Stranger'
    try:
        params = {
            "personGroupId": personGroupId,
            "faceIds": [faces_on_photo[0]['faceId']],
            "maxNumOfCandidatesReturned": 1,
            "confidenceThreshold": 0.5
        }
        response = requests.post(face_api_url, params=params, headers=headers, json=params)
        if response.status_code == 200:
            check_faces = response.json()
            for check_face in check_faces:
                for know_face in faces_list:
                    # BUG FIX: the original reset face_identify to
                    # 'Stranger' on every non-matching entry, so a match
                    # anywhere but the last list position was discarded.
                    # (Also: Py2-only `print x` replaced with print().)
                    if know_face['personId'] == check_face['candidates'][0]['personId']:
                        face_identify = know_face['name']
                        print(face_identify)
                        break
    except:
        # Best-effort by design: any failure leaves face_identify='Stranger'.
        face_identify = 'Stranger'
    return face_identify
def faces_list():
    """Fetch the persons of the current group into the global `faces_list`.

    WARNING: the `global faces_list` statement rebinds the module-level
    name `faces_list` — this function's own name — to the resulting list,
    so the function can only be called once. identify() later reads that
    global as a list, so the rebinding is load-bearing; renaming either
    side alone would break the other. NOTE(review): refactor to use a
    separate variable name when both call sites can change together.
    """
    global personGroupId
    global faces_list
    endpoint = subscription_key['API_Endpoint_azure']+'/persongroups/'+personGroupId+'/persons'
    headers = {"Ocp-Apim-Subscription-Key": subscription_key['API_key_azure']}
    r=requests.get(endpoint,headers=headers)
    if r.status_code != 200:
        print('error:' + r.text)
    else:
        faces_list=r.json()
    return faces_list
def persongroup_train():
    """Kick off training of the current person group (async on Azure side)."""
    global personGroupId
    endpoint = subscription_key['API_Endpoint_azure']+'/persongroups/{}'.format(personGroupId)+'/train'
    headers = {"Ocp-Apim-Subscription-Key": subscription_key['API_key_azure']}
    r = requests.post(endpoint, headers=headers)
    # Azure answers 202 Accepted when the training job has been queued.
    if r.status_code == 202:
        print('Training is succesfully completed')
    else:
        print('error:' + r.text)
def TEXT_TO_SPEECH(mytext):
    """Synthesise *mytext* (Portuguese) into generic.mp3 and queue playback.

    BUG FIX: the original unconditionally overwrote the argument with the
    default greeting, making the parameter dead. The greeting is now only
    a fallback for empty/None input, so existing callers passing nothing
    meaningful get the same audio as before.
    """
    global play_sound_type
    if not mytext:
        mytext = 'Ola, estou contatar meus Proprietarios, por favor aguarde.'
    language = 'PT'
    myobj = gTTS(text=mytext, lang=language, slow=False)
    myobj.save("generic.mp3")
    # The play_sounds() loop picks this up and plays generic.mp3.
    play_sound_type = 'generic'
def voip_call():
    """Place a SIP call via the linphonecsh CLI to the configured number."""
    # FIX: print() works on both Python 2 and 3; `print "call"` is Py2-only.
    print("call")
    #subprocess.call(["linphonecsh", "init"])
    subprocess.call(["linphonecsh", "generic", "call sip:"+subscription_key['voip_call_number']])
def button_ring():
    """Handle a doorbell button press (debounced to one event per 30 s).

    Takes a snapshot, optionally runs face detection/identification, and
    publishes the result over MQTT. Runs in the gpiozero callback thread
    (see wait_press()).
    """
    global sucess_press_button_time
    global play_sound_type
    global face_identify
    last_press_button_time = datetime.now()
    diff_time_press_button = last_press_button_time - sucess_press_button_time
    # Debounce: ignore presses within 30 seconds of the last accepted one.
    if diff_time_press_button.seconds > 30:
        sucess_press_button_time = datetime.now()
        take_picture('last_ring')
        play_sound_type = 'welcome'  # picked up by the play_sounds() loop
        if subscription_key['face_recognition_state'] == 'True':
            detect()
            identify()
            send_mqtt_message('doorbell/ring','{"ring": "True","face_recognition_state": "True","Name": "'+face_identify+'"}')
        else:
            send_mqtt_message('doorbell/ring','{"ring": "True","face_recognition_state": "False"}')
        print("ring ring ring")
def send_mqtt_message(topic, message):
    """Publish *message* on *topic* using a short-lived MQTT connection.

    Blocks until the broker has the message before disconnecting.
    """
    broker_address = subscription_key['ip_server_mqtt']
    port = 1883
    user = subscription_key['User_mqtt']
    password = subscription_key['Password_mqtt']
    client = mqttClient.Client("Python_ringbell_send")
    client.username_pw_set(user, password=password)
    client.connect(broker_address, port=port)
    msg_info = client.publish(topic, message, qos=0)
    # FIX: idiomatic truth test instead of `== False`.
    if not msg_info.is_published():
        msg_info.wait_for_publish()
    client.disconnect()
def take_picture(name):
    """Capture a 1024x768 still to /var/www/html/<name>.jpg."""
    cam = PiCamera()
    cam.resolution = (1024, 768)
    sleep(2)  # give the sensor time to adjust exposure
    cam.capture('/var/www/html/' + name + '.jpg')
    cam.close()
def record_video(name, resolution):
    """Record ~15 s of video and remux it to MP4 for the web server.

    BUG FIX: the *resolution* parameter was silently ignored (always
    640x480). It is now honoured: accepts a "W, H" string (as the MQTT
    handlers pass) or a (W, H) pair; unparsable values fall back to the
    original 640x480.
    """
    output_video_rasp = '/var/www/html/'+name+'.h264'
    try:
        if isinstance(resolution, str):
            width, height = [int(part) for part in resolution.split(',')]
        else:
            width, height = resolution
    except (ValueError, TypeError):
        width, height = 640, 480
    camera = PiCamera()
    camera.resolution = (width, height)
    sleep(2)  # sensor warm-up
    camera.start_recording(output_video_rasp)
    sleep(15)
    camera.stop_recording()
    camera.close()
    output_video_mp4 = '/var/www/html/'+name+'.mp4'
    # Remux the raw h264 stream into an MP4 container with MP4Box (gpac).
    retcode = call(["MP4Box", "-add", output_video_rasp,"-new" ,output_video_mp4,"-out",output_video_mp4])
    sleep(1)
def listen_mqtt_message():
    """Connect to the MQTT broker and block forever dispatching messages.

    Subscribes to doorbell/live and routes each message to on_message().
    Runs in its own thread (started in the __main__ block). The client is
    kept in the global client_listen.
    """
    global client_listen
    broker_address= subscription_key['ip_server_mqtt']
    port = 1883
    user = subscription_key['User_mqtt']
    password = subscription_key['Password_mqtt']
    client_listen = mqttClient.Client("Python_ringbell_loop")
    client_listen.username_pw_set(user, password=password)
    client_listen.connect(broker_address, port=port,keepalive=30)
    client_listen.subscribe("doorbell/live")
    client_listen.on_message = on_message
    client_listen.loop_forever()
def on_message(client_listen, userdata, msg):
    """MQTT callback: dispatch commands received on doorbell/live."""
    payload = msg.payload.decode()  # decode once instead of per comparison
    if payload == "live_30sec":
        print("Yes! Live 30sec")
        record_video('live_30sec','480, 320')
        send_mqtt_message('doorbell/live','live_30sec_ready')
    if payload == "live_30sec_alexa":
        print("Yes! Live 30sec from alexa")
        record_video('live_30sec','480, 320')
        send_mqtt_message('doorbell/live','live_30sec_alexa_ready')
    if payload == "live_stream":
        print("Yes! Live STream")
    if payload == "photo":
        print("Yes! Open Photo")
        take_picture('last_ring')
        send_mqtt_message('doorbell/live','photo_ready')
        print("Done! Open Photo")
    if payload == "open_gate":
        print("Yes! Open Gate")
    if '"new_person":"True"' in payload:
        print("Creating new person")
        new_person = json.loads(payload)
        # BUG FIX: the original indexed with the undefined bare name `name`
        # (NameError at runtime); the JSON key string 'name' was intended.
        auto_provision_new_faces(new_person['name'])
        send_mqtt_message('doorbell/live','Add new person with sucess')
def wait_press():
    """Bind GPIO pin 26 to button_ring() and block this thread forever."""
    doorbell_button = Button(26)
    doorbell_button.when_pressed = button_ring
    pause()
def play_sounds():
    """Background loop that plays sounds driven by play_sound_type.

    The global play_sound_type is a tiny state machine written by the
    other threads: 'welcome' -> ('ring' -> 'Bye' -> 'none') or a VoIP
    call, plus one-shot 'start' and 'generic' states. Sounds are played
    with the `play` CLI (sox) via os.system.
    """
    global play_sound_type
    while True:
        if play_sound_type == 'welcome':
            os.system("play welcome.mp3")
            if subscription_key['Call_voip_if_ring'] == 'True':
                # NOTE(review): this branch never changes play_sound_type,
                # so 'welcome' replays each pass until something else
                # rewrites the state — confirm this is intended.
                voip_call()
            else:
                play_sound_type = 'ring'
            sleep(0.1)
        if play_sound_type == 'ring':
            play_sound_type = 'Bye'
            os.system("play ring.wav")
            sleep(2)
        if play_sound_type == 'Bye':
            play_sound_type = 'none'
            os.system("play bye.mp3")
        if play_sound_type == 'start':
            play_sound_type = 'none'
            os.system("play start.mp3")
            print('start sound ok')
        if play_sound_type == 'generic':
            play_sound_type = 'none'
            os.system("play generic.mp3")
        sleep(0.2)  # avoid a hot spin when idle
def auto_provision_new_faces(name):
    """Enroll the face on the last snapshot as a new person called *name*.

    Refreshes the group id, detects the face, creates the person, attaches
    the face, retrains the group, then re-runs identification.
    """
    global personGroupId
    get_persongroup_list()
    detect()
    person_create(name)
    person_addface('/var/www/html/last_ring.jpg')
    persongroup_train()
    identify()
    print('Finish')
# One-time start-up: load the person group and known faces when face
# recognition is enabled (settings come from the HTTP config server).
if subscription_key['face_recognition_state'] == 'True':
    #discomment function person_group_create_v2 to create the first groupid (after run script comment again please)
    #person_group_create_v2()
    get_persongroup_list()
    faces_list()
    #detect()
    #identify()
#LOOP
play_sound_type = 'start'  # makes play_sounds() play the boot chime
print('Start')
if __name__ == '__main__':
    # Three worker threads: GPIO button, MQTT listener, audio state loop.
    Thread(target = wait_press).start()
    Thread(target = listen_mqtt_message).start()
    Thread(target = play_sounds).start()
#LOOP END
|
main.py | #!/usr/bin/env python3
# Copyright 2016-2019 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import annotations
import argparse
import traceback
import contextlib
import datetime
import io
import re
import os
import sys
import tarfile
import threading
import time
import json
import uuid
import hmac
import hashlib
from itertools import zip_longest
from functools import cmp_to_key, wraps
from urllib.parse import quote_plus, quote
from typing import List, Set, Dict, Tuple, Optional, Generator, Any, Type, Callable, Union
import requests
from flask import Flask, render_template, request, url_for, redirect, \
make_response, Blueprint, abort, jsonify, Request
from jinja2 import StrictUndefined
class Repository:
    """A pacman package repository: name/variant plus its download URLs."""

    def __init__(self, name: str, variant: str, url: str, src_url: str):
        self.name = name
        self.variant = variant
        self.url = url
        self.src_url = src_url

    @property
    def files_url(self) -> str:
        """URL of the repository's .files database."""
        return "{}/{}.files".format(self.url.rstrip("/"), self.name)

    @property
    def packages(self) -> "List[Package]":
        """All packages from the global state belonging to this repository."""
        global state
        found = []
        for source in state.sources:
            for _, pkg in sorted(source.packages.items()):
                if pkg.repo == self.name and pkg.repo_variant == self.variant:
                    found.append(pkg)
        return found

    @property
    def csize(self) -> int:
        """Total compressed size of all packages, in bytes."""
        return sum(int(pkg.csize) for pkg in self.packages)

    @property
    def isize(self) -> int:
        """Total installed size of all packages, in bytes."""
        return sum(int(pkg.isize) for pkg in self.packages)
# The four official MSYS2 pacman repositories.
REPOSITORIES = [
    Repository("mingw32", "", "http://repo.msys2.org/mingw/i686", "https://github.com/msys2/MINGW-packages"),
    Repository("mingw64", "", "http://repo.msys2.org/mingw/x86_64", "https://github.com/msys2/MINGW-packages"),
    Repository("msys", "i686", "http://repo.msys2.org/msys/i686", "https://github.com/msys2/MSYS2-packages"),
    Repository("msys", "x86_64", "http://repo.msys2.org/msys/x86_64", "https://github.com/msys2/MSYS2-packages"),
]

# (files-db URL, repo name, variant) tuples mirroring REPOSITORIES.
CONFIG = [
    ("http://repo.msys2.org/mingw/i686/mingw32.files", "mingw32", ""),
    ("http://repo.msys2.org/mingw/x86_64/mingw64.files", "mingw64", ""),
    ("http://repo.msys2.org/msys/i686/msys.files", "msys", "i686"),
    ("http://repo.msys2.org/msys/x86_64/msys.files", "msys", "x86_64"),
]

# Arch Linux databases used to compare upstream package versions.
VERSION_CONFIG = []
for repo in ["core", "extra", "community", "testing", "community-testing",
             "multilib"]:
    VERSION_CONFIG.append(
        ("http://ftp.halifax.rwth-aachen.de/archlinux/"
         "{0}/os/x86_64/{0}.db".format(repo), repo, ""))

# Pre-built srcinfo cache published via GitHub releases.
SRCINFO_CONFIG = [
    ("https://github.com/msys2/msys2-web/releases/download/cache/srcinfo.json",
     "", "")
]
def get_update_urls() -> List[str]:
    """Return all URLs that must be polled for updates, sorted."""
    urls = [entry[0] for entry in VERSION_CONFIG + SRCINFO_CONFIG]
    urls.extend(repo.files_url for repo in REPOSITORIES)
    return sorted(urls)
class AppState:
    """Holds the parsed repository data plus an ETag for HTTP caching.

    Every setter bumps the ETag and last-update timestamp so conditional
    responses (see cache_route) are invalidated whenever the data changes.
    """

    def __init__(self) -> None:
        # FIX: the original called _update_etag() twice; the first call's
        # result was immediately overwritten by the placeholder values
        # below, so it has been removed.
        self._etag = ""
        self._last_update = 0.0
        self._sources: List[Source] = []
        self._sourceinfos: Dict[str, SrcInfoPackage] = {}
        self._versions: Dict[str, Tuple[str, str, int]] = {}
        self._update_etag()

    def _update_etag(self) -> None:
        """Invalidate HTTP caches: new random ETag + current timestamp."""
        self._etag = str(uuid.uuid4())
        self._last_update = time.time()

    @property
    def last_update(self) -> float:
        return self._last_update

    @property
    def etag(self) -> str:
        return self._etag

    @property
    def sources(self) -> List[Source]:
        return self._sources

    @sources.setter
    def sources(self, sources: List[Source]) -> None:
        self._sources = sources
        self._update_etag()

    @property
    def sourceinfos(self) -> Dict[str, SrcInfoPackage]:
        return self._sourceinfos

    @sourceinfos.setter
    def sourceinfos(self, sourceinfos: Dict[str, SrcInfoPackage]) -> None:
        self._sourceinfos = sourceinfos
        self._update_etag()

    @property
    def versions(self) -> Dict[str, Tuple[str, str, int]]:
        return self._versions

    @versions.setter
    def versions(self, versions: Dict[str, Tuple[str, str, int]]) -> None:
        self._versions = versions
        self._update_etag()
# How often the background update runs, in seconds.
UPDATE_INTERVAL = 60 * 5
# Timeout for outgoing HTTP requests, in seconds.
REQUEST_TIMEOUT = 60
# When True, downloaded files are cached locally (development aid).
CACHE_LOCAL = False
# Process-wide application state shared by all request handlers.
state = AppState()
packages = Blueprint('packages', __name__, template_folder='templates')
def cache_route(f: Callable) -> Callable:
    """View decorator: answer 304 via the global state ETag when possible,
    otherwise run the view and wrap plain-string results in the response."""
    @wraps(f)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        global state
        response = make_response()
        response.set_etag(state.etag)
        response.make_conditional(request)
        # The client's cached copy is still current.
        if response.status_code == 304:
            return response
        result = f(*args, **kwargs)
        if not isinstance(result, str):
            return result
        response.set_data(result)
        return response
    return wrapper
def parse_desc(t: str) -> Dict[str, List[str]]:
    """Parse a pacman .db 'desc'-style text into {category: values}.

    A category line (e.g. "%NAME%") is followed by its value lines; a
    blank line terminates the section. FIX: stray blank lines between
    sections are now ignored — the original recorded an empty-string
    category for them.
    """
    d: Dict[str, List[str]] = {}
    cat = None
    values: List[str] = []
    for l in t.splitlines():
        l = l.strip()
        if cat is None:
            if l:  # skip blank lines between sections
                cat = l
        elif not l:
            d[cat] = values
            cat = None
            values = []
        else:
            values.append(l)
    if cat is not None:
        d[cat] = values
    return d
def cleanup_files(files: List[str]) -> List[str]:
    """Remove redundant directory paths and root them"""
    rooted = []
    previous = None
    # Reverse-sorted order puts each directory right after its contents,
    # so a directory entry is redundant when the entry we just kept
    # already lives under it.
    for path in sorted(files, reverse=True):
        redundant = (previous is not None
                     and path.endswith("/")
                     and previous.startswith(path))
        if redundant:
            continue
        rooted.append("/" + path)
        previous = path
    rooted.reverse()
    return rooted
# (repo, repo_variant, name, arch, filename) — identifies a package.
# NOTE(review): field meaning inferred from Package attributes; confirm.
PackageKey = Tuple[str, str, str, str, str]
class Package:
    """One binary package record parsed from a pacman repo database.

    All constructor arguments come straight from the "desc"/"depends"/"files"
    sections of the repo tarball (see from_desc / parse_desc).
    """

    def __init__(self, builddate: str, csize: str, depends: List[str], filename: str, files: List[str], isize: str,
                 makedepends: List[str], md5sum: str, name: str, pgpsig: str, sha256sum: str, arch: str,
                 base_url: str, repo: str, repo_variant: str, provides: List[str], conflicts: List[str], replaces: List[str],
                 version: str, base: str, desc: str, groups: List[str], licenses: List[str], optdepends: List[str],
                 checkdepends: List[str]) -> None:
        self.builddate = int(builddate)
        self.csize = csize

        def split_depends(deps: List[str]) -> List[Tuple[str, str]]:
            # "name>=1.0" -> ("name", ">=1.0"); a bare "name" -> ("name", "").
            r = []
            for d in deps:
                parts = re.split("([<>=]+)", d, 1)
                first = parts[0].strip()
                second = "".join(parts[1:]).strip()
                r.append((first, second))
            return r

        self.depends = split_depends(depends)
        self.checkdepends = split_depends(checkdepends)
        self.filename = filename
        self.files = cleanup_files(files)
        self.isize = isize
        self.makedepends = split_depends(makedepends)
        self.md5sum = md5sum
        self.name = name
        self.pgpsig = pgpsig
        self.sha256sum = sha256sum
        self.arch = arch
        self.fileurl = base_url + "/" + quote(self.filename)
        self.repo = repo
        self.repo_variant = repo_variant
        self.provides = dict(split_depends(provides))
        self.conflicts = conflicts
        self.replaces = replaces
        self.version = version
        self.base = base
        self.desc = desc
        self.groups = groups
        self.licenses = licenses
        # Reverse dependencies; populated later by fill_rdepends().
        self.rdepends: List[Tuple[Package, str]] = []

        def split_opt(deps: List[str]) -> List[Tuple[str, str]]:
            # "name: reason" -> ("name", "reason"); no reason -> ("name", "").
            r = []
            for d in deps:
                if ":" in d:
                    a, b = d.split(":", 1)
                    r.append((a.strip(), b.strip()))
                else:
                    r.append((d.strip(), ""))
            return r

        self.optdepends = split_opt(optdepends)

    def __repr__(self) -> str:
        return "Package(%s)" % self.fileurl

    @property
    def realprovides(self) -> Dict[str, str]:
        # Provide names with the "mingw-w64-<arch>-" prefix stripped.
        prov = {}
        for key, info in self.provides.items():
            if key.startswith("mingw"):
                key = key.split("-", 3)[-1]
            prov[key] = info
        return prov

    @property
    def realname(self) -> str:
        # Package name with the "mingw-w64-<arch>-" prefix stripped.
        if self.repo.startswith("mingw"):
            return self.name.split("-", 3)[-1]
        return self.name

    @property
    def git_version(self) -> str:
        # Version currently in git (.SRCINFO), or "" when unknown.
        if self.name in state.sourceinfos:
            return state.sourceinfos[self.name].build_version
        return ""

    @property
    def key(self) -> PackageKey:
        return (self.repo, self.repo_variant,
                self.name, self.arch, self.fileurl)

    @classmethod
    def from_desc(cls: Type[Package], d: Dict[str, List[str]], base: str, base_url: str, repo: str, repo_variant: str) -> Package:
        """Build a Package from a parsed desc mapping (see parse_desc)."""
        return cls(d["%BUILDDATE%"][0], d["%CSIZE%"][0],
                   d.get("%DEPENDS%", []), d["%FILENAME%"][0],
                   d.get("%FILES%", []), d["%ISIZE%"][0],
                   d.get("%MAKEDEPENDS%", []),
                   d["%MD5SUM%"][0], d["%NAME%"][0],
                   d.get("%PGPSIG%", [""])[0], d["%SHA256SUM%"][0],
                   d["%ARCH%"][0], base_url, repo, repo_variant,
                   d.get("%PROVIDES%", []), d.get("%CONFLICTS%", []),
                   d.get("%REPLACES%", []), d["%VERSION%"][0], base,
                   d.get("%DESC%", [""])[0], d.get("%GROUPS%", []),
                   d.get("%LICENSE%", []), d.get("%OPTDEPENDS%", []),
                   d.get("%CHECKDEPENDS%", []))
class Source:
    """A package base (one PKGBUILD) grouping its binary packages."""

    def __init__(self, name: str, desc: str, url: str, packager: str,
                 repo: str, repo_variant: str):
        self.name = name
        self.desc = desc
        self.url = url
        self.packager = packager
        self._repo = repo
        self._repo_variant = repo_variant
        # All binary packages built from this base, keyed by PackageKey.
        self.packages: Dict[PackageKey, Package] = {}

    @property
    def repos(self) -> List[str]:
        return sorted(set([p.repo for p in self.packages.values()]))

    @property
    def arches(self) -> List[str]:
        return sorted(set([p.arch for p in self.packages.values()]))

    @property
    def groups(self) -> List[str]:
        groups: Set[str] = set()
        for p in self.packages.values():
            groups.update(p.groups)
        return sorted(groups)

    @property
    def version(self) -> str:
        # get the newest version
        versions: Set[str] = set([p.version for p in self.packages.values()])
        return sorted(versions, key=cmp_to_key(vercmp), reverse=True)[0]

    @property
    def git_version(self) -> str:
        # get the newest version
        versions: Set[str] = set([p.git_version for p in self.packages.values()])
        return sorted(versions, key=cmp_to_key(vercmp), reverse=True)[0]

    @property
    def licenses(self) -> List[str]:
        licenses: Set[str] = set()
        for p in self.packages.values():
            licenses.update(p.licenses)
        return sorted(licenses)

    @property
    def arch_url(self) -> str:
        # Link to the matching Arch Linux/AUR package page, if one was found.
        arch_info = get_arch_info_for_base(self)
        if arch_info is not None:
            return arch_info[1]
        return ""

    @property
    def upstream_version(self) -> str:
        arch_info = get_arch_info_for_base(self)
        if arch_info is not None:
            return extract_upstream_version(arch_info[0])
        return ""

    @property
    def is_outdated(self) -> bool:
        # True when the upstream (Arch) version is newer than ours.
        arch_version = self.upstream_version
        if not arch_version:
            return False

        msys_version = extract_upstream_version(self.version)

        return version_is_newer_than(arch_version, msys_version)

    @property
    def realname(self) -> str:
        # Base name with the "mingw-w64-" prefix stripped.
        if self._repo.startswith("mingw"):
            return self.name.split("-", 2)[-1]
        return self.name

    @property
    def date(self) -> int:
        """The build date of the newest package"""
        return sorted([p.builddate for p in self.packages.values()])[-1]

    @property
    def repo_url(self) -> str:
        # Prefer the URL recorded in git; fall back to the repo configuration.
        for p in self.packages.values():
            if p.name in state.sourceinfos:
                return state.sourceinfos[p.name].repo_url
            for repo in REPOSITORIES:
                if repo.name == p.repo:
                    return repo.src_url
        return ""

    @property
    def repo_path(self) -> str:
        for p in self.packages.values():
            if p.name in state.sourceinfos:
                return state.sourceinfos[p.name].repo_path
        return self.name

    @property
    def source_url(self) -> str:
        return self.repo_url + ("/tree/master/" + quote(self.repo_path))

    @property
    def history_url(self) -> str:
        return self.repo_url + ("/commits/master/" + quote(self.repo_path))

    @property
    def filebug_url(self) -> str:
        return self.repo_url + (
            "/issues/new?title=" + quote_plus("[%s]" % self.realname))

    @property
    def searchbug_url(self) -> str:
        return self.repo_url + (
            "/issues?q=" + quote_plus("is:issue is:open %s" % self.realname))

    @classmethod
    def from_desc(cls, d: Dict[str, List[str]], repo: str, repo_variant: str) -> "Source":
        """Build a Source from a parsed desc mapping, deriving the base name
        when the %BASE% section is missing."""
        name = d["%NAME%"][0]
        if "%BASE%" not in d:
            if repo.startswith("mingw"):
                base = "mingw-w64-" + name.split("-", 3)[-1]
            else:
                base = name
        else:
            base = d["%BASE%"][0]

        return cls(base, d.get("%DESC%", [""])[0], d.get("%URL%", [""])[0],
                   d["%PACKAGER%"][0], repo, repo_variant)

    def add_desc(self, d: Dict[str, List[str]], base_url: str) -> None:
        # Attach one binary package record to this base.
        p = Package.from_desc(
            d, self.name, base_url, self._repo, self._repo_variant)
        assert p.key not in self.packages
        self.packages[p.key] = p
def get_content_cached(url: str, *args: Any, **kwargs: Any) -> bytes:
    """Fetch a URL's body, optionally through an on-disk development cache.

    With CACHE_LOCAL off this is a plain requests.get(). With it on, the
    response is stored under _cache/ next to this file, keyed by a sanitized
    form of the URL, and reused on later calls.
    """
    if not CACHE_LOCAL:
        r = requests.get(url, *args, **kwargs)
        return r.content

    base = os.path.dirname(os.path.realpath(__file__))
    cache_dir = os.path.join(base, "_cache")
    os.makedirs(cache_dir, exist_ok=True)

    # NOTE(review): this sanitization only replaces "/" and ":", so two
    # different URLs could in principle collide on one cache file.
    fn = os.path.join(cache_dir, url.replace("/", "_").replace(":", "_"))
    if not os.path.exists(fn):
        r = requests.get(url, *args, **kwargs)
        with open(fn, "wb") as h:
            h.write(r.content)
    with open(fn, "rb") as h:
        data = h.read()
    return data
def parse_repo(repo: str, repo_variant: str, url: str) -> Dict[str, Source]:
    """Download a pacman .files database tarball and parse it into Sources.

    Returns a mapping of base name -> Source with all packages attached.
    """
    base_url = url.rsplit("/", 1)[0]
    sources: Dict[str, Source] = {}
    print("Loading %r" % url)

    def add_desc(d: Any, base_url: str) -> None:
        # Create the Source on first sight of its base, then attach the package.
        source = Source.from_desc(d, repo, repo_variant)
        if source.name not in sources:
            sources[source.name] = source
        else:
            source = sources[source.name]

        source.add_desc(d, base_url)

    data = get_content_cached(url, timeout=REQUEST_TIMEOUT)

    with io.BytesIO(data) as f:
        with tarfile.open(fileobj=f, mode="r:gz") as tar:
            # Group each package's member files (desc/depends/files) together.
            packages: Dict[str, list] = {}
            for info in tar.getmembers():
                package_name = info.name.split("/", 1)[0]
                infofile = tar.extractfile(info)
                if infofile is None:
                    continue
                with infofile:
                    packages.setdefault(package_name, []).append(
                        (info.name, infofile.read()))

    for package_name, infos in sorted(packages.items()):
        # Concatenate the sections; parse_desc handles them uniformly.
        t = ""
        for name, data in sorted(infos):
            if name.endswith("/desc"):
                t += data.decode("utf-8")
            elif name.endswith("/depends"):
                t += data.decode("utf-8")
            elif name.endswith("/files"):
                t += data.decode("utf-8")
        desc = parse_desc(t)
        add_desc(desc, base_url)

    return sources
@packages.app_template_filter('timestamp')
def _jinja2_filter_timestamp(d: int) -> str:
    """Jinja filter: format a unix timestamp as 'YYYY-MM-DD HH:MM:SS'.

    Renders "-" for values that cannot be shown as a local datetime.
    """
    try:
        return datetime.datetime.fromtimestamp(
            int(d)).strftime('%Y-%m-%d %H:%M:%S')
    except (OverflowError, OSError, ValueError):
        # fromtimestamp() raises OverflowError/OSError for out-of-range
        # timestamps and int() raises ValueError for garbage input; the
        # original only caught OSError and crashed on the other two.
        return "-"
@packages.app_template_filter('filesize')
def _jinja2_filter_filesize(d: int) -> str:
    """Jinja filter: render a byte count as "X.XX GB" above 1 GiB,
    otherwise as "X.XX MB"."""
    size = int(d)
    gib = 1024 ** 3
    if size > gib:
        return "%.2f GB" % (size / gib)
    return "%.2f MB" % (size / (1024 ** 2))
@packages.context_processor
def funcs() -> Dict[str, Callable]:
    """Expose helper functions to all Jinja templates of this blueprint."""

    def is_endpoint(value: str) -> bool:
        # Resolve blueprint-relative endpoints like ".package" before comparing.
        if value.startswith(".") and request.blueprint is not None:
            value = request.blueprint + value
        return value == request.endpoint

    def package_url(package: Package, name: Optional[str] = None) -> str:
        """URL of the package page; with `name` given, link by that
        (possibly version-restricted) dependency name instead."""
        res: str = ""
        if name is None:
            # `name or package.name` is always package.name here.
            res = url_for(".package", name=name or package.name)
            res += "?repo=" + package.repo
            if package.repo_variant:
                res += "&variant=" + package.repo_variant
        else:
            # Strip any version restriction ("foo>=1.0" -> "foo").
            res = url_for(".package", name=re.split("[<>=]+", name)[0])
            if package.repo_variant:
                res += "?repo=" + package.repo
                res += "&variant=" + package.repo_variant
        return res

    def package_name(package: Package, name: Optional[str] = None) -> str:
        # Display name, optionally suffixed with "/<variant>".
        name = name or package.name
        name = re.split("[<>=]+", name, 1)[0]
        return (name or package.name) + (
            "/" + package.repo_variant if package.repo_variant else "")

    def package_restriction(package: Package, name: Optional[str] = None) -> str:
        # The version-restriction part of a dependency string (">=1.0" or "").
        name = name or package.name
        return name[len(re.split("[<>=]+", name)[0]):].strip()

    def update_timestamp() -> float:
        global state
        return state.last_update

    return dict(package_url=package_url, package_name=package_name,
                package_restriction=package_restriction,
                update_timestamp=update_timestamp, is_endpoint=is_endpoint)
RouteResponse = Any
@packages.route('/repos')
@cache_route
def repos() -> RouteResponse:
    """List the configured pacman repositories."""
    global REPOSITORIES
    return render_template('repos.html', repos=REPOSITORIES)
@packages.route('/')
def index() -> RouteResponse:
    # The landing page is the recent-updates view.
    return redirect(url_for('.updates'))
@packages.route('/base')
@packages.route('/base/<name>')
@cache_route
def base(name: Optional[str] = None) -> RouteResponse:
    """Show one package base, or the index of all bases when no name given."""
    global state

    if name is not None:
        res = [s for s in state.sources if s.name == name]
        return render_template('base.html', sources=res)
    else:
        return render_template('baseindex.html', sources=state.sources)
@packages.route('/group/')
@packages.route('/group/<name>')
@cache_route
def group(name: Optional[str] = None) -> RouteResponse:
    """Show one package group, or all groups with their package counts."""
    global state

    if name is not None:
        res = []
        for s in state.sources:
            for k, p in sorted(s.packages.items()):
                if name in p.groups:
                    res.append(p)

        return render_template('group.html', name=name, packages=res)
    else:
        groups: Dict[str, int] = {}
        for s in state.sources:
            for k, p in sorted(s.packages.items()):
                # NOTE: the loop variable shadows the `name` parameter; safe
                # only because this branch runs when the parameter is None.
                for name in p.groups:
                    groups[name] = groups.get(name, 0) + 1
        return render_template('groups.html', groups=groups)
@packages.route('/package/<name>')
@cache_route
def package(name: str) -> RouteResponse:
    """Show all packages matching `name` (by name or by provides), optionally
    filtered by ?repo= and ?variant=."""
    global state

    repo = request.args.get('repo')
    variant = request.args.get('variant')

    packages = []
    for s in state.sources:
        for k, p in sorted(s.packages.items()):
            if p.name == name or name in p.provides:
                if not repo or p.repo == repo:
                    if not variant or p.repo_variant == variant:
                        packages.append((s, p))
    return render_template('package.html', packages=packages)
@packages.route('/updates')
@cache_route
def updates() -> RouteResponse:
    """Show the 150 most recently built packages."""
    global state

    # Note: this local list shadows the module-level blueprint name.
    packages: List[Package] = []
    for s in state.sources:
        packages.extend(s.packages.values())
    packages.sort(key=lambda p: p.builddate, reverse=True)
    return render_template('updates.html', packages=packages[:150])
def package_name_is_vcs(package_name: str) -> bool:
    """True for VCS snapshot packages (…-git, …-svn, etc.)."""
    vcs_suffixes = ("-cvs", "-svn", "-hg", "-darcs", "-bzr", "-git")
    return package_name.endswith(vcs_suffixes)
def get_arch_names(name: str) -> List[str]:
    """Return candidate Arch Linux package names for an MSYS2 package name.

    Applies a manual rename table plus a few mechanical transforms
    (python3-/python2- prefixes, mingw-w64-cross- prefix, -qt4/-qt5
    suffixes). Order matters: more specific candidates come first.
    Returns [] for names known to have no Arch counterpart.
    """
    mapping = {
        "freetype": "freetype2",
        "lzo2": "lzo",
        "liblzo2": "lzo",
        # fix: "python-bsddb3" was listed twice with the same value; the
        # duplicate dict key has been removed.
        "python-bsddb3": "python-bsddb",
        "graphite2": "graphite",
        "mpc": "libmpc",
        "eigen3": "eigen",
        "python-icu": "python-pyicu",
        "python3": "python",
        "sqlite3": "sqlite",
        "gexiv2": "libgexiv2",
        "webkitgtk3": "webkitgtk",
        "python2-nuitka": "nuitka",
        "python2-ipython": "ipython2",
        "gtksourceviewmm3": "gtksourceviewmm",
        "librest": "rest",
        "gcc-libgfortran": "gcc-fortran",
        "meld3": "meld",
        "antlr3": "libantlr3c",
        "geoclue": "geoclue2",
        "python-zope.event": "python-zope-event",
        "python-zope.interface": "python-zope-interface",
        "tesseract-ocr": "tesseract",
        "cmake-doc-qt": "cmake",
        "totem-pl-parser": "totem-plparser",
        "vulkan-docs": "vulkan-html-docs",
        "vulkan-loader": "vulkan-icd-loader",
        "vulkan": "vulkan-icd-loader",
        "qt-creator": "qtcreator",
        "qt5": "qt5-base",
        "qt5-static": "qt5-base",
        "quassel": "quassel-client",
        "spice-gtk": "spice-gtk3",
        "libbotan": "botan",
        "shiboken-qt4": "shiboken",
        "python-ipython": "ipython",
        # NOTE(review): "glob" looks like a typo for "glog" (which is also
        # mapped below) — confirm before removing.
        "glob": "google-glog",
        "lsqlite3": "lua-sql-sqlite",
        "fdk-aac": "libfdk-aac",
        "python-jupyter_console": "jupyter_console",
        "qscintilla": "qscintilla-qt5",
        "attica-qt5": "attica",
        "glade3": "glade-gtk2",
        "ladspa-sdk": "ladspa",
        "libart_lgpl": "libart-lgpl",
        "ocaml-camlp4": "camlp4",
        "wxwidgets": "wxgtk3",
        "transmission": "transmission-gtk",
        "perl-ack": "ack",
        "glfw": "glfw-x11",
        "util-macros": "xorg-util-macros",
        "tzcode": "tzdata",
        "glog": "google-glog",
        "git-flow": "gitflow-avh",
        "rabbitmq-c": "librabbitmq-c",
        "usrsctp": "libusrsctp",
        "matio": "libmatio",
        "libgd": "gd",
        "python-nbformat": "jupyter-nbformat",
        "python-sphinx": "python2-sphinx",
        "python-xpra": "xpra",
        "python-mallard-ducktype": "mallard-ducktype",
        "python-typed_ast": "python-typed-ast",
        "python-prometheus-client": "python-prometheus_client",
        "python-keras_preprocessing": "python-keras-preprocessing",
        "python-nuitka": "nuitka",
        "python-absl-py": "absl-py",
        "python-pyopengl": "python-opengl",
        "python-pyzopfli": "python-zopfli",
        "python-path": "python-path.py",
        "python-binwalk": "binwalk",
        "python-mysql": "mysql-python",
        "wxpython": "python2-wxpython3",
        "python-nbconvert": "jupyter-nbconvert",
        "kicad-doc": "kicad",
        "python-keras_applications": "python-keras-applications",
        "ag": "the_silver_searcher",
        "libmariadbclient": "mariadb-libs",
        "antlr4-runtime-cpp": "antlr4-runtime",
        "python-notebook": "jupyter-notebook",
        "lua-luarocks": "luarocks",
        "perl-TermReadKey": "perl-term-readkey",
        "qtwebkit": "qt5-webkit",
    }

    # Names that intentionally have no Arch counterpart lookup.
    skip = {
        "dragon",
    }

    names: List[str] = []

    def add(n: str) -> None:
        # Preserve insertion order, de-duplicated.
        if n not in names:
            names.append(n)

    name = name.lower()

    if name in skip:
        return []

    if name in mapping:
        add(mapping[name])
    add(name)
    if name.startswith("python3-"):
        name = name.replace("python3-", "python-")
        add(name)
    if name.startswith("python2-"):
        name = name.replace("python2-", "python-")
        add(name)
    if name.startswith("mingw-w64-cross-"):
        name = name.split("-", 3)[-1]
        add(name)
    if name.endswith("-qt5") or name.endswith("-qt4"):
        name = name.rsplit("-", 1)[0]
        add(name)
    # Second chance: the transformed name may itself be in the rename table.
    if name in mapping:
        name = mapping[name]
        add(name)

    return names
def is_win_only(name: str) -> bool:
    """True for packages that only exist on Windows/MSYS2 and therefore
    have no upstream Arch Linux package to compare against."""
    windows_only = {
        "winpty",
        "windows-default-manifest",
        "mingw-w64-cross-windows-default-manifest",
        "mingw-w64-MinHook",
        "msys2-w32api-headers",
        "mintty",
        "mingw-w64-python-win_unicode_console",
        "msys2-keyring",
        "cygrunsrv",
        "mingw-w64-cccl",
        "mingw-w64-dlfcn",
        "mingw-w64-drmingw",
        "mingw-w64-edd-dbg",
        "mingw-w64-editrights",
        "mingw-w64-flexdll",
        "winln",
        "rebase",
        "msys2-w32api-runtime",
        "msys2-runtime",
        "mingw-w64-win7appid",
        "mingw-w64-windows-default-manifest",
        "mingw-w64-wineditline",
        "mingw-w64-winico",
        "mingw-w64-winsparkle",
        "crypt",
        "pacman-mirrors",
        "mingw-w64-python-win_inet_pton",
        "mingw-w64-python-comtypes",
        "mingw-w64-python-wincertstore",
    }
    return name in windows_only
def vercmp(v1: str, v2: str) -> int:
    """Compare two package versions like pacman's vercmp.

    Returns -1/0/1 when v1 is older/equal/newer than v2. Versions look like
    "[epoch~]version[-release]" — the epoch is separated with '~' in this
    codebase (see arch_version_to_msys()). Releases are only compared when
    both versions carry one.
    """

    def cmp(a: int, b: int) -> int:
        # Classic three-way comparison.
        return (a > b) - (a < b)

    def split(v: str) -> Tuple[str, str, Optional[str]]:
        # -> (epoch, version, release-or-None); missing epoch defaults to "0".
        if "~" in v:
            e, v = v.split("~", 1)
        else:
            e, v = ("0", v)

        r: Optional[str] = None
        if "-" in v:
            v, r = v.rsplit("-", 1)
        else:
            v, r = (v, None)

        return (e, v, r)

    digit, alpha, other = range(3)

    def get_type(c: str) -> int:
        assert c
        if c.isdigit():
            return digit
        elif c.isalpha():
            return alpha
        else:
            return other

    def parse(v: str) -> List[Tuple[int, Optional[str]]]:
        # Tokenize into (separator-count, run) pairs where each run is a
        # maximal sequence of digits or of letters; every other character
        # counts as a separator.
        parts: List[Tuple[int, Optional[str]]] = []
        seps = 0
        current = ""
        for c in v:
            if get_type(c) == other:
                if current:
                    parts.append((seps, current))
                    current = ""
                seps += 1
            else:
                if not current:
                    current += c
                else:
                    if get_type(c) == get_type(current):
                        current += c
                    else:
                        parts.append((seps, current))
                        current = c

        parts.append((seps, current or None))

        return parts

    def rpmvercmp(v1: str, v2: str) -> int:
        # Segment-wise rpm/pacman comparison: numeric runs beat alphabetic
        # runs, and a missing segment loses unless the other side is alphabetic.
        for (s1, p1), (s2, p2) in zip_longest(parse(v1), parse(v2),
                                              fillvalue=(None, None)):
            if s1 is not None and s2 is not None:
                ret = cmp(s1, s2)
                if ret != 0:
                    return ret

            if p1 is None and p2 is None:
                return 0

            if p1 is None:
                if get_type(p2) == alpha:
                    return 1
                return -1
            elif p2 is None:
                if get_type(p1) == alpha:
                    return -1
                return 1

            t1 = get_type(p1)
            t2 = get_type(p2)
            if t1 != t2:
                if t1 == digit:
                    return 1
                elif t2 == digit:
                    return -1
            elif t1 == digit:
                ret = cmp(int(p1), int(p2))
                if ret != 0:
                    return ret
            elif t1 == alpha:
                ret = cmp(p1, p2)
                if ret != 0:
                    return ret

        return 0

    e1, v1, r1 = split(v1)
    e2, v2, r2 = split(v2)

    # Compare epoch first, then version, then (when both present) release.
    ret = rpmvercmp(e1, e2)
    if ret == 0:
        ret = rpmvercmp(v1, v2)
        if ret == 0 and r1 is not None and r2 is not None:
            ret = rpmvercmp(r1, r2)

    return ret
def arch_version_to_msys(v: str) -> str:
    """Convert an Arch version to the local convention: the epoch separator
    ':' becomes '~' (which vercmp() here understands)."""
    return "~".join(v.split(":"))
def version_is_newer_than(v1: str, v2: str) -> bool:
    # Strictly newer: vercmp returns 1 only when v1 > v2.
    return vercmp(v1, v2) == 1
def update_versions() -> None:
    """Refresh state.versions with upstream Arch Linux (and AUR) versions.

    For every package/base seen in the configured Arch repos the newest
    version wins; remaining names are then looked up on the AUR.
    """
    global VERSION_CONFIG, state

    print("update versions")
    arch_versions: Dict[str, Tuple[str, str, int]] = {}
    for (url, repo, variant) in VERSION_CONFIG:
        # NOTE: the inner loop reassigns `url`; safe only because the outer
        # tuple unpacking restores it each iteration.
        for source in parse_repo(repo, variant, url).values():
            msys_ver = arch_version_to_msys(source.version)
            for p in source.packages.values():
                url = "https://www.archlinux.org/packages/%s/%s/%s/" % (
                    p.repo, p.arch, p.name)

                if p.name in arch_versions:
                    old_ver = arch_versions[p.name][0]
                    if version_is_newer_than(msys_ver, old_ver):
                        arch_versions[p.name] = (msys_ver, url, p.builddate)
                else:
                    arch_versions[p.name] = (msys_ver, url, p.builddate)

            # Also record the base itself under its first repo/arch.
            url = "https://www.archlinux.org/packages/%s/%s/%s/" % (
                source.repos[0], source.arches[0], source.name)
            if source.name in arch_versions:
                old_ver = arch_versions[source.name][0]
                if version_is_newer_than(msys_ver, old_ver):
                    arch_versions[source.name] = (msys_ver, url, source.date)
            else:
                arch_versions[source.name] = (msys_ver, url, source.date)

    print("done")

    print("update versions from AUR")
    # a bit hacky, try to get the remaining versions from AUR
    possible_names = set()
    for s in state.sources:
        if package_name_is_vcs(s.name):
            continue
        for p in s.packages.values():
            possible_names.update(get_arch_names(p.realname))
        possible_names.update(get_arch_names(s.realname))

    r = requests.get("https://aur.archlinux.org/packages.gz",
                     timeout=REQUEST_TIMEOUT)
    aur_packages = set()
    for name in r.text.splitlines():
        if name.startswith("#"):
            continue
        if name in arch_versions:
            continue
        if name not in possible_names:
            continue
        aur_packages.add(name)

    aur_url = (
        "https://aur.archlinux.org/rpc/?v=5&type=info&" +
        "&".join(["arg[]=%s" % n for n in aur_packages]))
    r = requests.get(aur_url, timeout=REQUEST_TIMEOUT)
    for result in r.json()["results"]:
        name = result["Name"]
        if name not in aur_packages or name in arch_versions:
            continue
        last_modified = result["LastModified"]
        url = "https://aur.archlinux.org/packages/%s" % name
        arch_versions[name] = (result["Version"], url, last_modified)

    print("done")
    state.versions = arch_versions
def extract_upstream_version(version: str) -> str:
    """Reduce a full package version to its upstream part: drop the pkgrel
    ("-…"), any "+…" extras, and epoch prefixes ("…~", "…:")."""
    version = version.rsplit("-")[0]
    version = version.split("+", 1)[0]
    version = version.split("~", 1)[-1]
    return version.split(":", 1)[-1]
def get_arch_info_for_base(s: Source) -> Optional[Tuple[str, str, int]]:
    """tuple or None"""
    # Try the base name and every package name (with provide names as a
    # fallback), mapped through get_arch_names(), against the known
    # upstream versions; first hit wins.
    global state

    variants = sorted([s.realname] + [p.realname for p in s.packages.values()])
    # fallback to the provide names
    provides_variants: List[str] = []
    for p in s.packages.values():
        provides_variants.extend(p.realprovides.keys())
    variants += provides_variants

    for realname in variants:
        for arch_name in get_arch_names(realname):
            if arch_name in state.versions:
                return state.versions[arch_name]
    return None
@packages.route('/outofdate')
@cache_route
def outofdate() -> RouteResponse:
    """Compare every (non-VCS) base against upstream and bucket it as
    up-to-date, out-of-date, missing upstream, or Windows-only."""
    global state

    missing = []
    win_only = []
    to_update = []
    all_sources = []
    for s in state.sources:
        if package_name_is_vcs(s.name):
            continue

        all_sources.append(s)

        arch_info = get_arch_info_for_base(s)
        if arch_info is None:
            if is_win_only(s.name):
                win_only.append(s)
            else:
                missing.append((s, s.realname))
            continue

        arch_version, url, date = arch_info
        arch_version = extract_upstream_version(arch_version)
        msys_version = extract_upstream_version(s.version)
        git_version = extract_upstream_version(s.git_version)

        # Only surface the git version when it is ahead of the repo version.
        if not version_is_newer_than(git_version, msys_version):
            git_version = ""

        if version_is_newer_than(arch_version, msys_version):
            to_update.append((s, msys_version, git_version, arch_version, url, date))

    # show packages which have recently been build first.
    # assumes high frequency update packages are more important
    to_update.sort(key=lambda i: (i[-1], i[0].name), reverse=True)
    missing.sort(key=lambda i: i[0].date, reverse=True)
    win_only.sort(key=lambda i: i.name)

    return render_template(
        'outofdate.html',
        all_sources=all_sources, to_update=to_update, missing=missing,
        win_only=win_only)
@packages.route('/queue')
@cache_route
def queue() -> RouteResponse:
    """List bases whose git (.SRCINFO) version is ahead of the built packages."""
    global state

    # Create entries for all packages where the version doesn't match
    updates = []
    for s in state.sources:
        for k, p in sorted(s.packages.items()):
            if p.name in state.sourceinfos:
                srcinfo = state.sourceinfos[p.name]
                if package_name_is_vcs(s.name):
                    continue
                if version_is_newer_than(srcinfo.build_version, p.version):
                    updates.append((srcinfo, s, p))
                # Only the first package with srcinfo data is checked per base.
                break

    updates.sort(
        key=lambda i: (i[0].date, i[0].pkgbase, i[0].pkgname),
        reverse=True)
    return render_template('queue.html', updates=updates)
@packages.route('/new')
@cache_route
def new() -> RouteResponse:
    """List bases that exist in git but have no built packages yet."""
    global state

    # Create dummy entries for all GIT only packages
    available = {}
    for srcinfo in state.sourceinfos.values():
        if package_name_is_vcs(srcinfo.pkgbase):
            continue
        available[srcinfo.pkgbase] = srcinfo
    for s in state.sources:
        available.pop(s.name, None)

    new = list(available.values())
    new.sort(
        key=lambda i: (i.date, i.pkgbase, i.pkgname),
        reverse=True)

    return render_template('new.html', new=new)
@packages.route('/removals')
@cache_route
def removals() -> RouteResponse:
    """List built packages that no longer exist in git (removal candidates)."""
    global state

    # get all packages in the pacman repo which are not in GIT
    missing = []
    for s in state.sources:
        for k, p in s.packages.items():
            if p.name not in state.sourceinfos:
                missing.append((s, p))

    missing.sort(key=lambda i: (i[1].builddate, i[1].name), reverse=True)

    return render_template('removals.html', missing=missing)
@packages.route('/python2')
@cache_route
def test() -> RouteResponse:
    """Ad-hoc report: reverse dependencies of the x86_64 python2 package,
    ranked by their transitive reverse-dependency count (raw HTML)."""

    def is_split_package(p: Package) -> bool:
        # A package whose makedepends pull in both python2 and python3.
        c = 0
        for name, type_ in p.makedepends:
            if name == "mingw-w64-x86_64-python3":
                c += 1
            if name == "mingw-w64-x86_64-python2":
                c += 1
        if c == 2:
            return True
        return False

    def get_rdep_count(p: Package) -> int:
        # Size of the transitive reverse-dependency closure (excluding p).
        todo = {p.name: p}
        done = set()
        while todo:
            name, p = todo.popitem()
            done.add(name)
            for rdep in [x[0] for x in p.rdepends]:
                if rdep.name not in done:
                    todo[rdep.name] = rdep
        return len(done) - 1

    result = "<ul>"
    deps = []
    for s in state.sources:
        for p in s.packages.values():
            if p.name.endswith("-x86_64-python2"):
                for rdep in sorted(set([x[0] for x in p.rdepends]), key=lambda y: y.name):
                    if is_split_package(rdep) and "-python2-" not in rdep.name:
                        continue
                    deps.append((get_rdep_count(rdep), rdep))

    for c, d in sorted(deps, key=lambda i: (i[0], i[1].name)):
        result += "<li>" + d.name + " / %d" % c + "</li>"
    result += "</ul>"
    return result
@packages.route('/search')
@cache_route
def search() -> str:
    """Substring search over bases (?t=pkg) or binary packages (?t=binpkg);
    every whitespace-separated query term must match (case-insensitive)."""
    global state

    query = request.args.get('q', '')
    qtype = request.args.get('t', '')

    if qtype not in ["pkg", "binpkg"]:
        qtype = "pkg"

    parts = query.split()
    res_pkg: List[Union[Package, Source]] = []

    if not query:
        pass
    elif qtype == "pkg":
        for s in state.sources:
            if [p for p in parts if p.lower() in s.name.lower()] == parts:
                res_pkg.append(s)
        res_pkg.sort(key=lambda s: s.name)
    elif qtype == "binpkg":
        for s in state.sources:
            for sub in s.packages.values():
                if [p for p in parts if p.lower() in sub.name.lower()] == parts:
                    res_pkg.append(sub)
        res_pkg.sort(key=lambda p: p.name)

    return render_template(
        'search.html', results=res_pkg, query=query, qtype=qtype)
def trigger_appveyor_build(account: str, project: str, token: str) -> str:
    """Returns an URL for the build or raises RequestException"""
    # Kick off a master-branch build via the AppVeyor REST API.
    r = requests.post(
        "https://ci.appveyor.com/api/builds",
        json={
            "accountName": account,
            "projectSlug": project,
            "branch": "master",
        },
        headers={
            "Authorization": "Bearer " + token,
        },
        timeout=REQUEST_TIMEOUT)
    r.raise_for_status()

    try:
        build_id = r.json()['buildId']
    except (ValueError, KeyError):
        # Build was triggered but the response had no usable id; the
        # resulting URL then points at build 0.
        build_id = 0

    return "https://ci.appveyor.com/project/%s/%s/builds/%d" % (
        account, project, build_id)
def check_github_signature(request: Request, secret: str) -> bool:
    """Constant-time validation of a GitHub webhook's X-Hub-Signature header
    (HMAC-SHA1 of the raw request body keyed with the shared secret)."""
    expected = hmac.new(secret.encode("utf-8"), request.get_data(), hashlib.sha1)
    provided = request.headers.get('X-Hub-Signature', '')
    return hmac.compare_digest("sha1=" + expected.hexdigest(), provided)
@packages.route("/webhook", methods=['POST'])
def github_payload() -> RouteResponse:
    """GitHub webhook endpoint: verify the signature, answer pings, and
    trigger an AppVeyor build on push events. All config comes from env vars."""
    secret = os.environ.get("GITHUB_WEBHOOK_SECRET")
    if not secret:
        abort(500, 'webhook secret config incomplete')

    if not check_github_signature(request, secret):
        abort(400, 'Invalid signature')

    event = request.headers.get('X-GitHub-Event', '')
    if event == 'ping':
        return jsonify({'msg': 'pong'})
    if event == 'push':
        account = os.environ.get("APPVEYOR_ACCOUNT")
        project = os.environ.get("APPVEYOR_PROJECT")
        token = os.environ.get("APPVEYOR_TOKEN")
        if not account or not project or not token:
            abort(500, 'appveyor config incomplete')
        build_url = trigger_appveyor_build(account, project, token)
        return jsonify({'msg': 'triggered a build: %s' % build_url})
    else:
        abort(400, 'Unsupported event type: ' + event)
@contextlib.contextmanager
def check_needs_update(_last_time: List[str] = [""]) -> Generator:
    """Raises RequestException"""
    # Yields True when any update URL's Last-Modified header changed since
    # the previous call. `_last_time` is an INTENTIONAL mutable default: it
    # memoizes the concatenated header values across calls.
    if CACHE_LOCAL:
        yield True
        return

    t = ""
    for url in get_update_urls():
        r = requests.get(url, stream=True, timeout=REQUEST_TIMEOUT)
        r.close()
        t += r.headers["last-modified"]

    if t != _last_time[0]:
        yield True
        # Only record the new value after the body ran without raising.
        _last_time[0] = t
    else:
        yield False
def update_source() -> None:
    """Raises RequestException"""
    # Re-parse all configured repos, merge bases that occur in several
    # repos, rebuild reverse dependencies, then atomically swap the state.
    global state, REPOSITORIES

    print("update source")

    final: Dict[str, Source] = {}
    for repo in REPOSITORIES:
        for name, source in parse_repo(repo.name, repo.variant, repo.files_url).items():
            if name in final:
                final[name].packages.update(source.packages)
            else:
                final[name] = source

    new_sources = [x[1] for x in sorted(final.items())]
    fill_rdepends(new_sources)
    state.sources = new_sources
def update_sourceinfos() -> None:
    """Fetch the .SRCINFO index and rebuild state.sourceinfos (pkgname-keyed)."""
    global state, SRCINFO_CONFIG

    print("update sourceinfos")

    url = SRCINFO_CONFIG[0][0]
    print("Loading %r" % url)
    data = get_content_cached(url, timeout=REQUEST_TIMEOUT)

    json_obj = json.loads(data.decode("utf-8"))
    result = {}
    # The index maps a content hash to {srcinfo, repo, path, date}.
    for hash_, m in json_obj.items():
        for pkg in SrcInfoPackage.for_srcinfo(m["srcinfo"], m["repo"], m["path"], m["date"]):
            result[pkg.pkgname] = pkg

    state.sourceinfos = result
def fill_rdepends(sources: List[Source]) -> None:
    """Populate every package's rdepends list from the forward dependencies.

    Each reverse dependency is tagged with how it depends on the package:
    "" (runtime), "make", "optional" or "check".
    """
    # First pass: invert the dependency edges into name -> {(pkg, kind)}.
    deps: Dict[str, Set[Tuple[Package, str]]] = {}
    for s in sources:
        for p in s.packages.values():
            for n, r in p.depends:
                deps.setdefault(n, set()).add((p, ""))
            for n, r in p.makedepends:
                deps.setdefault(n, set()).add((p, "make"))
            for n, r in p.optdepends:
                deps.setdefault(n, set()).add((p, "optional"))
            for n, r in p.checkdepends:
                deps.setdefault(n, set()).add((p, "check"))

    # Second pass: attach the inverted edges (including via provides).
    for s in sources:
        for p in s.packages.values():
            rdepends = list(deps.get(p.name, set()))
            for prov in p.provides:
                rdepends += list(deps.get(prov, set()))
            p.rdepends = sorted(rdepends, key=lambda e: (e[0].key, e[1]))

            # filter out other arches for msys packages
            if p.repo_variant:
                p.rdepends = [
                    (op, t) for (op, t) in p.rdepends if
                    op.repo_variant in (p.repo_variant, "")]
def update_thread() -> None:
    """Background loop: refresh all state every UPDATE_INTERVAL seconds.

    Runs forever; any exception (network errors etc.) is printed and the
    loop simply retries after the next sleep.
    """
    global UPDATE_INTERVAL

    while True:
        try:
            print("check for update")
            with check_needs_update() as needs:
                if needs:
                    update_source()
                    update_sourceinfos()
                    update_versions()
                else:
                    print("not update needed")
        except Exception:
            traceback.print_exc()
        print("Sleeping for %d" % UPDATE_INTERVAL)
        time.sleep(UPDATE_INTERVAL)
def start_update_thread() -> None:
    """Spawn the background refresh loop as a daemon thread (so it never
    blocks interpreter shutdown)."""
    threading.Thread(target=update_thread, daemon=True).start()
class SrcInfoPackage(object):
    """One pkgname entry parsed from a PKGBUILD's .SRCINFO blob."""

    def __init__(self, pkgbase: str, pkgname: str, pkgver: str, pkgrel: str,
                 repo: str, repo_path: str, date: str):
        self.pkgbase = pkgbase
        self.pkgname = pkgname
        self.pkgver = pkgver
        self.pkgrel = pkgrel
        # URL of the git repository hosting the PKGBUILD.
        self.repo_url = repo
        # Path of the package directory inside that repository.
        self.repo_path = repo_path
        # Last-change date string as provided by the srcinfo index.
        self.date = date
        self.epoch: Optional[str] = None
        self.depends: List[str] = []
        self.makedepends: List[str] = []
        self.sources: List[str] = []

    @property
    def history_url(self) -> str:
        return self.repo_url + ("/commits/master/" + quote(self.repo_path))

    @property
    def source_url(self) -> str:
        return self.repo_url + ("/tree/master/" + quote(self.repo_path))

    @property
    def build_version(self) -> str:
        # "[epoch~]pkgver-pkgrel", matching the local version convention.
        version = "%s-%s" % (self.pkgver, self.pkgrel)
        if self.epoch:
            version = "%s~%s" % (self.epoch, version)
        return version

    def __repr__(self) -> str:
        return "<%s %s %s>" % (
            type(self).__name__, self.pkgname, self.build_version)

    @classmethod
    def for_srcinfo(cls, srcinfo: str, repo: str, repo_path: str, date: str) -> "Set[SrcInfoPackage]":
        """Parse a .SRCINFO text into one SrcInfoPackage per "pkgname =" line.

        NOTE(review): assumes a well-formed file where "pkgbase = " precedes
        any "pkgname = " line; otherwise the accumulators are unbound.
        """
        packages = set()

        for line in srcinfo.splitlines():
            line = line.strip()
            if line.startswith("pkgbase = "):
                # New base: reset the accumulated per-base state.
                pkgver = pkgrel = epoch = ""
                depends = []
                makedepends = []
                sources = []
                pkgbase = line.split(" = ", 1)[-1]
            elif line.startswith("depends = "):
                depends.append(line.split(" = ", 1)[-1])
            elif line.startswith("makedepends = "):
                makedepends.append(line.split(" = ", 1)[-1])
            elif line.startswith("source = "):
                sources.append(line.split(" = ", 1)[-1])
            elif line.startswith("pkgver = "):
                pkgver = line.split(" = ", 1)[-1]
            elif line.startswith("pkgrel = "):
                pkgrel = line.split(" = ", 1)[-1]
            elif line.startswith("epoch = "):
                epoch = line.split(" = ", 1)[-1]
            elif line.startswith("pkgname = "):
                # Each pkgname snapshots the state gathered so far.
                pkgname = line.split(" = ", 1)[-1]
                package = cls(pkgbase, pkgname, pkgver, pkgrel, repo, repo_path, date)
                package.epoch = epoch
                package.depends = depends
                package.makedepends = makedepends
                package.sources = sources
                packages.add(package)
        return packages
# Application wiring: build the Flask app, mount the blueprint, and fail
# loudly on undefined template variables. The background refresh thread is
# started at import time.
app = Flask(__name__)
app.register_blueprint(packages)
app.jinja_env.undefined = StrictUndefined
start_update_thread()
def main(argv: List[str]) -> Optional[Union[int, str]]:
    """Command-line entry point: parse options and run the Flask dev server.

    Args:
        argv: the full argument vector (argv[0] is the program name).
    """
    global CACHE_LOCAL

    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--cache", action="store_true",
                        help="use local repo cache")
    parser.add_argument("-p", "--port", type=int, default=8160,
                        help="port number")
    parser.add_argument("-d", "--debug", action="store_true")
    # Parse the argv we were handed. parse_args() with no arguments would
    # silently re-read sys.argv and ignore the `argv` parameter entirely.
    args = parser.parse_args(argv[1:])
    CACHE_LOCAL = args.cache

    print("http://localhost:%d" % args.port)
    app.run(port=args.port, debug=args.debug)
    return None
if __name__ == "__main__":
    # Exit status comes from main()'s return value.
    sys.exit(main(sys.argv))
|
sorter.py | #!/usr/bin/env python3
"""
"""
import sys
import os
import argparse
import shutil
import multiprocessing as mp
def main():
    """Sort a text file in place, one block of lines per CPU core.

    The file is decomposed with BLOCK_LOW/BLOCK_SIZE; each worker process
    sorts its own run of lines via sort_section(). Returns 0 on success,
    1 when no input path was supplied.
    """
    if len(sys.argv) < 2:
        return 1
    filepath = sys.argv[1]

    # Record the byte offset where each line starts while counting lines.
    # BUG FIX: the original passed the *line index* to file.seek(), which
    # expects a byte offset, so workers landed mid-line and corrupted data.
    # (It also leaked the handle used for counting.)
    line_offsets = []
    pos = 0
    with open(filepath, 'rb') as counter:
        for line in counter:
            line_offsets.append(pos)
            pos += len(line)
    file_lines = len(line_offsets)

    procs = []
    total_procs = os.cpu_count()
    for i in range(total_procs):
        low = BLOCK_LOW(i, total_procs, file_lines)
        size = BLOCK_SIZE(i, total_procs, file_lines)
        if size <= 0 or low >= file_lines:
            # More processes than lines: nothing for this rank to do.
            continue
        # BUG FIX: the original shared one file object across all forked
        # workers; the inherited descriptors share a single OS-level file
        # offset, so concurrent seek/read/write calls raced. Give every
        # worker its own handle instead. (Relies on the 'fork' start
        # method, as the original did — file objects aren't picklable.)
        worker_file = open(filepath, 'r+')
        proc = mp.Process(target=sort_section,
                          args=(worker_file, line_offsets[low], size))
        procs.append(proc)
        proc.start()
        # The forked child owns its duplicate of the descriptor.
        worker_file.close()

    for proc in procs:
        proc.join()
    return 0
def sort_section(file, offset, size):
    """Sort `size` consecutive lines of `file` in place, starting at byte
    position `offset` (which must be a line boundary)."""
    file.seek(offset)
    section = [file.readline() for _ in range(size)]
    section.sort()
    # The same lines reordered occupy exactly the same byte range.
    file.seek(offset)
    file.writelines(section)
def print_block(id, p, n):
    # Debug helper: dump the block decomposition (low/high/size) for rank
    # `id` out of `p` processes over `n` items. Note: `id` shadows the builtin.
    print("id: %d, p: %d, n: %d, bl: %d, bh: %d, bs: %d" % (id, p, n, BLOCK_LOW(id, p, n), BLOCK_HIGH(id, p, n), BLOCK_SIZE(id, p, n)))
def BLOCK_LOW(id, p, n):
    """
    Returns lower global index of process based on rank, total procs, and task size.
    """
    # Integer block decomposition: rank `id` of `p` starts at floor(id*n/p).
    return (n * id) // p
def BLOCK_HIGH(id, p, n):
    """
    Returns upper global index of process based on rank, total procs, and task size.
    """
    # One before the next rank's lower bound (BLOCK_LOW inlined).
    return (id + 1) * n // p - 1
def BLOCK_SIZE(id, p, n):
    """Return how many of the `n` items process `id` of `p` owns."""
    # HIGH - LOW + 1 with both formulas inlined; the ±1 terms cancel.
    return (n * (id + 1)) // p - (n * id) // p
if __name__ == '__main__':
    # BUG FIX: main() returns 1 on usage error, but its status was
    # previously discarded — propagate it to the shell as the exit code.
    sys.exit(main())
|
PicDownloader.py | #!usr/bin/python
# -*- coding: UTF-8 -*-
#python3.x
# ------ 模块 ------
from random import Random #随机数
import re #正则表达式
import urllib.request #http模块
import http.cookiejar #http-cookie模块
import socket #网络编程模块
import os #操作系统模块
import datetime, time #日期时间模块
# Optional dependency: chardet, used to sniff the HTML charset.
try:
    import chardet  # HTML content analysis module
except ImportError as e:
    chardetSupport = False
else:
    # BUG FIX: this was `chardetSupport = False`, which made the chardet
    # branch in getHtml() unreachable even when chardet is installed.
    chardetSupport = True
# Optional dependency: concurrent.futures (Python 3.2+) for a thread pool.
try:
    import concurrent.futures  # thread-pool module
except ImportError as e:
    #print (e)
    import threading  # fall back to raw threads
    poolSupport = False
else:
    poolSupport = True
# ------ Globals ------
# Verbose per-image download logging
printLogEnabled = False
# Whether to dump fetched HTML to html.txt for offline analysis
collectHtmlEnabled = False
# Network timeout in seconds, applied to all new sockets
timeout = 15
socket.setdefaulttimeout(timeout)
# Thread-pool size used by downloadImgList()
thePoolSize = 10
# ------ Disguise requests as a desktop browser ------
def makeOpener(head=None):
    """
    Build a urllib opener that carries cookies and browser-like headers.

    Parameters
    ----------
    head : dict, optional
        Mapping of header name -> value.  Defaults to a Firefox-like set.

    Fixes
    -----
    * The old default was a mutable dict argument (shared across calls).
    * It also listed 'Connection' twice ('Keep-Alive' then 'keep-alive');
      dict-literal semantics kept only the last value, which is what is
      preserved here.
    """
    if head is None:
        head = {
            'Accept': 'text/html, application/xhtml+xml, */*',
            'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'Connection': 'keep-alive',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:57.0) Gecko/20100101 Firefox/57.0',
        }
    cj = http.cookiejar.CookieJar()
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
    opener.addheaders = list(head.items())
    return opener
# ------ Fetch a page's HTML source ------
# url: the page's link address
def getHtml(url):
    """Fetch `url` and return its body decoded to text.

    Decoding uses chardet when available, otherwise UTF-8, with a GBK
    fallback on decode failure (common for Chinese sites).
    """
    # print('url='+url)
    oper = makeOpener()
    if oper is not None:
        page = oper.open(url)
        #print ('-----oper----')
    else:
        # NOTE(review): makeOpener() always returns an opener, so this
        # branch looks unreachable — kept as a defensive fallback.
        req=urllib.request.Request(url)
        # Spoof a browser User-Agent for the crawler
        req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64; rv:57.0) Gecko/20100101 Firefox/57.0')
        page = urllib.request.urlopen(req)
    html = page.read()
    if collectHtmlEnabled: # dump the raw HTML for offline analysis?
        with open('html.txt', 'wb') as f:
            f.write(html) # save locally for inspection
    # ------ Decode the HTML bytes to text ------
    if chardetSupport:
        cdt = chardet.detect(html)
        charset = cdt['encoding'] # charset sniffed by chardet
    else:
        charset = 'utf8'
    try:
        result = html.decode(charset)
    except:
        # Fall back to GBK, typical for older Chinese pages.
        result = html.decode('gbk')
    return result
# ------ Fixed-length random string ------
def random_str(randomlength=8, chars='AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'):
    """Return a string of `randomlength` characters drawn uniformly at
    random from `chars`."""
    rng = Random()
    last = len(chars) - 1
    return ''.join(chars[rng.randint(0, last)] for _ in range(randomlength))
# ------ Download one image from its URL ------
# folderPath: directory to store the image   imgUrl: image link   index: 0-based position
def downloadImg(folderPath, imgUrl, index):
    """Download a single image to `folderPath`.

    Returns True on success, False on any download or write failure.

    Fixes
    -----
    * HTTPError is a subclass of URLError, so it must be caught first;
      the HTTPError branch was previously unreachable.
    * The destination path is built with os.path.join instead of a
      hard-coded Windows '\\' separator, so this also works on POSIX.
    """
    try:
        imgContent = (urllib.request.urlopen(imgUrl)).read()
    except urllib.error.HTTPError as e:
        if printLogEnabled : print ('【错误】当前图片下载异常')
        return False
    except urllib.error.URLError as e:
        if printLogEnabled : print ('【错误】当前图片无法下载')
        return False
    else:
        imgeNameFromUrl = os.path.basename(imgUrl)
        if printLogEnabled : print ('正在下载第'+str(index+1)+'张图片,图片地址:'+str(imgUrl))
        # Create the destination directory on demand.
        if not os.path.exists(folderPath):
            os.makedirs(folderPath)
        # Very short basenames get a random prefix to avoid collisions.
        imgName = imgeNameFromUrl
        if len(imgeNameFromUrl) < 8:
            imgName = random_str(4) + random_str(1,'123456789') + random_str(2,'0123456789')+"_" + imgeNameFromUrl
        filename = os.path.join(folderPath, str(imgName)+".jpg")
        try:
            with open(filename, 'wb') as f:
                f.write(imgContent) # write to local disk
        except :
            return False
        return True
# ------ Download a batch of images ------
# folderPath: directory to store images   imgList: list of image URLs
def downloadImgList(folderPath, imgList):
    """Download every URL in `imgList` into `folderPath` concurrently.

    Uses a thread pool when concurrent.futures is available, raw threads
    otherwise.

    Fix
    ---
    The raw-thread path previously called t.join() inside the same loop
    that started each thread, which serialized all downloads.  Threads are
    now all started first, then joined.
    """
    index = 0
    # print ('poolSupport='+str(poolSupport))
    if not poolSupport:
        # ------ Raw multithreading ------
        threads = []
        for imgUrl in imgList:
            # if printLogEnabled : print ('准备下载第'+str(index+1)+'张图片')
            threads.append(threading.Thread(target=downloadImg,args=(folderPath,imgUrl,index,)))
            index += 1
        # Start everything, then wait — do NOT join inside the start loop.
        for t in threads:
            t.setDaemon(True)
            t.start()
        for t in threads:
            t.join() # parent waits for all downloads to finish
        if len(imgList) >0 : print ('下载结束,存放图片目录:' + str(folderPath))
    else:
        # ------ Thread-pool mode ------
        futures = []
        # Pool holds at most thePoolSize (module global) concurrent tasks.
        with concurrent.futures.ThreadPoolExecutor(max_workers=thePoolSize) as pool:
            for imgUrl in imgList:
                # if printLogEnabled : print ('准备下载第'+str(index+1)+'张图片')
                futures.append(pool.submit(downloadImg, folderPath, imgUrl, index))
                index += 1
            result = concurrent.futures.wait(futures, timeout=None, return_when='ALL_COMPLETED')
            suc = 0
            for f in result.done:
                if f.result(): suc +=1
            print('下载结束,总数:'+str(len(imgList))+',成功数:'+str(suc)+',存放图片目录:' + str(folderPath))
# ------ Download all images in a Baidu Tieba thread ------
# folderPath: directory to store images   url: Tieba thread link
def downloadImgFromBaidutieba(folderPath='tieba', url='https://tieba.baidu.com/p/5256331871'):
    """Scrape a Baidu Tieba thread page and download its images."""
    html = getHtml(url)
    # ------ Find image addresses in the page with a regex ------
    #reg = r'src="(.*?\.jpg)"'
    # Only hosted thread images (URLs containing '/sign=') are matched.
    reg = r'src="(.*?/sign=.*?\.jpg)"'
    imgre = re.compile(reg);
    imgList = re.findall(imgre, html)
    print ('找到图片个数:' + str(len(imgList)))
    # Download the images
    if len(imgList) >0 : downloadImgList(folderPath, imgList)
# ------ Search a keyword on Baidu Images and download the results ------
# folderPath: directory to store images   keyword: search keyword
def downloadImgFromBaiduimage(folderPath='baiduimage', keyword='python'):
    """Search Baidu Images for `keyword` and download the result images."""
    # Percent-encode the keyword for the query string.
    w = urllib.request.quote(keyword)
    url = 'http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word='+w+'&ct=201326592&v=flip'
    html = getHtml(url)
    # ------ Extract image addresses from the embedded JSON ------
    imgList = re.findall('"objURL":"(.*?)",',html,re.S)
    print ('找到图片个数:' + str(len(imgList)) )
    # Download the images
    if len(imgList) >0 : downloadImgList(folderPath, imgList)
# ------ Crawl and download images from http://huaban.com ------
# folderPath: directory to store images
def downloadImgFromhuaban(folderPath='huaban'):
    """Scrape huaban.com's beauty board and download the pinned images."""
    url='http://huaban.com/favorite/beauty/'
    html = getHtml(url)
    # ------ Extract the CDN keys embedded in the page's JSON ------
    # "file":{"farm":"farm1", "bucket":"hbimg", "key":"f56166cc72c040ad73574366a26cd15f2f4268b734c18-yVcNOl", "type":"image/jpeg", "width":658, "height":987, "frames":1}
    url_re=re.compile(r'"key":"(.+?)"')
    imgKeyList = url_re.findall(html)
    imgList = []
    for key in imgKeyList:
        # Each key resolves to a full image URL on huaban's CDN.
        imgList.append('http://img.hb.aicdn.com/' + key)
    print ('找到图片个数:' + str(len(imgList)) )
    # Download the images
    if len(imgList) >0 : downloadImgList(folderPath, imgList)
# Program entry point
if __name__ == '__main__':
    # Timestamped folder name so repeated runs don't collide.
    now = datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')
    # Search a keyword and download Baidu image results
    word = u'皮卡丘'
    #downloadImgFromBaiduimage('baiduimage\\'+word+'\\' + now, word)
    # Download every image in a Baidu Tieba thread
    downloadImgFromBaidutieba('tieba\\'+now, 'https://tieba.baidu.com/p/5256331871')
    # Crawl and download images from http://huaban.com
    # downloadImgFromhuaban('huaban\\'+now)
|
patch.py | import sys
import logging
import threading
import time
import socket
import os
from importlib import import_module, reload
from gradient_utils import metrics
logger = logging.getLogger(__name__)
# Switch to verbose logging when running with Paperspace debugging enabled.
if os.getenv("PAPERSPACE_DEBUG"):
    # reload() so basicConfig takes effect even if logging was already configured.
    reload(logging)
    logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, datefmt='%I:%M:%S')
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
# Dotted module paths of the event-writer implementations this patcher hooks.
TENSORBOARD_C_MODULE = "tensorflow.python.ops.gen_summary_ops"
TENSORBOARD_WRITER_MODULE = "tensorboard.summary.writer.event_file_writer"
TENSORBOARDX_WRITER_MODULE = "tensorboardX.event_file_writer"
TENSORBOARD_PYTORCH_MODULE = "torch.utils.tensorboard.writer"
def patch(settings=None):
    """Monkey-patch every installed tensorboard writer so that each new
    logdir gets a LogdirWatcher that forwards scalar summaries as metrics.

    Raises ValueError if already patched or if no patchable tensorboard
    writer module can be found.
    """
    if len(metrics.patched["tensorboard"]) > 0:
        raise ValueError(
            "Tensorboard already patched. You may be calling metrics.init() more than once."
        )
    # Make sure tensorboard is installed
    _get_module("tensorboard", required=True)
    # Grab specific tensorboard modules for patching
    c = _get_module(TENSORBOARD_C_MODULE)
    tb = _get_module(TENSORBOARD_WRITER_MODULE)
    tbx = _get_module(TENSORBOARDX_WRITER_MODULE)
    pt = _get_module(TENSORBOARD_PYTORCH_MODULE)
    # NOTE(review): `tbx` is not included in this guard, so a
    # tensorboardX-only install would still raise here — confirm intended.
    if not c and not tb and not pt:
        raise ValueError(
            "Could not find a valid tensorboard module to patch"
        )
    # After patching, the patched module is evicted from sys.modules so the
    # next import re-runs module init and picks up the wrapped writer.
    if c:
        _patch_tensorboard(writer=c, module=TENSORBOARD_C_MODULE, settings=settings)
        del sys.modules["tensorflow.python.ops.gen_summary_ops"]
    if tb:
        _patch_tensorboardx(writer=tb, module=TENSORBOARD_WRITER_MODULE, settings=settings)
        del sys.modules["tensorboard.summary.writer"]
        del sys.modules["tensorboard.summary.writer.event_file_writer"]
    if tbx:
        _patch_tensorboardx(writer=tbx, module=TENSORBOARDX_WRITER_MODULE, settings=settings)
        del sys.modules["tensorboardX"]
        del sys.modules["tensorboardX.writer"]
    if pt:
        _patch_tensorboardx(writer=pt, module=TENSORBOARD_PYTORCH_MODULE, settings=settings)
        # uncaching pytorch crashes in user code due to import side effects
def _get_module(name, required=False):
try:
return import_module(name)
except Exception as e:
if required:
raise ValueError(
"Error importing module '{name}'.".format(name=name)
)
def _patch_tensorboard(writer, module, settings):
    """Wrap tensorflow's create_summary_file_writer so every new logdir is
    registered with on_new_logdir() before the real writer is created."""
    prev_func = writer.create_summary_file_writer
    def new_func(*args, **kwargs):
        # The logdir kwarg may arrive as an eager tensor; decode to str.
        logdir = (
            kwargs["logdir"].numpy().decode("utf8")
            if hasattr(kwargs["logdir"], "numpy")
            else kwargs["logdir"]
        )
        on_new_logdir(logdir, settings)
        return prev_func(*args, **kwargs)
    # Keep a reference to the original so it can be restored/chained later.
    writer.prev_create_summary_file_writer = prev_func
    writer.create_summary_file_writer = new_func
    # Record what was patched so patch() can detect double-initialization.
    metrics.patched["tensorboard"].append([module, "create_summary_file_writer"])
    logger.debug("patching %s.%s", module, "create_summary_file_writer")
def _patch_tensorboardx(writer, module, settings):
    """Replace the module's EventFileWriter class with a subclass that
    reports its logdir via on_new_logdir() before initializing."""
    prev_class = writer.EventFileWriter
    class TBXWriter(prev_class):
        def __init__(self, *args, **kwargs):
            # logdir is either the first positional arg or a kwarg.
            logdir = kwargs.pop("logdir", None)
            if logdir is None:
                logdir = args[0]
            on_new_logdir(logdir, settings)
            super(TBXWriter, self).__init__(*args, **kwargs)
    # Keep a reference to the original so it can be restored later.
    writer.prev_EventFileWriter = prev_class
    writer.EventFileWriter = TBXWriter
    # Record what was patched so patch() can detect double-initialization.
    metrics.patched["tensorboard"].append([module, "EventFileWriter"])
    logger.debug("patching %s.%s", module, "EventFileWriter")
def on_new_logdir(logdir, settings):
    """Start a LogdirWatcher for `logdir` and register it on `settings`
    so it can be stopped later."""
    logger.debug("watching %s", logdir)
    watcher = LogdirWatcher(logdir, settings)
    settings.tensorboard_watchers.append(watcher)
    watcher.start()
class LogdirWatcher():
    """Background watcher that tails a tensorboard logdir and forwards
    scalar summaries to the metrics backend.

    A daemonless thread polls the directory once per second via
    tensorboard's DirectoryWatcher; finish() asks it to stop and joins.
    """
    def __init__(self, logdir, settings):
        self._logdir = logdir
        self._settings = settings
        # Hostname is part of the tfevents filename convention and is used
        # to filter out files written by other machines.
        self._hostname = socket.gethostname()
        # tensorboard internals are resolved lazily; all are required.
        self.event_file_loader = _get_module("tensorboard.backend.event_processing.event_file_loader", required=True)
        self.directory_watcher = _get_module("tensorboard.backend.event_processing.directory_watcher", required=True)
        self.tf_compat = _get_module("tensorboard.compat", required=True)
        self._generator = self.directory_watcher.DirectoryWatcher(logdir, self.event_file_loader.EventFileLoader,
                                                                  self._is_new_tfevents_file)
        self._thread = threading.Thread(target=self._thread_body)
        # None while running; set to a timestamp by finish().
        self._stopped = None
    def start(self):
        self._thread.start()
    def _thread_body(self):
        while True:
            time.sleep(1)
            try:
                for event in self._generator.Load():
                    self._process_event(event)
            except self.directory_watcher.DirectoryDeletedError:
                logger.debug("watched directory deleted")
                break
            # Grace period of 5 s after finish() so trailing events flush.
            if self._stopped is not None and self._stopped + 5 < time.time():
                logger.debug("watch stopped")
                break
    def _process_event(self, event):
        if event.HasField("summary"):
            try:
                step = event.step
                summary = event.summary
                for value in summary.value:
                    # This line transforms the metric name into one that passes Prometheus name validation
                    # TODO encode invalid characters instead of transforming them
                    name = value.tag.replace(".", "_").replace("/", "_").replace(" ", "_")
                    metric = get_metric_from_summary(value)
                    if metric:
                        logger.debug("adding metric %s with value %s", name, metric)
                        metrics.add_metrics({name: metric}, step=step)
                    else:
                        logger.debug("no metric found for %s", name)
            except Exception as e:
                # Best-effort: a malformed event must not kill the watcher.
                logger.debug("%s", e)
    def _is_new_tfevents_file(self, path=""):
        path = self.tf_compat.tf.compat.as_str_any(path)
        return is_new_tfevents_file(
            path, self._hostname, self._settings.start_time
        )
    def finish(self):
        # Signal the polling thread to stop (after its grace period) and wait.
        self._stopped = time.time()
        self._thread.join()
def is_new_tfevents_file(path, hostname, start_time):
    """Return True if `path` names a tfevents file written by `hostname`
    at or after `start_time` (filenames look like
    ``events.out.tfevents.<timestamp>.<hostname>[...]``).

    Raises ValueError for an empty path.
    """
    if path == "":
        raise ValueError("Path must be a nonempty string")
    base = os.path.basename(path)
    # Profiler sentinel files are never real event files.
    if base.endswith(".profile-empty"):
        return False
    components = base.split(".")
    if "tfevents" not in components:
        return False
    marker = components.index("tfevents")
    # The hostname may itself contain dots, so compare it piecewise against
    # the components that follow the timestamp.
    host_parts = hostname.split(".")
    candidate = components[marker + 2 : marker + 2 + len(host_parts)]
    if len(candidate) != len(host_parts) or candidate != host_parts:
        return False
    # The component right after "tfevents" is the creation timestamp.
    try:
        created = int(components[marker + 1])
    except (ValueError, IndexError):
        return False
    return created >= start_time
def get_metric_from_summary(value):
    """Extract a scalar metric from a Summary.Value protobuf, or return None.

    NOTE(review): `if value.simple_value:` is falsy for 0.0, so a genuine
    simple_value of exactly zero is skipped and the tensor path (usually
    None) is returned instead — confirm whether zero metrics matter here.
    """
    if value.simple_value:
        return value.simple_value
    if value.HasField("tensor"):
        tensor = value.tensor
        if tensor.tensor_content:
            # TODO marshal tensor_content
            return None
        # The dtype codes below presumably follow the TensorFlow DataType
        # enum (1=float, 2=double, 3-6=int variants, 19=half, 9=int64) —
        # TODO confirm against tensorflow/core/framework/types.proto.
        if tensor.dtype == 1 and tensor.float_val:
            return tensor.float_val[0]
        if tensor.dtype == 2 and tensor.double_val:
            return tensor.double_val[0]
        if tensor.dtype in [3, 4, 5, 6] and tensor.int_val:
            return tensor.int_val[0]
        if tensor.dtype == 19 and tensor.half_val:
            return tensor.half_val[0]
        if tensor.dtype == 9 and tensor.int64_val:
            return tensor.int64_val[0]
    return None
|
gdaltest_python3.py | # -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Python Library supporting GDAL/OGR Test Suite
# Author: Even Rouault, <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <warmerdam@pobox.com>
# Copyright (c) 2009-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import urllib.request, urllib.error, urllib.parse
import socket
import subprocess
import shlex
import os
import sys
from queue import Queue
from threading import Thread
def run_func(func):
    """Invoke *func*, print its result and return it.

    SystemExit is re-raised (after printing a traceback); any other
    exception is swallowed and reported as 'fail (blowup)'.
    """
    try:
        outcome = func()
    except SystemExit as x:
        import traceback
        traceback.print_exc()
        raise x
    except:
        outcome = 'fail (blowup)'
        print(outcome)
        import traceback
        traceback.print_exc()
        return outcome
    else:
        print(outcome)
        return outcome
def urlescape(url):
    """Percent-encode any non-ASCII (and otherwise unsafe) characters in
    *url*; on any failure the URL is returned unchanged (best-effort)."""
    try:
        import urllib
        escaped = urllib.parse.quote(url)
    except:
        return url
    return escaped
def gdalurlopen(url, timeout = 10):
    """Open `url` with a socket timeout, honoring GDAL_HTTP_PROXY /
    GDAL_HTTP_PROXYUSERPWD environment variables.

    Returns the response handle, or None (with a diagnostic printed) when
    the service is unreachable.  The previous default socket timeout is
    restored on every exit path.
    """
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)

    if 'GDAL_HTTP_PROXY' in os.environ:
        proxy = os.environ['GDAL_HTTP_PROXY']
        if 'GDAL_HTTP_PROXYUSERPWD' in os.environ:
            proxyuserpwd = os.environ['GDAL_HTTP_PROXYUSERPWD']
            proxyHandler = urllib.request.ProxyHandler({"http" : \
                "http://%s@%s" % (proxyuserpwd, proxy)})
        else:
            proxyuserpwd = None
            proxyHandler = urllib.request.ProxyHandler({"http" : \
                "http://%s" % (proxy)})
        # NOTE(review): install_opener mutates process-global urllib state.
        opener = urllib.request.build_opener(proxyHandler, urllib.request.HTTPHandler)
        urllib.request.install_opener(opener)

    try:
        handle = urllib.request.urlopen(url)
        socket.setdefaulttimeout(old_timeout)
        return handle
    except urllib.error.HTTPError as e:
        print('HTTP service for %s is down (HTTP Error: %d)' % (url, e.code))
        socket.setdefaulttimeout(old_timeout)
        return None
    except urllib.error.URLError as e:
        print('HTTP service for %s is down (URL Error: %s)' % (url, e.reason))
        socket.setdefaulttimeout(old_timeout)
        return None
    except:
        print('HTTP service for %s is down.' %(url))
        socket.setdefaulttimeout(old_timeout)
        return None
def spawn_async(cmd):
    """Launch *cmd* without waiting for it.

    Returns (process, stdout-pipe), or (None, None) when the process could
    not be started.
    """
    try:
        proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)
    except:
        return (None, None)
    return (proc, proc.stdout)
def wait_process(process):
    """Block until `process` (a subprocess.Popen) terminates."""
    process.wait()
def runexternal(cmd, strin = None, check_memleak = True, display_live_on_parent_stdout = False, encoding='latin1'):
    """Run *cmd*, optionally feeding it *strin* on stdin, and return its
    decoded stdout.  A non-zero exit status appends an
    'ERROR ret code = N' line to the returned text.

    With display_live_on_parent_stdout the child's output is also echoed
    to our own stdout character by character as it arrives.
    """
    argv = shlex.split(cmd)
    if strin is None:
        proc = subprocess.Popen(argv, stdout=subprocess.PIPE)
    else:
        proc = subprocess.Popen(argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        proc.stdin.write(bytes(strin, 'ascii'))
        proc.stdin.close()

    out = ''
    if proc.stdout is not None:
        if display_live_on_parent_stdout:
            child_out = proc.stdout
            while True:
                ch = child_out.read(1).decode(encoding)
                if ch == '':
                    break
                out = out + ch
                sys.stdout.write(ch)
        else:
            out = proc.stdout.read().decode(encoding)
        proc.stdout.close()

    status = proc.wait()
    if status != 0:
        out = out + '\nERROR ret code = %d' % status
    return out
def read_in_thread(f, q):
    """Drain the file-like object *f* completely into queue *q*, then
    close it.  Intended as a Thread target to avoid pipe deadlocks."""
    payload = f.read()
    q.put(payload)
    f.close()
def runexternal_out_and_err(cmd, check_memleak = True):
    """Run `cmd` and return (stdout, stderr), both decoded as ASCII.

    Each stream is drained on its own thread (via read_in_thread) so the
    child cannot deadlock by filling one pipe while we block on the other.
    A non-zero exit status appends an ERROR line to the returned stderr.
    """
    command = shlex.split(cmd)
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    if p.stdout is not None:
        q_stdout = Queue()
        t_stdout = Thread(target=read_in_thread, args=(p.stdout, q_stdout))
        t_stdout.start()
    else:
        q_stdout = None
        ret_stdout = ''

    if p.stderr is not None:
        q_stderr = Queue()
        t_stderr = Thread(target=read_in_thread, args=(p.stderr, q_stderr))
        t_stderr.start()
    else:
        q_stderr = None
        ret_stderr = ''

    # Queue.get() blocks until the reader thread has drained the stream.
    if q_stdout is not None:
        ret_stdout = q_stdout.get().decode('ascii')
    if q_stderr is not None:
        ret_stderr = q_stderr.get().decode('ascii')

    waitcode = p.wait()
    if waitcode != 0:
        ret_stderr = ret_stderr + '\nERROR ret code = %d' % waitcode

    return (ret_stdout, ret_stderr)
|
joystick.py | import pyglet.input
import controller
import threading
import player
import math
import time
class Joystick_controller(controller.Controller):
    """Gamepad input handler built on pyglet's joystick events.

    Recognizes two device families by name: Xbox-style pads ("xbox" in the
    device name) and the PS4 pad (device name exactly "wireless controller"),
    which expose different button numbers and axis names.  A background
    thread re-scans for hot-plugged devices every 2 seconds.
    """
    def __init__(self, game):
        super().__init__(game)
        self.init_joysticks(pyglet.input.get_joysticks())
        # Tuning constants.
        self.camera_sensitivity = 0.007
        self.joystick_deadzone = 0.25
        self.update_delay = 0.15  # seconds between repeated interact actions
        self.last_update = 0
        # Cached axis state as [x, y] pairs (values presumably in [-1, 1]
        # per pyglet's axis convention — TODO confirm).
        self.joystick_move = [0, 0]
        self.joystick_look = [0, 0]
        self.joystick_interact = [0, 0]
        # Daemon thread that handles joystick hot-plugging.
        self.joystick_updater = threading.Thread(target=self.updater, daemon=True, name="Joystick Updater")
        self.joystick_updater.start()
    def updater(self):
        """Poll every 2 s and re-register handlers when the set of
        connected joysticks changes."""
        while True:
            if len(pyglet.input.get_joysticks()) != len(self.joysticks):
                self.init_joysticks(pyglet.input.get_joysticks())
            time.sleep(2)
    def init_joysticks(self, joysticks):
        """Attach our event handlers to every joystick and open them."""
        self.joysticks = joysticks
        for joystick in self.joysticks:
            joystick.on_joybutton_press = self.on_joybutton_press
            joystick.on_joybutton_release = self.on_joybutton_release
            joystick.on_joyaxis_motion = self.on_joyaxis_motion
            joystick.on_joyhat_motion = self.on_joyhat_motion
            joystick.open(exclusive=True)
    def update_controller(self):
        """Per-frame update: apply look axes to the camera and fire
        break/place actions from the trigger state, rate-limited by
        update_delay."""
        if not self.game.mouse_captured or not self.joysticks:
            return
        self.game.player.rotation[0] += self.joystick_look[0] * self.camera_sensitivity
        self.game.player.rotation[1] += -self.joystick_look[1] * self.camera_sensitivity
        # Clamp pitch to straight up / straight down.
        self.game.player.rotation[1] = max(-math.tau / 4, min(math.tau / 4, self.game.player.rotation[1]))
        if round(max(self.joystick_interact)) > 0 and (self.last_update + self.update_delay) <= time.process_time():
            if round(self.joystick_interact[0]) > 0: self.interact(self.InteractMode.BREAK)
            if round(self.joystick_interact[1]) > 0: self.interact(self.InteractMode.PLACE)
            self.last_update = time.process_time()
    def on_joybutton_press(self, joystick, button):
        # Button numbers differ between the two supported pad families.
        if "xbox" in joystick.device.name.lower():
            if button == 1: self.misc(self.MiscMode.RANDOM)
            elif button == 2: self.interact(self.InteractMode.PICK)
            elif button == 3: self.misc(self.MiscMode.SAVE)
            elif button == 0: self.start_move(self.MoveMode.UP)
            elif button == 9: self.start_move(self.MoveMode.DOWN)
            elif button == 8:
                # Toggle sprint based on the player's current target speed.
                if self.game.player.target_speed == player.SPRINTING_SPEED: self.end_modifier(self.ModifierMode.SPRINT)
                elif self.game.player.target_speed == player.WALKING_SPEED: self.start_modifier(self.ModifierMode.SPRINT)
        elif "wireless controller" == joystick.device.name.lower():
            if button == 2: self.misc(self.MiscMode.RANDOM)
            elif button == 0: self.interact(self.InteractMode.PICK)
            elif button == 3: self.misc(self.MiscMode.SAVE)
            elif button == 1: self.start_move(self.MoveMode.UP)
            elif button == 11: self.start_move(self.MoveMode.DOWN)
            elif button == 10:
                if self.game.player.target_speed == player.SPRINTING_SPEED: self.end_modifier(self.ModifierMode.SPRINT)
                elif self.game.player.target_speed == player.WALKING_SPEED: self.start_modifier(self.ModifierMode.SPRINT)
    def on_joybutton_release(self, joystick, button):
        if "xbox" in joystick.device.name.lower():
            if button == 0: self.end_move(self.MoveMode.UP)
            elif button == 9: self.end_move(self.MoveMode.DOWN)
        elif "wireless controller" == joystick.device.name.lower():
            if button == 1: self.end_move(self.MoveMode.UP)
            elif button == 11: self.end_move(self.MoveMode.DOWN)
    def on_joyaxis_motion(self, joystick, axis, value):
        # Ignore drift inside the dead zone.
        if abs(value) < self.joystick_deadzone:
            value = 0
        if "xbox" in joystick.device.name.lower():
            if axis == "x":
                # Edge-detect: only issue start/end move on transitions.
                if math.ceil(value) > 0 and self.joystick_move[0] == 0: self.start_move(self.MoveMode.RIGHT)
                elif math.floor(value) < 0 and self.joystick_move[0] == 0: self.start_move(self.MoveMode.LEFT)
                elif value == 0 and math.ceil(self.joystick_move[0]) > 0: self.end_move(self.MoveMode.RIGHT)
                elif value == 0 and math.floor(self.joystick_move[0]) < 0: self.end_move(self.MoveMode.LEFT)
                self.joystick_move[0] = value
            elif axis == "y":
                if math.ceil(value) > 0 and self.joystick_move[1] == 0: self.start_move(self.MoveMode.BACKWARD)
                elif math.floor(value) < 0 and self.joystick_move[1] == 0: self.start_move(self.MoveMode.FORWARD)
                elif value == 0 and math.ceil(self.joystick_move[1]) > 0: self.end_move(self.MoveMode.BACKWARD)
                elif value == 0 and math.floor(self.joystick_move[1]) < 0: self.end_move(self.MoveMode.FORWARD)
                self.joystick_move[1] = value
            if axis == "rx": self.joystick_look[0] = value
            if axis == "ry": self.joystick_look[1] = value
            if axis == "z":
                # Xbox triggers share one axis: negative = left, positive = right.
                if value < 0: self.joystick_interact[0] = -value
                if value > 0: self.joystick_interact[1] = value
        elif "wireless controller" == joystick.device.name.lower():
            if axis == "x":
                if math.ceil(value) > 0 and self.joystick_move[0] == 0: self.start_move(self.MoveMode.RIGHT)
                elif math.floor(value) < 0 and self.joystick_move[0] == 0: self.start_move(self.MoveMode.LEFT)
                elif value == 0 and math.ceil(self.joystick_move[0]) > 0: self.end_move(self.MoveMode.RIGHT)
                elif value == 0 and math.floor(self.joystick_move[0]) < 0: self.end_move(self.MoveMode.LEFT)
                self.joystick_move[0] = value
            elif axis == "y":
                if math.ceil(value) > 0 and self.joystick_move[1] == 0: self.start_move(self.MoveMode.BACKWARD)
                elif math.floor(value) < 0 and self.joystick_move[1] == 0: self.start_move(self.MoveMode.FORWARD)
                elif value == 0 and math.ceil(self.joystick_move[1]) > 0: self.end_move(self.MoveMode.BACKWARD)
                elif value == 0 and math.floor(self.joystick_move[1]) < 0: self.end_move(self.MoveMode.FORWARD)
                self.joystick_move[1] = value
            if axis == "z": self.joystick_look[0] = value
            if axis == "rz": self.joystick_look[1] = value
            if axis == "rx": self.joystick_interact[0] = value
            if axis == "ry": self.joystick_interact[1] = value
            # NOTE(review): leftover debug print — consider removing.
            print(axis)
    def on_joyhat_motion(self, joystick, hat_x, hat_y):
        # D-pad events are intentionally ignored.
        pass
serial_io.py | #!/usr/bin/env python3
import serial
import threading
from threading import Thread
import sys
from glob import glob
class SerialIO():
    """Threaded serial link to the robot's Arduino motor controller.

    Motion commands are staged into a small shared buffer
    ``[command, dirs_or_speed, left, right]`` under an RLock; a background
    thread (see start()/update()) flushes the buffer to the serial port.

    Fixes
    -----
    * update(): the direct-drive path wrote ``buffer[3]`` twice and never
      sent ``buffer[2]``, so the left motor speed never reached the
      Arduino.  It now sends dirs, left, right in order.
    * sys.exit(1) instead of the interactive-only exit(1).
    """

    # --- High-level motion commands --------------------------------------
    def forward(self, speed = 48):
        self.write(b'f', bytes([speed]))

    def backward(self, speed = 48):
        self.write(b'b', bytes([speed]))

    def left(self, speed = 48):
        self.write(b'l', bytes([speed]))

    def right(self, speed = 48):
        self.write(b'r', bytes([speed]))

    # Direct drive the motors:
    # left and right are motor speeds, integers in [-255, 255].
    def direct(self, left = 48, right = 48):
        """Stage a direct-drive command: sign encodes direction (packed
        into a 2-bit mask), magnitude is clamped to [0, 255]."""
        # Decode the directions from the signs of the inputs.
        dirs = 0
        dirs += (left >= 0)
        dirs += (right >= 0) << 1
        # The Arduino protocol takes only magnitudes 0..255.
        if left < 0:
            left *= -1
        if right < 0:
            right *= -1
        if left > 255:
            left = 255
        if right > 255:
            right = 255
        self.lock.acquire()
        self.buffer[0] = b'd'
        self.buffer[1] = bytes([dirs])
        self.buffer[2] = bytes([left])
        self.buffer[3] = bytes([right])
        self.lock.release()

    def stop(self):
        self.write(b's')

    def __init__(self, com=None, baud=115200, delay=10):
        """Open the first /dev/ttyACM* port and reset the Arduino via DTR.

        Exits the process with status 1 if no Arduino is connected.
        """
        self.baud = baud
        self.delay = delay
        self.running = 1
        # Staged outgoing message: [command, dirs/speed, left, right].
        self.buffer = [0, 0, 0, 0]
        # Last byte the Arduino wrote back (handshake/ack).
        self.check = 0
        self.lock = threading.RLock()
        # Start with out-of-range sentinel distances so consumers can tell
        # "no reading yet" from a real measurement.
        self.distances = {'left': 201, 'right': 201, 'middle': 201}
        # Jenky-but-practical port discovery: first ACM device wins.
        port = glob('/dev/ttyACM*')
        if len(port) == 0:
            sys.stderr.write('No arduino connected!' + '\n')
            sys.exit(1)
        port = port[0]
        self.ser = serial.Serial(port, self.baud, timeout = self.delay)
        # Toggle DTR to reset the Arduino.
        self.ser.setDTR(False)
        # Toss any data already received, see
        # http://pyserial.sourceforge.net/pyserial_api.html#serial.Serial.flushInput
        self.ser.flushInput()
        self.ser.setDTR(True)
        print(self.ser.read())

    def start(self):
        """Start the background thread that flushes staged commands.

        NOTE(review): crashes sometimes if the serial port disappears.
        """
        Thread(target=self.update, args=()).start()

    def read(self, dir):
        """Return the last known distance for `dir` ('left', 'right' or
        'middle'); 201 means no reading yet."""
        return self.distances[dir]

    def check_return(self):
        """Return the most recent write-back byte from the Arduino."""
        self.lock.acquire()
        c = self.check
        self.lock.release()
        return c

    def write(self, m, speed = 48):
        """Stage command `m` (one of b'f', b'b', b's', b'l', b'r') with an
        optional speed.  Sensor-poll commands (b'x'/b'y'/b'z') are reserved
        for this class and rejected here."""
        if m not in [b'x', b'y', b'z']:
            self.lock.acquire()
            self.buffer[0] = m
            # NOTE(review): callers pass bytes([speed]) but the default is
            # the int 48, which serial.write() would reject — always pass
            # an explicit bytes speed for moving commands.
            self.buffer[1] = speed
            self.lock.release()

    def update(self):
        """Background loop: flush the staged command to the serial port,
        read the Arduino's ack byte, and clear the buffer."""
        while self.running:
            # (Sensor polling via b'x'/b'y'/b'z' is currently disabled.)
            self.lock.acquire()
            if self.buffer[0]:
                # Write the command byte, then its payload.
                self.ser.write(self.buffer[0])
                if self.buffer[0] in [b'f', b'b', b'l', b'r']:
                    self.ser.write(self.buffer[1])
                if self.buffer[0] == b'd':
                    self.ser.write(self.buffer[1])
                    # BUG FIX: was self.buffer[3] twice — the left motor
                    # speed (buffer[2]) was never transmitted.
                    self.ser.write(self.buffer[2])
                    self.ser.write(self.buffer[3])
                # Wait for the Arduino to write back its ack byte.
                c = self.ser.read(1)
                self.check = c
                # Zero command byte marks the buffer as empty.
                self.buffer[0] = 0
            self.lock.release()
|
server.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC server implementation.
Note
----
Server is TCP based with the following protocol:
- Initial handshake to the peer
- [RPC_MAGIC, keysize(int32), key-bytes]
- The key is in format
- {server|client}:device-type[:random-key] [-timeout=timeout]
"""
# pylint: disable=invalid-name
import ctypes
import socket
import select
import struct
import logging
import threading
import multiprocessing
import time
import errno
import tvm._ffi
from tvm._ffi.base import py_str
from tvm._ffi.libinfo import find_lib_path
from tvm.runtime.module import load_module as _load_module
from tvm.contrib import utils
from tvm.contrib.popen_pool import PopenWorker
from . import _ffi_api
from . import base
# pylint: disable=unused-import
from . import testing
from .base import TrackerCode
logger = logging.getLogger("RPCServer")
def _server_env(load_library, work_path=None):
    """Prepare the server-side environment and return its temp dir.

    Registers the TVM FFI callbacks the RPC runtime expects
    (workpath resolution, module loading, linked-module download) and
    preloads any extra shared libraries listed in `load_library`
    (a ':'-separated string).
    """
    if work_path:
        temp = work_path
    else:
        temp = utils.tempdir()

    # pylint: disable=unused-variable
    @tvm._ffi.register_func("tvm.rpc.server.workpath", override=True)
    def get_workpath(path):
        # Resolve a client-relative path inside the server temp dir.
        return temp.relpath(path)

    @tvm._ffi.register_func("tvm.rpc.server.load_module", override=True)
    def load_module(file_name):
        """Load module from remote side."""
        path = temp.relpath(file_name)
        m = _load_module(path)
        logger.info("load_module %s", path)
        return m

    @tvm._ffi.register_func("tvm.rpc.server.download_linked_module", override=True)
    def download_linked_module(file_name):
        """Link an uploaded artifact into a shared library and return its bytes."""
        # pylint: disable=import-outside-toplevel
        path = temp.relpath(file_name)
        if path.endswith(".o"):
            # Extra dependencies during runtime.
            from tvm.contrib import cc as _cc
            _cc.create_shared(path + ".so", path)
            path += ".so"
        elif path.endswith(".tar"):
            # Extra dependencies during runtime.
            from tvm.contrib import cc as _cc, tar as _tar
            tar_temp = utils.tempdir(custom_path=path.replace(".tar", ""))
            _tar.untar(path, tar_temp.temp_dir)
            files = [tar_temp.relpath(x) for x in tar_temp.listdir()]
            _cc.create_shared(path + ".so", files)
            path += ".so"
        elif path.endswith(".dylib") or path.endswith(".so"):
            # Already a shared library; nothing to do.
            pass
        else:
            raise RuntimeError("Do not know how to link %s" % file_name)
        logger.info("Send linked module %s to client", path)
        return bytearray(open(path, "rb").read())

    # Preload additional libraries so their symbols are globally visible.
    libs = []
    load_library = load_library.split(":") if load_library else []
    for file_name in load_library:
        file_name = find_lib_path(file_name)[0]
        libs.append(ctypes.CDLL(file_name, ctypes.RTLD_GLOBAL))
        logger.info("Load additional library %s", file_name)
    # Keep the handles alive for the lifetime of the temp dir.
    temp.libs = libs
    return temp
def _serve_loop(sock, addr, load_library, work_path=None):
    """Serve one RPC session on `sock` until the client disconnects.

    Sets up the server environment, hands the raw file descriptor to the
    C++ server loop, and removes the temp dir afterwards (unless the
    caller supplied its own `work_path`).
    """
    sockfd = sock.fileno()
    temp = _server_env(load_library, work_path)
    _ffi_api.ServerLoop(sockfd)
    if not work_path:
        temp.remove()
    logger.info("Finish serving %s", addr)
def _parse_server_opt(opts):
# parse client options
ret = {}
for kv in opts:
if kv.startswith("-timeout="):
ret["timeout"] = float(kv[9:])
return ret
def _listen_loop(sock, port, rpc_key, tracker_addr, load_library, custom_addr):
    """Listening loop of the server.

    Runs forever: optionally registers this server with an RPC tracker,
    waits for a client whose handshake key matches, then serves each
    accepted session in a forked worker process (killed on timeout).
    """

    def _accept_conn(listen_sock, tracker_conn, ping_period=2):
        """Accept connection from the other places.

        Parameters
        ----------
        listen_sock: Socket
            The socket used by listening process.
        tracker_conn : connection to tracker
            Tracker connection
        ping_period : float, optional
            ping tracker every k seconds if no connection is accepted.
        """
        old_keyset = set()
        # Report resource to tracker
        if tracker_conn:
            matchkey = base.random_key(rpc_key + ":")
            base.sendjson(tracker_conn, [TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr])
            assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
        else:
            matchkey = rpc_key
        unmatch_period_count = 0
        unmatch_timeout = 4
        # Wait until we get a valid connection
        while True:
            if tracker_conn:
                # Poll the listening socket; on timeout, ping the tracker to
                # check whether our matchkey is still pending.
                trigger = select.select([listen_sock], [], [], ping_period)
                if not listen_sock in trigger[0]:
                    base.sendjson(tracker_conn, [TrackerCode.GET_PENDING_MATCHKEYS])
                    pending_keys = base.recvjson(tracker_conn)
                    old_keyset.add(matchkey)
                    # if match key not in pending key set
                    # it means the key is acquired by a client but not used.
                    if matchkey not in pending_keys:
                        unmatch_period_count += 1
                    else:
                        unmatch_period_count = 0
                    # regenerate match key if key is acquired but not used for a while
                    if unmatch_period_count * ping_period > unmatch_timeout + ping_period:
                        logger.info("no incoming connections, regenerate key ...")
                        matchkey = base.random_key(rpc_key + ":", old_keyset)
                        base.sendjson(
                            tracker_conn, [TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr]
                        )
                        assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
                        unmatch_period_count = 0
                    continue
            conn, addr = listen_sock.accept()
            # Handshake: magic number, then the client's key string.
            magic = struct.unpack("<i", base.recvall(conn, 4))[0]
            if magic != base.RPC_MAGIC:
                conn.close()
                continue
            keylen = struct.unpack("<i", base.recvall(conn, 4))[0]
            key = py_str(base.recvall(conn, keylen))
            arr = key.split()
            expect_header = "client:" + matchkey
            server_key = "server:" + rpc_key
            if arr[0] != expect_header:
                conn.sendall(struct.pack("<i", base.RPC_CODE_MISMATCH))
                conn.close()
                logger.warning("mismatch key from %s", addr)
                continue
            conn.sendall(struct.pack("<i", base.RPC_CODE_SUCCESS))
            conn.sendall(struct.pack("<i", len(server_key)))
            conn.sendall(server_key.encode("utf-8"))
            # Remaining tokens after the header are client options (e.g. -timeout=).
            return conn, addr, _parse_server_opt(arr[1:])

    # Server logic
    tracker_conn = None
    while True:
        try:
            # step 1: setup tracker and report to tracker
            if tracker_addr and tracker_conn is None:
                tracker_conn = base.connect_with_retry(tracker_addr)
                tracker_conn.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
                magic = struct.unpack("<i", base.recvall(tracker_conn, 4))[0]
                if magic != base.RPC_TRACKER_MAGIC:
                    raise RuntimeError("%s is not RPC Tracker" % str(tracker_addr))
                # report status of current queue
                cinfo = {"key": "server:" + rpc_key, "addr": (custom_addr, port)}
                base.sendjson(tracker_conn, [TrackerCode.UPDATE_INFO, cinfo])
                assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
            # step 2: wait for in-coming connections
            conn, addr, opts = _accept_conn(sock, tracker_conn)
        except (socket.error, IOError):
            # retry when tracker is dropped
            if tracker_conn:
                tracker_conn.close()
                tracker_conn = None
            continue
        except RuntimeError as exc:
            raise exc
        # step 3: serving
        work_path = utils.tempdir()
        logger.info("connection from %s", addr)
        server_proc = multiprocessing.Process(
            target=_serve_loop, args=(conn, addr, load_library, work_path)
        )
        server_proc.start()
        # close from our side.
        conn.close()
        # wait until server process finish or timeout
        server_proc.join(opts.get("timeout", None))
        if server_proc.is_alive():
            logger.info("Timeout in RPC session, kill..")
            # pylint: disable=import-outside-toplevel
            import psutil

            parent = psutil.Process(server_proc.pid)
            # terminate worker children
            for child in parent.children(recursive=True):
                child.terminate()
            # terminate the worker
            server_proc.terminate()
        work_path.remove()
def _connect_proxy_loop(addr, key, load_library):
    """Keep this server connected to an RPC proxy at ``addr``.

    Registers ``key`` with the proxy, serves one session per connection in a
    child process, and retries transient socket errors up to ``max_retry``
    consecutive times before raising.
    """
    key = "server:" + key
    retry_count = 0
    max_retry = 5
    retry_period = 5
    while True:
        try:
            sock = socket.socket(base.get_addr_family(addr), socket.SOCK_STREAM)
            sock.connect(addr)
            # Handshake: magic, key length, key.
            sock.sendall(struct.pack("<i", base.RPC_MAGIC))
            sock.sendall(struct.pack("<i", len(key)))
            sock.sendall(key.encode("utf-8"))
            magic = struct.unpack("<i", base.recvall(sock, 4))[0]
            if magic == base.RPC_CODE_DUPLICATE:
                raise RuntimeError("key: %s has already been used in proxy" % key)
            if magic == base.RPC_CODE_MISMATCH:
                logger.warning("RPCProxy do not have matching client key %s", key)
            elif magic != base.RPC_CODE_SUCCESS:
                raise RuntimeError("%s is not RPC Proxy" % str(addr))
            keylen = struct.unpack("<i", base.recvall(sock, 4))[0]
            remote_key = py_str(base.recvall(sock, keylen))
            # Tokens after the remote key header carry options (e.g. -timeout=).
            opts = _parse_server_opt(remote_key.split()[1:])
            logger.info("connected to %s", str(addr))
            process = multiprocessing.Process(target=_serve_loop, args=(sock, addr, load_library))
            process.start()
            sock.close()
            process.join(opts.get("timeout", None))
            if process.is_alive():
                logger.info("Timeout in RPC session, kill..")
                process.terminate()
            # A session completed: reset the consecutive-failure counter.
            retry_count = 0
        except (socket.error, IOError) as err:
            retry_count += 1
            logger.warning("Error encountered %s, retry in %g sec", str(err), retry_period)
            if retry_count > max_retry:
                raise RuntimeError("Maximum retry error: last error: %s" % str(err))
            time.sleep(retry_period)
class PopenRPCServerState(object):
    """Internal PopenRPCServer State.

    Holds the listening socket and the serving thread of an RPC server
    running inside a Popen worker process.
    """

    # Set to the live state instance inside the worker process so the
    # socket/thread stay referenced for the life of the process.
    current = None

    def __init__(
        self,
        host,
        port=9091,
        port_end=9199,
        is_proxy=False,
        tracker_addr=None,
        key="",
        load_library=None,
        custom_addr=None,
        silent=False,
    ):
        # start update
        self.host = host
        self.port = port
        self.libs = []
        self.custom_addr = custom_addr
        if silent:
            logger.setLevel(logging.ERROR)
        if not is_proxy:
            # Direct-listen mode: bind the first free port in [port, port_end).
            sock = socket.socket(base.get_addr_family((host, port)), socket.SOCK_STREAM)
            self.port = None
            for my_port in range(port, port_end):
                try:
                    sock.bind((host, my_port))
                    self.port = my_port
                    break
                except socket.error as sock_err:
                    # Port already taken: try the next one; re-raise anything else.
                    if sock_err.errno in [errno.EADDRINUSE]:
                        continue
                    raise sock_err
            if not self.port:
                raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
            logger.info("bind to %s:%d", host, self.port)
            sock.listen(1)
            self.sock = sock
            self.thread = threading.Thread(
                target=_listen_loop,
                args=(self.sock, self.port, key, tracker_addr, load_library, self.custom_addr),
            )
            self.thread.start()
        else:
            # Proxy mode: connect out to the proxy instead of listening.
            self.thread = threading.Thread(
                target=_connect_proxy_loop, args=((host, port), key, load_library)
            )
            self.thread.start()
def _popen_start_rpc_server(
    host,
    port=9091,
    port_end=9199,
    is_proxy=False,
    tracker_addr=None,
    key="",
    load_library=None,
    custom_addr=None,
    silent=False,
    no_fork=False,
    server_init_callback=None,
):
    """Entry point executed inside the Popen worker: start the RPC server.

    Returns the bound port so the parent process can learn it.
    """
    if no_fork:
        # Must be configured before any Process is created in this worker.
        multiprocessing.set_start_method("spawn")
    if server_init_callback:
        # Let the caller register extra functions/imports in this process.
        server_init_callback()
    # This is a function that will be sent to the
    # Popen worker to run on a separate process.
    # Create and start the server in a different thread
    state = PopenRPCServerState(
        host, port, port_end, is_proxy, tracker_addr, key, load_library, custom_addr, silent
    )
    PopenRPCServerState.current = state
    # returns the port so that the main can get the port number.
    return state.port
class Server(object):
    """Start RPC server on a separate process.

    This is a simple python implementation based on multi-processing.
    It is also possible to implement a similar C based server with
    TVM runtime which does not depend on the python.

    Parameters
    ----------
    host : str
        The host url of the server.
    port : int
        The port to be bind to
    port_end : int, optional
        The end port to search
    is_proxy : bool, optional
        Whether the address specified is a proxy.
        If this is true, the host and port actually corresponds to the
        address of the proxy server.
    tracker_addr: Tuple (str, int) , optional
        The address of RPC Tracker in tuple(host, ip) format.
        If is not None, the server will register itself to the tracker.
    key : str, optional
        The key used to identify the device type in tracker.
    load_library : str, optional
        List of additional libraries to be loaded during execution.
    custom_addr: str, optional
        Custom IP Address to Report to RPC Tracker
    silent: bool, optional
        Whether run this server in silent mode.
    no_fork: bool, optional
        Whether forbid fork in multiprocessing.
    server_init_callback: Callable, optional
        Additional initialization function when starting the server.

    Note
    ----
    The RPC server only sees functions in the tvm namespace.
    To bring additional custom functions to the server env, you can use
    server_init_callback.

    .. code:: python

        def server_init_callback():
            import tvm
            # must import mypackage here
            import mypackage

            tvm.register_func("function", mypackage.func)

        server = rpc.Server(host, server_init_callback=server_init_callback)
    """

    def __init__(
        self,
        host="0.0.0.0",
        port=9091,
        port_end=9199,
        is_proxy=False,
        tracker_addr=None,
        key="",
        load_library=None,
        custom_addr=None,
        silent=False,
        no_fork=False,
        server_init_callback=None,
    ):
        # Fail early if the runtime was built without RPC support.
        try:
            if _ffi_api.ServerLoop is None:
                raise RuntimeError("Please compile with USE_RPC=1")
        except NameError:
            raise RuntimeError("Please compile with USE_RPC=1")
        self.proc = PopenWorker()
        # send the function
        self.proc.send(
            _popen_start_rpc_server,
            [
                host,
                port,
                port_end,
                is_proxy,
                tracker_addr,
                key,
                load_library,
                custom_addr,
                silent,
                no_fork,
                server_init_callback,
            ],
        )
        # receive the port
        self.port = self.proc.recv()
        self.host = host

    def terminate(self):
        """Terminate the server process"""
        if self.proc:
            self.proc.kill()
            self.proc = None

    def __del__(self):
        # Best-effort cleanup when the handle is garbage collected.
        self.terminate()
|
tests.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import errno
import os
import shutil
import sys
import tempfile
import time
import zlib
from datetime import datetime, timedelta
from io import BytesIO
try:
import threading
except ImportError:
import dummy_threading as threading
from django.conf import settings
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured
from django.core.files.base import File, ContentFile
from django.core.files.images import get_image_dimensions
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import UploadedFile
from django.test import SimpleTestCase
from django.utils import six
from django.utils import unittest
from django.utils._os import upath
from django.test.utils import override_settings
from ..servers.tests import LiveServerBase
# Try to import PIL in either of the two ways it can end up installed.
# Checking for the existence of Image is enough for CPython, but
# for PyPy, you need to check for the underlying modules
try:
from PIL import Image, _imaging
except ImportError:
try:
import Image, _imaging
except ImportError:
Image = None
class GetStorageClassTests(SimpleTestCase):
    """Tests for get_storage_class() import/lookup behavior."""

    def test_get_filesystem_storage(self):
        """
        get_storage_class returns the class for a storage backend name/path.
        """
        self.assertEqual(
            get_storage_class('django.core.files.storage.FileSystemStorage'),
            FileSystemStorage)

    def test_get_invalid_storage_module(self):
        """
        get_storage_class raises an error if the requested import doesn't exist.
        """
        # Regex tolerates both Python 2 and 3 "No module named" message forms.
        with six.assertRaisesRegex(self, ImproperlyConfigured,
                "Error importing module storage: \"No module named '?storage'?\""):
            get_storage_class('storage.NonExistingStorage')

    def test_get_nonexisting_storage_class(self):
        """
        get_storage_class raises an error if the requested class doesn't exist.
        """
        self.assertRaisesMessage(
            ImproperlyConfigured,
            'Module "django.core.files.storage" does not define a '
            '"NonExistingStorage" attribute/class',
            get_storage_class,
            'django.core.files.storage.NonExistingStorage')

    def test_get_nonexisting_storage_module(self):
        """
        get_storage_class raises an error if the requested module doesn't exist.
        """
        # Error message may or may not be the fully qualified path.
        with six.assertRaisesRegex(self, ImproperlyConfigured,
                "Error importing module django.core.files.non_existing_storage: "
                "\"No module named '?(django.core.files.)?non_existing_storage'?\""):
            get_storage_class(
                'django.core.files.non_existing_storage.NonExistingStorage')
class FileStorageTests(unittest.TestCase):
    """Exercise the FileSystemStorage API against real temp directories."""

    # Subclasses (e.g. CustomStorageTests) override this to reuse the suite.
    storage_class = FileSystemStorage

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.storage = self.storage_class(location=self.temp_dir,
            base_url='/test_media_url/')
        # Set up a second temporary directory which is ensured to have a mixed
        # case name.
        self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')

    def tearDown(self):
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.temp_dir2)

    # NOTE(review): method name has a typo ("emtpy" for "empty"); kept as-is
    # since renaming would change the test id.
    def test_emtpy_location(self):
        """
        Makes sure an exception is raised if the location is empty
        """
        storage = self.storage_class(location='')
        self.assertEqual(storage.base_location, '')
        self.assertEqual(storage.location, upath(os.getcwd()))

    def test_file_access_options(self):
        """
        Standard file access options are available, and work as expected.
        """
        self.assertFalse(self.storage.exists('storage_test'))
        f = self.storage.open('storage_test', 'w')
        f.write('storage contents')
        f.close()
        self.assertTrue(self.storage.exists('storage_test'))
        f = self.storage.open('storage_test', 'r')
        self.assertEqual(f.read(), 'storage contents')
        f.close()
        self.storage.delete('storage_test')
        self.assertFalse(self.storage.exists('storage_test'))

    def test_file_accessed_time(self):
        """
        File storage returns a Datetime object for the last accessed time of
        a file.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        atime = self.storage.accessed_time(f_name)
        self.assertEqual(atime, datetime.fromtimestamp(
            os.path.getatime(self.storage.path(f_name))))
        # 2-second tolerance keeps the check robust on slow filesystems.
        self.assertTrue(datetime.now() - self.storage.accessed_time(f_name) < timedelta(seconds=2))
        self.storage.delete(f_name)

    def test_file_created_time(self):
        """
        File storage returns a Datetime object for the creation time of
        a file.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        ctime = self.storage.created_time(f_name)
        self.assertEqual(ctime, datetime.fromtimestamp(
            os.path.getctime(self.storage.path(f_name))))
        self.assertTrue(datetime.now() - self.storage.created_time(f_name) < timedelta(seconds=2))
        self.storage.delete(f_name)

    def test_file_modified_time(self):
        """
        File storage returns a Datetime object for the last modified time of
        a file.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        mtime = self.storage.modified_time(f_name)
        self.assertEqual(mtime, datetime.fromtimestamp(
            os.path.getmtime(self.storage.path(f_name))))
        self.assertTrue(datetime.now() - self.storage.modified_time(f_name) < timedelta(seconds=2))
        self.storage.delete(f_name)

    def test_file_save_without_name(self):
        """
        File storage extracts the filename from the content object if no
        name is given explicitly.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f.name = 'test.file'
        storage_f_name = self.storage.save(None, f)
        self.assertEqual(storage_f_name, f.name)
        self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
        self.storage.delete(storage_f_name)

    def test_file_save_with_path(self):
        """
        Saving a pathname should create intermediate directories as necessary.
        """
        self.assertFalse(self.storage.exists('path/to'))
        self.storage.save('path/to/test.file',
            ContentFile('file saved with path'))
        self.assertTrue(self.storage.exists('path/to'))
        with self.storage.open('path/to/test.file') as f:
            self.assertEqual(f.read(), b'file saved with path')
        self.assertTrue(os.path.exists(
            os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
        self.storage.delete('path/to/test.file')

    def test_file_path(self):
        """
        File storage returns the full path of a file
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.assertEqual(self.storage.path(f_name),
            os.path.join(self.temp_dir, f_name))
        self.storage.delete(f_name)

    def test_file_url(self):
        """
        File storage returns a url to access a given file from the Web.
        """
        self.assertEqual(self.storage.url('test.file'),
            '%s%s' % (self.storage.base_url, 'test.file'))
        # should encode special chars except ~!*()'
        # like encodeURIComponent() JavaScript function do
        self.assertEqual(self.storage.url(r"""~!*()'@#$%^&*abc`+ =.file"""),
            """/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file""")
        # should translate os path separator(s) to the url path separator
        self.assertEqual(self.storage.url("""a/b\\c.file"""),
            """/test_media_url/a/b/c.file""")
        # url() requires a configured base_url.
        self.storage.base_url = None
        self.assertRaises(ValueError, self.storage.url, 'test.file')

    def test_listdir(self):
        """
        File storage returns a tuple containing directories and files.
        """
        self.assertFalse(self.storage.exists('storage_test_1'))
        self.assertFalse(self.storage.exists('storage_test_2'))
        self.assertFalse(self.storage.exists('storage_dir_1'))
        f = self.storage.save('storage_test_1', ContentFile('custom content'))
        f = self.storage.save('storage_test_2', ContentFile('custom content'))
        os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
        dirs, files = self.storage.listdir('')
        self.assertEqual(set(dirs), set(['storage_dir_1']))
        self.assertEqual(set(files),
            set(['storage_test_1', 'storage_test_2']))
        self.storage.delete('storage_test_1')
        self.storage.delete('storage_test_2')
        os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))

    def test_file_storage_prevents_directory_traversal(self):
        """
        File storage prevents directory traversal (files can only be accessed if
        they're below the storage location).
        """
        self.assertRaises(SuspiciousOperation, self.storage.exists, '..')
        self.assertRaises(SuspiciousOperation, self.storage.exists, '/etc/passwd')

    def test_file_storage_preserves_filename_case(self):
        """The storage backend should preserve case of filenames."""
        # Create a storage backend associated with the mixed case name
        # directory.
        temp_storage = self.storage_class(location=self.temp_dir2)
        # Ask that storage backend to store a file with a mixed case filename.
        mixed_case = 'CaSe_SeNsItIvE'
        file = temp_storage.open(mixed_case, 'w')
        file.write('storage contents')
        file.close()
        self.assertEqual(os.path.join(self.temp_dir2, mixed_case),
            temp_storage.path(mixed_case))
        temp_storage.delete(mixed_case)

    def test_makedirs_race_handling(self):
        """
        File storage should be robust against directory creation race conditions.
        """
        real_makedirs = os.makedirs

        # Monkey-patch os.makedirs, to simulate a normal call, a raced call,
        # and an error.
        def fake_makedirs(path):
            if path == os.path.join(self.temp_dir, 'normal'):
                real_makedirs(path)
            elif path == os.path.join(self.temp_dir, 'raced'):
                # Simulate another process creating the dir first: make it,
                # then raise EEXIST as the "losing" call would.
                real_makedirs(path)
                raise OSError(errno.EEXIST, 'simulated EEXIST')
            elif path == os.path.join(self.temp_dir, 'error'):
                raise OSError(errno.EACCES, 'simulated EACCES')
            else:
                self.fail('unexpected argument %r' % path)

        try:
            os.makedirs = fake_makedirs
            self.storage.save('normal/test.file',
                ContentFile('saved normally'))
            with self.storage.open('normal/test.file') as f:
                self.assertEqual(f.read(), b'saved normally')
            self.storage.save('raced/test.file',
                ContentFile('saved with race'))
            with self.storage.open('raced/test.file') as f:
                self.assertEqual(f.read(), b'saved with race')
            # Check that OSErrors aside from EEXIST are still raised.
            self.assertRaises(OSError,
                self.storage.save, 'error/test.file', ContentFile('not saved'))
        finally:
            # Always restore the real os.makedirs.
            os.makedirs = real_makedirs

    def test_remove_race_handling(self):
        """
        File storage should be robust against file removal race conditions.
        """
        real_remove = os.remove

        # Monkey-patch os.remove, to simulate a normal call, a raced call,
        # and an error.
        def fake_remove(path):
            if path == os.path.join(self.temp_dir, 'normal.file'):
                real_remove(path)
            elif path == os.path.join(self.temp_dir, 'raced.file'):
                # Simulate another process deleting the file first.
                real_remove(path)
                raise OSError(errno.ENOENT, 'simulated ENOENT')
            elif path == os.path.join(self.temp_dir, 'error.file'):
                raise OSError(errno.EACCES, 'simulated EACCES')
            else:
                self.fail('unexpected argument %r' % path)

        try:
            os.remove = fake_remove
            self.storage.save('normal.file', ContentFile('delete normally'))
            self.storage.delete('normal.file')
            self.assertFalse(self.storage.exists('normal.file'))
            self.storage.save('raced.file', ContentFile('delete with race'))
            self.storage.delete('raced.file')
            self.assertFalse(self.storage.exists('normal.file'))
            # Check that OSErrors aside from ENOENT are still raised.
            self.storage.save('error.file', ContentFile('delete with error'))
            self.assertRaises(OSError, self.storage.delete, 'error.file')
        finally:
            # Always restore the real os.remove.
            os.remove = real_remove

    def test_file_chunks_error(self):
        """
        Test behaviour when file.chunks() is raising an error
        """
        f1 = ContentFile('chunks fails')

        def failing_chunks():
            raise IOError

        f1.chunks = failing_chunks
        with self.assertRaises(IOError):
            self.storage.save('error.file', f1)
class CustomStorage(FileSystemStorage):
    def get_available_name(self, name):
        """
        Append numbers to duplicate files rather than underscores, like Trac.
        """
        pieces = name.split('.')
        stem, suffixes = pieces[0], pieces[1:]
        attempt = 2
        # Insert an increasing counter between the stem and any suffixes
        # until the candidate name is free.
        while self.exists(name):
            name = '.'.join([stem, str(attempt)] + suffixes)
            attempt += 1
        return name
class CustomStorageTests(FileStorageTests):
    """Re-run the whole FileStorageTests suite against CustomStorage."""

    storage_class = CustomStorage

    def test_custom_get_available_name(self):
        # Second save of the same name gets a ".2" counter, not "_1".
        first = self.storage.save('custom_storage', ContentFile('custom contents'))
        self.assertEqual(first, 'custom_storage')
        second = self.storage.save('custom_storage', ContentFile('more contents'))
        self.assertEqual(second, 'custom_storage.2')
        self.storage.delete(first)
        self.storage.delete(second)
class UnicodeFileNameTests(unittest.TestCase):
    def test_unicode_file_names(self):
        """
        Regression test for #8156: files with unicode names. The actual repr
        content doesn't matter; it just shouldn't return a unicode object
        (repr must be a native str).
        """
        uf = UploadedFile(name='¿Cómo?', content_type='text')
        self.assertEqual(type(uf.__repr__()), str)
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
    """A ContentFile whose chunks() stalls briefly, to provoke save races."""

    def chunks(self):
        time.sleep(1)
        # Fixed: was super(ContentFile, self), which starts the MRO search
        # *after* ContentFile and would silently skip any chunks() that
        # ContentFile defines. Delegating from this class is the intended
        # "sleep, then behave normally" semantics.
        return super(SlowFile, self).chunks()
class FileSaveRaceConditionTest(unittest.TestCase):
    """Two concurrent saves of the same name must yield distinct files (#4948)."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)
        self.thread = threading.Thread(target=self.save_file, args=['conflict'])

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def save_file(self, name):
        # SlowFile's delayed chunks() widens the race window.
        name = self.storage.save(name, SlowFile(b"Data"))

    def test_race_condition(self):
        self.thread.start()
        name = self.save_file('conflict')
        self.thread.join()
        # One save keeps the name, the other gets the "_1" suffix.
        self.assertTrue(self.storage.exists('conflict'))
        self.assertTrue(self.storage.exists('conflict_1'))
        self.storage.delete('conflict')
        self.storage.delete('conflict_1')
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
    """Saved files honor FILE_UPLOAD_PERMISSIONS, or the process umask by default."""

    def setUp(self):
        self.umask = 0o027
        self.old_umask = os.umask(self.umask)
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)
        os.umask(self.old_umask)

    @override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
    def test_file_upload_permissions(self):
        name = self.storage.save("the_file", ContentFile("data"))
        # st_mode is index 0 of the os.stat result; mask to permission bits.
        actual_mode = os.stat(self.storage.path(name))[0] & 0o777
        self.assertEqual(actual_mode, 0o654)

    @override_settings(FILE_UPLOAD_PERMISSIONS=None)
    def test_file_upload_default_permissions(self):
        fname = self.storage.save("some_file", ContentFile("data"))
        mode = os.stat(self.storage.path(fname))[0] & 0o777
        # With no explicit setting, mode is 0666 filtered by the umask.
        self.assertEqual(mode, 0o666 & ~self.umask)
class FileStoragePathParsing(unittest.TestCase):
    """Duplicate-name mangling must apply to the file name, not the directory."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def test_directory_with_dot(self):
        """Regression test for #9610.

        If the directory name contains a dot and the file name doesn't, make
        sure we still mangle the file name instead of the directory name.
        """
        self.storage.save('dotted.path/test', ContentFile("1"))
        self.storage.save('dotted.path/test', ContentFile("2"))
        self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
        self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test')))
        self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test_1')))

    def test_first_character_dot(self):
        """
        File names with a dot as their first character don't have an extension,
        and the underscore should get added to the end.
        """
        self.storage.save('dotted.path/.test', ContentFile("1"))
        self.storage.save('dotted.path/.test', ContentFile("2"))
        self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test')))
        self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test_1')))
class DimensionClosingBug(unittest.TestCase):
    """
    Test that get_image_dimensions() properly closes files (#8817)
    """
    @unittest.skipUnless(Image, "PIL not installed")
    def test_not_closing_of_files(self):
        """
        Open files passed into get_image_dimensions() should stay opened.
        """
        empty_io = BytesIO()
        try:
            get_image_dimensions(empty_io)
        finally:
            self.assertTrue(not empty_io.closed)

    @unittest.skipUnless(Image, "PIL not installed")
    def test_closing_of_filenames(self):
        """
        get_image_dimensions() called with a filename should close the file.
        """
        # We need to inject a modified open() builtin into the images module
        # that checks if the file was closed properly if the function is
        # called with a filename instead of an file object.
        # get_image_dimensions will call our catching_open instead of the
        # regular builtin one.

        class FileWrapper(object):
            # Class-level list shared by all wrappers: records close() calls.
            _closed = []

            def __init__(self, f):
                self.f = f

            def __getattr__(self, name):
                # Delegate everything else to the wrapped file object.
                return getattr(self.f, name)

            def close(self):
                self._closed.append(True)
                self.f.close()

        def catching_open(*args):
            return FileWrapper(open(*args))

        from django.core.files import images
        images.open = catching_open
        try:
            get_image_dimensions(os.path.join(os.path.dirname(upath(__file__)), "test1.png"))
        finally:
            # Restore the module's normal open() lookup.
            del images.open
        self.assertTrue(FileWrapper._closed)
class InconsistentGetImageDimensionsBug(unittest.TestCase):
    """
    Test that get_image_dimensions() works properly after various calls
    using a file handler (#11158)
    """
    @unittest.skipUnless(Image, "PIL not installed")
    def test_multiple_calls(self):
        """
        Multiple calls of get_image_dimensions() should return the same size.
        """
        from django.core.files.images import ImageFile

        img_path = os.path.join(os.path.dirname(upath(__file__)), "test.png")
        image = ImageFile(open(img_path, 'rb'))
        image_pil = Image.open(img_path)
        # Two reads of the same ImageFile must agree with each other and PIL.
        size_1, size_2 = get_image_dimensions(image), get_image_dimensions(image)
        self.assertEqual(image_pil.size, size_1)
        self.assertEqual(size_1, size_2)

    @unittest.skipUnless(Image, "PIL not installed")
    def test_bug_19457(self):
        """
        Regression test for #19457
        get_image_dimensions fails on some pngs, while Image.size is working
        good on them.
        """
        img_path = os.path.join(os.path.dirname(upath(__file__)), "magic.png")
        try:
            size = get_image_dimensions(img_path)
        except zlib.error:
            self.fail("Exception raised from get_image_dimensions().")
        self.assertEqual(size, Image.open(img_path).size)
class ContentFileTestCase(unittest.TestCase):
    """ContentFile construction, naming, and bytes/unicode handling."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def test_content_file_default_name(self):
        self.assertEqual(ContentFile(b"content").name, None)

    def test_content_file_custom_name(self):
        """
        Test that the constructor of ContentFile accepts 'name' (#16590).
        """
        name = "I can have a name too!"
        self.assertEqual(ContentFile(b"content", name=name).name, name)

    def test_content_file_input_type(self):
        """
        Test that ContentFile can accept both bytes and unicode and that the
        retrieved content is of the same type.
        """
        self.assertTrue(isinstance(ContentFile(b"content").read(), bytes))
        if six.PY3:
            self.assertTrue(isinstance(ContentFile("español").read(), six.text_type))
        else:
            # On Python 2 a str literal is already bytes.
            self.assertTrue(isinstance(ContentFile("español").read(), bytes))

    def test_content_saving(self):
        """
        Test that ContentFile can be saved correctly with the filesystem
        storage, whether it was initialized with bytes or unicode content.
        """
        self.storage.save('bytes.txt', ContentFile(b"content"))
        self.storage.save('unicode.txt', ContentFile("español"))
class NoNameFileTestCase(unittest.TestCase):
    """
    Other examples of unnamed files may be tempfile.SpooledTemporaryFile or
    urllib.urlopen()
    """
    def test_noname_file_default_name(self):
        self.assertEqual(File(BytesIO(b'A file with no name')).name, None)

    def test_noname_file_get_size(self):
        # Size comes from the underlying buffer (19 bytes) despite no name.
        self.assertEqual(File(BytesIO(b'A file with no name')).size, 19)
class FileLikeObjectTestCase(LiveServerBase):
    """
    Test file-like objects (#15644).
    """
    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(location=self.temp_dir)

    def tearDown(self):
        shutil.rmtree(self.temp_dir)

    def test_urllib2_urlopen(self):
        """
        Test the File storage API with a file like object coming from
        urllib2.urlopen()
        """
        file_like_object = self.urlopen('/example_view/')
        f = File(file_like_object)
        stored_filename = self.storage.save("remote_file.html", f)
        # Stored copy must be byte-identical to a fresh fetch of the view.
        stored_file = self.storage.open(stored_filename)
        remote_file = self.urlopen('/example_view/')
        self.assertEqual(stored_file.read(), remote_file.read())
|
agent.py | import multiprocessing
# disable bundler
from utils.replay_memory import Memory
from utils.torch import *
# enable bundler
import math
import time
def collect_samples(pid, queue, env, policy, custom_reward,
                    mean_action, render, running_state, min_batch_size):
    """Roll out ``policy`` in ``env`` until at least ``min_batch_size`` steps.

    Pushes transitions into a Memory and accumulates reward statistics.
    If ``queue`` is given (worker mode) the result is put on the queue as
    ``[pid, memory, log]``; otherwise (main process) it is returned.
    """
    # NOTE(review): torch.randn(pid) draws `pid` samples, apparently to
    # desynchronize each worker's RNG stream — it is not proper seeding.
    # Confirm whether torch.manual_seed(pid) was intended.
    torch.randn(pid)
    log = dict()
    memory = Memory()
    num_steps = 0
    total_reward = 0
    min_reward = 1e6
    max_reward = -1e6
    total_c_reward = 0
    min_c_reward = 1e6
    max_c_reward = -1e6
    num_episodes = 0

    while num_steps < min_batch_size:
        state = env.reset()
        if running_state is not None:
            # running_state normalizes observations; applied to every state.
            state = running_state(state)
        reward_episode = 0

        # Hard cap of 10000 steps per episode.
        for t in range(10000):
            state_var = tensor(state).unsqueeze(0)
            with torch.no_grad():
                if mean_action:
                    # Deterministic: use the policy's mean output.
                    action = policy(state_var)[0][0].numpy()
                else:
                    action = policy.select_action(state_var)[0].numpy()
            action = int(action) if policy.is_disc_action else action.astype(np.float64)
            next_state, reward, done, _ = env.step(action)
            reward_episode += reward
            if running_state is not None:
                next_state = running_state(next_state)

            if custom_reward is not None:
                # Replace the env reward with the custom one (e.g. for GAIL);
                # episode stats above still use the env reward.
                reward = custom_reward(state, action)
                total_c_reward += reward
                min_c_reward = min(min_c_reward, reward)
                max_c_reward = max(max_c_reward, reward)

            # mask = 0 marks terminal transitions for return computation.
            mask = 0 if done else 1

            memory.push(state, action, mask, next_state, reward)

            if render:
                env.render()
            if done:
                break

            state = next_state

        # log stats
        num_steps += (t + 1)
        num_episodes += 1
        total_reward += reward_episode
        min_reward = min(min_reward, reward_episode)
        max_reward = max(max_reward, reward_episode)

    log['num_steps'] = num_steps
    log['num_episodes'] = num_episodes
    log['total_reward'] = total_reward
    log['avg_reward'] = total_reward / num_episodes
    log['max_reward'] = max_reward
    log['min_reward'] = min_reward
    if custom_reward is not None:
        log['total_c_reward'] = total_c_reward
        log['avg_c_reward'] = total_c_reward / num_steps
        log['max_c_reward'] = max_c_reward
        log['min_c_reward'] = min_c_reward

    if queue is not None:
        queue.put([pid, memory, log])
    else:
        return memory, log
def merge_log(log_list):
    """Aggregate per-worker rollout logs into a single summary dict.

    Sums counters, takes global min/max of episode rewards, and recomputes
    the averages from the merged totals. The custom-reward statistics are
    merged only when the first log carries them.
    """
    merged = {
        'total_reward': sum(entry['total_reward'] for entry in log_list),
        'num_episodes': sum(entry['num_episodes'] for entry in log_list),
        'num_steps': sum(entry['num_steps'] for entry in log_list),
        'max_reward': max(entry['max_reward'] for entry in log_list),
        'min_reward': min(entry['min_reward'] for entry in log_list),
    }
    merged['avg_reward'] = merged['total_reward'] / merged['num_episodes']
    if 'total_c_reward' in log_list[0]:
        merged['total_c_reward'] = sum(entry['total_c_reward'] for entry in log_list)
        merged['avg_c_reward'] = merged['total_c_reward'] / merged['num_steps']
        merged['max_c_reward'] = max(entry['max_c_reward'] for entry in log_list)
        merged['min_c_reward'] = min(entry['min_c_reward'] for entry in log_list)
    return merged
class Agent:
    """Rollout collector: runs a policy in an environment to gather batches.

    When num_threads > 1, extra worker processes each run collect_samples()
    and report their results back through a multiprocessing queue.
    """

    def __init__(self, env, policy, device, custom_reward=None,
                 mean_action=False, render=False, running_state=None, num_threads=1):
        self.env = env
        self.policy = policy
        self.device = device                  # device the policy lives on between collections
        self.custom_reward = custom_reward    # optional reward-shaping fn(state, action)
        self.mean_action = mean_action        # use the policy's mean output instead of sampling
        self.running_state = running_state    # optional state normalizer
        self.render = render
        self.num_threads = num_threads

    def collect_samples(self, min_batch_size):
        """Gather roughly min_batch_size transitions; return (batch, log).

        The policy is moved to the CPU so worker processes can run it, then
        moved back to self.device before returning.
        """
        t_start = time.time()
        to_device(torch.device('cpu'), self.policy)
        # Split the requested batch evenly across this process and the workers.
        thread_batch_size = int(math.floor(min_batch_size / self.num_threads))
        queue = multiprocessing.Queue()
        workers = []
        for i in range(self.num_threads-1):
            # Workers never render; pid i+1 lets results be slotted back in order.
            worker_args = (i+1, queue, self.env, self.policy, self.custom_reward, self.mean_action,
                           False, self.running_state, thread_batch_size)
            workers.append(multiprocessing.Process(target=collect_samples, args=worker_args))
        for worker in workers:
            worker.start()
        # pid 0 (this process) collects its own share while the workers run.
        memory, log = collect_samples(0, None, self.env, self.policy, self.custom_reward, self.mean_action,
                                      self.render, self.running_state, thread_batch_size)
        worker_logs = [None] * len(workers)
        worker_memories = [None] * len(workers)
        for _ in workers:
            # Blocks until each worker has pushed [pid, memory, log].
            pid, worker_memory, worker_log = queue.get()
            worker_memories[pid - 1] = worker_memory
            worker_logs[pid - 1] = worker_log
        for worker_memory in worker_memories:
            memory.append(worker_memory)
        batch = memory.sample()
        if self.num_threads > 1:
            log_list = [log] + worker_logs
            log = merge_log(log_list)
        to_device(self.device, self.policy)
        t_end = time.time()
        log['sample_time'] = t_end - t_start
        # Per-dimension action statistics over the whole collected batch.
        log['action_mean'] = np.mean(np.vstack(batch.action), axis=0)
        log['action_min'] = np.min(np.vstack(batch.action), axis=0)
        log['action_max'] = np.max(np.vstack(batch.action), axis=0)
        return batch, log

    def evaluate(self, num_episodes = 100, max_steps_per_episode = 10000):
        """Run the policy for num_episodes and return the average episode reward.

        NOTE(review): the per-step CSV text accumulated in `s` is built but
        never returned or written anywhere — confirm whether it is still needed.
        """
        env = self.env
        total_reward = 0
        s = 'episode, step, reward\n'
        for episode in range(num_episodes):
            state = env.reset()
            done = False
            steps = 0
            while not done and steps < max_steps_per_episode:
                state_var = tensor(state).unsqueeze(0)
                with torch.no_grad():
                    if self.mean_action:
                        action = self.policy(state_var)[0][0].numpy()
                    else:
                        action = self.policy.select_action(state_var)[0].numpy()
                # Discrete policies emit an int action; continuous ones a float64 array.
                action = int(action) if self.policy.is_disc_action else action.astype(np.float64)
                state, reward, done, _ = env.step(action)
                total_reward += reward
                if reward > 0:
                    s += str(episode) + ',' + str(steps) + ',' + str(reward) + '\n'
                steps += 1
        return total_reward / num_episodes
|
window.py | import tkinter as tk
from tkinter import scrolledtext
from tkinter import font
from tkinter import ttk
import sys
import threading
import queue
import signal
WINDOW_INTERVAL = 40 # Time in ms between screen refresh of text
def addToQueue(q):
    """Forward stdin lines to *q*; an empty string is the EOF/quit sentinel."""
    keep_reading = True
    while keep_reading:
        try:
            text = sys.stdin.readline()
        except:
            # Reading failed: push the quit sentinel and stop.
            q.put("")
            keep_reading = False
        else:
            q.put(text)
            if text == "":
                keep_reading = False
    print("Exiting reader thread.")
def destroyWindow():
    """Tear down the Tk root window (this ends mainloop) and log the shutdown."""
    root.destroy()
    print("Post window closed.")
def processText(q):
    """Drain pending lines from *q* into the textbox, then reschedule.

    An empty string in the queue is the quit sentinel: once the queue is
    drained, schedule window destruction instead of another poll.
    """
    saw_sentinel = False
    try:
        while True:
            chunk = q.get_nowait()
            if chunk == "":
                saw_sentinel = True
            textbox.insert(tk.END, chunk)
            textbox.see("end")
    except queue.Empty:
        if saw_sentinel:
            root.after(WINDOW_INTERVAL, destroyWindow)
        else:
            root.after(WINDOW_INTERVAL, processText, q)
def openingLine(q):
    """Insert the banner line and kick off the periodic queue-polling loop."""
    textbox.insert(tk.END, "Post Window for SuperCollider:\n")
    root.after(WINDOW_INTERVAL, processText, q)
def zoomIn():
    """Grow the shared post-window font by 2 points."""
    bigger = font.cget("size") + 2
    font.configure(size=bigger)
def zoomOut():
    """Shrink the shared post-window font by 2 points."""
    smaller = font.cget("size") - 2
    font.configure(size=smaller)
# Main code to run
q = queue.Queue()
# Reader thread forwards stdin lines into q (see addToQueue above).
t = threading.Thread(target=addToQueue, args=(q,))
t.daemon = True  # don't keep the process alive if the GUI exits first
root = tk.Tk()
root.geometry("600x500") # initial size but can be resizable
root.resizable(True, True)
# Font buttons
# NOTE: this rebinds the name 'font', shadowing the tkinter.font module
# imported above; only the Font object is used from here on.
font = font.Font(family="Courier", size=14)
button_frame = tk.Frame(root)
button_frame.pack(fill="x")
# Top label and buttons
label=tk.Label(button_frame, text="SC Post Window")
zin = tk.Button(button_frame, text="Zoom In", command=zoomIn)
zout = tk.Button(button_frame, text="Zoom Out", command=zoomOut)
label.pack(side="left")
zin.pack(side="left")
zout.pack(side="left")
# Horizontal Separator
separator = ttk.Separator(root, orient="horizontal")
separator.pack(side="top", fill="x")
# Textbox
# Adding the width=1 and height=1 is necessary so that resizing font
# does not change the window size
textbox = scrolledtext.ScrolledText(root, width=1, height=1, font=font)
textbox.pack(expand=True, fill="both")
root.after(0, openingLine, q)
# Install signal handler
def sig_handler(signum, frame):
    # SIGINT/SIGTERM close the window cleanly, which ends mainloop below.
    destroyWindow()
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
# Run display
print("Starting worker thread to process stdin and start main graphics loop...")
t.start()
root.mainloop()
print("Exiting...")
|
msq_config_sender.py | import paho.mqtt.client as mqtt
import threading
import json
from enum import Enum
class UpdateType(Enum):
    """Which kind of data the robot should publish during its update cycle."""
    EMOTIONS_ONLY = 0
    RAW_ONLY = 1
    ALL = 2
class EnumEncoder(json.JSONEncoder):
    """JSON encoder that serializes UpdateType members as their numeric
    value rendered as a string (e.g. UpdateType.ALL -> "2")."""

    def default(self, obj):
        # isinstance (rather than the exact `type(obj) is UpdateType` check)
        # is the idiomatic type test and also covers any future subclassing.
        if isinstance(obj, UpdateType):
            return str(obj.value)
        return json.JSONEncoder.default(self, obj)
class ConfigSender:
    """Publishes runtime configuration messages to the robot over MQTT.

    Broker/topic parameters are read from 'communication.json' in the current
    working directory; the MQTT network loop runs on a background thread.
    """

    def __init__(self):
        try:
            with open('communication.json', 'r') as json_file:
                config = json.load(json_file)
            # Full publish topic = base topic + configuration suffix.
            self.config_topic = config["BASE_TOPIC"] + config["CONFIGURATION_TOPIC_SUFFIX"]
            self.client = mqtt.Client("jaRobot")
            self.client.connect(config["BROKER_IP_OR_NAME"], int(config["BROKER_PORT"]))
            # loop_forever() blocks, so it gets its own thread.
            threading.Thread(target=lambda: self.client.loop_forever()).start()
        except Exception as es:
            # Best-effort init: failures are only printed, leaving the sender unusable.
            print("init ", str(es))

    def send_config(self, update_type: UpdateType = None,
                    update_cycle_on: bool = None,
                    tick_length: int = None):
        """Publish a config message containing only the parameters that were given.

        Enum values are serialized via EnumEncoder; messages go out with QoS 2.
        """
        try:
            config = {}
            if update_cycle_on is not None:
                config['UPDATE_CYCLE_ON'] = update_cycle_on
            if update_type is not None:
                config['UPDATE_TYPE'] = update_type
            if tick_length is not None:
                config['TICK_LENGTH'] = tick_length
            self.client.publish(self.config_topic, json.dumps(config, cls=EnumEncoder), qos=2)
            print("sent config message " + json.dumps(config, cls=EnumEncoder))
            print("topic: " + self.config_topic)
        except Exception as es:
            print("send_config error ", str(es))
|
kb_eukrepServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from kb_eukrep.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
# Environment variable names used to locate the service configuration.
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Config key holding the auth service URL.
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Path of the deployment config file from KB_DEPLOYMENT_CONFIG, or None."""
    return environ.get(DEPLOY)
def get_service_name():
    """Service name from the KB_SERVICE_NAME environment variable, or None."""
    return environ.get(SERVICE)
def get_config():
    """Parse the deployment config file into a plain dict, or None if unset.

    Reads the section named after the service (default 'kb_eukrep').
    """
    cfg_path = get_config_file()
    if not cfg_path:
        return None
    parser = ConfigParser()
    parser.read(cfg_path)
    section = get_service_name() or 'kb_eukrep'
    return {name: value for name, value in parser.items(section)}
# Load the service configuration before importing the implementation: the
# impl object is constructed at import time with this config.
config = get_config()
from kb_eukrep.kb_eukrepImpl import kb_eukrep  # noqa @IgnorePep8
impl_kb_eukrep = kb_eukrep(config)
class JSONObjectEncoder(json.JSONEncoder):
    """Encoder that also handles sets, frozensets and objects with toJSONable()."""

    def default(self, obj):
        # Sets of either flavor serialize as JSON arrays.
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        # Objects may opt in to JSON serialization via a toJSONable() hook.
        if hasattr(obj, 'toJSONable'):
            return obj.toJSONable()
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSONRPCService variant that passes a per-call context object (ctx)
    as the implicit first argument of every registered service method."""

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        ctx -- per-call context object handed to the service method
        jsondata -- remote method call in jsonrpc format (already deserialized)
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)
        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns its value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments? The "- 1" accounts for the
                # ctx argument, which is supplied here rather than by the caller.
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')
                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError
                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method; wrap it in a JSON-RPC
            # server error carrying the traceback.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if len(e.args) == 1:
                newerr.data = repr(e.args[0])
            else:
                newerr.data = repr(e.args)
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        # try:
        #     rdata = json.loads(jsondata)
        # except ValueError:
        #     raise ParseError
        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)
            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch: fill all requests first, then dispatch them in order.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)
            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)
            if responds:
                return responds
            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        # Optional type validation declared when the method was registered.
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])
        result = self._call_method(ctx, request)
        # Do not respond to notifications.
        if request['id'] is None:
            return None
        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']
        return respond
class MethodContext(dict):
    """Per-call context: a dict of call metadata plus logging helpers.

    Keys are populated by the server before dispatch; service methods
    receive this object as their first argument.
    """

    def __init__(self, logger):
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Levels (numeric or symbolic) that log_debug() passes through unchanged.
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger

    def log_err(self, message):
        """Log *message* at error level."""
        self._log(log.ERR, message)

    def log_info(self, message):
        """Log *message* at info level."""
        self._log(log.INFO, message)

    def log_debug(self, message, level=1):
        """Log *message* at a debug level (1-3, or a recognized symbolic level)."""
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6  # map 1-3 onto levels 7-9 used by _debug_levels
        self._log(level, message)

    def set_log_level(self, level):
        self._logger.set_log_level(level)

    def get_log_level(self):
        return self._logger.get_log_level()

    def clear_log_level(self):
        self._logger.clear_user_log_level()

    def _log(self, level, message):
        # Funnel all logging through the shared logger with call metadata attached.
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])

    def provenance(self):
        """Return provenance for this call.

        If SDK_CALLBACK_URL is set, query the callback server for it;
        otherwise fall back to the locally stored 'provenance' entry.
        """
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                # A JSON body may carry a structured error; otherwise wrap the text.
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    '''
    The call returned an error. Fields:
    name - the name of the error.
    code - the error code.
    message - a human readable error message.
    data - the server side stacktrace.
    '''

    def __init__(self, name, code, message, data=None, error=None):
        # Fix: pass the subclass (not Exception) to super() so the MRO is
        # walked correctly; the old super(Exception, self) form skipped
        # Exception.__init__ entirely.
        super(ServerError, self).__init__(message)
        self.name = name
        self.code = code
        self.message = message if message else ''
        # data = JSON RPC 2.0, error = 1.1
        self.data = data or error or ''

    def __str__(self):
        return self.name + ': ' + str(self.code) + '. ' + self.message + \
            '\n' + self.data
def getIPAddress(environ):
    """Best-guess client IP: X-Forwarded-For, then X-Real-IP, then REMOTE_ADDR.

    Proxy headers are honored unless the service config explicitly sets
    dont_trust_x_ip_headers to 'true'.
    """
    forwarded_for = environ.get('HTTP_X_FORWARDED_FOR')
    real_ip = environ.get('HTTP_X_REAL_IP')
    trust_x_headers = config is None or \
        config.get('dont_trust_x_ip_headers') != 'true'
    if trust_x_headers:
        if forwarded_for:
            # First entry of the comma-separated chain is the originating client.
            return forwarded_for.split(',')[0].strip()
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
    """WSGI entry point for the kb_eukrep JSON-RPC service."""
    # Wrap the wsgi handler in a class definition so that we can
    # do some initialization and avoid regenerating stuff over
    # and over

    def logcallback(self):
        # Keep the server log pointed at the (possibly rotated) user log file.
        self.serverlog.set_log_file(self.userlog.get_log_file())

    def log(self, level, context, message):
        """Log *message* with the call metadata carried by *context*."""
        self.serverlog.log_message(level, message, context['client_ip'],
                                   context['user_id'], context['module'],
                                   context['method'], context['call_id'])

    def __init__(self):
        submod = get_service_name() or 'kb_eukrep'
        self.userlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, changecallback=self.logcallback,
            config=get_config_file())
        self.serverlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, logfile=self.userlog.get_log_file())
        self.serverlog.set_log_level(6)
        self.rpc_service = JSONRPCServiceCustom()
        # Per-method auth requirement: 'required', 'optional' or (default) 'none'.
        self.method_authentication = dict()
        self.rpc_service.add(impl_kb_eukrep.run_kb_eukrep,
                             name='kb_eukrep.run_kb_eukrep',
                             types=[dict])
        self.method_authentication['kb_eukrep.run_kb_eukrep'] = 'required'  # noqa
        self.rpc_service.add(impl_kb_eukrep.status,
                             name='kb_eukrep.status',
                             types=[dict])
        authurl = config.get(AUTH) if config else None
        self.auth_client = _KBaseAuth(authurl)

    def __call__(self, environ, start_response):
        """Handle one WSGI request: parse, authenticate, dispatch, respond."""
        # Context object, equivalent to the perl impl CallContext
        ctx = MethodContext(self.userlog)
        ctx['client_ip'] = getIPAddress(environ)
        status = '500 Internal Server Error'
        try:
            body_size = int(environ.get('CONTENT_LENGTH', 0))
        except (ValueError):
            body_size = 0
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            # we basically do nothing and just return headers (CORS preflight)
            status = '200 OK'
            rpc_result = ""
        else:
            request_body = environ['wsgi.input'].read(body_size)
            try:
                req = json.loads(request_body)
            except ValueError as ve:
                err = {'error': {'code': -32700,
                                 'name': "Parse error",
                                 'message': str(ve),
                                 }
                       }
                rpc_result = self.process_error(err, ctx, {'version': '1.1'})
            else:
                ctx['module'], ctx['method'] = req['method'].split('.')
                ctx['call_id'] = req['id']
                ctx['rpc_context'] = {
                    'call_stack': [{'time': self.now_in_utc(),
                                    'method': req['method']}
                                   ]
                }
                prov_action = {'service': ctx['module'],
                               'method': ctx['method'],
                               'method_params': req['params']
                               }
                ctx['provenance'] = [prov_action]
                try:
                    token = environ.get('HTTP_AUTHORIZATION')
                    # parse out the method being requested and check if it
                    # has an authentication requirement
                    method_name = req['method']
                    auth_req = self.method_authentication.get(
                        method_name, 'none')
                    if auth_req != 'none':
                        if token is None and auth_req == 'required':
                            err = JSONServerError()
                            err.data = (
                                'Authentication required for ' +
                                'kb_eukrep ' +
                                'but no authentication header was passed')
                            raise err
                        elif token is None and auth_req == 'optional':
                            pass
                        else:
                            try:
                                user = self.auth_client.get_user(token)
                                ctx['user_id'] = user
                                ctx['authenticated'] = 1
                                ctx['token'] = token
                            except Exception as e:
                                # Invalid token is fatal only for 'required' methods.
                                if auth_req == 'required':
                                    err = JSONServerError()
                                    err.data = \
                                        "Token validation failed: %s" % e
                                    raise err
                    if (environ.get('HTTP_X_FORWARDED_FOR')):
                        self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
                                 environ.get('HTTP_X_FORWARDED_FOR'))
                    self.log(log.INFO, ctx, 'start method')
                    rpc_result = self.rpc_service.call(ctx, req)
                    self.log(log.INFO, ctx, 'end method')
                    status = '200 OK'
                except JSONRPCError as jre:
                    err = {'error': {'code': jre.code,
                                     'name': jre.message,
                                     'message': jre.data
                                     }
                           }
                    trace = jre.trace if hasattr(jre, 'trace') else None
                    rpc_result = self.process_error(err, ctx, req, trace)
                except Exception:
                    err = {'error': {'code': 0,
                                     'name': 'Unexpected Server Error',
                                     'message': 'An unexpected server error ' +
                                                'occurred',
                                     }
                           }
                    rpc_result = self.process_error(err, ctx, req,
                                                    traceback.format_exc())
        # print('Request method was %s\n' % environ['REQUEST_METHOD'])
        # print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
        # print('Request body was: %s' % request_body)
        # print('Result from the method call is:\n%s\n' % \
        #    pprint.pformat(rpc_result))
        if rpc_result:
            response_body = rpc_result
        else:
            response_body = ''
        response_headers = [
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', environ.get(
                'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
            ('content-type', 'application/json'),
            ('content-length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body.encode('utf8')]

    def process_error(self, error, context, request, trace=None):
        """Log *trace* and shape *error* to match the request's RPC version."""
        if trace:
            self.log(log.ERR, context, trace.split('\n')[0:-1])
        if 'id' in request:
            error['id'] = request['id']
        if 'version' in request:
            error['version'] = request['version']
            e = error['error'].get('error')
            if not e:
                error['error']['error'] = trace
        elif 'jsonrpc' in request:
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            error['version'] = '1.0'
            error['error']['error'] = trace
        return json.dumps(error)

    def now_in_utc(self):
        """Local time in ISO format with the local UTC offset appended."""
        # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
        dtnow = datetime.datetime.now()
        dtutcnow = datetime.datetime.utcnow()
        delta = dtnow - dtutcnow
        hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
                        60)
        return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
# Single module-level WSGI application instance used by uwsgi and the
# standalone server below.
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print("Monkeypatching std libraries for async")
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass
# Handle to the server subprocess when start_server(newprocess=True) is used.
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow returning of the port number.'''
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    httpd = make_server(host, port, application)
    port = httpd.server_address[1]  # actual port when 0 (system-assigned) was requested
    print("Listening on port %s" % port)
    if newprocess:
        _proc = Process(target=httpd.serve_forever)
        _proc.daemon = True  # don't block interpreter exit
        _proc.start()
    else:
        # Blocks until interrupted.
        httpd.serve_forever()
    return port
def stop_server():
    """Terminate the server started with start_server(newprocess=True).

    NOTE(review): assumes a server is running; if _proc is None this raises
    AttributeError — confirm whether a guard is wanted.
    """
    global _proc
    _proc.terminate()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Run a single JSON-RPC call from a file and write the response to a file.

    Returns 0 on success or 500 if the response contains an 'error' field
    (used as the process exit code by __main__).
    """
    exit_code = 0
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # Fill in defaults expected by the RPC layer.
    if 'version' not in req:
        req['version'] = '1.1'
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    prov_action = {'service': ctx['module'], 'method': ctx['method'],
                   'method_params': req['params']}
    ctx['provenance'] = [prov_action]
    resp = None
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        trace = jre.trace if hasattr(jre, 'trace') else None
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': trace}
                }
    except Exception:
        trace = traceback.format_exc()
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': trace}
                }
    if 'error' in resp:
        exit_code = 500
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # CLI mode: <input_json_file> <output_file> [token or token-file] runs a
    # single async call; otherwise start the standalone HTTP server.
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            # Third argument may be a file containing the token, or the token itself.
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
    # print("Listening on port %s" % port)
    # httpd = make_server( host, port, application)
    #
    # httpd.serve_forever()
|
imaplib2.py | #!/usr/bin/env python
"""Threaded IMAP4 client.
Based on RFC 3501 and original imaplib module.
Public classes: IMAP4
IMAP4_SSL
IMAP4_stream
Public functions: Internaldate2Time
ParseFlags
Time2Internaldate
"""
__all__ = ("IMAP4", "IMAP4_SSL", "IMAP4_stream",
"Internaldate2Time", "ParseFlags", "Time2Internaldate")
__version__ = "2.33"
__release__ = "2"
__revision__ = "33"
__credits__ = """
Authentication code contributed by Donn Cave <donn@u.washington.edu> June 1998.
String method conversion by ESR, February 2001.
GET/SETACL contributed by Anthony Baxter <anthony@interlink.com.au> April 2001.
IMAP4_SSL contributed by Tino Lange <Tino.Lange@isg.de> March 2002.
GET/SETQUOTA contributed by Andreas Zeidler <az@kreativkombinat.de> June 2002.
PROXYAUTH contributed by Rick Holbert <holbert.13@osu.edu> November 2002.
IDLE via threads suggested by Philippe Normand <phil@respyre.org> January 2005.
GET/SETANNOTATION contributed by Tomas Lindroos <skitta@abo.fi> June 2005.
COMPRESS/DEFLATE contributed by Bron Gondwana <brong@brong.net> May 2009.
STARTTLS from Jython's imaplib by Alan Kennedy.
ID contributed by Dave Baggett <dave@baggett.org> November 2009.
Improved untagged responses handling suggested by Dave Baggett <dave@baggett.org> November 2009.
Improved thread naming, and 0 read detection contributed by Grant Edwards <grant.b.edwards@gmail.com> June 2010.
Improved timeout handling contributed by Ivan Vovnenko <ivovnenko@gmail.com> October 2010.
Timeout handling further improved by Ethan Glasser-Camp <glasse@cs.rpi.edu> December 2010.
Time2Internaldate() patch to match RFC2060 specification of English month names from bugs.python.org/issue11024 March 2011.
starttls() bug fixed with the help of Sebastian Spaeth <sebastian@sspaeth.de> April 2011.
Threads now set the "daemon" flag (suggested by offlineimap-project) April 2011.
Single quoting introduced with the help of Vladimir Marek <vladimir.marek@oracle.com> August 2011."""
__author__ = "Piers Lauder <piers@janeelix.com>"
__URL__ = "http://imaplib2.sourceforge.net"
__license__ = "Python License"
import binascii, errno, os, Queue, random, re, select, socket, sys, time, threading, zlib
# Keep a module-level alias so code can refer to the select module even if
# the name 'select' is rebound locally.
select_module = select

# Globals
CRLF = '\r\n'
Debug = None                                    # Backward compatibility
IMAP4_PORT = 143
IMAP4_SSL_PORT = 993
IDLE_TIMEOUT_RESPONSE = '* IDLE TIMEOUT\r\n'
IDLE_TIMEOUT = 60*29                            # Don't stay in IDLE state longer
READ_POLL_TIMEOUT = 30                          # Without this timeout interrupted network connections can hang reader
READ_SIZE = 32768                               # Consume all available in socket
DFLT_DEBUG_BUF_LVL = 3                          # Level above which the logging output goes directly to stderr

AllowedVersions = ('IMAP4REV1', 'IMAP4')        # Most recent first

# Commands
# Indices into the per-command value tuples in the Commands table below.
CMD_VAL_STATES = 0
CMD_VAL_ASYNC = 1
NONAUTH, AUTH, SELECTED, LOGOUT = 'NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'

Commands = {
        # name            valid states             asynchronous
        'APPEND':       ((AUTH, SELECTED),            False),
        'AUTHENTICATE': ((NONAUTH,),                  False),
        'CAPABILITY':   ((NONAUTH, AUTH, SELECTED),   True),
        'CHECK':        ((SELECTED,),                 True),
        'CLOSE':        ((SELECTED,),                 False),
        'COMPRESS':     ((AUTH,),                     False),
        'COPY':         ((SELECTED,),                 True),
        'CREATE':       ((AUTH, SELECTED),            True),
        'DELETE':       ((AUTH, SELECTED),            True),
        'DELETEACL':    ((AUTH, SELECTED),            True),
        'EXAMINE':      ((AUTH, SELECTED),            False),
        'EXPUNGE':      ((SELECTED,),                 True),
        'FETCH':        ((SELECTED,),                 True),
        'GETACL':       ((AUTH, SELECTED),            True),
        'GETANNOTATION':((AUTH, SELECTED),            True),
        'GETQUOTA':     ((AUTH, SELECTED),            True),
        'GETQUOTAROOT': ((AUTH, SELECTED),            True),
        'ID':           ((NONAUTH, AUTH, LOGOUT, SELECTED),   True),
        'IDLE':         ((SELECTED,),                 False),
        'LIST':         ((AUTH, SELECTED),            True),
        'LOGIN':        ((NONAUTH,),                  False),
        'LOGOUT':       ((NONAUTH, AUTH, LOGOUT, SELECTED),   False),
        'LSUB':         ((AUTH, SELECTED),            True),
        'MYRIGHTS':     ((AUTH, SELECTED),            True),
        'NAMESPACE':    ((AUTH, SELECTED),            True),
        'NOOP':         ((NONAUTH, AUTH, SELECTED),   True),
        'PARTIAL':      ((SELECTED,),                 True),
        'PROXYAUTH':    ((AUTH,),                     False),
        'RENAME':       ((AUTH, SELECTED),            True),
        'SEARCH':       ((SELECTED,),                 True),
        'SELECT':       ((AUTH, SELECTED),            False),
        'SETACL':       ((AUTH, SELECTED),            False),
        'SETANNOTATION':((AUTH, SELECTED),            True),
        'SETQUOTA':     ((AUTH, SELECTED),            False),
        'SORT':         ((SELECTED,),                 True),
        'STARTTLS':     ((NONAUTH,),                  False),
        'STATUS':       ((AUTH, SELECTED),            True),
        'STORE':        ((SELECTED,),                 True),
        'SUBSCRIBE':    ((AUTH, SELECTED),            False),
        'THREAD':       ((SELECTED,),                 True),
        'UID':          ((SELECTED,),                 True),
        'UNSUBSCRIBE':  ((AUTH, SELECTED),            False),
        }

# UID sub-commands whose untagged responses are delivered unchanged.
UID_direct = ('SEARCH', 'SORT', 'THREAD')
def Int2AP(num):
    """string = Int2AP(num)

    Return 'num' converted to a string using characters from the set 'A'..'P'
    (base-16 digits mapped onto letters; 0 yields the empty string).
    """
    alphabet = 'ABCDEFGHIJKLMNOP'
    digits = []
    remaining = int(abs(num))
    while remaining:
        remaining, digit = divmod(remaining, 16)
        digits.append(alphabet[digit])
    digits.reverse()
    return ''.join(digits)
class Request(object):

    """Private class to represent a request awaiting response."""

    def __init__(self, parent, name=None, callback=None, cb_arg=None, cb_self=False):
        self.parent = parent
        self.name = name
        self.callback = callback    # Function called to process result
        if not cb_self:
            self.callback_arg = cb_arg    # Optional arg passed to "callback"
        else:
            self.callback_arg = (self, cb_arg)    # Self reference required in callback arg
        # Unique command tag: session prefix + monotonically increasing number.
        self.tag = '%s%s' % (parent.tagpre, parent.tagnum)
        parent.tagnum += 1
        self.ready = threading.Event()
        self.response = None
        self.aborted = None
        self.data = None

    def abort(self, typ, val):
        """Record the abort (exception type, value) and deliver a None response."""
        self.aborted = (typ, val)
        self.deliver(None)

    def get_response(self, exc_fmt=None):
        """Block until the response arrives; raise if the request was aborted.

        Clears the callback so delivery falls through to the Event path.
        """
        self.callback = None
        if __debug__: self.parent._log(3, '%s:%s.ready.wait' % (self.name, self.tag))
        self.ready.wait()
        if self.aborted is not None:
            typ, val = self.aborted
            if exc_fmt is None:
                exc_fmt = '%s - %%s' % typ
            raise typ(exc_fmt % str(val))
        return self.response

    def deliver(self, response):
        """Hand *response* to the callback, or store it and wake any waiter."""
        if self.callback is not None:
            self.callback((response, self.callback_arg, self.aborted))
            return
        # Store the response BEFORE setting the event, so a waiter in
        # get_response() always observes it.
        self.response = response
        self.ready.set()
        if __debug__: self.parent._log(3, '%s:%s.ready.set' % (self.name, self.tag))
class IMAP4(object):
"""Threaded IMAP4 client class.
Instantiate with:
IMAP4(host=None, port=None, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None)
host - host's name (default: localhost);
port - port number (default: standard IMAP4 port);
debug - debug level (default: 0 - no debug);
debug_file - debug stream (default: sys.stderr);
identifier - thread identifier prefix (default: host);
timeout - timeout in seconds when expecting a command response (default: no timeout),
debug_buf_lvl - debug level at which buffering is turned off.
All IMAP4rev1 commands are supported by methods of the same name.
Each command returns a tuple: (type, [data, ...]) where 'type'
is usually 'OK' or 'NO', and 'data' is either the text from the
tagged response, or untagged results from command. Each 'data' is
either a string, or a tuple. If a tuple, then the first part is the
header of the response, and the second part contains the data (ie:
'literal' value).
Errors raise the exception class <instance>.error("<reason>").
IMAP4 server errors raise <instance>.abort("<reason>"), which is
a sub-class of 'error'. Mailbox status changes from READ-WRITE to
READ-ONLY raise the exception class <instance>.readonly("<reason>"),
which is a sub-class of 'abort'.
"error" exceptions imply a program error.
"abort" exceptions imply the connection should be reset, and
the command re-tried.
"readonly" exceptions imply the command should be re-tried.
All commands take two optional named arguments:
'callback' and 'cb_arg'
If 'callback' is provided then the command is asynchronous, so after
the command is queued for transmission, the call returns immediately
with the tuple (None, None).
The result will be posted by invoking "callback" with one arg, a tuple:
callback((result, cb_arg, None))
or, if there was a problem:
callback((None, cb_arg, (exception class, reason)))
Otherwise the command is synchronous (waits for result). But note
that state-changing commands will both block until previous commands
have completed, and block subsequent commands until they have finished.
All (non-callback) arguments to commands are converted to strings,
except for AUTHENTICATE, and the last argument to APPEND which is
passed as an IMAP4 literal. If necessary (the string contains any
non-printing characters or white-space and isn't enclosed with
either parentheses or double or single quotes) each string is
quoted. However, the 'password' argument to the LOGIN command is
always quoted. If you want to avoid having an argument string
quoted (eg: the 'flags' argument to STORE) then enclose the string
in parentheses (eg: "(\Deleted)"). If you are using "sequence sets"
containing the wildcard character '*', then enclose the argument
in single quotes: the quotes will be removed and the resulting
string passed unquoted. Note also that you can pass in an argument
with a type that doesn't evaluate to 'basestring' (eg: 'bytearray')
and it will be converted to a string without quoting.
There is one instance variable, 'state', that is useful for tracking
whether the client needs to login to the server. If it has the
value "AUTH" after instantiating the class, then the connection
is pre-authenticated (otherwise it will be "NONAUTH"). Selecting a
mailbox changes the state to be "SELECTED", closing a mailbox changes
back to "AUTH", and once the client has logged out, the state changes
to "LOGOUT" and no further commands may be issued.
Note: to use this module, you must read the RFCs pertaining to the
IMAP4 protocol, as the semantics of the arguments to each IMAP4
command are left to the invoker, not to mention the results. Also,
most IMAP servers implement a sub-set of the commands available here.
Note also that you must call logout() to shut down threads before
discarding an instance.
"""
# Exception hierarchy: callers catch these off the instance (self.error etc.).
class error(Exception): pass    # Logical errors - debug required
class abort(error): pass        # Service errors - close and retry
class readonly(abort): pass     # Mailbox status changed to READ-ONLY

# Pre-compiled protocol-parsing patterns, shared by all instances.
continuation_cre = re.compile(r'\+( (?P<data>.*))?')
literal_cre = re.compile(r'.*{(?P<size>\d+)}$')
mapCRLF_cre = re.compile(r'\r\n|\r|\n')
# Need to quote "atom-specials" :-
#   "(" / ")" / "{" / SP / 0x00 - 0x1f / 0x7f / "%" / "*" / DQUOTE / "\" / "]"
# so match not the inverse set
mustquote_cre = re.compile(r"[^!#$&'+,./0-9:;<=>?@A-Z\[^_`a-z|}~-]")
response_code_cre = re.compile(r'\[(?P<type>[A-Z-]+)( (?P<data>[^\]]*))?\]')
# sequence_set_cre = re.compile(r"^[0-9]+(:([0-9]+|\*))?(,[0-9]+(:([0-9]+|\*))?)*$")
untagged_response_cre = re.compile(r'\* (?P<type>[A-Z-]+)( (?P<data>.*))?')
untagged_status_cre = re.compile(r'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?')
def __init__(self, host=None, port=None, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None):
    """Connect to 'host':'port', start the writer/reader/handler
    threads, read the server welcome and cache the CAPABILITY list.
    On any failure the helper threads are shut down before the
    exception propagates."""
    self.state = NONAUTH            # IMAP4 protocol state
    self.literal = None             # A literal argument to a command
    self.tagged_commands = {}       # Tagged commands awaiting response
    self.untagged_responses = []    # [[typ: [data, ...]], ...]
    self.mailbox = None             # Current mailbox selected
    self.mailboxes = {}             # Untagged responses state per mailbox
    self.is_readonly = False        # READ-ONLY desired state
    self.idle_rqb = None            # Server IDLE Request - see _IdleCont
    self.idle_timeout = None        # Must prod server occasionally
    self._expecting_data = 0        # Expecting message data
    self._accumulated_data = []     # Message data accumulated so far
    self._literal_expected = None   # Message data descriptor
    self.compressor = None          # COMPRESS/DEFLATE if not None
    self.decompressor = None
    # Create unique tag for this session,
    # and compile tagged response matcher.
    self.tagnum = 0
    self.tagpre = Int2AP(random.randint(4096, 65535))
    self.tagre = re.compile(r'(?P<tag>'
                    + self.tagpre
                    + r'\d+) (?P<type>[A-Z]+) (?P<data>.*)')
    if __debug__: self._init_debug(debug, debug_file, debug_buf_lvl)
    self.resp_timeout = timeout     # Timeout waiting for command response
    # Poll interval must not exceed the caller's overall timeout.
    if timeout is not None and timeout < READ_POLL_TIMEOUT:
        self.read_poll_timeout = timeout
    else:
        self.read_poll_timeout = READ_POLL_TIMEOUT
    self.read_size = READ_SIZE
    # Open socket to server.
    self.open(host, port)
    if __debug__:
        if debug:
            self._mesg('connected to %s on port %s' % (self.host, self.port))
    # Threading: 'identifier' (default: host) prefixes log messages.
    if identifier is not None:
        self.identifier = identifier
    else:
        self.identifier = self.host
    if self.identifier:
        self.identifier += ' '
    self.Terminate = self.TerminateReader = False
    self.state_change_free = threading.Event()
    self.state_change_pending = threading.Lock()
    self.commands_lock = threading.Lock()
    self.idle_lock = threading.Lock()
    self.ouq = Queue.Queue(10)      # outbound request queue (bounded)
    self.inq = Queue.Queue()        # inbound response queue
    self.wrth = threading.Thread(target=self._writer)
    self.wrth.setDaemon(True)
    self.wrth.start()
    self.rdth = threading.Thread(target=self._reader)
    self.rdth.setDaemon(True)
    self.rdth.start()
    self.inth = threading.Thread(target=self._handler)
    self.inth.setDaemon(True)
    self.inth.start()
    # Get server welcome message,
    # request and store CAPABILITY response.
    try:
        self.welcome = self._request_push(tag='continuation').get_response('IMAP4 protocol error: %s')[1]
        if self._get_untagged_response('PREAUTH'):
            self.state = AUTH
            if __debug__: self._log(1, 'state => AUTH')
        elif self._get_untagged_response('OK'):
            if __debug__: self._log(1, 'state => NONAUTH')
        else:
            raise self.error('unrecognised server welcome message: %s' % `self.welcome`)
        typ, dat = self.capability()
        if dat == [None]:
            raise self.error('no CAPABILITY response from server')
        self.capabilities = tuple(dat[-1].upper().split())
        if __debug__: self._log(1, 'CAPABILITY: %r' % (self.capabilities,))
        # Pick the first protocol version the server advertises.
        for version in AllowedVersions:
            if not version in self.capabilities:
                continue
            self.PROTOCOL_VERSION = version
            break
        else:
            raise self.error('server not IMAP4 compliant')
    except:
        # Don't leave daemon threads running after a failed setup.
        self._close_threads()
        raise
def __getattr__(self, attr):
    # Provide upper-case aliases for the known IMAP4 command methods.
    if attr not in Commands:
        raise AttributeError("Unknown IMAP4 command: '%s'" % attr)
    return getattr(self, attr.lower())
# Overridable methods
def open(self, host=None, port=None):
    """open(host=None, port=None)
    Establish the connection to "host:port" (default:
    localhost:standard IMAP4 port).  The connection is used by
    the routines: read, send, shutdown, socket."""
    self.port = self._choose_nonull_or_dflt(IMAP4_PORT, port)
    self.host = self._choose_nonull_or_dflt('', host)
    sock = self.open_socket()
    self.read_fd = sock.fileno()
    self.sock = sock
def open_socket(self):
    """open_socket()
    Open socket choosing first address family available.
    Retries an interrupted connect (EINTR) once; raises the last
    socket.error if no address succeeds."""
    msg = (-1, 'could not open socket')
    for res in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        try:
            s = socket.socket(af, socktype, proto)
        except socket.error, msg:
            continue                # try next address family
        try:
            for i in (0, 1):
                try:
                    s.connect(sa)
                    break
                except socket.error, msg:
                    # Retry once if the connect was merely interrupted.
                    if len(msg.args) < 2 or msg.args[0] != errno.EINTR:
                        raise
            else:
                raise socket.error(msg)
        except socket.error, msg:
            s.close()
            continue                # connect failed; try next address
        break
    else:
        # Every candidate address failed; report the last error.
        raise socket.error(msg)
    return s
def ssl_wrap_socket(self):
    """Wrap self.sock in SSL/TLS using self.keyfile/certfile/ca_certs,
    then run the optional cert_verify_cb against the peer certificate.
    Requires the 'ssl' module (no fallback to socket.ssl)."""
    # Allow sending of keep-alive messages - seems to prevent some servers
    # from closing SSL, leading to deadlocks.
    self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    try:
        import ssl
        # Only demand a verified certificate when CA certs were supplied.
        if self.ca_certs is not None:
            cert_reqs = ssl.CERT_REQUIRED
        else:
            cert_reqs = ssl.CERT_NONE
        self.sock = ssl.wrap_socket(self.sock, self.keyfile, self.certfile, ca_certs=self.ca_certs, cert_reqs=cert_reqs)
        ssl_exc = ssl.SSLError
        self.read_fd = self.sock.fileno()   # reader thread polls this fd
    except ImportError:
        # No ssl module, and socket.ssl has no fileno(), and does not allow certificate verification
        raise socket.sslerror("imaplib2 SSL mode does not work without ssl module")
    if self.cert_verify_cb is not None:
        # Callback returns an error string (or None) for the peer cert.
        cert_err = self.cert_verify_cb(self.sock.getpeercert(), self.host)
        if cert_err:
            raise ssl_exc(cert_err)
def start_compressing(self):
    """start_compressing()
    Switch the connection to DEFLATE compression (RFC 4978).
    Window size -15 selects raw DEFLATE streams (RFC 1951)."""
    self.compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
    self.decompressor = zlib.decompressobj(-15)
def read(self, size):
    """data = read(size)
    Fetch at most 'size' bytes from the server, transparently
    inflating the stream when COMPRESS=DEFLATE is active."""
    decomp = self.decompressor
    if decomp is None:
        return self.sock.recv(size)
    # Drain inflater leftovers from the previous call before
    # touching the socket again.
    raw = decomp.unconsumed_tail or self.sock.recv(READ_SIZE)
    return decomp.decompress(raw, size)
def send(self, data):
    """send(data)
    Transmit 'data' to the server, deflating it first when
    compression has been negotiated."""
    comp = self.compressor
    if comp is not None:
        # Z_SYNC_FLUSH so the server can decode what we have so far.
        data = comp.compress(data) + comp.flush(zlib.Z_SYNC_FLUSH)
    self.sock.sendall(data)
def shutdown(self):
    """shutdown()
    Tear down the connection that "open" established."""
    self.sock.close()
def socket(self):
    """socket = socket()
    Expose the socket object connecting us to the IMAP4 server."""
    return self.sock
# Utility methods
def enable_compression(self):
    """enable_compression()
    Ask the server to start compressing the connection.
    Should be called from user of this class after instantiation, as in:
        if 'COMPRESS=DEFLATE' in imapobj.capabilities:
            imapobj.enable_compression()"""
    try:
        typ, dat = self._simple_command('COMPRESS', 'DEFLATE')
        if typ == 'OK':
            # Server agreed: everything after its OK is deflated.
            self.start_compressing()
            if __debug__: self._log(1, 'Enabled COMPRESS=DEFLATE')
    finally:
        # _simple_command acquired the state-change lock; release it.
        self._release_state_change()
def pop_untagged_responses(self):
    """ for typ,data in pop_untagged_responses(): pass
    Generator for any remaining untagged responses.
    Returns and removes untagged responses in order of reception.
    Use at your own risk!"""
    # NOTE(review): the emptiness test runs outside commands_lock;
    # presumably safe because only the consumer pops - confirm.
    while self.untagged_responses:
        self.commands_lock.acquire()
        try:
            yield self.untagged_responses.pop(0)
        finally:
            self.commands_lock.release()
def recent(self, **kw):
    """(typ, [data]) = recent()
    Return any queued 'RECENT' responses; when none are pending,
    prod the server with NOOP for an update.
    'data' is None if there are no new messages, else the RECENT
    responses, most recent last."""
    typ, dat = self._untagged_response(None, [None], 'RECENT')
    if dat == [None]:
        # Nothing cached - ask the server.
        kw['untagged_response'] = 'RECENT'
        return self.noop(**kw)
    return self._deliver_dat(typ, dat, kw)
def response(self, code, **kw):
    """(code, [data]) = response(code)
    Return (and clear) stored data for untagged response 'code',
    or None if none was received."""
    name = code.upper()
    typ, dat = self._untagged_response(code, [None], name)
    return self._deliver_dat(typ, dat, kw)
# IMAP4 commands
def append(self, mailbox, flags, date_time, message, **kw):
    """(typ, [data]) = append(mailbox, flags, date_time, message)
    Append message to named mailbox.
    All args except `message' can be None."""
    name = 'APPEND'
    if not mailbox:
        mailbox = 'INBOX'
    if flags:
        # Parenthesise flags so _checkquote leaves them unquoted.
        if (flags[0],flags[-1]) != ('(',')'):
            flags = '(%s)' % flags
    else:
        flags = None
    if date_time:
        date_time = Time2Internaldate(date_time)
    else:
        date_time = None
    # Message body goes as an IMAP literal; normalise line endings to CRLF.
    self.literal = self.mapCRLF_cre.sub(CRLF, message)
    try:
        return self._simple_command(name, mailbox, flags, date_time, **kw)
    finally:
        self._release_state_change()
def authenticate(self, mechanism, authobject, **kw):
    """(typ, [data]) = authenticate(mechanism, authobject)
    Authenticate command - requires response processing.
    'mechanism' specifies which authentication mechanism is to
    be used - it must appear in <instance>.capabilities in the
    form AUTH=<mechanism>.
    'authobject' must be a callable object:
        data = authobject(response)
    It will be called to process server continuation responses.
    It should return data that will be encoded and sent to server.
    It should return None if the client abort response '*' should
    be sent instead."""
    # The authenticator acts as a literal generator driven by
    # continuation responses (see _command).
    self.literal = _Authenticator(authobject).process
    try:
        typ, dat = self._simple_command('AUTHENTICATE', mechanism.upper())
        if typ != 'OK':
            self._deliver_exc(self.error, dat[-1], kw)
        self.state = AUTH
        if __debug__: self._log(1, 'state => AUTH')
    finally:
        self._release_state_change()
    return self._deliver_dat(typ, dat, kw)
def capability(self, **kw):
    """(typ, [data]) = capability()
    Ask the server for its capability list."""
    kw['untagged_response'] = 'CAPABILITY'
    return self._simple_command('CAPABILITY', **kw)
def check(self, **kw):
    """(typ, [data]) = check()
    Request a checkpoint of the currently selected mailbox."""
    name = 'CHECK'
    return self._simple_command(name, **kw)
def close(self, **kw):
    """(typ, [data]) = close()
    Close currently selected mailbox.
    Deleted messages are removed from writable mailbox.
    This is the recommended command before 'LOGOUT'."""
    if self.state != 'SELECTED':
        raise self.error('No mailbox selected.')
    try:
        typ, dat = self._simple_command('CLOSE')
    finally:
        # Even on failure the mailbox is considered deselected.
        self.state = AUTH
        if __debug__: self._log(1, 'state => AUTH')
        self._release_state_change()
    return self._deliver_dat(typ, dat, kw)
def copy(self, message_set, new_mailbox, **kw):
    """(typ, [data]) = copy(message_set, new_mailbox)
    Append the messages in 'message_set' to 'new_mailbox'."""
    name = 'COPY'
    return self._simple_command(name, message_set, new_mailbox, **kw)
def create(self, mailbox, **kw):
    """(typ, [data]) = create(mailbox)
    Create a new mailbox named 'mailbox'."""
    name = 'CREATE'
    return self._simple_command(name, mailbox, **kw)
def delete(self, mailbox, **kw):
    """(typ, [data]) = delete(mailbox)
    Remove the mailbox named 'mailbox'."""
    name = 'DELETE'
    return self._simple_command(name, mailbox, **kw)
def deleteacl(self, mailbox, who, **kw):
    """(typ, [data]) = deleteacl(mailbox, who)
    Remove all rights granted to 'who' on 'mailbox'."""
    name = 'DELETEACL'
    return self._simple_command(name, mailbox, who, **kw)
def examine(self, mailbox='INBOX', **kw):
    """(typ, [data]) = examine(mailbox='INBOX')
    Open 'mailbox' for READ-ONLY access. (Flushes all untagged
    responses.)  'data' is the message count ('EXISTS' response).
    Mandated responses are ('FLAGS', 'EXISTS', 'RECENT',
    'UIDVALIDITY'); obtain others via "response('FLAGS')" etc."""
    return self.select(mailbox, True, **kw)
def expunge(self, **kw):
    """(typ, [data]) = expunge()
    Permanently remove deleted items from the selected mailbox;
    an 'EXPUNGE' response is generated per deleted message and
    'data' lists the expunged message numbers in order received."""
    kw['untagged_response'] = 'EXPUNGE'
    return self._simple_command('EXPUNGE', **kw)
def fetch(self, message_set, message_parts, **kw):
    """(typ, [data, ...]) = fetch(message_set, message_parts)
    Fetch (parts of) messages.  'message_parts' is a
    parenthesised list of parts, eg: "(UID BODY[TEXT])".
    'data' are tuples of message part envelope and data,
    followed by a string containing the trailer."""
    kw['untagged_response'] = 'FETCH'
    return self._simple_command('FETCH', message_set, message_parts, **kw)
def getacl(self, mailbox, **kw):
    """(typ, [data]) = getacl(mailbox)
    Retrieve the access control list for 'mailbox'."""
    name = 'GETACL'
    kw['untagged_response'] = 'ACL'
    return self._simple_command(name, mailbox, **kw)
def getannotation(self, mailbox, entry, attribute, **kw):
    """(typ, [data]) = getannotation(mailbox, entry, attribute)
    Retrieve ANNOTATION responses for 'mailbox'."""
    name = 'GETANNOTATION'
    kw['untagged_response'] = 'ANNOTATION'
    return self._simple_command(name, mailbox, entry, attribute, **kw)
def getquota(self, root, **kw):
    """(typ, [data]) = getquota(root)
    Return resource usage and limits for the quota root 'root'.
    (Part of the IMAP4 QUOTA extension defined in rfc2087.)"""
    name = 'GETQUOTA'
    kw['untagged_response'] = 'QUOTA'
    return self._simple_command(name, root, **kw)
def getquotaroot(self, mailbox, **kw):
    # Hmmm, this is non-std! Left for backwards-compatibility, sigh.
    # NB: usage should have been defined as:
    #   (typ, [QUOTAROOT responses...]) = getquotaroot(mailbox)
    #   (typ, [QUOTA responses...]) = response('QUOTA')
    """(typ, [[QUOTAROOT responses...], [QUOTA responses...]]) = getquotaroot(mailbox)
    Get the list of quota roots for the named mailbox."""
    typ, dat = self._simple_command('GETQUOTAROOT', mailbox)
    # Split the combined response into its two untagged streams.
    typ, quota = self._untagged_response(typ, dat, 'QUOTA')
    typ, quotaroot = self._untagged_response(typ, dat, 'QUOTAROOT')
    return self._deliver_dat(typ, [quotaroot, quota], kw)
def id(self, *kv_pairs, **kw):
    """(typ, [data]) = <instance>.id(kv_pairs)
    Exchange information for problem analysis and determination
    (RFC 2971 ID extension).  'kv_pairs' is a possibly empty list
    of keys and values; 'data' is a list of ID key value pairs or
    NIL.  NB: a single argument is assumed to be a correctly
    formatted string and is passed through unchanged (backward
    compatibility with an earlier version)."""
    kw['untagged_response'] = 'ID'
    if len(kv_pairs) == 1:
        # Back-compat: caller supplied a pre-formatted string.
        data = kv_pairs[0]
    elif not kv_pairs:
        data = 'NIL'
    else:
        quoted = [(arg and self._quote(arg) or 'NIL') for arg in kv_pairs]
        data = '(%s)' % ' '.join(quoted)
    return self._simple_command('ID', (data,), **kw)
def idle(self, timeout=None, **kw):
    """(typ, [data]) = idle(timeout=None)
    Enter server IDLE mode until the server reports a change,
    'timeout' seconds elapse (default: 29 minutes), or another
    IMAP4 command is scheduled."""
    # _IdleCont drives the continuation handshake (see _command).
    self.literal = _IdleCont(self, timeout).process
    try:
        return self._simple_command('IDLE', **kw)
    finally:
        self._release_state_change()
def list(self, directory='""', pattern='*', **kw):
    """(typ, [data]) = list(directory='""', pattern='*')
    List mailbox names under 'directory' matching 'pattern';
    'data' is the list of LIST responses.
    NB: in 'pattern', '%' matches everything except the hierarchy
    separator (so LIST "" "%" returns names at the root) while
    '*' matches all (so LIST "" "*" returns the whole tree)."""
    kw['untagged_response'] = 'LIST'
    return self._simple_command('LIST', directory, pattern, **kw)
def login(self, user, password, **kw):
    """(typ, [data]) = login(user, password)
    Identify client using plaintext password.
    NB: 'password' will be quoted."""
    try:
        typ, dat = self._simple_command('LOGIN', user, self._quote(password))
        if typ != 'OK':
            self._deliver_exc(self.error, dat[-1], kw)
        self.state = AUTH
        if __debug__: self._log(1, 'state => AUTH')
    finally:
        # _simple_command acquired the state-change lock; release it.
        self._release_state_change()
    return self._deliver_dat(typ, dat, kw)
def login_cram_md5(self, user, password, **kw):
    """(typ, [data]) = login_cram_md5(user, password)
    Authenticate using the CRAM-MD5 mechanism."""
    self.user = user
    self.password = password
    return self.authenticate('CRAM-MD5', self._CRAM_MD5_AUTH, **kw)
def _CRAM_MD5_AUTH(self, challenge):
    """Authobject computing the CRAM-MD5 response to 'challenge'."""
    import hmac
    digest = hmac.HMAC(self.password, challenge).hexdigest()
    return self.user + " " + digest
def logout(self, **kw):
    """(typ, [data]) = logout()
    Shutdown connection to server.
    Returns server 'BYE' response.
    NB: You must call this to shut down threads before discarding an instance."""
    self.state = LOGOUT
    if __debug__: self._log(1, 'state => LOGOUT')
    try:
        try:
            typ, dat = self._simple_command('LOGOUT')
        except:
            # Connection may already be dead; report, don't propagate.
            typ, dat = 'NO', ['%s: %s' % sys.exc_info()[:2]]
            if __debug__: self._log(1, dat)
        self._close_threads()
    finally:
        self._release_state_change()
    if __debug__: self._log(1, 'connection closed')
    # Prefer the server's BYE text when one was received.
    bye = self._get_untagged_response('BYE', leave=True)
    if bye:
        typ, dat = 'BYE', bye
    return self._deliver_dat(typ, dat, kw)
def lsub(self, directory='""', pattern='*', **kw):
    """(typ, [data, ...]) = lsub(directory='""', pattern='*')
    List 'subscribed' mailbox names under 'directory' matching
    'pattern'.  'data' are tuples of message part envelope and
    data."""
    kw['untagged_response'] = 'LSUB'
    return self._simple_command('LSUB', directory, pattern, **kw)
def myrights(self, mailbox, **kw):
    """(typ, [data]) = myrights(mailbox)
    Show the rights I hold on 'mailbox'."""
    kw['untagged_response'] = 'MYRIGHTS'
    return self._simple_command('MYRIGHTS', mailbox, **kw)
def namespace(self, **kw):
    """(typ, [data, ...]) = namespace()
    Return the server's IMAP namespaces ala rfc2342."""
    kw['untagged_response'] = 'NAMESPACE'
    return self._simple_command('NAMESPACE', **kw)
def noop(self, **kw):
    """(typ, [data]) = noop()
    Send a NOOP (also prods the server for pending updates)."""
    if __debug__:
        self._dump_ur(3)
    return self._simple_command('NOOP', **kw)
def partial(self, message_num, message_part, start, length, **kw):
    """(typ, [data, ...]) = partial(message_num, message_part, start, length)
    Fetch a truncated portion of a message part.
    'data' is a tuple of message part envelope and data.
    NB: obsolete."""
    kw['untagged_response'] = 'FETCH'
    return self._simple_command('PARTIAL', message_num, message_part, start, length, **kw)
def proxyauth(self, user, **kw):
    """(typ, [data]) = proxyauth(user)
    Assume the authentication identity of 'user'.
    (Lets an authorised administrator proxy into any user's
    mailbox.)"""
    name = 'PROXYAUTH'
    try:
        return self._simple_command(name, user, **kw)
    finally:
        self._release_state_change()
def rename(self, oldmailbox, newmailbox, **kw):
    """(typ, [data]) = rename(oldmailbox, newmailbox)
    Rename mailbox 'oldmailbox' to 'newmailbox'."""
    name = 'RENAME'
    return self._simple_command(name, oldmailbox, newmailbox, **kw)
def search(self, charset, *criteria, **kw):
    """(typ, [data]) = search(charset, criterion, ...)
    Search the mailbox for matching messages; 'data' is a
    space-separated list of matching message numbers."""
    kw['untagged_response'] = 'SEARCH'
    if not charset:
        return self._simple_command('SEARCH', *criteria, **kw)
    return self._simple_command('SEARCH', 'CHARSET', charset, *criteria, **kw)
def select(self, mailbox='INBOX', readonly=False, **kw):
    """(typ, [data]) = select(mailbox='INBOX', readonly=False)
    Select a mailbox. (Restores any previous untagged responses.)
    'data' is count of messages in mailbox ('EXISTS' response).
    Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so
    other responses should be obtained via "response('FLAGS')" etc."""
    self.commands_lock.acquire()
    # Save state of old mailbox, restore state for new...
    self.mailboxes[self.mailbox] = self.untagged_responses
    self.untagged_responses = self.mailboxes.setdefault(mailbox, [])
    self.commands_lock.release()
    self.mailbox = mailbox
    self.is_readonly = readonly and True or False
    if readonly:
        name = 'EXAMINE'
    else:
        name = 'SELECT'
    try:
        rqb = self._command(name, mailbox)
        typ, dat = rqb.get_response('command: %s => %%s' % rqb.name)
        if typ != 'OK':
            # Selection failed: fall back out of SELECTED state.
            if self.state == SELECTED:
                self.state = AUTH
                if __debug__: self._log(1, 'state => AUTH')
            if typ == 'BAD':
                self._deliver_exc(self.error, '%s command error: %s %s. Data: %.100s' % (name, typ, dat, mailbox), kw)
            return self._deliver_dat(typ, dat, kw)
        self.state = SELECTED
        if __debug__: self._log(1, 'state => SELECTED')
    finally:
        self._release_state_change()
    # Writable access was requested but the server granted READ-ONLY.
    if self._get_untagged_response('READ-ONLY', leave=True) and not readonly:
        if __debug__: self._dump_ur(1)
        self._deliver_exc(self.readonly, '%s is not writable' % mailbox, kw)
    typ, dat = self._untagged_response(typ, [None], 'EXISTS')
    return self._deliver_dat(typ, dat, kw)
def setacl(self, mailbox, who, what, **kw):
    """(typ, [data]) = setacl(mailbox, who, what)
    Grant 'who' the rights 'what' on 'mailbox'."""
    name = 'SETACL'
    try:
        return self._simple_command(name, mailbox, who, what, **kw)
    finally:
        self._release_state_change()
def setannotation(self, *args, **kw):
    """(typ, [data]) = setannotation(mailbox[, entry, attribute]+)
    Store ANNOTATIONs."""
    name = 'SETANNOTATION'
    kw['untagged_response'] = 'ANNOTATION'
    return self._simple_command(name, *args, **kw)
def setquota(self, root, limits, **kw):
    """(typ, [data]) = setquota(root, limits)
    Set resource limits for the quota root 'root'."""
    name = 'SETQUOTA'
    kw['untagged_response'] = 'QUOTA'
    try:
        return self._simple_command(name, root, limits, **kw)
    finally:
        self._release_state_change()
def sort(self, sort_criteria, charset, *search_criteria, **kw):
    """(typ, [data]) = sort(sort_criteria, charset, search_criteria, ...)
    IMAP4rev1 extension SORT command."""
    kw['untagged_response'] = 'SORT'
    # Parenthesise the sort spec so it is not quoted on the wire.
    if (sort_criteria[0], sort_criteria[-1]) != ('(', ')'):
        sort_criteria = '(%s)' % sort_criteria
    return self._simple_command('SORT', sort_criteria, charset, *search_criteria, **kw)
def starttls(self, keyfile=None, certfile=None, ca_certs=None, cert_verify_cb=None, **kw):
    """(typ, [data]) = starttls(keyfile=None, certfile=None, ca_certs=None, cert_verify_cb=None)
    Start TLS negotiation as per RFC 2595."""
    name = 'STARTTLS'
    if name not in self.capabilities:
        raise self.abort('TLS not supported by server')
    if hasattr(self, '_tls_established') and self._tls_established:
        raise self.abort('TLS session already established')
    # Must now shutdown reader thread after next response, and restart after changing read_fd
    self.read_size = 1              # Don't consume TLS handshake
    self.TerminateReader = True
    try:
        typ, dat = self._simple_command(name)
    finally:
        self._release_state_change()
        self.rdth.join()            # wait for old reader to stop
        self.TerminateReader = False
        self.read_size = READ_SIZE
    if typ != 'OK':
        # Restart reader thread and error
        self.rdth = threading.Thread(target=self._reader)
        self.rdth.setDaemon(True)
        self.rdth.start()
        raise self.error("Couldn't establish TLS session: %s" % dat)
    self.keyfile = keyfile
    self.certfile = certfile
    self.ca_certs = ca_certs
    self.cert_verify_cb = cert_verify_cb
    try:
        self.ssl_wrap_socket()
    finally:
        # Restart reader thread (on the new, wrapped read_fd)
        self.rdth = threading.Thread(target=self._reader)
        self.rdth.setDaemon(True)
        self.rdth.start()
    # Capabilities may change after TLS; refresh the cached tuple.
    typ, dat = self.capability()
    if dat == [None]:
        raise self.error('no CAPABILITY response from server')
    self.capabilities = tuple(dat[-1].upper().split())
    self._tls_established = True
    typ, dat = self._untagged_response(typ, dat, name)
    return self._deliver_dat(typ, dat, kw)
def status(self, mailbox, names, **kw):
    """(typ, [data]) = status(mailbox, names)
    Request the named status conditions for 'mailbox'."""
    kw['untagged_response'] = 'STATUS'
    return self._simple_command('STATUS', mailbox, names, **kw)
def store(self, message_set, command, flags, **kw):
    """(typ, [data]) = store(message_set, command, flags)
    Alter flag dispositions for messages in the mailbox."""
    if (flags[0], flags[-1]) != ('(', ')'):
        # Parenthesise so the flags are not quoted on the wire.
        flags = '(%s)' % flags
    kw['untagged_response'] = 'FETCH'
    return self._simple_command('STORE', message_set, command, flags, **kw)
def subscribe(self, mailbox, **kw):
    """(typ, [data]) = subscribe(mailbox)
    Add 'mailbox' to the subscription list."""
    name = 'SUBSCRIBE'
    try:
        return self._simple_command(name, mailbox, **kw)
    finally:
        self._release_state_change()
def thread(self, threading_algorithm, charset, *search_criteria, **kw):
    """(type, [data]) = thread(threading_algorithm, charset, search_criteria, ...)
    IMAPrev1 extension THREAD command."""
    kw['untagged_response'] = 'THREAD'
    return self._simple_command('THREAD', threading_algorithm, charset, *search_criteria, **kw)
def uid(self, command, *args, **kw):
    """(typ, [data]) = uid(command, arg, ...)
    Execute "command arg ..." addressing messages by UID rather
    than sequence number.  Assumes 'command' is legal in the
    current state; the response matches that of 'command'."""
    command = command.upper()
    # Most UID commands yield FETCH-style untagged responses.
    if command not in UID_direct:
        kw['untagged_response'] = 'FETCH'
    else:
        kw['untagged_response'] = command
    return self._simple_command('UID', command, *args, **kw)
def unsubscribe(self, mailbox, **kw):
    """(typ, [data]) = unsubscribe(mailbox)
    Remove 'mailbox' from the subscription list."""
    name = 'UNSUBSCRIBE'
    try:
        return self._simple_command(name, mailbox, **kw)
    finally:
        self._release_state_change()
def xatom(self, name, *args, **kw):
    """(typ, [data]) = xatom(name, arg, ...)
    Allow simple extension commands notified by server in CAPABILITY response.
    Assumes extension command 'name' is legal in current state.
    Returns response appropriate to extension command 'name'."""
    name = name.upper()
    if not name in Commands:
        # Register the extension so _command accepts it; it is
        # treated as synchronous and legal only in the current state.
        Commands[name] = ((self.state,), False)
    try:
        return self._simple_command(name, *args, **kw)
    finally:
        self._release_state_change()
# Internal methods
def _append_untagged(self, typ, dat):
    # Append new 'dat' to end of last untagged response if same 'typ',
    # else append new response.
    if dat is None: dat = ''
    self.commands_lock.acquire()
    if self.untagged_responses:
        urn, urd = self.untagged_responses[-1]
        if urn != typ:
            urd = None          # last entry is a different type
    else:
        urd = None
    if urd is None:
        # Start a fresh [typ, data-list] entry.
        urd = []
        self.untagged_responses.append([typ, urd])
    urd.append(dat)
    self.commands_lock.release()
    if __debug__: self._log(5, 'untagged_responses[%s] %s += ["%s"]' % (typ, len(urd)-1, dat))
def _check_bye(self):
    # Abort immediately if the server has announced disconnection.
    bye = self._get_untagged_response('BYE', leave=True)
    if bye:
        raise self.abort(bye[-1])
def _checkquote(self, arg):
    # Must quote command args if "atom-specials" present,
    # and not already quoted. NB: single quotes are removed.
    if not isinstance(arg, basestring):
        return arg              # non-string args pass through unchanged
    # Already parenthesised or double-quoted: leave as-is.
    if len(arg) >= 2 and (arg[0],arg[-1]) in (('(',')'),('"','"')):
        return arg
    # Single-quoted: strip the quotes and send unquoted.
    if len(arg) >= 2 and (arg[0],arg[-1]) in (("'","'"),):
        return arg[1:-1]
    if arg and self.mustquote_cre.search(arg) is None:
        return arg              # nothing needing quoting
    return self._quote(arg)
def _choose_nonull_or_dflt(self, dflt, *args):
    # Return the first non-None arg whose type matches 'dflt',
    # else 'dflt' itself.
    if isinstance(dflt, basestring):
        dflttyp = basestring            # Allow any string type
    else:
        dflttyp = type(dflt)
    for arg in args:
        if arg is not None:
            if isinstance(arg, dflttyp):
                return arg
            if __debug__: self._log(0, 'bad arg is %s, expecting %s' % (type(arg), dflttyp))
    return dflt
def _command(self, name, *args, **kw):
    # Build and queue a tagged command; returns the request block.
    # Synchronous commands hold state_change_pending until the
    # caller releases it; async commands release it immediately.
    if Commands[name][CMD_VAL_ASYNC]:
        cmdtyp = 'async'
    else:
        cmdtyp = 'sync'
    if __debug__: self._log(1, '[%s] %s %s' % (cmdtyp, name, args))
    if __debug__: self._log(3, 'state_change_pending.acquire')
    self.state_change_pending.acquire()
    self._end_idle()
    if cmdtyp == 'async':
        self.state_change_pending.release()
        if __debug__: self._log(3, 'state_change_pending.release')
    else:
        # Need to wait for all async commands to complete
        self._check_bye()
        self.commands_lock.acquire()
        if self.tagged_commands:
            self.state_change_free.clear()
            need_event = True
        else:
            need_event = False
        self.commands_lock.release()
        if need_event:
            if __debug__: self._log(3, 'sync command %s waiting for empty commands Q' % name)
            self.state_change_free.wait()
            if __debug__: self._log(3, 'sync command %s proceeding' % name)
    if self.state not in Commands[name][CMD_VAL_STATES]:
        self.literal = None
        raise self.error('command %s illegal in state %s'
                        % (name, self.state))
    self._check_bye()
    # Discard any stale completion-status untagged responses.
    for typ in ('OK', 'NO', 'BAD'):
        self._get_untagged_response(typ)
    if self._get_untagged_response('READ-ONLY', leave=True) and not self.is_readonly:
        self.literal = None
        raise self.readonly('mailbox status changed to READ-ONLY')
    if self.Terminate:
        raise self.abort('connection closed')
    rqb = self._request_push(name=name, **kw)
    data = '%s %s' % (rqb.tag, name)
    for arg in args:
        if arg is None: continue
        data = '%s %s' % (data, self._checkquote(arg))
    literal = self.literal
    if literal is not None:
        self.literal = None
        if isinstance(literal, basestring):
            # Plain string literal: announce its size in the command.
            literator = None
            data = '%s {%s}' % (data, len(literal))
        else:
            # Callable generator driven by continuation responses.
            literator = literal
    if __debug__: self._log(4, 'data=%s' % data)
    rqb.data = '%s%s' % (data, CRLF)
    if literal is None:
        self.ouq.put(rqb)
        return rqb
    # Must setup continuation expectancy *before* ouq.put
    crqb = self._request_push(tag='continuation')
    self.ouq.put(rqb)
    while True:
        # Wait for continuation response
        ok, data = crqb.get_response('command: %s => %%s' % name)
        if __debug__: self._log(4, 'continuation => %s, %s' % (ok, data))
        # NO/BAD response?
        if not ok:
            break
        # Send literal
        if literator is not None:
            literal = literator(data, rqb)
        if literal is None:
            break
        if literator is not None:
            # Need new request for next continuation response
            crqb = self._request_push(tag='continuation')
        if __debug__: self._log(4, 'write literal size %s' % len(literal))
        crqb.data = '%s%s' % (literal, CRLF)
        self.ouq.put(crqb)
        if literator is None:
            break               # single string literal: done after one send
    return rqb
def _command_complete(self, rqb, kw):
    # Called for non-callback commands: block for the tagged
    # response and translate BAD into an exception.
    typ, dat = rqb.get_response('command: %s => %%s' % rqb.name)
    self._check_bye()
    if typ == 'BAD':
        if __debug__: self._print_log()
        raise self.error('%s command error: %s %s. Data: %.100s' % (rqb.name, typ, dat, rqb.data))
    if 'untagged_response' in kw:
        # Caller wants data from a specific untagged response type.
        return self._untagged_response(typ, dat, kw['untagged_response'])
    return typ, dat
def _command_completer(self, (response, cb_arg, error)):
    # Called for callback commands: forward the final result (or
    # failure) to the user's callback via the request block.
    rqb, kw = cb_arg
    rqb.callback = kw['callback']
    rqb.callback_arg = kw.get('cb_arg')
    if error is not None:
        # Transport/protocol failure - deliver it as an abort.
        if __debug__: self._print_log()
        typ, val = error
        rqb.abort(typ, val)
        return
    bye = self._get_untagged_response('BYE', leave=True)
    if bye:
        rqb.abort(self.abort, bye[-1])
        return
    typ, dat = response
    if typ == 'BAD':
        if __debug__: self._print_log()
        rqb.abort(self.error, '%s command error: %s %s. Data: %.100s' % (rqb.name, typ, dat, rqb.data))
        return
    if 'untagged_response' in kw:
        response = self._untagged_response(typ, dat, kw['untagged_response'])
    rqb.deliver(response)
def _deliver_dat(self, typ, dat, kw):
    # Deliver via the user's callback when one was supplied;
    # always return the (typ, dat) pair as well.
    if 'callback' in kw:
        result = ((typ, dat), kw.get('cb_arg'), None)
        kw['callback'](result)
    return typ, dat
def _deliver_exc(self, exc, dat, kw):
    # Report the failure through the callback (if any), then raise.
    if 'callback' in kw:
        failure = (None, kw.get('cb_arg'), (exc, dat))
        kw['callback'](failure)
    raise exc(dat)
def _end_idle(self):
    # Terminate a pending server IDLE by queueing "DONE".
    self.idle_lock.acquire()
    irqb = self.idle_rqb
    if irqb is None:
        # Not idling - nothing to do.
        self.idle_lock.release()
        return
    self.idle_rqb = None
    self.idle_timeout = None
    self.idle_lock.release()
    irqb.data = 'DONE%s' % CRLF
    self.ouq.put(irqb)
    if __debug__: self._log(2, 'server IDLE finished')
def _get_untagged_response(self, name, leave=False):
    # Return data for the first untagged response of type 'name'
    # (removing it unless 'leave' is true), or None if absent.
    self.commands_lock.acquire()
    for i, (typ, dat) in enumerate(self.untagged_responses):
        if typ == name:
            if not leave:
                del self.untagged_responses[i]
            self.commands_lock.release()
            if __debug__: self._log(5, '_get_untagged_response(%s) => %s' % (name, dat))
            return dat
    self.commands_lock.release()
    return None
def _match(self, cre, s):
    # Apply compiled pattern 'cre' to 's'; stash the match object
    # in self.mo and report whether it matched.
    mo = cre.match(s)
    self.mo = mo
    return mo is not None
def _put_response(self, resp):
    """Parse one line (or literal chunk) received from the server and dispatch it.

    Handles, in order: accumulation of literal ({n}) data, tagged command
    completions, untagged ('*') responses, and continuation ('+') requests.
    Called from the handler thread only."""
    if self._expecting_data > 0:
        # Mid-literal: consume up to the remaining expected byte count first.
        rlen = len(resp)
        dlen = min(self._expecting_data, rlen)
        self._expecting_data -= dlen
        if rlen <= dlen:
            self._accumulated_data.append(resp)
            return
        self._accumulated_data.append(resp[:dlen])
        resp = resp[dlen:]
    if self._accumulated_data:
        # Literal complete: attach accumulated bytes to the pending untagged response.
        typ, dat = self._literal_expected
        self._append_untagged(typ, (dat, ''.join(self._accumulated_data)))
        self._accumulated_data = []
    # Protocol mandates all lines terminated by CRLF
    resp = resp[:-2]
    if 'continuation' in self.tagged_commands:
        continuation_expected = True
    else:
        continuation_expected = False
    if self._literal_expected is not None:
        dat = resp
        if self._match(self.literal_cre, dat):
            # Another literal follows on this same response.
            self._literal_expected[1] = dat
            self._expecting_data = int(self.mo.group('size'))
            if __debug__: self._log(4, 'expecting literal size %s' % self._expecting_data)
            return
        typ = self._literal_expected[0]
        self._literal_expected = None
        self._append_untagged(typ, dat)  # Tail
        if __debug__: self._log(4, 'literal completed')
    else:
        # Command completion response?
        if self._match(self.tagre, resp):
            tag = self.mo.group('tag')
            typ = self.mo.group('type')
            dat = self.mo.group('data')
            if not tag in self.tagged_commands:
                if __debug__: self._log(1, 'unexpected tagged response: %s' % resp)
            else:
                self._request_pop(tag, (typ, [dat]))
        else:
            dat2 = None
            # '*' (untagged) responses?
            if not self._match(self.untagged_response_cre, resp):
                if self._match(self.untagged_status_cre, resp):
                    dat2 = self.mo.group('data2')
            if self.mo is None:
                # Only other possibility is '+' (continuation) response...
                if self._match(self.continuation_cre, resp):
                    if not continuation_expected:
                        if __debug__: self._log(1, "unexpected continuation response: '%s'" % resp)
                        return
                    self._request_pop('continuation', (True, self.mo.group('data')))
                    return
                if __debug__: self._log(1, "unexpected response: '%s'" % resp)
                return
            typ = self.mo.group('type')
            dat = self.mo.group('data')
            if dat is None: dat = ''  # Null untagged response
            if dat2: dat = dat + ' ' + dat2
            # Is there a literal to come?
            if self._match(self.literal_cre, dat):
                self._expecting_data = int(self.mo.group('size'))
                if __debug__: self._log(4, 'read literal size %s' % self._expecting_data)
                self._literal_expected = [typ, dat]
                return
            self._append_untagged(typ, dat)
            if typ != 'OK':  # NO, BYE, IDLE
                self._end_idle()
    # Bracketed response information?
    if typ in ('OK', 'NO', 'BAD') and self._match(self.response_code_cre, dat):
        self._append_untagged(self.mo.group('type'), self.mo.group('data'))
    # Command waiting for aborted continuation response?
    if continuation_expected:
        self._request_pop('continuation', (False, resp))
    # Bad news?
    if typ in ('NO', 'BAD', 'BYE'):
        if typ == 'BYE':
            self.Terminate = True
        if __debug__: self._log(1, '%s response: %s' % (typ, dat))
def _quote(self, arg):
return '"%s"' % arg.replace('\\', '\\\\').replace('"', '\\"')
def _release_state_change(self):
    """Release the pending state-change lock (if held) so the next state-changing command may run."""
    if self.state_change_pending.locked():
        self.state_change_pending.release()
        if __debug__: self._log(3, 'state_change_pending.release')
def _request_pop(self, name, data):
    """Complete the tagged command 'name': remove it from tagged_commands and deliver 'data'."""
    self.commands_lock.acquire()
    rqb = self.tagged_commands.pop(name)
    if not self.tagged_commands:
        # Last outstanding command finished - unblock state-changing commands.
        if __debug__: self._log(3, 'state_change_free.set')
        self.state_change_free.set()
    self.commands_lock.release()
    if __debug__: self._log(4, '_request_pop(%s, %s) = %s' % (name, data, rqb.tag))
    rqb.deliver(data)
def _request_push(self, tag=None, name=None, **kw):
    """Create a new Request and register it in tagged_commands.

    Registered under 'tag' when given, otherwise under the request's own tag.
    Returns the new request block."""
    self.commands_lock.acquire()
    rqb = Request(self, name=name, **kw)
    if tag is None:
        tag = rqb.tag
    self.tagged_commands[tag] = rqb
    self.commands_lock.release()
    if __debug__: self._log(4, '_request_push(%s, %s, %s) = %s' % (tag, name, `kw`, rqb.tag))
    return rqb
def _simple_command(self, name, *args, **kw):
    """Send command 'name' and wait for its completion.

    With a 'callback' kwarg the command runs asynchronously and (None, None)
    is returned immediately; the callback receives the eventual result."""
    if 'callback' in kw:
        # Note: old calling sequence for back-compat with python <2.6
        self._command(name, callback=self._command_completer, cb_arg=kw, cb_self=True, *args)
        return (None, None)
    return self._command_complete(self._command(name, *args), kw)
def _untagged_response(self, typ, dat, name):
    """Replace 'dat' with the concatenation of all queued untagged responses 'name'.

    Returns (typ, dat) unchanged for 'NO', and (typ, [None]) when no matching
    untagged response is queued."""
    if typ == 'NO':
        return typ, dat
    data = self._get_untagged_response(name)
    if not data:
        return typ, [None]
    # Drain any further responses of the same name into one list.
    while True:
        dat = self._get_untagged_response(name)
        if not dat:
            break
        data += dat
    if __debug__: self._log(4, '_untagged_response(%s, ?, %s) => %s' % (typ, name, data))
    return typ, data
# Threads
def _close_threads(self):
    """Stop the writer, reader and handler threads.

    Flushes the output queue (None sentinel), joins the writer, shuts the
    connection down, then joins the reader and handler."""
    if __debug__: self._log(1, '_close_threads')
    self.ouq.put(None)
    self.wrth.join()
    if __debug__: self._log(1, 'call shutdown')
    self.shutdown()
    self.rdth.join()
    self.inth.join()
def _handler(self):
    """Handler thread: consume lines from inq and feed them to _put_response.

    Also enforces the response timeout and the server IDLE timeout. On exit it
    aborts every queued/outstanding request with the termination reason."""
    resp_timeout = self.resp_timeout
    threading.currentThread().setName(self.identifier + 'handler')
    time.sleep(0.1)  # Don't start handling before main thread ready
    if __debug__: self._log(1, 'starting')
    typ, val = self.abort, 'connection terminated'
    while not self.Terminate:
        try:
            if self.idle_timeout is not None:
                # Wake up in time to notice an expired IDLE.
                timeout = self.idle_timeout - time.time()
                if timeout <= 0:
                    timeout = 1
                if __debug__:
                    if self.idle_rqb is not None:
                        self._log(5, 'server IDLING, timeout=%.2f' % timeout)
            else:
                timeout = resp_timeout
            line = self.inq.get(True, timeout)
        except Queue.Empty:
            if self.idle_rqb is None:
                if resp_timeout is not None and self.tagged_commands:
                    # Commands outstanding but server silent: give up.
                    if __debug__: self._log(1, 'response timeout')
                    typ, val = self.abort, 'no response after %s secs' % resp_timeout
                    break
                continue
            if self.idle_timeout > time.time():
                continue
            if __debug__: self._log(2, 'server IDLE timedout')
            line = IDLE_TIMEOUT_RESPONSE
        if line is None:
            # None is the reader's shutdown sentinel.
            if __debug__: self._log(1, 'inq None - terminating')
            break
        if not isinstance(line, basestring):
            # Reader pushed an (exception, reason) tuple instead of a line.
            typ, val = line
            break
        try:
            self._put_response(line)
        except:
            typ, val = self.error, 'program error: %s - %s' % sys.exc_info()[:2]
            break
    self.Terminate = True
    if __debug__: self._log(1, 'terminating: %s' % `val`)
    # Abort everything still queued for sending...
    while not self.ouq.empty():
        try:
            self.ouq.get_nowait().abort(typ, val)
        except Queue.Empty:
            break
    self.ouq.put(None)
    # ...and every command still awaiting a response.
    self.commands_lock.acquire()
    for name in self.tagged_commands.keys():
        rqb = self.tagged_commands.pop(name)
        rqb.abort(typ, val)
    self.state_change_free.set()
    self.commands_lock.release()
    if __debug__: self._log(3, 'state_change_free.set')
    if __debug__: self._log(1, 'finished')
if hasattr(select_module, "poll"):
def _reader(self):
    """Reader thread (poll() variant): split the socket stream into lines and queue them on inq.

    Errors are reported by pushing an (abort, reason) tuple onto inq."""
    threading.currentThread().setName(self.identifier + 'reader')
    if __debug__: self._log(1, 'starting using poll')

    def poll_error(state):
        # Translate a poll state bitmask into a readable message.
        PollErrors = {
            select.POLLERR: 'Error',
            select.POLLHUP: 'Hang up',
            select.POLLNVAL: 'Invalid request: descriptor not open',
        }
        return ' '.join([PollErrors[s] for s in PollErrors.keys() if (s & state)])

    line_part = ''  # carries an incomplete trailing line between reads
    poll = select.poll()
    poll.register(self.read_fd, select.POLLIN)
    rxzero = 0
    terminate = False
    read_poll_timeout = self.read_poll_timeout * 1000  # poll() timeout is in millisecs
    while not (terminate or self.Terminate):
        if self.state == LOGOUT:
            # Poll quickly during logout so the thread exits promptly.
            timeout = 1
        else:
            timeout = read_poll_timeout
        try:
            r = poll.poll(timeout)
            if __debug__: self._log(5, 'poll => %s' % `r`)
            if not r:
                continue  # Timeout
            fd,state = r[0]
            if state & select.POLLIN:
                data = self.read(self.read_size)  # Drain ssl buffer if present
                start = 0
                dlen = len(data)
                if __debug__: self._log(5, 'rcvd %s' % dlen)
                if dlen == 0:
                    # Repeated zero-length reads mean the peer closed the connection.
                    rxzero += 1
                    if rxzero > 5:
                        raise IOError("Too many read 0")
                    time.sleep(0.1)
                    continue  # Try again
                rxzero = 0
                while True:
                    stop = data.find('\n', start)
                    if stop < 0:
                        line_part += data[start:]
                        break
                    stop += 1
                    line_part, start, line = \
                        '', stop, line_part + data[start:stop]
                    if __debug__: self._log(4, '< %s' % line)
                    self.inq.put(line)
                    if self.TerminateReader:
                        terminate = True
            if state & ~(select.POLLIN):
                raise IOError(poll_error(state))
        except:
            reason = 'socket error: %s - %s' % sys.exc_info()[:2]
            if __debug__:
                if not self.Terminate:
                    self._print_log()
                    if self.debug: self.debug += 4  # Output all
                    self._log(1, reason)
            self.inq.put((self.abort, reason))
            break
    poll.unregister(self.read_fd)
    if __debug__: self._log(1, 'finished')
else:
# No "poll" - use select()
def _reader(self):
    """Reader thread (select() fallback): split the socket stream into lines and queue them on inq.

    Used when the platform lacks poll(). Errors are reported by pushing an
    (abort, reason) tuple onto inq."""
    threading.currentThread().setName(self.identifier + 'reader')
    if __debug__: self._log(1, 'starting using select')
    line_part = ''  # carries an incomplete trailing line between reads
    rxzero = 0
    terminate = False
    while not (terminate or self.Terminate):
        if self.state == LOGOUT:
            # Poll quickly during logout so the thread exits promptly.
            timeout = 1
        else:
            timeout = self.read_poll_timeout
        try:
            r,w,e = select.select([self.read_fd], [], [], timeout)
            if __debug__: self._log(5, 'select => %s, %s, %s' % (r,w,e))
            if not r:  # Timeout
                continue
            data = self.read(self.read_size)  # Drain ssl buffer if present
            start = 0
            dlen = len(data)
            if __debug__: self._log(5, 'rcvd %s' % dlen)
            if dlen == 0:
                # Repeated zero-length reads mean the peer closed the connection.
                rxzero += 1
                if rxzero > 5:
                    raise IOError("Too many read 0")
                time.sleep(0.1)
                continue  # Try again
            rxzero = 0
            while True:
                stop = data.find('\n', start)
                if stop < 0:
                    line_part += data[start:]
                    break
                stop += 1
                line_part, start, line = \
                    '', stop, line_part + data[start:stop]
                if __debug__: self._log(4, '< %s' % line)
                self.inq.put(line)
                if self.TerminateReader:
                    terminate = True
        except:
            reason = 'socket error: %s - %s' % sys.exc_info()[:2]
            if __debug__:
                if not self.Terminate:
                    self._print_log()
                    if self.debug: self.debug += 4  # Output all
                    self._log(1, reason)
            self.inq.put((self.abort, reason))
            break
    if __debug__: self._log(1, 'finished')
def _writer(self):
    """Writer thread: pop request blocks from ouq and send their data.

    A None entry on the queue stops the thread; send failures abort the
    offending request and push an (abort, reason) tuple onto inq."""
    threading.currentThread().setName(self.identifier + 'writer')
    if __debug__: self._log(1, 'starting')
    reason = 'Terminated'
    while not self.Terminate:
        rqb = self.ouq.get()
        if rqb is None:
            break  # Outq flushed
        try:
            self.send(rqb.data)
            if __debug__: self._log(4, '> %s' % rqb.data)
        except:
            reason = 'socket error: %s - %s' % sys.exc_info()[:2]
            if __debug__:
                if not self.Terminate:
                    self._print_log()
                    if self.debug: self.debug += 4  # Output all
                    self._log(1, reason)
            rqb.abort(self.abort, reason)
            break
    self.inq.put((self.abort, reason))
    if __debug__: self._log(1, 'finished')
# Debugging
if __debug__:
def _init_debug(self, debug=None, debug_file=None, debug_buf_lvl=None):
    """Set up debug level, debug stream, and the rolling command log.

    Only compiled when __debug__ is true (i.e. not under python -O)."""
    self.debug = self._choose_nonull_or_dflt(0, debug, Debug)
    self.debug_file = self._choose_nonull_or_dflt(sys.stderr, debug_file)
    self.debug_buf_lvl = self._choose_nonull_or_dflt(DFLT_DEBUG_BUF_LVL, debug_buf_lvl)
    self.debug_lock = threading.Lock()
    self._cmd_log_len = 20
    self._cmd_log_idx = 0
    self._cmd_log = {}  # Last `_cmd_log_len' interactions
    if self.debug:
        self._mesg('imaplib2 version %s' % __version__)
        self._mesg('imaplib2 debug level %s, buffer level %s' % (self.debug, self.debug_buf_lvl))
def _dump_ur(self, lvl):
    """Debug-dump the queued untagged responses, gated on debug level 'lvl'."""
    if lvl > self.debug:
        return
    l = self.untagged_responses
    if not l:
        return
    t = '\n\t\t'
    # Render each (typ, data_list) pair as: typ: "item" "item" ...
    l = map(lambda x:'%s: "%s"' % (x[0], x[1][0] and '" "'.join(x[1]) or ''), l)
    self.debug_lock.acquire()
    self._mesg('untagged responses dump:%s%s' % (t, t.join(l)))
    self.debug_lock.release()
def _log(self, lvl, line):
    """Log 'line' when 'lvl' is within the current debug level.

    Level-1 messages are additionally retained in the circular _cmd_log
    buffer so _print_log can replay recent activity after a failure."""
    if lvl > self.debug:
        return
    if line[-2:] == CRLF:
        # Show the terminator literally rather than emitting a blank line.
        line = line[:-2] + '\\r\\n'
    tn = threading.currentThread().getName()
    if lvl <= 1 or self.debug > self.debug_buf_lvl:
        self.debug_lock.acquire()
        self._mesg(line, tn)
        self.debug_lock.release()
        if lvl != 1:
            return
    # Keep log of last `_cmd_log_len' interactions for debugging.
    self.debug_lock.acquire()
    self._cmd_log[self._cmd_log_idx] = (line, tn, time.time())
    self._cmd_log_idx += 1
    if self._cmd_log_idx >= self._cmd_log_len:
        self._cmd_log_idx = 0
    self.debug_lock.release()
def _mesg(self, s, tn=None, secs=None):
if secs is None:
secs = time.time()
if tn is None:
tn = threading.currentThread().getName()
tm = time.strftime('%M:%S', time.localtime(secs))
try:
self.debug_file.write(' %s.%02d %s %s\n' % (tm, (secs*100)%100, tn, s))
self.debug_file.flush()
finally:
pass
def _print_log(self):
    """Replay the circular command log (oldest entry first) to the debug stream."""
    self.debug_lock.acquire()
    i, n = self._cmd_log_idx, self._cmd_log_len
    if n: self._mesg('last %d log messages:' % n)
    while n:
        try:
            self._mesg(*self._cmd_log[i])
        except:
            # Slot may be empty when fewer than _cmd_log_len entries were logged.
            pass
        i += 1
        if i >= self._cmd_log_len:
            i = 0
        n -= 1
    self.debug_lock.release()
class IMAP4_SSL(IMAP4):

    """IMAP4 client class over SSL connection

    Instantiate with:
        IMAP4_SSL(host=None, port=None, keyfile=None, certfile=None, debug=None, debug_file=None, identifier=None, timeout=None)

        host           - host's name (default: localhost);
        port           - port number (default: standard IMAP4 SSL port);
        keyfile        - PEM formatted file that contains your private key (default: None);
        certfile       - PEM formatted certificate chain file (default: None);
        ca_certs       - PEM formatted certificate chain file used to validate server certificates (default: None);
        cert_verify_cb - function to verify authenticity of server certificates (default: None);
        debug          - debug level (default: 0 - no debug);
        debug_file     - debug stream (default: sys.stderr);
        identifier     - thread identifier prefix (default: host);
        timeout        - timeout in seconds when expecting a command response.
        debug_buf_lvl  - debug level at which buffering is turned off.

    For more documentation see the docstring of the parent class IMAP4.
    """

    def __init__(self, host=None, port=None, keyfile=None, certfile=None, ca_certs=None, cert_verify_cb=None, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None):
        # SSL credentials must be stored before the base __init__ opens the connection.
        self.keyfile = keyfile
        self.certfile = certfile
        self.ca_certs = ca_certs
        self.cert_verify_cb = cert_verify_cb
        IMAP4.__init__(self, host, port, debug, debug_file, identifier, timeout, debug_buf_lvl)

    def open(self, host=None, port=None):
        """open(host=None, port=None)
        Setup secure connection to remote server on "host:port"
            (default: localhost:standard IMAP4 SSL port).
        This connection will be used by the routines:
            read, send, shutdown, socket, ssl."""
        self.host = self._choose_nonull_or_dflt('', host)
        self.port = self._choose_nonull_or_dflt(IMAP4_SSL_PORT, port)
        self.sock = self.open_socket()
        self.ssl_wrap_socket()

    def read(self, size):
        """data = read(size)
        Read at most 'size' bytes from remote.

        When COMPRESS is active, serves data from the decompressor's
        unconsumed tail before reading from the socket again."""
        if self.decompressor is None:
            return self.sock.read(size)
        if self.decompressor.unconsumed_tail:
            data = self.decompressor.unconsumed_tail
        else:
            data = self.sock.read(READ_SIZE)
        return self.decompressor.decompress(data, size)

    def send(self, data):
        """send(data)
        Send 'data' to remote, compressing first when COMPRESS is active."""
        if self.compressor is not None:
            data = self.compressor.compress(data)
            data += self.compressor.flush(zlib.Z_SYNC_FLUSH)
        if hasattr(self.sock, "sendall"):
            self.sock.sendall(data)
        else:
            # Fallback for socket-like objects without sendall: loop on partial
            # writes. ('remaining' renamed from 'bytes', which shadowed the builtin.)
            remaining = len(data)
            while remaining > 0:
                sent = self.sock.write(data)
                if sent == remaining:
                    break    # avoid copy
                data = data[sent:]
                remaining = remaining - sent

    def ssl(self):
        """ssl = ssl()
        Return ssl instance used to communicate with the IMAP4 server."""
        return self.sock
class IMAP4_stream(IMAP4):

    """IMAP4 client class over a stream

    Instantiate with:
        IMAP4_stream(command, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None)

        command        - string that can be passed to subprocess.Popen();
        debug          - debug level (default: 0 - no debug);
        debug_file     - debug stream (default: sys.stderr);
        identifier     - thread identifier prefix (default: host);
        timeout        - timeout in seconds when expecting a command response.
        debug_buf_lvl  - debug level at which buffering is turned off.

    For more documentation see the docstring of the parent class IMAP4.
    """

    def __init__(self, command, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None):
        self.command = command
        self.host = command
        self.port = None
        self.sock = None
        self.writefile, self.readfile = None, None
        self.read_fd = None
        IMAP4.__init__(self, None, None, debug, debug_file, identifier, timeout, debug_buf_lvl)

    def open(self, host=None, port=None):
        """open(host=None, port=None)
        Setup a stream connection via 'self.command'.
        This connection will be used by the routines:
            read, send, shutdown, socket."""
        from subprocess import Popen, PIPE
        self._P = Popen(self.command, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
        self.writefile, self.readfile = self._P.stdin, self._P.stdout
        self.read_fd = self.readfile.fileno()

    def read(self, size):
        """Read 'size' bytes from remote.

        When COMPRESS is active, serves data from the decompressor's
        unconsumed tail before reading from the pipe again."""
        if self.decompressor is None:
            return os.read(self.read_fd, size)
        if self.decompressor.unconsumed_tail:
            data = self.decompressor.unconsumed_tail
        else:
            data = os.read(self.read_fd, READ_SIZE)
        return self.decompressor.decompress(data, size)

    def send(self, data):
        """Send data to remote, compressing first when COMPRESS is active."""
        if self.compressor is not None:
            data = self.compressor.compress(data)
            data += self.compressor.flush(zlib.Z_SYNC_FLUSH)
        self.writefile.write(data)
        self.writefile.flush()

    def shutdown(self):
        """Close I/O established in "open"."""
        self.readfile.close()
        self.writefile.close()
        # Reap the child process so it does not linger as a zombie
        # (matches the standard library imaplib.IMAP4_stream.shutdown).
        self._P.wait()
class _Authenticator(object):
"""Private class to provide en/de-coding
for base64 authentication conversation."""
def __init__(self, mechinst):
self.mech = mechinst # Callable object to provide/process data
def process(self, data, rqb):
ret = self.mech(self.decode(data))
if ret is None:
return '*' # Abort conversation
return self.encode(ret)
def encode(self, inp):
#
# Invoke binascii.b2a_base64 iteratively with
# short even length buffers, strip the trailing
# line feed from the result and append. "Even"
# means a number that factors to both 6 and 8,
# so when it gets to the end of the 8-bit input
# there's no partial 6-bit output.
#
oup = ''
while inp:
if len(inp) > 48:
t = inp[:48]
inp = inp[48:]
else:
t = inp
inp = ''
e = binascii.b2a_base64(t)
if e:
oup = oup + e[:-1]
return oup
def decode(self, inp):
if not inp:
return ''
return binascii.a2b_base64(inp)
class _IdleCont(object):

    """When process is called, server is in IDLE state
    and will send asynchronous changes."""

    def __init__(self, parent, timeout):
        self.parent = parent
        # Fall back to the module default IDLE_TIMEOUT when caller gave none.
        self.timeout = parent._choose_nonull_or_dflt(IDLE_TIMEOUT, timeout)
        self.parent.idle_timeout = self.timeout + time.time()

    def process(self, data, rqb):
        # Record the request block and refresh the deadline so _end_idle /
        # the handler thread can finish the IDLE later. Returns None: no
        # continuation data is sent back to the server.
        self.parent.idle_lock.acquire()
        self.parent.idle_rqb = rqb
        self.parent.idle_timeout = self.timeout + time.time()
        self.parent.idle_lock.release()
        if __debug__: self.parent._log(2, 'server IDLE started, timeout in %.2f secs' % self.timeout)
        return None
# Month abbreviations indexed 1..12 (index 0 unused, so month numbers map directly).
MonthNames = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

# Reverse map: month abbreviation (as produced by str.encode()) -> month number 1..12.
Mon2num = dict(zip((x.encode() for x in MonthNames[1:]), range(1, 13)))

# Parses the INTERNALDATE field of a FETCH response:
# 'DD-Mmm-YYYY HH:MM:SS +HHMM' (day may be space- or zero-padded).
InternalDate = re.compile(r'.*INTERNALDATE "'
    r'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
    r' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])'
    r' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
    r'"')
def Internaldate2Time(resp):

    """time_tuple = Internaldate2Time(resp)

    Convert IMAP4 INTERNALDATE to UT. Returns None when 'resp' does not
    contain an INTERNALDATE field."""

    mo = InternalDate.match(resp)
    if not mo:
        return None

    # NOTE(review): Mon2num keys were built with str.encode(); this lookup
    # assumes group('mon') compares equal to those keys (true on Python 2).
    mon = Mon2num[mo.group('mon')]
    zonen = mo.group('zonen')

    day = int(mo.group('day'))
    year = int(mo.group('year'))
    hour = int(mo.group('hour'))
    min = int(mo.group('min'))
    sec = int(mo.group('sec'))
    zoneh = int(mo.group('zoneh'))
    zonem = int(mo.group('zonem'))

    # INTERNALDATE timezone must be subtracted to get UT
    zone = (zoneh*60 + zonem)*60
    if zonen == '-':
        zone = -zone

    tt = (year, mon, day, hour, min, sec, -1, -1, -1)
    utc = time.mktime(tt)

    # Following is necessary because the time module has no 'mkgmtime'.
    # 'mktime' assumes arg in local timezone, so adds timezone/altzone.
    lt = time.localtime(utc)
    if time.daylight and lt[-1]:
        zone = zone + time.altzone
    else:
        zone = zone + time.timezone

    return time.localtime(utc - zone)

Internaldate2tuple = Internaldate2Time   # (Backward compatible)
def Time2Internaldate(date_time):

    """'"DD-Mmm-YYYY HH:MM:SS +HHMM"' = Time2Internaldate(date_time)

    Convert 'date_time' to IMAP4 INTERNALDATE representation.

    Accepts an epoch number, a struct_time/9-tuple, or an already-quoted
    string (returned unchanged); raises ValueError otherwise."""

    if isinstance(date_time, (int, float)):
        tt = time.localtime(date_time)
    elif isinstance(date_time, (tuple, time.struct_time)):
        tt = date_time
    elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('"','"'):
        return date_time        # Assume in correct format
    else:
        raise ValueError("date_time not of a known type")

    # Zone offset is the negated local UTC offset, DST-aware via tt[-1].
    if time.daylight and tt[-1]:
        zone = -time.altzone
    else:
        zone = -time.timezone
    return ('"%2d-%s-%04d %02d:%02d:%02d %+03d%02d"' %
        ((tt[2], MonthNames[tt[1]], tt[0]) + tt[3:6] +
         divmod(zone//60, 60)))
# Matches the flag list in a server response, e.g. '... FLAGS (\Seen \Deleted)'.
FLAGS_cre = re.compile(r'.*FLAGS \((?P<flags>[^\)]*)\)')

def ParseFlags(resp):

    """('flag', ...) = ParseFlags(line)

    Convert IMAP4 flags response to python tuple; an unmatched line yields ()."""

    match = FLAGS_cre.match(resp)
    return tuple(match.group('flags').split()) if match else ()
|
ws.py | # pyright: reportGeneralTypeIssues=false
import threading
from typing import Optional
import websocket # type: ignore
from jacdac.transport import Transport
class WebSocketTransport(Transport):
    """Jacdac transport that forwards packets over a websocket connection.

    The websocket client's event loop runs on a daemon thread; 'opened'
    gates outgoing traffic until the connection is established."""

    def __init__(self, url: str):
        self.url = url
        self.ws: Optional[websocket.WebSocketApp] = None
        self.opened = False
        self.open()

    def open(self) -> None:
        # Build the client and pump its event loop on a background daemon thread.
        self.ws = websocket.WebSocketApp(self.url,
                                         on_open=self.on_open,
                                         on_message=self.on_message,
                                         on_error=self.on_error,
                                         on_close=self.on_close)
        t = threading.Thread(target=self.ws.run_forever)  # type: ignore
        t.daemon = True
        t.start()

    def send(self, pkt: bytes) -> None:
        # Packets sent before the connection is open are silently dropped.
        if self.opened:
            self.ws.send(pkt, opcode=2)  # type: ignore  # opcode 2 = binary frame

    def on_message(self, _, message: bytes):
        # Forward inbound frames to the receive hook (installed by the base Transport).
        if self.on_receive:
            self.on_receive(message)

    def on_error(self, _, error: str):
        # Only surface errors once the connection was established.
        if self.opened:
            print(error)

    def on_close(self, _, close_status_code: int, close_msg: str):
        self.opened = False

    def on_open(self, _):
        print("devtools server connected at " + self.url)
        self.opened = True
|
advanced-reboot.py | #
#ptf --test-dir ptftests fast-reboot --qlen=1000 --platform remote -t 'verbose=True;dut_username="admin";dut_hostname="10.0.0.243";reboot_limit_in_seconds=30;portchannel_ports_file="/tmp/portchannel_interfaces.json";vlan_ports_file="/tmp/vlan_interfaces.json";ports_file="/tmp/ports.json";dut_mac="4c:76:25:f5:48:80";default_ip_range="192.168.0.0/16";vlan_ip_range="172.0.0.0/22";arista_vms="[\"10.0.0.200\",\"10.0.0.201\",\"10.0.0.202\",\"10.0.0.203\"]"' --platform-dir ptftests --disable-vxlan --disable-geneve --disable-erspan --disable-mpls --disable-nvgre
#
#
# This test checks that the DUT is able to perform the FastReboot procedure
#
# This test assumes that fast-reboot/warm-reboot is initiated by running the /usr/bin/{fast,warm}-reboot command.
#
# The test uses "pings". The "pings" are packets which are sent through the dataplane in two directions:
# 1. From one of the vlan interfaces to the T1 device. The source ip, source interface, and destination IP are chosen randomly from valid choices. The number of packets is 100.
# 2. From all of the portchannel ports to all of the vlan ports. The source ip, source interface, and destination IP are chosen sequentially from valid choices.
# Currently we have 500 distinct destination vlan addresses. Our target is to have 1000 of them.
#
# The test sequence is as follows:
# 1. Check that the DUT is stable. That means that "pings" work in both directions: from T1 to servers and from servers to T1.
# 2. If the DUT is stable, the test starts continuously pinging the DUT in both directions.
# 3. The test runs '/usr/bin/{fast,warm}-reboot' on the DUT remotely. The ssh key is supposed to be uploaded by ansible before the test.
# 4. As soon as it sees that pings start failing in one of the directions, the test registers the start of a dataplane disruption.
# 5. As soon as the test sees that pings start working for the DUT in both directions, it registers the end of the dataplane disruption.
# 6. If the length of the disruption is less than 30 seconds (if not redefined by parameter) - the test passes.
# 7. If there are any drops while the control plane is down - the test fails.
# 8. When the test starts the reboot procedure, it connects to all VMs (which emulate T1) and starts fetching the status of BGP and LACP.
#    LACP is supposed to go down exactly once; if not - the test fails.
#    If the default value of the BGP graceful restart timeout is less than 120 seconds, the test fails.
#    If BGP graceful restart is not enabled on the DUT, the test fails.
#    If the BGP graceful restart timeout is almost exceeded (less than 15 seconds remaining), the test fails.
#    If BGP routes disappear more than once, the test fails.
#
# The test expects to be run with the link state propagation helper.
# That helper propagates the link state from a fanout switch port to the corresponding VM port.
#
import ptf
from ptf.base_tests import BaseTest
from ptf import config
import ptf.testutils as testutils
from ptf.testutils import *
from ptf.dataplane import match_exp_pkt
import datetime
import time
import subprocess
from ptf.mask import Mask
import socket
import ptf.packet as scapy
import thread
import threading
from multiprocessing.pool import ThreadPool, TimeoutError
import os
import signal
import random
import struct
import socket
from pprint import pprint
from fcntl import ioctl
import sys
import json
import re
from collections import defaultdict
import json
import paramiko
import Queue
import pickle
from operator import itemgetter
import scapy.all as scapyall
import itertools
from arista import Arista
import sad_path as sp
class StateMachine():
    """Thread-safe state holder.

    Records the last time each state was entered and tracks a separate
    'flooding' flag; every accessor takes the same re-entrant lock."""

    def __init__(self, init_state='init'):
        self.state_lock = threading.RLock()
        self.state_time = {}  # last time each state was entered
        self.state = None
        self.flooding = False
        self.set(init_state)

    def set(self, state):
        """Enter 'state' and stamp its entry time."""
        with self.state_lock:
            self.state = state
            self.state_time[state] = datetime.datetime.now()

    def get(self):
        """Return the current state."""
        with self.state_lock:
            return self.state

    def get_state_time(self, state):
        """Return the datetime at which 'state' was last entered."""
        with self.state_lock:
            return self.state_time[state]

    def set_flooding(self, flooding):
        with self.state_lock:
            self.flooding = flooding

    def is_flooding(self):
        with self.state_lock:
            return self.flooding
class ReloadTest(BaseTest):
    """PTF test measuring dataplane disruption across a fast/warm reboot of the DUT."""
    # Per-operation socket/receive timeout, seconds.
    TIMEOUT = 0.5
    # MAC address templates; '{:04}' is filled with a per-host counter.
    VLAN_BASE_MAC_PATTERN = '72060001{:04}'
    LAG_BASE_MAC_PATTERN = '5c010203{:04}'
    # Large receive buffer so the sniffer does not drop packets during bursts.
    SOCKET_RECV_BUFFER_SIZE = 10 * 1024 * 1024
def __init__(self):
    """Read and validate test parameters, choose the log file, and create the
    state machines, locks and thread pool used throughout the reboot test."""
    BaseTest.__init__(self)
    self.fails = {}
    self.info = {}
    self.cli_info = {}
    self.logs_info = {}
    self.log_lock = threading.RLock()
    self.vm_handle = None
    self.pre_handle = None
    self.test_params = testutils.test_params_get()
    self.check_param('verbose', False, required=False)
    self.check_param('dut_username', '', required=True)
    self.check_param('dut_hostname', '', required=True)
    self.check_param('reboot_limit_in_seconds', 30, required=False)
    self.check_param('reboot_type', 'fast-reboot', required=False)
    self.check_param('graceful_limit', 180, required=False)
    self.check_param('portchannel_ports_file', '', required=True)
    self.check_param('vlan_ports_file', '', required=True)
    self.check_param('ports_file', '', required=True)
    self.check_param('dut_mac', '', required=True)
    self.check_param('dut_vlan_ip', '', required=True)
    self.check_param('default_ip_range', '', required=True)
    self.check_param('vlan_ip_range', '', required=True)
    self.check_param('lo_prefix', '10.1.0.32/32', required=False)
    self.check_param('lo_v6_prefix', 'fc00:1::/64', required=False)
    self.check_param('arista_vms', [], required=True)
    self.check_param('min_bgp_gr_timeout', 15, required=False)
    self.check_param('warm_up_timeout_secs', 300, required=False)
    self.check_param('dut_stabilize_secs', 30, required=False)
    self.check_param('preboot_files', None, required = False)
    self.check_param('preboot_oper', None, required = False) # preboot sad path to inject before warm-reboot
    self.check_param('allow_vlan_flooding', False, required = False)
    self.check_param('sniff_time_incr', 60, required = False)
    # Normalize an empty or literal 'None' preboot_oper to a real None.
    if not self.test_params['preboot_oper'] or self.test_params['preboot_oper'] == 'None':
        self.test_params['preboot_oper'] = None
    # Log file name encodes reboot type and, if present, the preboot operation.
    if self.test_params['preboot_oper'] is not None:
        self.log_file_name = '/tmp/%s-%s.log' % (self.test_params['reboot_type'], self.test_params['preboot_oper'])
    else:
        self.log_file_name = '/tmp/%s.log' % self.test_params['reboot_type']
    self.log_fp = open(self.log_file_name, 'w')
    # a flag whether to populate FDB by sending traffic from simulated servers
    # usually ARP responder will make switch populate its FDB table, but Mellanox on 201803 has
    # no L3 ARP support, so this flag is used to W/A this issue
    self.setup_fdb_before_test = self.test_params.get('setup_fdb_before_test', False)
    # Default settings
    self.ping_dut_pkts = 10
    self.arp_ping_pkts = 1
    self.nr_pc_pkts = 100
    self.nr_tests = 3
    self.reboot_delay = 10
    self.task_timeout = 300   # Wait up to 5 minutes for tasks to complete
    self.max_nr_vl_pkts = 500 # FIXME: should be 1000.
    # But ptf is not fast enough + swss is slow for FDB and ARP entries insertions
    self.timeout_thr = None
    self.time_to_listen = 180.0 # Listen for more then 180 seconds, to be used in sniff_in_background method.
    # Inter-packet interval, to be used in send_in_background method.
    # Improve this interval to gain more precision of disruptions.
    self.send_interval = 0.0035
    self.packets_to_send = min(int(self.time_to_listen / (self.send_interval + 0.0015)), 45000) # How many packets to be sent in send_in_background method
    # Thread pool for background watching operations
    self.pool = ThreadPool(processes=3)
    # State watcher attributes
    self.watching = False
    self.cpu_state = StateMachine('init')
    self.asic_state = StateMachine('init')
    self.vlan_state = StateMachine('init')
    self.vlan_lock = threading.RLock()
    self.asic_state_time = {} # Recording last asic state entering time
    self.asic_vlan_reach = [] # Recording asic vlan reachability
    self.recording = False    # Knob for recording asic_vlan_reach
    # light_probe:
    #    True : when one direction probe fails, don't probe another.
    #    False: when one direction probe fails, continue probe another.
    self.light_probe = False
    # We have two data plane traffic generators which are mutualy exclusive
    # one is the reachability_watcher thread
    # second is the fast send_in_background
    self.dataplane_io_lock = threading.Lock()
    self.allow_vlan_flooding = bool(self.test_params['allow_vlan_flooding'])
    return
def read_json(self, name):
    """Load and return the JSON document at the path stored in test_params[name]."""
    path = self.test_params[name]
    with open(path) as handle:
        return json.load(handle)
def read_port_indices(self):
    """Return the interface-name -> ptf-port-index map loaded from 'ports_file'."""
    return self.read_json('ports_file')
def read_portchannel_ports(self):
    """Return the flat list of ptf port indices of all port-channel member ports."""
    content = self.read_json('portchannel_ports_file')
    return [self.port_indices[member]
            for pc in content.values()
            for member in pc['members']]
def read_vlan_ports(self):
    """Return ptf port indices of the members of the single configured VLAN.

    Raises when more than one VLAN is present.
    NOTE(review): 'content.values()[0]' indexes a dict values view - this is
    Python 2 only (a py3 port would need list(content.values())[0])."""
    content = self.read_json('vlan_ports_file')
    if len(content) > 1:
        raise Exception("Too many vlans")
    return [self.port_indices[ifname] for ifname in content.values()[0]['members']]
def check_param(self, param, default, required=False):
    """Ensure test parameter 'param' exists: raise when required, otherwise apply 'default'."""
    if param in self.test_params:
        return
    if required:
        raise Exception("Test parameter '%s' is required" % param)
    self.test_params[param] = default
def random_ip(self, ip):
    """Return a random usable host address (dotted quad) inside network 'ip' ('a.b.c.d/len').

    Host numbers 0 and 1 and the top two addresses are avoided, so the result
    is never the network, a typical gateway, or the broadcast address.
    (Removed the unused 'net_addr' local from the original.)"""
    _, mask = ip.split('/')
    n_hosts = 2**(32 - int(mask))
    random_host = random.randint(2, n_hosts - 2)
    return self.host_ip(ip, random_host)
def host_ip(self, net_ip, host_number):
    """Return the dotted-quad address of host 'host_number' inside network 'net_ip' ('a.b.c.d/len').

    Raises when host_number exceeds the usable host range of the network."""
    base_addr, mask = net_ip.split('/')
    n_hosts = 2**(32 - int(mask))
    if host_number > (n_hosts - 2):
        raise Exception("host number %d is greater than number of hosts %d in the network %s" % (host_number, n_hosts - 2, net_ip))
    base = struct.unpack(">I", socket.inet_aton(base_addr))[0]
    network = base & (2**32 - n_hosts)
    return socket.inet_ntoa(struct.pack(">I", network + host_number))
def random_port(self, ports):
    """Return one randomly chosen entry from 'ports'."""
    chosen = random.choice(ports)
    return chosen
def log(self, message, verbose=False):
    """Write a timestamped 'message' to stdout and to the test log file.

    Verbose-only messages are printed to stdout only when the 'verbose'
    test parameter is set; they are always written to the file."""
    current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    with self.log_lock:
        if verbose and self.test_params['verbose'] or not verbose:
            print "%s : %s" % (current_time, message)
        self.log_fp.write("%s : %s\n" % (current_time, message))
def timeout(self, func, seconds, message):
    """Run 'func' on the thread pool, waiting at most 'seconds'.

    Any error (including TimeoutError) is re-raised as the same exception
    type but carrying 'message' instead of the original text."""
    async_res = self.pool.apply_async(func)
    try:
        res = async_res.get(timeout=seconds)
    except Exception as err:
        # TimeoutError and Exception's from func
        # captured here
        raise type(err)(message)
    return res
def generate_vlan_servers(self):
    """Build the simulated-server map {ptf_port: {ip: mac}} for the VLAN range.

    Host count is capped at max_nr_vl_pkts and hosts are spread round-robin
    over the VLAN ports. Also records the count in self.nr_vl_pkts."""
    vlan_host_map = defaultdict(dict)
    vlan_ip_range = self.test_params['vlan_ip_range']
    _, mask = vlan_ip_range.split('/')
    # -3 excludes network, gateway and broadcast addresses.
    n_hosts = min(2**(32 - int(mask)) - 3, self.max_nr_vl_pkts)
    for counter, i in enumerate(xrange(2, n_hosts + 2)):
        mac = self.VLAN_BASE_MAC_PATTERN.format(counter)
        port = self.vlan_ports[i % len(self.vlan_ports)]
        addr = self.host_ip(vlan_ip_range, i)
        vlan_host_map[port][addr] = mac
    self.nr_vl_pkts = n_hosts
    return vlan_host_map
def generate_arp_responder_conf(self, vlan_host_map):
    """Build the arp_responder config: each port's {ip: mac} table keyed by 'eth<N>'."""
    return {'eth{}'.format(port): hosts for port, hosts in vlan_host_map.items()}
def dump_arp_responder_config(self, dump):
    """Write *dump* as JSON to /tmp for the arp_responder process to pick up."""
    # Suffix the file with the preboot operation so parallel runs don't clash.
    suffix = "" if self.preboot_oper is None else "_%s" % self.preboot_oper
    filename = "/tmp/from_t1%s.json" % suffix
    with open(filename, "w") as fp:
        json.dump(dump, fp)
def get_peer_dev_info(self):
    """Seed self.vm_dut_map with one entry per ARISTA peer from the peer_dev_info file."""
    content = self.read_json('peer_dev_info')
    for key in content.keys():
        if 'ARISTA' not in key:
            continue
        # Store the management address now; the port lists are filled in
        # later by get_neigh_port_info().
        self.vm_dut_map[key] = {
            'mgmt_addr': content[key]['mgmt_addr'],
            'dut_ports': [],
            'neigh_ports': [],
            'ptf_ports': [],
        }
def get_portchannel_info(self):
    """Fill dut_portchannel/neigh_portchannel in vm_dut_map from the portchannel ports file."""
    content = self.read_json('portchannel_ports_file')
    for key in content.keys():
        for member in content[key]['members']:
            for vm_key in self.vm_dut_map.keys():
                # A member port belonging to a VM identifies which VM this
                # portchannel faces; requires dut_ports to be populated first.
                if member in self.vm_dut_map[vm_key]['dut_ports']:
                    self.vm_dut_map[vm_key]['dut_portchannel'] = str(key)
                    # The neighbor side is always Port-Channel1 on the VMs.
                    self.vm_dut_map[vm_key]['neigh_portchannel'] = 'Port-Channel1'
                    break
def get_neigh_port_info(self):
    """Record per-neighbor DUT/neighbor/PTF port lists from the neigh_port_info file."""
    content = self.read_json('neigh_port_info')
    for key in content.keys():
        # key is the DUT-side port; entries are matched to VMs by peer name,
        # so get_peer_dev_info() must have run first.
        if content[key]['name'] in self.vm_dut_map.keys():
            self.vm_dut_map[content[key]['name']]['dut_ports'].append(str(key))
            self.vm_dut_map[content[key]['name']]['neigh_ports'].append(str(content[key]['port']))
            self.vm_dut_map[content[key]['name']]['ptf_ports'].append(self.port_indices[key])
def build_peer_mapping(self):
    '''
    Builds a map of the form
        'ARISTA01T1': {'mgmt_addr':
                       'neigh_portchannel'
                       'dut_portchannel'
                       'neigh_ports'
                       'dut_ports'
                       'ptf_ports'
                      }
    '''
    self.vm_dut_map = {}
    # Each preboot file is expected at /tmp/<name>.json; register the paths
    # so read_json() can find them.
    for file in self.test_params['preboot_files'].split(','):
        self.test_params[file] = '/tmp/' + file + '.json'
    # Order matters: ports must be known before portchannels can be mapped.
    self.get_peer_dev_info()
    self.get_neigh_port_info()
    self.get_portchannel_info()
def populate_fail_info(self, fails):
    """Merge the per-key failure sets in *fails* into the accumulated self.fails."""
    for key, messages in fails.items():
        # setdefault creates the bucket the first time this key is seen.
        self.fails.setdefault(key, set())
        self.fails[key] |= messages
def get_preboot_info(self):
    '''
    Prepares the msg string to log when a preboot_oper is defined.
    preboot_oper can be represented in the following ways
       eg. 'preboot_oper' - a single VM will be selected and preboot_oper will be applied to it
           'neigh_bgp_down:2' - 2 VMs will be selected and preboot_oper will be applied to the selected 2 VMs
           'neigh_lag_member_down:3:1' - this case is used for lag member down operation only. This indicates that
           3 VMs will be selected and 1 of the lag members in the porchannel will be brought down
    '''
    if not self.preboot_oper:
        return ''
    if ':' not in self.preboot_oper:
        return 'Preboot oper: %s ' % self.preboot_oper
    oper_list = self.preboot_oper.split(':')
    msg = 'Preboot oper: %s ' % oper_list[0]  # extract the preboot oper_type
    if len(oper_list) > 2:
        # oper:no of VMs:no of lag members
        msg += 'Number of sad path VMs: %s Lag member down in a portchannel: %s' % (oper_list[-2], oper_list[-1])
    else:
        # oper:no of VMs
        msg += 'Number of sad path VMs: %s' % oper_list[-1]
    return msg
def setUp(self):
    """
    Prepare the test run: read port maps, apply the optional preboot (sad
    path) operation, pre-build all packets/expectation masks, start
    arp_responder and attach to the PTF dataplane.
    """
    self.fails['dut'] = set()
    self.port_indices = self.read_port_indices()
    self.portchannel_ports = self.read_portchannel_ports()
    self.vlan_ports = self.read_vlan_ports()
    # Peer mapping is only needed when a preboot operation is requested.
    if self.test_params['preboot_oper'] is not None:
        self.build_peer_mapping()
    self.vlan_ip_range = self.test_params['vlan_ip_range']
    self.default_ip_range = self.test_params['default_ip_range']
    self.limit = datetime.timedelta(seconds=self.test_params['reboot_limit_in_seconds'])
    self.reboot_type = self.test_params['reboot_type']
    self.preboot_oper = self.test_params['preboot_oper']
    if self.reboot_type not in ['fast-reboot', 'warm-reboot']:
        raise ValueError('Not supported reboot_type %s' % self.reboot_type)
    self.dut_ssh = self.test_params['dut_username'] + '@' + self.test_params['dut_hostname']
    self.dut_mac = self.test_params['dut_mac']
    # get VM info
    # arista_vms arrives as a bracketed comma-separated string; strip the
    # surrounding brackets, then any per-item quotes.
    arista_vms = self.test_params['arista_vms'][1:-1].split(",")
    self.ssh_targets = []
    for vm in arista_vms:
        if (vm.startswith("'") or vm.startswith('"')) and (vm.endswith("'") or vm.endswith('"')):
            self.ssh_targets.append(vm[1:-1])
        else:
            self.ssh_targets.append(vm)
    self.log("Converted addresses VMs: %s" % str(self.ssh_targets))
    if self.preboot_oper is not None:
        # Apply the requested pre-reboot fault and verify it took effect.
        self.log("Preboot Operations:")
        self.pre_handle = sp.PrebootTest(self.preboot_oper, self.ssh_targets, self.portchannel_ports, self.vm_dut_map, self.test_params, self.dut_ssh)
        (self.ssh_targets, self.portchannel_ports, self.neigh_vm), (log_info, fails) = self.pre_handle.setup()
        self.populate_fail_info(fails)
        for log in log_info:
            self.log(log)
        log_info, fails = self.pre_handle.verify()
        self.populate_fail_info(fails)
        for log in log_info:
            self.log(log)
        self.log(" ")
    self.vlan_host_map = self.generate_vlan_servers()
    arp_responder_conf = self.generate_arp_responder_conf(self.vlan_host_map)
    self.dump_arp_responder_config(arp_responder_conf)
    # Pick a random VLAN port and one of its servers to source traffic from.
    self.random_vlan = random.choice(self.vlan_ports)
    self.from_server_src_port = self.random_vlan
    self.from_server_src_addr = random.choice(self.vlan_host_map[self.random_vlan].keys())
    self.from_server_dst_addr = self.random_ip(self.test_params['default_ip_range'])
    self.from_server_dst_ports = self.portchannel_ports
    self.log("Test params:")
    self.log("DUT ssh: %s" % self.dut_ssh)
    self.log("DUT reboot limit in seconds: %s" % self.limit)
    self.log("DUT mac address: %s" % self.dut_mac)
    self.log("From server src addr: %s" % self.from_server_src_addr)
    self.log("From server src port: %s" % self.from_server_src_port)
    self.log("From server dst addr: %s" % self.from_server_dst_addr)
    self.log("From server dst ports: %s" % self.from_server_dst_ports)
    self.log("From upper layer number of packets: %d" % self.nr_vl_pkts)
    self.log("VMs: %s" % str(self.test_params['arista_vms']))
    self.log("Reboot type is %s" % self.reboot_type)
    self.generate_from_t1()
    self.generate_from_vlan()
    self.generate_ping_dut_lo()
    self.generate_arp_ping_packet()
    if self.reboot_type == 'warm-reboot':
        self.log(self.get_preboot_info())
        # Pre-generate list of packets to be sent in send_in_background method.
        generate_start = datetime.datetime.now()
        self.generate_bidirectional()
        self.log("%d packets are ready after: %s" % (len(self.packets_list), str(datetime.datetime.now() - generate_start)))
    self.dataplane = ptf.dataplane_instance
    # Enlarge receive buffers so bursts are not dropped by the PTF sockets.
    for p in self.dataplane.ports.values():
        port = p.get_packet_source()
        port.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.SOCKET_RECV_BUFFER_SIZE)
    self.dataplane.flush()
    if config["log_dir"] != None:
        filename = os.path.join(config["log_dir"], str(self)) + ".pcap"
        self.dataplane.start_pcap(filename)
    self.log("Enabling arp_responder")
    self.cmd(["supervisorctl", "restart", "arp_responder"])
    return
def setup_fdb(self):
    """ simulate traffic generated from servers to help populate FDB """
    vlan_map = self.vlan_host_map
    # One TCP template; the per-server source MAC/IP are rewritten in the
    # loop below before each send.
    from_servers_pkt = testutils.simple_tcp_packet(
        eth_dst=self.dut_mac,
        ip_dst=self.from_server_dst_addr,
    )
    for port in vlan_map:
        for addr in vlan_map[port]:
            mac = vlan_map[port][addr]
            from_servers_pkt[scapy.Ether].src = self.hex_to_mac(mac)
            from_servers_pkt[scapy.IP].src = addr
            testutils.send(self, port, from_servers_pkt)
    # make sure orchagent processed new FDBs
    time.sleep(1)
def tearDown(self):
    """Stop helpers started in setUp(): arp_responder, the watcher thread, pcap capture and the log file."""
    self.log("Disabling arp_responder")
    self.cmd(["supervisorctl", "stop", "arp_responder"])
    # Stop watching DUT
    self.watching = False
    if config["log_dir"] != None:
        self.dataplane.stop_pcap()
    self.log_fp.close()
def get_if(self, iff, cmd):
    """
    Execute the interface ioctl *cmd* for interface name *iff* and return
    the raw ifreq buffer bytes.
    """
    s = socket.socket()
    try:
        # struct ifreq: 16-byte interface name followed by 16 bytes of payload.
        ifreq = ioctl(s, cmd, struct.pack("16s16x", iff))
    finally:
        # Close the helper socket even when the ioctl raises
        # (the original leaked it on failure).
        s.close()
    return ifreq
def get_mac(self, iff):
    """Return the MAC address of local interface *iff*, formatted aa:bb:cc:dd:ee:ff."""
    SIOCGIFHWADDR = 0x8927          # Get hardware address
    # Bytes 18..23 of the returned ifreq buffer hold the 6-byte hardware
    # address; ord() implies this runs on python2 byte-strings.
    return ':'.join(['%02x' % ord(char) for char in self.get_if(iff, SIOCGIFHWADDR)[18:24]])
@staticmethod
def hex_to_mac(hex_mac):
return ':'.join(hex_mac[i:i+2] for i in range(0, len(hex_mac), 2))
def generate_from_t1(self):
    """
    Pre-build the T1->server TCP packets (one per simulated server, entering
    on a random portchannel port) plus a loose expectation mask that matches
    any TCP packet with dport 5000.
    """
    self.from_t1 = []
    # for each server host create a packet destinating server IP
    for counter, host_port in enumerate(self.vlan_host_map):
        src_addr = self.random_ip(self.default_ip_range)
        src_port = self.random_port(self.portchannel_ports)
        for server_ip in self.vlan_host_map[host_port]:
            dst_addr = server_ip
            # generate source MAC address for traffic based on LAG_BASE_MAC_PATTERN
            mac_addr = self.hex_to_mac(self.LAG_BASE_MAC_PATTERN.format(counter))
            packet = simple_tcp_packet(eth_src=mac_addr,
                                       eth_dst=self.dut_mac,
                                       ip_src=src_addr,
                                       ip_dst=dst_addr,
                                       ip_ttl=255,
                                       tcp_dport=5000)
            self.from_t1.append((src_port, str(packet)))
    # expect any packet with dport 5000
    exp_packet = simple_tcp_packet(
        ip_src="0.0.0.0",
        ip_dst="0.0.0.0",
        tcp_dport=5000,
    )
    self.from_t1_exp_packet = Mask(exp_packet)
    # Addresses, checksums and TTL all vary per packet; only the TCP dport
    # must match.
    self.from_t1_exp_packet.set_do_not_care_scapy(scapy.Ether, "src")
    self.from_t1_exp_packet.set_do_not_care_scapy(scapy.Ether, "dst")
    self.from_t1_exp_packet.set_do_not_care_scapy(scapy.IP, "src")
    self.from_t1_exp_packet.set_do_not_care_scapy(scapy.IP, "dst")
    self.from_t1_exp_packet.set_do_not_care_scapy(scapy.IP, "chksum")
    self.from_t1_exp_packet.set_do_not_care_scapy(scapy.TCP, "chksum")
    self.from_t1_exp_packet.set_do_not_care_scapy(scapy.IP, "ttl")
def generate_from_vlan(self):
    """Pre-build the server->T1 TCP probe and the mask used to match it on the LAG side."""
    request = simple_tcp_packet(
        eth_dst=self.dut_mac,
        ip_src=self.from_server_src_addr,
        ip_dst=self.from_server_dst_addr,
        tcp_dport=5000
    )
    # After routing the TTL is decremented to 63; MACs get rewritten, so
    # they are excluded from the match below.
    expected = simple_tcp_packet(
        ip_src=self.from_server_src_addr,
        ip_dst=self.from_server_dst_addr,
        ip_ttl=63,
        tcp_dport=5000,
    )
    mask = Mask(expected)
    mask.set_do_not_care_scapy(scapy.Ether, "src")
    mask.set_do_not_care_scapy(scapy.Ether, "dst")
    self.from_vlan_exp_packet = mask
    self.from_vlan_packet = str(request)
def generate_ping_dut_lo(self):
    """Pre-build an ICMP echo request to the DUT loopback plus the expected reply mask."""
    dut_lo_ipv4 = self.test_params['lo_prefix'].split('/')[0]
    request = simple_icmp_packet(eth_dst=self.dut_mac,
                                 ip_src=self.from_server_src_addr,
                                 ip_dst=dut_lo_ipv4)
    reply = simple_icmp_packet(eth_src=self.dut_mac,
                               ip_src=dut_lo_ipv4,
                               ip_dst=self.from_server_src_addr,
                               icmp_type='echo-reply')
    mask = Mask(reply)
    # Destination MAC, IP id and checksum vary per reply.
    mask.set_do_not_care_scapy(scapy.Ether, "dst")
    mask.set_do_not_care_scapy(scapy.IP, "id")
    mask.set_do_not_care_scapy(scapy.IP, "chksum")
    self.ping_dut_exp_packet = mask
    self.ping_dut_packet = str(request)
def generate_arp_ping_packet(self):
    """
    Pre-build an ARP request between two randomly picked VLAN hosts and the
    expected ARP reply mask; records the source port in self.arp_src_port.
    """
    vlan_ip_range = self.test_params['vlan_ip_range']
    # Candidate host indices. list() keeps this working on python3 as well,
    # where range() returns a lazy object without remove(); identical
    # behavior on python2. (Also fixes the 'canadiates' typo.)
    vlan_port_candidates = list(range(len(self.vlan_ports)))
    vlan_port_candidates.remove(0)  # subnet prefix
    vlan_port_candidates.remove(1)  # subnet IP on dut
    src_idx = random.choice(vlan_port_candidates)
    vlan_port_candidates.remove(src_idx)  # ensure dst differs from src
    dst_idx = random.choice(vlan_port_candidates)
    src_port = self.vlan_ports[src_idx]
    dst_port = self.vlan_ports[dst_idx]
    src_mac = self.get_mac('eth%d' % src_port)
    src_addr = self.host_ip(vlan_ip_range, src_idx)
    dst_addr = self.host_ip(vlan_ip_range, dst_idx)
    packet = simple_arp_packet(eth_src=src_mac, arp_op=1, ip_snd=src_addr, ip_tgt=dst_addr, hw_snd=src_mac)
    expect = simple_arp_packet(eth_dst=src_mac, arp_op=2, ip_snd=dst_addr, ip_tgt=src_addr, hw_tgt=src_mac)
    self.log("ARP ping: src idx %d port %d mac %s addr %s" % (src_idx, src_port, src_mac, src_addr))
    self.log("ARP ping: dst idx %d port %d addr %s" % (dst_idx, dst_port, dst_addr))
    self.arp_ping = str(packet)
    self.arp_resp = Mask(expect)
    # Responder's own MAC and hardware fields are not known in advance.
    self.arp_resp.set_do_not_care_scapy(scapy.Ether, 'src')
    self.arp_resp.set_do_not_care_scapy(scapy.ARP, 'hwtype')
    self.arp_resp.set_do_not_care_scapy(scapy.ARP, 'hwsrc')
    self.arp_src_port = src_port
def generate_bidirectional(self):
    """
    This method is used to pre-generate packets to be sent in background thread.
    Packets are composed into a list, and present a bidirectional flow as next:
    five packet from T1, one packet from vlan.
    Each packet has sequential TCP Payload - to be identified later.
    """
    self.send_interval = self.time_to_listen / self.packets_to_send
    self.packets_list = []
    # Cycle endlessly over the pre-built T1 packets so every 4-of-5 slot
    # gets the next server in round-robin order.
    from_t1_iter = itertools.cycle(self.from_t1)
    for i in xrange(self.packets_to_send):
        # 60 zeroes of padding followed by the sequence number; examine_flow()
        # later parses this back with int().
        payload = '0' * 60 + str(i)
        if (i % 5) == 0 :   # From vlan to T1.
            packet = scapyall.Ether(self.from_vlan_packet)
            packet.load = payload
            from_port = self.from_server_src_port
        else:   # From T1 to vlan.
            src_port, packet = next(from_t1_iter)
            packet = scapyall.Ether(packet)
            packet.load = payload
            from_port = src_port
        self.packets_list.append((from_port, str(packet)))
def runTest(self):
    """
    Main test flow: start per-peer watch threads and the reboot thread, wait
    for the DUT to go down and come back, measure data/control plane
    downtime (fast-reboot: live probing; warm-reboot: send/sniff analysis),
    then emit a report and assert that no failures were recorded.
    """
    self.reboot_start = None
    no_routing_start = None
    no_routing_stop = None
    no_cp_replies = None
    upper_replies = []
    routing_always = False
    # One daemon thread + command queue per Arista peer.
    self.ssh_jobs = []
    for addr in self.ssh_targets:
        q = Queue.Queue()
        thr = threading.Thread(target=self.peer_state_check, kwargs={'ip': addr, 'queue': q})
        thr.setDaemon(True)
        self.ssh_jobs.append((thr, q))
        thr.start()
    # Reboot trigger thread; started later, once the DUT is warmed up.
    thr = threading.Thread(target=self.reboot_dut)
    thr.setDaemon(True)
    try:
        if self.setup_fdb_before_test:
            self.log("Run some server traffic to populate FDB table...")
            self.setup_fdb()
        self.log("Starting reachability state watch thread...")
        self.watching = True
        self.light_probe = False
        self.watcher_is_stopped = threading.Event() # Waiter Event for the Watcher state is stopped.
        self.watcher_is_running = threading.Event() # Waiter Event for the Watcher state is running.
        self.watcher_is_stopped.set() # By default the Watcher is not running.
        self.watcher_is_running.clear() # By default its required to wait for the Watcher started.
        # Give watch thread some time to wind up
        watcher = self.pool.apply_async(self.reachability_watcher)
        time.sleep(5)
        self.log("Check that device is alive and pinging")
        # add/assert/clear pattern: the failure is pre-registered so that a
        # raise inside the check leaves it recorded for the final report.
        self.fails['dut'].add("DUT is not ready for test")
        self.wait_dut_to_warm_up()
        self.fails['dut'].clear()
        self.log("Schedule to reboot the remote switch in %s sec" % self.reboot_delay)
        thr.start()
        self.log("Wait until Control plane is down")
        self.timeout(self.wait_until_cpu_port_down, self.task_timeout, "DUT hasn't shutdown in {} seconds".format(self.task_timeout))
        if self.reboot_type == 'fast-reboot':
            self.light_probe = True
        self.reboot_start = datetime.datetime.now()
        self.log("Dut reboots: reboot start %s" % str(self.reboot_start))
        if self.reboot_type == 'fast-reboot':
            self.log("Check that device is still forwarding data plane traffic")
            self.fails['dut'].add("Data plane has a forwarding problem after CPU went down")
            self.check_alive()
            self.fails['dut'].clear()
            self.log("Wait until control plane up")
            async_cpu_up = self.pool.apply_async(self.wait_until_cpu_port_up)
            self.log("Wait until data plane stops")
            async_forward_stop = self.pool.apply_async(self.check_forwarding_stop)
            try:
                async_cpu_up.get(timeout=self.task_timeout)
            except TimeoutError as e:
                self.log("DUT hasn't bootup in %d seconds" % self.task_timeout)
                self.fails['dut'].add("DUT hasn't booted up in %d seconds" % self.task_timeout)
                raise
            try:
                no_routing_start, upper_replies = async_forward_stop.get(timeout=self.task_timeout)
                self.log("Data plane was stopped, Waiting until it's up. Stop time: %s" % str(no_routing_start))
            except TimeoutError:
                # Forwarding never stopped: zero downtime from the dataplane view.
                self.log("Data plane never stop")
                routing_always = True
                upper_replies = [self.nr_vl_pkts]
            if no_routing_start is not None:
                no_routing_stop, _ = self.timeout(self.check_forwarding_resume,
                        self.task_timeout,
                        "DUT hasn't started to work for %d seconds" % self.task_timeout)
            else:
                no_routing_stop = datetime.datetime.min
                no_routing_start = datetime.datetime.min
            # Stop watching DUT
            self.watching = False
        if self.reboot_type == 'warm-reboot':
            # Send/capture the pre-generated bidirectional flow, then analyze it.
            self.send_and_sniff()
            # Stop watching DUT
            self.watching = False
            self.log("Stopping reachability state watch thread.")
            self.watcher_is_stopped.wait(timeout = 10) # Wait for the Watcher stopped.
            self.save_sniffed_packets()
            examine_start = datetime.datetime.now()
            self.log("Packet flow examine started %s after the reboot" % str(examine_start - self.reboot_start))
            self.examine_flow()
            self.log("Packet flow examine finished after %s" % str(datetime.datetime.now() - examine_start))
            if self.lost_packets:
                no_routing_stop, no_routing_start = datetime.datetime.fromtimestamp(self.no_routing_stop), datetime.datetime.fromtimestamp(self.no_routing_start)
                self.log("The longest disruption lasted %.3f seconds. %d packet(s) lost." % (self.max_disrupt_time, self.max_lost_id))
                self.log("Total disruptions count is %d. All disruptions lasted %.3f seconds. Total %d packet(s) lost" % \
                    (self.disrupts_count, self.total_disrupt_time, self.total_disrupt_packets))
            else:
                no_routing_start = self.reboot_start
                no_routing_stop = self.reboot_start
        # wait until all bgp session are established
        self.log("Wait until bgp routing is up on all devices")
        for _, q in self.ssh_jobs:
            q.put('quit')
        def wait_for_ssh_threads():
            # Keep nudging the peer workers until they all exit, then join.
            while any(thr.is_alive() for thr, _ in self.ssh_jobs):
                for _, q in self.ssh_jobs:
                    q.put('go')
                time.sleep(self.TIMEOUT)
            for thr, _ in self.ssh_jobs:
                thr.join()
        self.timeout(wait_for_ssh_threads, self.task_timeout, "SSH threads haven't finished for %d seconds" % self.task_timeout)
        self.log("Data plane works again. Start time: %s" % str(no_routing_stop))
        self.log("")
        if self.reboot_type == 'fast-reboot':
            no_cp_replies = self.extract_no_cpu_replies(upper_replies)
        # Record limit violations as failures for the final assert.
        if no_routing_stop - no_routing_start > self.limit:
            self.fails['dut'].add("Downtime must be less then %s seconds. It was %s" \
                % (self.test_params['reboot_limit_in_seconds'], str(no_routing_stop - no_routing_start)))
        if no_routing_stop - self.reboot_start > datetime.timedelta(seconds=self.test_params['graceful_limit']):
            self.fails['dut'].add("%s cycle must be less than graceful limit %s seconds" % (self.reboot_type, self.test_params['graceful_limit']))
        if self.reboot_type == 'fast-reboot' and no_cp_replies < 0.95 * self.nr_vl_pkts:
            self.fails['dut'].add("Dataplane didn't route to all servers, when control-plane was down: %d vs %d" % (no_cp_replies, self.nr_vl_pkts))
        if self.reboot_type == 'warm-reboot':
            if self.preboot_oper is not None and self.pre_handle is not None:
                self.log("Postboot checks:")
                log_info, fails = self.pre_handle.verify(pre_check=False)
                self.populate_fail_info(fails)
                for log in log_info:
                    self.log(log)
                self.log(" ")
            else:
                # verify there are no interface flaps after warm boot
                self.neigh_lag_status_check()
    except Exception as e:
        self.fails['dut'].add(e)
    finally:
        # Stop watching DUT
        self.watching = False
        # revert to pretest state
        if self.preboot_oper is not None and self.pre_handle is not None:
            self.log("Revert to preboot state:")
            self.pre_handle.revert()
            self.log(" ")
        # Generating report
        self.log("="*50)
        self.log("Report:")
        self.log("="*50)
        self.log("LACP/BGP were down for (extracted from cli):")
        self.log("-"*50)
        for ip in sorted(self.cli_info.keys()):
            self.log(" %s - lacp: %7.3f (%d) po_events: (%d) bgp v4: %7.3f (%d) bgp v6: %7.3f (%d)" \
                % (ip, self.cli_info[ip]['lacp'][1], self.cli_info[ip]['lacp'][0], \
                self.cli_info[ip]['po'][1], \
                self.cli_info[ip]['bgp_v4'][1], self.cli_info[ip]['bgp_v4'][0],\
                self.cli_info[ip]['bgp_v6'][1], self.cli_info[ip]['bgp_v6'][0]))
        self.log("-"*50)
        self.log("Extracted from VM logs:")
        self.log("-"*50)
        for ip in sorted(self.logs_info.keys()):
            self.log("Extracted log info from %s" % ip)
            for msg in sorted(self.logs_info[ip].keys()):
                if not msg in [ 'error', 'route_timeout' ]:
                    self.log(" %s : %d" % (msg, self.logs_info[ip][msg]))
                else:
                    self.log(" %s" % self.logs_info[ip][msg])
            self.log("-"*50)
        self.log("Summary:")
        self.log("-"*50)
        if no_routing_stop:
            self.log("Downtime was %s" % str(no_routing_stop - no_routing_start))
            reboot_time = "0:00:00" if routing_always else str(no_routing_stop - self.reboot_start)
            self.log("Reboot time was %s" % reboot_time)
            self.log("Expected downtime is less then %s" % self.limit)
        if self.reboot_type == 'fast-reboot' and no_cp_replies:
            self.log("How many packets were received back when control plane was down: %d Expected: %d" % (no_cp_replies, self.nr_vl_pkts))
        has_info = any(len(info) > 0 for info in self.info.values())
        if has_info:
            self.log("-"*50)
            self.log("Additional info:")
            self.log("-"*50)
            for name, info in self.info.items():
                for entry in info:
                    self.log("INFO:%s:%s" % (name, entry))
            self.log("-"*50)
        is_good = all(len(fails) == 0 for fails in self.fails.values())
        errors = ""
        if not is_good:
            self.log("-"*50)
            self.log("Fails:")
            self.log("-"*50)
            errors = "\n\nSomething went wrong. Please check output below:\n\n"
            for name, fails in self.fails.items():
                for fail in fails:
                    self.log("FAILED:%s:%s" % (name, fail))
                    errors += "FAILED:%s:%s\n" % (name, fail)
        self.log("="*50)
        # Single final assertion, so the full report always gets printed first.
        self.assertTrue(is_good, errors)
def neigh_lag_status_check(self):
    """
    Ensure there are no interface flaps after warm-boot
    """
    for neigh in self.ssh_targets:
        self.neigh_handle = Arista(neigh, None, self.test_params)
        self.neigh_handle.connect()
        fails, flap_cnt = self.neigh_handle.verify_neigh_lag_no_flap()
        self.neigh_handle.disconnect()
        # Merge any per-neighbor failures into the accumulated set.
        self.fails[neigh] |= fails
        if flap_cnt:
            self.fails[neigh].add("LAG flapped %s times on %s after warm boot" % (flap_cnt, neigh))
        else:
            self.log("No LAG flaps seen on %s after warm boot" % neigh)
def extract_no_cpu_replies(self, arr):
    """
    This function tries to extract number of replies from dataplane, when control plane is non working
    """
    # Drop all zero samples (probes that got no replies carry no information).
    non_zero = [value for value in arr if value > 0]
    if not non_zero:
        # No replies at all; the original code raised IndexError here.
        return 0
    # If the last sample is smaller than the one before it, the final probe
    # was likely cut short - use the previous (complete) sample instead.
    if len(non_zero) > 1 and non_zero[-1] < non_zero[-2]:
        return non_zero[-2]
    return non_zero[-1]
def reboot_dut(self):
    """Background thread body: after reboot_delay seconds, trigger the reboot on the DUT over ssh."""
    time.sleep(self.reboot_delay)
    self.log("Rebooting remote side")
    stdout, stderr, return_code = self.cmd(["ssh", "-oStrictHostKeyChecking=no", self.dut_ssh, "sudo " + self.reboot_type])
    if stdout != []:
        self.log("stdout from %s: %s" % (self.reboot_type, str(stdout)))
    if stderr != []:
        self.log("stderr from %s: %s" % (self.reboot_type, str(stderr)))
    self.log("return code from %s: %s" % (self.reboot_type, str(return_code)))
    # Note: a timeout reboot in ssh session will return a 255 code
    if return_code not in [0, 255]:
        # Unexpected failure: abort the whole test from this worker thread
        # by raising KeyboardInterrupt in the main thread.
        thread.interrupt_main()
    return
def cmd(self, cmds):
    """Run *cmds* (an argv list, no shell) and return (stdout, stderr, return_code)."""
    proc = subprocess.Popen(cmds,
                            shell=False,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return out, err, proc.returncode
def peer_state_check(self, ip, queue):
    """Worker thread body: drive an Arista peer session and store its results per peer IP."""
    ssh = Arista(ip, queue, self.test_params)
    # run() blocks, consuming 'go'/'quit' commands from the queue, and
    # returns (fails, info, cli_info, logs_info) for this peer.
    self.fails[ip], self.info[ip], self.cli_info[ip], self.logs_info[ip] = ssh.run()
def wait_until_cpu_port_down(self):
    """Poll the control-plane state (nudging the ssh workers each pass) until it reports 'down'."""
    reached = False
    while not reached:
        # Keep peer ssh workers responsive while we wait.
        for _, queue in self.ssh_jobs:
            queue.put('go')
        reached = self.cpu_state.get() == 'down'
        if not reached:
            time.sleep(self.TIMEOUT)
def wait_until_cpu_port_up(self):
    """Poll the control-plane state (nudging the ssh workers each pass) until it reports 'up'."""
    reached = False
    while not reached:
        # Keep peer ssh workers responsive while we wait.
        for _, queue in self.ssh_jobs:
            queue.put('go')
        reached = self.cpu_state.get() == 'up'
        if not reached:
            time.sleep(self.TIMEOUT)
def apply_filter_all_ports(self, filter_expression):
    """Attach the BPF *filter_expression* to the capture socket of every dataplane port."""
    for dataplane_port in self.dataplane.ports.values():
        capture_socket = dataplane_port.get_packet_source().socket
        scapyall.attach_filter(capture_socket, filter_expression)
def send_in_background(self, packets_list = None, interval = None):
    """
    This method sends predefined list of packets with predefined interval.
    """
    if not interval:
        interval = self.send_interval
    if not packets_list:
        packets_list = self.packets_list
    # Wait until the sniffer thread is actually capturing so no packet is missed.
    self.sniffer_started.wait(timeout=10)
    with self.dataplane_io_lock:
        # While running fast data plane sender thread there are two reasons for filter to be applied
        #  1. filter out data plane traffic which is tcp to free up the load on PTF socket (sniffer thread is using a different one)
        #  2. during warm neighbor restoration DUT will send a lot of ARP requests which we are not interested in
        # This is essential to get stable results
        self.apply_filter_all_ports('not (arp and ether src {}) and not tcp'.format(self.test_params['dut_mac']))
        sender_start = datetime.datetime.now()
        self.log("Sender started at %s" % str(sender_start))
        for entry in packets_list:
            time.sleep(interval)
            testutils.send_packet(self, *entry)
        self.log("Sender has been running for %s" % str(datetime.datetime.now() - sender_start))
    # Remove filter
    self.apply_filter_all_ports('')
def sniff_in_background(self, wait = None):
    """
    This function listens on all ports, in both directions, for the TCP src=1234 dst=5000 packets, until timeout.
    Once found, all packets are dumped to local pcap file,
    and all packets are saved to self.packets as scapy type.
    The native scapy.snif() is used as a background thread, to allow delayed start for the send_in_background().
    """
    if not wait:
        wait = self.time_to_listen + self.test_params['sniff_time_incr']
    sniffer_start = datetime.datetime.now()
    self.log("Sniffer started at %s" % str(sniffer_start))
    sniff_filter = "tcp and tcp dst port 5000 and tcp src port 1234 and not icmp"
    scapy_sniffer = threading.Thread(target=self.scapy_sniff, kwargs={'wait': wait, 'sniff_filter': sniff_filter})
    scapy_sniffer.start()
    time.sleep(2)               # Let the scapy sniff initialize completely.
    self.sniffer_started.set()  # Unblock waiter for the send_in_background.
    scapy_sniffer.join()
    self.log("Sniffer has been running for %s" % str(datetime.datetime.now() - sniffer_start))
    self.sniffer_started.clear()
def save_sniffed_packets(self):
    """Dump the captured packets to a pcap file (suffixed with the preboot oper, if any)."""
    if self.preboot_oper is not None:
        filename = "/tmp/capture_%s.pcap" % self.preboot_oper
    else:
        filename = "/tmp/capture.pcap"
    if not self.packets:
        self.log("Pcap file is empty.")
        return
    scapyall.wrpcap(filename, self.packets)
    self.log("Pcap file dumped to %s" % filename)
def scapy_sniff(self, wait = 180, sniff_filter = ''):
    """
    Thin wrapper over scapy's native sniff(): capture for *wait* seconds
    with *sniff_filter* and store the result in self.packets.
    """
    self.packets = scapyall.sniff(filter=sniff_filter, timeout=wait)
def send_and_sniff(self):
    """Run the sender and the sniffer concurrently and wait for both to finish."""
    self.sender_thr = threading.Thread(target = self.send_in_background)
    self.sniff_thr = threading.Thread(target = self.sniff_in_background)
    # Created here; set by sniff_in_background once the capture is live so
    # the sender does not start too early.
    self.sniffer_started = threading.Event()
    # Sniffer first, so the sender's packets are captured from the start.
    for worker in (self.sniff_thr, self.sender_thr):
        worker.start()
    for worker in (self.sniff_thr, self.sender_thr):
        worker.join()
def check_tcp_payload(self, packet):
    """
    Used by examine_flow(): return True when the packet carries an intact
    sequential TCP payload as created by generate_bidirectional(), i.e. the
    payload parses as an integer within [0, packets_to_send).
    """
    try:
        payload_id = int(str(packet[scapyall.TCP].payload))
    except Exception:
        # Missing TCP layer or non-numeric/corrupted payload.
        return False
    # The original computed this membership test but discarded its result,
    # so any numeric payload was accepted; return it to reject out-of-range
    # IDs as the docstring promises. (Plain comparison instead of
    # `in range(...)` - equivalent for ints and O(1) on python2 as well.)
    return 0 <= payload_id < self.packets_to_send
def no_flood(self, packet):
    """
    This method filters packets which are unique (i.e. no floods).
    """
    # Received packets (source MAC == DUT) pass only the first time their
    # payload ID is seen; flooded duplicates are rejected.
    if (not int(str(packet[scapyall.TCP].payload)) in self.unique_id) and (packet[scapyall.Ether].src == self.dut_mac):
        # This is a unique (no flooded) received packet.
        # self.unique_id is (re)initialized by examine_flow() before use.
        self.unique_id.append(int(str(packet[scapyall.TCP].payload)))
        return True
    elif packet[scapyall.Ether].dst == self.dut_mac:
        # This is a sent packet.
        return True
    else:
        return False
def examine_flow(self, filename = None):
    """
    This method examines pcap file (if given), or self.packets scapy file.
    The method compares TCP payloads of the packets one by one (assuming all payloads are consecutive integers),
    and the losses if found - are treated as disruptions in Dataplane forwarding.
    All disruptions are saved to self.lost_packets dictionary, in format:
    disrupt_start_id = (missing_packets_count, disrupt_time, disrupt_start_timestamp, disrupt_stop_timestamp)
    """
    if filename:
        all_packets = scapyall.rdpcap(filename)
    elif self.packets:
        all_packets = self.packets
    else:
        self.log("Filename and self.packets are not defined.")
        self.fails['dut'].add("Filename and self.packets are not defined")
        return None
    # Filter out packets and remove floods:
    self.unique_id = list()  # This list will contain all unique Payload ID, to filter out received floods.
    filtered_packets = [ pkt for pkt in all_packets if
        scapyall.TCP in pkt and
        not scapyall.ICMP in pkt and
        pkt[scapyall.TCP].sport == 1234 and
        pkt[scapyall.TCP].dport == 5000 and
        self.check_tcp_payload(pkt) and
        self.no_flood(pkt)
        ]
    # Re-arrange packets, if delayed, by Payload ID and Timestamp:
    packets = sorted(filtered_packets, key = lambda packet: (int(str(packet[scapyall.TCP].payload)), packet.time ))
    self.lost_packets = dict()
    self.max_disrupt, self.total_disruption = 0, 0
    sent_packets = dict()
    # add/assert/clear: pre-register the fail so an assert here still leaves
    # it recorded in the final report.
    self.fails['dut'].add("Sniffer failed to capture any traffic")
    self.assertTrue(packets, "Sniffer failed to capture any traffic")
    self.fails['dut'].clear()
    if packets:
        prev_payload, prev_time = 0, 0
        sent_payload = 0
        received_counter = 0  # Counts packets from dut.
        self.disruption_start, self.disruption_stop = None, None
        for packet in packets:
            if packet[scapyall.Ether].dst == self.dut_mac:
                # This is a sent packet - keep track of it as payload_id:timestamp.
                sent_payload = int(str(packet[scapyall.TCP].payload))
                sent_packets[sent_payload] = packet.time
                continue
            if packet[scapyall.Ether].src == self.dut_mac:
                # This is a received packet.
                received_time = packet.time
                received_payload = int(str(packet[scapyall.TCP].payload))
                received_counter += 1
            if not (received_payload and received_time):
                # This is the first valid received packet.
                prev_payload = received_payload
                prev_time = received_time
                continue
            if received_payload - prev_payload > 1:
                # Packets in a row are missing, a disruption.
                lost_id = (received_payload - 1) - prev_payload  # How many packets lost in a row.
                disrupt = (sent_packets[received_payload] - sent_packets[prev_payload + 1])  # How long disrupt lasted.
                # Add disrupt to the dict:
                self.lost_packets[prev_payload] = (lost_id, disrupt, received_time - disrupt, received_time)
                self.log("Disruption between packet ID %d and %d. For %.4f " % (prev_payload, received_payload, disrupt))
                if not self.disruption_start:
                    self.disruption_start = datetime.datetime.fromtimestamp(prev_time)
                self.disruption_stop = datetime.datetime.fromtimestamp(received_time)
            prev_payload = received_payload
            prev_time = received_time
        self.fails['dut'].add("Sniffer failed to filter any traffic from DUT")
        self.assertTrue(received_counter, "Sniffer failed to filter any traffic from DUT")
        self.fails['dut'].clear()
    if self.lost_packets:
        self.disrupts_count = len(self.lost_packets)  # Total disrupt counter.
        # Find the longest loss with the longest time:
        max_disrupt_from_id, (self.max_lost_id, self.max_disrupt_time, self.no_routing_start, self.no_routing_stop) = \
            max(self.lost_packets.items(), key = lambda item:item[1][0:2])
        self.total_disrupt_packets = sum([item[0] for item in self.lost_packets.values()])
        self.total_disrupt_time = sum([item[1] for item in self.lost_packets.values()])
        self.log("Disruptions happen between %s and %s after the reboot." % \
            (str(self.disruption_start - self.reboot_start), str(self.disruption_stop - self.reboot_start)))
    else:
        self.log("Gaps in forwarding not found.")
    self.log("Total incoming packets captured %d" % received_counter)
    if packets:
        filename = '/tmp/capture_filtered.pcap' if self.preboot_oper is None else "/tmp/capture_filtered_%s.pcap" % self.preboot_oper
        scapyall.wrpcap(filename, packets)
        self.log("Filtered pcap dumped to %s" % filename)
def check_forwarding_stop(self):
    """Block until the ASIC stops forwarding; return (down-transition time, vlan reachability samples)."""
    self.asic_start_recording_vlan_reachability()
    while True:
        state = self.asic_state.get()
        # Keep the peer ssh workers alive while we poll.
        for _, q in self.ssh_jobs:
            q.put('go')
        if state == 'down':
            break
        time.sleep(self.TIMEOUT)
    self.asic_stop_recording_vlan_reachability()
    return self.asic_state.get_state_time(state), self.get_asic_vlan_reachability()
def check_forwarding_resume(self):
    """Block until the ASIC leaves the 'down' state; return (transition time, vlan reachability samples)."""
    state = self.asic_state.get()
    while state == 'down':
        time.sleep(self.TIMEOUT)
        state = self.asic_state.get()
    return self.asic_state.get_state_time(state), self.get_asic_vlan_reachability()
def ping_data_plane(self, light_probe=True):
    """
    Probe both traffic directions; in light mode the (heavier) upper-tier
    probe is skipped when the servers already failed to answer.
    """
    replies_from_servers = self.pingFromServers()
    if light_probe and replies_from_servers <= 0:
        replies_from_upper = 0
    else:
        replies_from_upper = self.pingFromUpperTier()
    return replies_from_servers, replies_from_upper
def wait_dut_to_warm_up(self):
    """Block until the freshly rebooted DUT is stable.

    Two phases:
      1. Wait until both the data plane and the control plane report 'up'
         and stay up for at least ``dut_stabilize_secs`` in a row (a flap
         resets the timer).
      2. Wait until the ASIC stops flooding (i.e. FDB entries have been
         learnt), unless ``allow_vlan_flooding`` tolerates it.

    Raises:
        Exception: when either phase exceeds ``warm_up_timeout_secs``, or
        when a plane goes down / flaps while waiting for flooding to stop.
    """
    # When the DUT is freshly rebooted, it appears that it needs to warm
    # up towards PTF docker. In practice, I've seen this warm up taking
    # up to ~70 seconds.
    fail = None
    dut_stabilize_secs = int(self.test_params['dut_stabilize_secs'])
    warm_up_timeout_secs = int(self.test_params['warm_up_timeout_secs'])
    start_time = datetime.datetime.now()
    up_time = None
    # First wait until DUT data/control planes are up
    while True:
        dataplane = self.asic_state.get()
        ctrlplane = self.cpu_state.get()
        elapsed = (datetime.datetime.now() - start_time).total_seconds()
        if dataplane == 'up' and ctrlplane == 'up':
            if not up_time:
                up_time = datetime.datetime.now()
            up_secs = (datetime.datetime.now() - up_time).total_seconds()
            if up_secs > dut_stabilize_secs:
                break;
        else:
            # reset up_time: any flap restarts the stabilization window
            up_time = None
        if elapsed > warm_up_timeout_secs:
            raise Exception("Control plane didn't come up within warm up timeout")
        time.sleep(1)
    # check until flooding is over. Flooding happens when FDB entry of
    # certain host is not yet learnt by the ASIC, therefore it sends
    # packet to all vlan ports.
    uptime = datetime.datetime.now()
    while True:
        elapsed = (datetime.datetime.now() - start_time).total_seconds()
        if not self.asic_state.is_flooding() and elapsed > dut_stabilize_secs:
            break
        if elapsed > warm_up_timeout_secs:
            if self.allow_vlan_flooding:
                # flooding tolerated when explicitly allowed by the test
                break
            raise Exception("Data plane didn't stop flooding within warm up timeout")
        time.sleep(1)
    # Final sanity: both planes must still be up and must not have
    # transitioned to 'up' again (flapped) after ``uptime`` was taken.
    dataplane = self.asic_state.get()
    ctrlplane = self.cpu_state.get()
    if not dataplane == 'up':
        fail = "Data plane"
    elif not ctrlplane == 'up':
        fail = "Control plane"
    if fail is not None:
        raise Exception("{} went down while waiting for flooding to stop".format(fail))
    if self.asic_state.get_state_time('up') > uptime:
        fail = "Data plane"
    elif self.cpu_state.get_state_time('up') > uptime:
        fail = "Control plane"
    if fail is not None:
        raise Exception("{} flapped while waiting for the warm up".format(fail))
    # Everything is good
def check_alive(self):
    """Verify the DUT routes packets steadily and is not flooding.

    Raises:
        Exception: if the data plane goes down after having been seen up,
        or if the ASIC is still flooding when the wait loop is exhausted.
    """
    # This function checks that DUT routes the packets in the both directions.
    #
    # Sometimes first attempt failes because ARP responses to DUT are not so fast.
    # But after this the function expects to see steady "replies".
    # If the function sees that there is an issue with the dataplane after we saw
    # successful replies it considers that the DUT is not healthy
    #
    # Sometimes I see that DUT returns more replies then requests.
    # I think this is because of not populated FDB table
    # The function waits while it's done
    uptime = None
    for counter in range(self.nr_tests * 2):
        state = self.asic_state.get()
        if state == 'up':
            if not uptime:
                uptime = self.asic_state.get_state_time(state)
        else:
            # down AFTER we already saw it up -> DUT considered unhealthy
            if uptime:
                raise Exception("Data plane stopped working")
        time.sleep(2)
    # wait, until FDB entries are populated
    for _ in range(self.nr_tests * 10): # wait for some time
        if self.asic_state.is_flooding():
            time.sleep(2)
        else:
            break
    else:  # for/else: loop exhausted without break -> still flooding
        raise Exception("DUT is flooding")
def get_asic_vlan_reachability(self):
    """Return the list of VLAN reachability samples recorded so far."""
    return self.asic_vlan_reach
def asic_start_recording_vlan_reachability(self):
    """Reset the sample list and start recording VLAN reachability."""
    with self.vlan_lock:
        self.asic_vlan_reach = []
        self.recording = True
def asic_stop_recording_vlan_reachability(self):
    """Stop recording VLAN reachability samples (keeps what was recorded)."""
    with self.vlan_lock:
        self.recording = False
def try_record_asic_vlan_recachability(self, t1_to_vlan):
    """Append a T1->VLAN reply-count sample, but only while recording."""
    with self.vlan_lock:
        if self.recording:
            self.asic_vlan_reach.append(t1_to_vlan)
def log_asic_state_change(self, reachable, partial=False, t1_to_vlan=0, flooding=False):
    """Record a data-plane observation and log a transition if the
    derived state ('up' / 'partial' / 'down') differs from the last one.
    """
    previous = self.asic_state.get()
    if not reachable:
        new_state = 'down'
    elif partial:
        new_state = 'partial'
    else:
        new_state = 'up'
    self.try_record_asic_vlan_recachability(t1_to_vlan)
    self.asic_state.set_flooding(flooding)
    if previous != new_state:
        self.log("Data plane state transition from %s to %s (%d)" % (previous, new_state, t1_to_vlan))
        self.asic_state.set(new_state)
def log_cpu_state_change(self, reachable, partial=False, flooding=False):
    """Record a control-plane observation and log a transition if the
    derived state ('up' / 'partial' / 'down') differs from the last one.
    """
    previous = self.cpu_state.get()
    if not reachable:
        new_state = 'down'
    elif partial:
        new_state = 'partial'
    else:
        new_state = 'up'
    self.cpu_state.set_flooding(flooding)
    if previous != new_state:
        self.log("Control plane state transition from %s to %s" % (previous, new_state))
        self.cpu_state.set(new_state)
def log_vlan_state_change(self, reachable):
    """Record a VLAN ARP reachability observation ('up' / 'down') and log
    a transition when the state changed.
    """
    previous = self.vlan_state.get()
    new_state = 'up' if reachable else 'down'
    if previous != new_state:
        self.log("VLAN ARP state transition from %s to %s" % (previous, new_state))
        self.vlan_state.set(new_state)
def reachability_watcher(self):
    """Background worker: periodically probe data plane, CPU port and VLAN
    ARP reachability, logging every state transition for later analysis.

    Runs until ``self.watching`` becomes False.  Thresholds: a plane is
    'reachable' above 70% of the expected packet count, 'partial' when
    reachable but below 100%, and 'flooding' when more replies than
    expected arrive (duplicates seen on extra ports).
    """
    # This function watches the reachability of the CPU port, and ASIC. It logs the state
    # changes for future analysis
    self.watcher_is_stopped.clear() # Watcher is running.
    while self.watching:
        if self.dataplane_io_lock.acquire(False):  # non-blocking: skip the probe when the dataplane is busy
            vlan_to_t1, t1_to_vlan = self.ping_data_plane(self.light_probe)
            reachable = (t1_to_vlan > self.nr_vl_pkts * 0.7 and
                         vlan_to_t1 > self.nr_pc_pkts * 0.7)
            partial = (reachable and
                       (t1_to_vlan < self.nr_vl_pkts or
                        vlan_to_t1 < self.nr_pc_pkts))
            flooding = (reachable and
                        (t1_to_vlan > self.nr_vl_pkts or
                         vlan_to_t1 > self.nr_pc_pkts))
            self.log_asic_state_change(reachable, partial, t1_to_vlan, flooding)
            self.dataplane_io_lock.release()
        total_rcv_pkt_cnt = self.pingDut()
        reachable = total_rcv_pkt_cnt > 0 and total_rcv_pkt_cnt > self.ping_dut_pkts * 0.7
        partial = total_rcv_pkt_cnt > 0 and total_rcv_pkt_cnt < self.ping_dut_pkts
        flooding = reachable and total_rcv_pkt_cnt > self.ping_dut_pkts
        self.log_cpu_state_change(reachable, partial, flooding)
        total_rcv_pkt_cnt = self.arpPing()
        reachable = total_rcv_pkt_cnt >= self.arp_ping_pkts
        self.log_vlan_state_change(reachable)
        self.watcher_is_running.set() # Watcher is running.
    self.watcher_is_stopped.set() # Watcher has stopped.
    self.watcher_is_running.clear() # Watcher has stopped.
def pingFromServers(self):
    """Send ``nr_pc_pkts`` probe packets from the server (VLAN) side and
    return how many matched replies arrived on the T1-facing ports.

    NOTE: Python 2 code (xrange).
    """
    for i in xrange(self.nr_pc_pkts):
        testutils.send_packet(self, self.from_server_src_port, self.from_vlan_packet)
    total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.from_vlan_exp_packet, self.from_server_dst_ports, timeout=self.TIMEOUT)
    self.log("Send %5d Received %5d servers->t1" % (self.nr_pc_pkts, total_rcv_pkt_cnt), True)
    return total_rcv_pkt_cnt
def pingFromUpperTier(self):
    """Send the prepared T1-side probe packets (one ``send_packet`` call
    per entry in ``self.from_t1``) and return how many matched replies
    arrived on the VLAN ports.
    """
    for entry in self.from_t1:
        testutils.send_packet(self, *entry)
    total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.from_t1_exp_packet, self.vlan_ports, timeout=self.TIMEOUT)
    self.log("Send %5d Received %5d t1->servers" % (self.nr_vl_pkts, total_rcv_pkt_cnt), True)
    return total_rcv_pkt_cnt
def pingDut(self):
    """Send ``ping_dut_pkts`` ICMP probes to the DUT from random VLAN
    ports and return the number of matched replies (CPU port health).

    NOTE: Python 2 code (xrange).
    """
    for i in xrange(self.ping_dut_pkts):
        testutils.send_packet(self, self.random_port(self.vlan_ports), self.ping_dut_packet)
    total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.ping_dut_exp_packet, self.vlan_ports, timeout=self.TIMEOUT)
    self.log("Send %5d Received %5d ping DUT" % (self.ping_dut_pkts, total_rcv_pkt_cnt), True)
    return total_rcv_pkt_cnt
def arpPing(self):
    """Send ``arp_ping_pkts`` ARP requests from ``arp_src_port`` and
    return the number of matched ARP replies seen on that same port.

    NOTE: Python 2 code (xrange).
    """
    for i in xrange(self.arp_ping_pkts):
        testutils.send_packet(self, self.arp_src_port, self.arp_ping)
    total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.arp_resp, [self.arp_src_port], timeout=self.TIMEOUT)
    self.log("Send %5d Received %5d arp ping" % (self.arp_ping_pkts, total_rcv_pkt_cnt), True)
    return total_rcv_pkt_cnt
|
Enumerate.py | # READ THE DOCUMENTATION FOR ENUMLIB
#from io import StringIO, BytesIO, FileIO
import argparse
import io
from numpy import array
import os
from random import uniform
import subprocess as sp
import yaml
from multiprocessing import cpu_count, Process, Manager, Queue
from ase import Atoms
from ase.db import connect
from ase.io import read as read_ase
from ase.build import bulk
from mlippy.makeStr import *
from mlippy.makeStr import _read_enum_out, _map_enumStr_to_real_space, _cartesian2direct, _get_lattice_parameter
def parse_args():
    """Parse command-line arguments for the enumeration driver.

    Returns:
        argparse.Namespace: ``yaml`` is a list of one or more YAML input
        files; ``clear`` is a real boolean (or None when the flag was
        omitted) indicating whether to wipe a previous database.
    """
    def _str2bool(value):
        # BUG FIX: the original used ``type=bool``, a well-known argparse
        # trap -- bool("False") is True because any non-empty string is
        # truthy.  Parse the common spellings explicitly instead.
        return value.strip().lower() in ('1', 'true', 't', 'yes', 'y')

    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('yaml', nargs='+',
                        help='One or more yaml files ready for enumeration')
    parser.add_argument('--clear', type=_str2bool,
                        help='Clear previous database or not')
    args = parser.parse_args()
    return args
class Enumerate():
    """
    Contains all the parameters necessary to enumerate bulk structures for training an MTP.

    The yaml file's ``enumeration`` section is injected directly into the
    instance via ``__dict__.update``, so attributes such as ``lattice``,
    ``cell_size``, ``restrictions``, ``finite_precision``, ``labelings``,
    ``concentrations`` and ``makeStr`` come from the yaml, not from code.

    TODO: Add capability to handle restrictions on lattice points
    makeStr default arguments:
    default_args = {
        'examples': False,
        'verbose': False,
        'action': 'print',
        'debug': False,
        'structures': None, #['all']
        'displace': 0.0,
        'input': 'struct_enum.out',
        'mink': 't',
        'species': [],
        'species_mapping': [],
        # 'outfile': 'vasp.{}',
        'rattle': 0.0,
        'config': 'f',
        'remove_zeros': 'f'
    }
    """
    def __init__(self, yaml_path):
        # Setup the enumeration class by reading in a yaml file
        self.read_yaml(yaml_path)
        manager = Manager()
        # Shared dict so the worker processes can hand their Atoms lists back.
        self.db_return = manager.dict()
        print(self.db_return)
        self.SetupProcess()
        self.clear = False
        self.set_db_path()

    def set_db_path(self, db_name='enum.db'):
        """Store the path of the ASE database inside the setup directory."""
        self.db_path = os.path.join(self.setup_path, db_name)

    def read_yaml(self, yaml_path):
        """Read the configuration yaml file for the alloy database"""
        with open(yaml_path) as yp:
            yml = yaml.safe_load(yp)
        self.species = list(yml['species'].values())
        print(self.species)
        self.nspecies = len(self.species)
        alloy = ''.join(self.species)
        self.database_path = os.path.join(yml['ROOT'], alloy)
        self.setup_path = os.path.join(self.database_path, 'setup')
        # Inject every key of the 'enumeration' section as an attribute.
        self.__dict__.update(yml['enumeration'])

    def ClearDatabase(self):
        """Delete a previously written enum.db, if present."""
        enum_db_path = os.path.join(self.setup_path, 'enum.db')
        if os.path.isfile(enum_db_path):
            os.remove(enum_db_path)

    def SetupProcess(self):
        """
        Using the `ase.build.bulk` module, we calculate the lattice vectors and lattice points
        that will be used in the enumeration.

        Writes one ``struct_enum.in`` per parent lattice and prepares (but
        does not start) one worker Process per lattice.
        """
        self.Process = []
        for parent in self.lattice:
            enum_path = os.path.join(self.setup_path, parent)
            struct_enum = os.path.join(enum_path, 'struct_enum.in')
            if not os.path.isdir(enum_path):
                os.mkdir(enum_path)
            # Dummy single-species cell: only the geometry matters here.
            struct = bulk('X', parent, a=1)
            npoints = len(struct.positions)
            # print(parent)
            with open(struct_enum, 'w') as f:
                print('Writing input for:', parent)
                print(parent, file=f)
                print('bulk', file=f)
                for vec in struct.cell:
                    print(*vec, sep=' ', file=f)
                print(self.nspecies, '-nary case', file=f)
                print(' ', npoints, ' # number of points in lattice', file=f)
                for pos in struct.positions:
                    print(*pos, self.restrictions, sep=' ', file=f)
                from math import ceil
                cell_size = [ ceil( dim / npoints ) for dim in self.cell_size ] # fixes size for multilattices
                print(cell_size)
                # cell_size[1] = int(cell_size[1] / npoints)
                print(*cell_size, sep=' ', file=f)
                print(self.finite_precision, file=f)
                print(self.labelings, file=f)
                try:
                    for conc in self.concentrations:
                        print(conc, file=f)
                except TypeError:
                    # concentrations is None / not iterable -> unrestricted
                    print('No concentration restrictions applied')
            # initizlize the Processes
            # print(enum_path, "ENUM_PATH")
            #self.ClearDatabase()
            #q =
            # q.put(enum_path)
            # p = Process(target=self._EnumerateStructs, args=(q,))
            #return_dict = self.manager.dict()
            p = Process(target=self._EnumerateStructs, args=(enum_path,self.db_return))
            # q.put(p)
            self.Process.append(p)

    def _EnumerateStructs(self, enum_path, db_return):
        """
        Worker process for enumeration and generating the atoms database
        """
        with open(os.path.join(enum_path, 'enum.out'), 'wb') as f:
            # Cleanup before enum.x
            my_env = os.environ.copy()
            sp.run(['rm',
                    'enum.out',
                    'vasp.cfg'],
                   cwd=enum_path,
                   stderr=sp.PIPE,
                   stdout=sp.PIPE,
                   env=my_env
                   )
            # Run enum.x
            print('Running: enum.x')
            sp.run(['enum.x'], cwd=enum_path, stderr=f, stdout=f, env=my_env)
        # Convert the `struct_enum.out` file into an Atoms database
        #args = self.makeStr
        self.makeStr['species'] = self.species
        structs = self.makeStr['structures']
        if type(structs) is str:
            # test to see if a range has been given
            try:
                bounds = [ int(i) for i in structs.split(' ') ]
                if len(bounds) == 2:
                    bounds[1] = bounds[1] + 1  # inclusive upper bound
                # print(bounds)
                self.makeStr['structures'] = range(*bounds)
            except:
                # fall back to a comma-separated explicit list
                list_structs = [int(i) for i in structs.split(',')]
                self.makeStr['structures'] = list_structs
        # Build the database and run the makeStr functions
        atoms = make_database(enum_path, self.makeStr)
        self.db_return[enum_path] = atoms

    def RunEnum(self):
        """Run all worker processes to completion, then collect their
        Atoms lists from the shared dict into the ASE database."""
        for p in self.Process:
            p.start()
        for p in self.Process:
            p.join()
        db_vals = self.db_return.values()
        with connect(self.db_path) as db:
            for parent in db_vals:
                #print("Adding", parent)
                for struct in parent:
                    db.write(struct)
# Write the structures to an ASE object using the makeStr.py template from writing poscars
def make_database(enum_path, args):
    """Convert a ``struct_enum.out`` enumeration into a list of ASE Atoms.

    :arg enum_path: directory containing the enumeration output; NOTE this
        function changes the process-wide working directory to it
        (``os.chdir``) and never changes it back.
    :arg args: Dictionary of user supplied input (the ``makeStr`` options).

    Returns:
        list: one ASE Atoms object per enumerated structure.
    """
    os.chdir(enum_path)
    (system, structure_data) = _read_enum_out(args)
    atoms = []
    for structure in structure_data:
        # space_data is a dictionary containing the spacial data for
        # the structure
        space_data = _map_enumStr_to_real_space(system,structure,args["mink"])
        space_data["aBas"] = _cartesian2direct(space_data["sLV"],
                                               space_data["aBas"],
                                               system["eps"])
        atom = write_ASE(system, space_data, structure, args)
        atoms.append(atom)
    print("ATOMS:", len(atoms))
    #return atoms
    # NOTE: database_path is computed and printed but the DB write below
    # is commented out; the caller (RunEnum) does the actual writing.
    database_path = os.path.join('..', 'enum.db')
    #with connect(database_path, append=True) as db:
    #    for struct in atoms:
    #        #print(struct)
    #        db.write(struct)
    print(database_path)
    return atoms
# with connect(database_path) as db:
# counter = 1
#
# # i = len(db)
# if i == 0:
# i = 1#
#
# print('DB len:', i)
#
# while counter < len(atoms):
# i += 1
# struct_id = db.reserve(name=i)
# print(i)
#
# if struct_id is None:
# print("ID Reserved. Choosing new ID... ", i)
#
# else:
# print("Writing crystal to database: ", counter, i)
# db.write(atoms[counter], id=struct_id, name=i)
# counter += 1
#
# i += 1
## Define the functions that interpret the `struct_enum.out` file
def _write_ASE_OLD(system_data,space_data,structure_data,args):
    """Writes a string in vasp POSCAR style for the input structure and system
    data and then stores that that data as an ASE Atoms object.
    Code adapted from `Enumlib.makeStr.py`.

    Superseded by ``write_ASE``; kept for reference.

    :arg system_data: a dictionary of the system_data
    :arg space_data: a dictionary containing the spacial data
    :arg structure_data: a dictionary of the data for this structure
    :arg args: Dictionary of user supplied input.
    """
    # Get the labeling, group index, structure number and arrow labels
    # from the input data structure.
    labeling = structure_data["labeling"]
    gIndx = space_data["gIndx"]
    arrows = structure_data["directions"]
    struct_n = structure_data["strN"]
    # The arrow basis.
    arrow_directions = [[1,0,0],[-1,0,0],[0,1,0],[0,-1,0],[0,0,1],[0,0,-1]]
    directions = []
    # Construct the concentrations of the atoms from the labeling by
    # counting the number of each type of atom present in the
    # labeling.
    concs = []
    for i in range(system_data["k"]):
        this_conc = 0
        for atom in range(structure_data["n"]*system_data["nD"]):
            if labeling[gIndx[atom]] == str(i):
                this_conc += 1
        concs.append(this_conc)
    def_title = '' #"{} str #: {}".format(str(system_data["title"]),str(structure_data["strN"]))
    # Get the lattice parameter for the atomic species provided by the
    # user.
    lattice_parameter, _title = _get_lattice_parameter(args["species"],concs,
                                                       system_data["plattice"],system_data["nD"],
                                                       def_title,remove_zeros=args["remove_zeros"])
    title = ' '.join(_title.strip('\n').split())
    # One displacement vector per arrow label.
    # NOTE(review): `directions` ends up with len(arrows) entries but is
    # indexed by atom index `iAt` below -- confirm arrows covers all atoms.
    for arrow in arrows:
        directions.append(array(arrow_directions[int(arrow)]))
    sLV = space_data["sLV"]
    # Start writing the data to the file.
    with io.StringIO() as poscar:
        # First write the title and the lattice parameter.
        print(title, file=poscar)
        print(lattice_parameter, file=poscar)
        # Then write out the lattice vectors.
        for i in range(3):
            print(*sLV[i], sep=' ', file=poscar)
        cell = sLV
        # Write the concentrations to the output file. If the species
        # strings were passed in by the user and the user requests
        # there be no zeros in the concentration string then we should
        # remove them from the file. Otherwise we default to leaving
        # them in.
        if (args["remove_zeros"] and args["species"] is not None):
            conc_str = ''  # NOTE(review): assigned but never used
            for ic in concs:
                if ic != 0:
                    poscar.write("{} ".format(str(ic)))
        # print(*concs, sep=' ', file=poscar)
        print("\nD", file=poscar)
        # Now write out the atomic positions to the file.
        for ilab in range(system_data["k"]):
            for iAt in range(structure_data["n"]*system_data["nD"]):
                rattle = uniform(-args["rattle"],args["rattle"])
                displace = directions[iAt]*args["displace"]*lattice_parameter
                # If the displacement is non zero and we're `rattling`
                # the system then we need to modify the displacement
                # by the amount being rattled.
                displace += displace*rattle
                if labeling[gIndx[iAt]] == str(ilab):
                    # The final atomic position is the position from
                    # the basis plus the total displacement.
                    out_array = array(space_data["aBas"][iAt]) + displace
                    poscar.write(" {}\n".format(
                        " ".join(["{0: .8f}".format(i) for i in out_array.tolist()])))
        # _poscar = poscar.getvalue()
        _poscar = poscar.getvalue()
    # Parse the POSCAR text back into an ASE Atoms object.
    atom = read_ase(io.StringIO(_poscar), format='vasp')
    return atom
def write_ASE(system_data,space_data,structure_data,args):
    """Writes a string in vasp POSCAR style for the input structure and system
    data and then stores that that data as an ASE Atoms object.
    Code adapted from `Enumlib.makeStr.py`.

    :arg system_data: a dictionary of the system_data
    :arg space_data: a dictionary containing the spacial data
    :arg structure_data: a dictionary of the data for this structure
    :arg args: Dictionary of user supplied input.

    Returns:
        ase.Atoms: the crystal, with cell scaled by the lattice parameter.
    """
    # Get the labeling, group index, structure number and arrow labels
    # from the input data structure.
    labeling = structure_data["labeling"]
    gIndx = space_data["gIndx"]
    arrows = structure_data["directions"]  # NOTE(review): unused below
    struct_n = structure_data["strN"]      # NOTE(review): unused below
    # The arrow basis.
    arrow_directions = [[1,0,0],[-1,0,0],[0,1,0],[0,-1,0],[0,0,1],[0,0,-1]]
    # Count how many atoms of each of the k species the labeling contains.
    concs = []
    for i in range(system_data["k"]):
        this_conc = 0
        for atom in range(structure_data["n"]*system_data["nD"]):
            if labeling[gIndx[atom]] == str(i):
                this_conc += 1
        concs.append(this_conc)
    # print(system_data["k"])
    def_title = '' #"{} str #: {}".format(str(system_data["title"]),str(structure_data["strN"]))
    # Get the lattice parameter for the atomic species provided by the
    lattice_parameter, _title = _get_lattice_parameter(args["species"],concs,
                                                       system_data["plattice"],system_data["nD"],
                                                       def_title,remove_zeros=args["remove_zeros"])
    cell = array(space_data["sLV"]) * lattice_parameter
    formula = ''.join([ args["species"][i] + str(concs[i]) for i in range(len(args['species'])) ])
    positions = space_data["aBas"]
    #initialize the ASE.Atoms object
    crystal = Atoms(formula, pbc=True, positions=positions)
    crystal.set_cell(cell, scale_atoms=True)
    crystal.wrap()
    # Apply a random rattle to the structure
    # will eventually be able to take in all ASE.Atoms.rattle() arguments
    # NOTE(review): when args["rattle"] parses as a float it is stored in
    # `rattle` but never applied to the crystal -- confirm intent.
    try:
        rattle = float(args["rattle"])
    except:
        if args["rattle"]: # If user gives `True` use default ASE.Atoms.rattle
            crystal.rattle()
    # Test if user input a cell displacement
    # NOTE(review): `celldisp` is computed but never used afterwards.
    try:
        celldisp = float(args["displace"])
    except:
        celldisp = 0
    return crystal
def parallelize(struct_enum_out, n_threads=1):
    """
    struct_enum_out (str): path to the struct_enum_out file that you want to enumerate
    n_threads (int): Number of threads to run on

    Splits the structure index range evenly over ``n_threads`` and launches
    one background shell job per slice.

    NOTE(review): the command is run with shell=True and a trailing '&'
    (fire-and-forget -- sp.run does not wait for the background job), and
    both the script path and the cwd are hard-coded to one machine.
    """
    import subprocess as sp
    # The last line of struct_enum.out starts with the total structure count.
    with open(struct_enum_out, "r") as f:
        n_structs = int(f.readlines()[-1].split()[0])
    structs_per_thread = n_structs // n_threads
    start = 1
    for i in range(n_threads):
        if i+1 == n_threads:
            end = n_structs # This makes sure to grab the last few structures that weren't assigned with the floor division.
        else:
            end = start + structs_per_thread - 1
        output_file = ''.join(["enum.tmp.", str(i+1)]) # set the output filename
        args = ' '.join(["python", "~/automtp/mlippy/mlippy/",
                         str(start), str(end), "-config", "t", "-outfile", output_file, "&"])
        sp.run(args,
               shell=True,
               cwd="/home/hayden/Documents/msg/examples",
               )
        print("thread", i+1, ":", start, end, "enumerated:", end-start)
        #
        start = end + 1 # set the next start value to the next structure
def main():
    """Enumerate structures for every alloy YAML file given on the CLI."""
    cli = parse_args()
    for yaml_file in cli.yaml:
        print("Enumerating:", yaml_file)
        Enumerate(yaml_file).RunEnum()
# Script entry point: run the enumeration driver when invoked directly.
if __name__ == "__main__":
    main()
|
weixin.py | #!/usr/bin/env python
# coding: utf-8
import qrcode
import urllib
import urllib2
import cookielib
import requests
import xml.dom.minidom
import json
import time
import re
import sys
import os
import random
import multiprocessing
import platform
import logging
import httplib
from collections import defaultdict
from urlparse import urlparse
from lxml import html
#import pdb
# for media upload
import mimetypes
from requests_toolbelt.multipart.encoder import MultipartEncoder
def catchKeyboardInterrupt(fn):
    """Decorator: run ``fn`` and swallow KeyboardInterrupt (Ctrl-C),
    printing/logging a "forced exit" notice instead of propagating.

    NOTE: Python 2 code (print statement).
    """
    def wrapper(*args):
        try:
            return fn(*args)
        except KeyboardInterrupt:
            print '\n[*] 强制退出程序'
            logging.debug('[*] 强制退出程序')
    return wrapper
def _decode_list(data):
    """Recursively UTF-8 encode every unicode element of a list (Python 2
    json helper); nested lists/dicts are converted via the sibling helpers.
    """
    decoded = []
    for element in data:
        if isinstance(element, unicode):
            decoded.append(element.encode('utf-8'))
        elif isinstance(element, list):
            decoded.append(_decode_list(element))
        elif isinstance(element, dict):
            decoded.append(_decode_dict(element))
        else:
            decoded.append(element)
    return decoded
def _decode_dict(data):
    """Recursively UTF-8 encode every unicode key/value of a dict (Python 2
    json helper); nested lists/dicts are converted via the sibling helpers.
    """
    decoded = {}
    for raw_key, raw_value in data.iteritems():
        key = raw_key.encode('utf-8') if isinstance(raw_key, unicode) else raw_key
        if isinstance(raw_value, unicode):
            value = raw_value.encode('utf-8')
        elif isinstance(raw_value, list):
            value = _decode_list(raw_value)
        elif isinstance(raw_value, dict):
            value = _decode_dict(raw_value)
        else:
            value = raw_value
        decoded[key] = value
    return decoded
class WebWeixin(object):
def __str__(self):
    """Return a human-readable summary of the current session credentials."""
    description = \
        "=========================\n" + \
        "[#] Web Weixin\n" + \
        "[#] Debug Mode: " + str(self.DEBUG) + "\n" + \
        "[#] Uuid: " + self.uuid + "\n" + \
        "[#] Uin: " + str(self.uin) + "\n" + \
        "[#] Sid: " + self.sid + "\n" + \
        "[#] Skey: " + self.skey + "\n" + \
        "[#] DeviceId: " + self.deviceId + "\n" + \
        "[#] PassTicket: " + self.pass_ticket + "\n" + \
        "========================="
    return description
def __init__(self):
    """Initialise session state and install a cookie-aware urllib2 opener.

    All credential fields start empty and are filled during the
    getUUID -> waitForLogin -> login -> webwxinit sequence.
    """
    self.DEBUG = False
    self.uuid = ''
    self.base_uri = ''
    self.redirect_uri = ''
    self.uin = ''
    self.sid = ''
    self.skey = ''
    self.pass_ticket = ''
    # Fake device id: 'e' followed by 15 random digits, as the web client does.
    self.deviceId = 'e' + repr(random.random())[2:17]
    self.BaseRequest = {}
    self.synckey = ''
    self.SyncKey = []
    self.User = []
    self.MemberList = []
    self.ContactList = []  # friends (regular contacts)
    self.GroupList = []  # group chats
    self.GroupMemeberList = []  # group members
    self.PublicUsersList = []  # public/service accounts
    self.SpecialUsersList = []  # special (built-in) accounts
    self.autoReplyMode = False
    self.syncHost = ''
    self.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36'
    self.interactive = False
    self.autoOpen = False
    self.saveFolder = os.path.join(os.getcwd(), 'saved')
    # Map API name -> subdirectory used by _saveFile.
    self.saveSubFolders = {'webwxgeticon': 'icons', 'webwxgetheadimg': 'headimgs', 'webwxgetmsgimg': 'msgimgs',
                           'webwxgetvideo': 'videos', 'webwxgetvoice': 'voices', '_showQRCodeImg': 'qrcodes'}
    self.appid = 'wx782c26e4c19acffb'
    self.lang = 'zh_CN'
    self.lastCheckTs = time.time()
    self.memberCount = 0
    # Well-known built-in account user names filtered out of the contact list.
    self.SpecialUsers = ['newsapp', 'fmessage', 'filehelper', 'weibo', 'qqmail', 'fmessage', 'tmessage', 'qmessage', 'qqsync', 'floatbottle', 'lbsapp', 'shakeapp', 'medianote', 'qqfriend', 'readerapp', 'blogapp', 'facebookapp', 'masssendapp', 'meishiapp', 'feedsapp',
                         'voip', 'blogappweixin', 'weixin', 'brandsessionholder', 'weixinreminder', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c', 'officialaccounts', 'notification_messages', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c', 'wxitil', 'userexperience_alarm', 'notification_messages']
    self.TimeOut = 20  # minimum sync polling interval (seconds)
    self.media_count = -1
    # Shared cookie jar installed globally for all urllib2 requests.
    self.cookie = cookielib.CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie))
    opener.addheaders = [('User-agent', self.user_agent)]
    urllib2.install_opener(opener)
def loadConfig(self, config):
    """Apply optional settings from a config dict.

    Only truthy values override the defaults set in ``__init__``.

    BUG FIX: the original indexed ``config['<key>']`` directly, raising
    KeyError when a key was absent; ``dict.get`` makes every key optional
    while keeping the same behavior when keys are present.
    """
    if config.get('DEBUG'):
        self.DEBUG = config['DEBUG']
    if config.get('autoReplyMode'):
        self.autoReplyMode = config['autoReplyMode']
    if config.get('user_agent'):
        self.user_agent = config['user_agent']
    if config.get('interactive'):
        self.interactive = config['interactive']
    if config.get('autoOpen'):
        self.autoOpen = config['autoOpen']
def getUUID(self):
    """Request a login UUID from the WeChat jslogin endpoint.

    Returns:
        bool: True when the server answered with code 200 and the UUID
        was stored in ``self.uuid``; False on empty or unparsable response.
    """
    url = 'https://login.weixin.qq.com/jslogin'
    params = {
        'appid': self.appid,
        'fun': 'new',
        'lang': self.lang,
        '_': int(time.time()),
    }
    data = self._post(url, params, False)
    if data == '':
        return False
    regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)"'
    pm = re.search(regx, data)
    if pm:
        code = pm.group(1)
        self.uuid = pm.group(2)
        return code == '200'
    return False
def genQRCode(self):
    """Show the login QR code: an image viewer on Windows, terminal
    (ASCII) QR everywhere else."""
    #return self._showQRCodeImg()
    if sys.platform.startswith('win'):
        self._showQRCodeImg()
    else:
        self._str2qr('https://login.weixin.qq.com/l/' + self.uuid)
def _showQRCodeImg(self):
    """Download the login QR code image, save it, and open it with the
    default viewer.

    NOTE: ``os.startfile`` exists on Windows only, matching the
    sys.platform check in genQRCode.
    """
    url = 'https://login.weixin.qq.com/qrcode/' + self.uuid
    params = {
        't': 'webwx',
        '_': int(time.time())
    }
    data = self._post(url, params, False)
    if data == '':
        return
    QRCODE_PATH = self._saveFile('qrcode.jpg', data, '_showQRCodeImg')
    os.startfile(QRCODE_PATH)
def waitForLogin(self, tip=1):
    """Poll the login endpoint until the QR code is scanned/confirmed.

    :arg tip: 1 for the first poll, 0 afterwards; also reused as the
        pre-poll sleep duration in seconds.

    Returns:
        bool: True when the QR was scanned (code 201) or login confirmed
        (code 200; redirect/base URIs are stored as a side effect).
        False on timeout (408), any other code, or an unparsable response.

    BUG FIX: the original called ``pm.group(1)`` without checking that the
    regex matched, crashing with AttributeError on an unexpected response
    body; a failed match is now treated as a failed poll.
    """
    time.sleep(tip)
    url = 'https://login.weixin.qq.com/cgi-bin/mmwebwx-bin/login?tip=%s&uuid=%s&_=%s' % (
        tip, self.uuid, int(time.time()))
    data = self._get(url)
    if data == '':
        return False
    pm = re.search(r'window.code=(\d+);', data)
    if pm is None:
        return False
    code = pm.group(1)
    if code == '201':  # QR scanned, waiting for confirmation on the phone
        return True
    elif code == '200':  # login confirmed
        pm = re.search(r'window.redirect_uri="(\S+?)";', data)
        if pm is None:
            return False
        r_uri = pm.group(1) + '&fun=new'
        self.redirect_uri = r_uri
        self.base_uri = r_uri[:r_uri.rfind('/')]
        return True
    elif code == '408':
        self._echo('[登陆超时] \n')
    else:
        self._echo('[登陆异常] \n')
    return False
def login(self):
    """Exchange the redirect URI for session credentials.

    Parses the XML response and stores skey, sid (wxsid), uin (wxuin) and
    pass_ticket, then builds ``self.BaseRequest`` used by all later calls.

    Returns:
        bool: True when all four credentials were obtained.
    """
    data = self._get(self.redirect_uri)
    if data == '':
        return False
    doc = xml.dom.minidom.parseString(data)
    root = doc.documentElement
    for node in root.childNodes:
        if node.nodeName == 'skey':
            self.skey = node.childNodes[0].data
        elif node.nodeName == 'wxsid':
            self.sid = node.childNodes[0].data
        elif node.nodeName == 'wxuin':
            self.uin = node.childNodes[0].data
        elif node.nodeName == 'pass_ticket':
            self.pass_ticket = node.childNodes[0].data
    # Any still-empty credential means the login response was incomplete.
    if '' in (self.skey, self.sid, self.uin, self.pass_ticket):
        return False
    self.BaseRequest = {
        'Uin': int(self.uin),
        'Sid': self.sid,
        'Skey': self.skey,
        'DeviceID': self.deviceId,
    }
    return True
def webwxinit(self):
    """Initialise the web session: fetch the SyncKey and own user profile.

    Also precomputes ``self.synckey`` (the "key_val|key_val|..." string
    required by the synccheck polling endpoint).

    Returns:
        bool: True when the server reported Ret == 0.
    """
    url = self.base_uri + '/webwxinit?pass_ticket=%s&skey=%s&r=%s' % (
        self.pass_ticket, self.skey, int(time.time()))
    params = {
        'BaseRequest': self.BaseRequest
    }
    dic = self._post(url, params)
    if dic == '':
        return False
    self.SyncKey = dic['SyncKey']
    self.User = dic['User']
    # synckey for synccheck
    self.synckey = '|'.join(
        [str(keyVal['Key']) + '_' + str(keyVal['Val']) for keyVal in self.SyncKey['List']])
    return dic['BaseResponse']['Ret'] == 0
def webwxstatusnotify(self):
    """Send the post-login status notification (Code 3, self -> self),
    mirroring what the official web client does after init.

    Returns:
        bool: True when the server reported Ret == 0.
    """
    url = self.base_uri + \
        '/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (self.pass_ticket)
    params = {
        'BaseRequest': self.BaseRequest,
        "Code": 3,
        "FromUserName": self.User['UserName'],
        "ToUserName": self.User['UserName'],
        "ClientMsgId": int(time.time())
    }
    dic = self._post(url, params)
    if dic == '':
        return False
    return dic['BaseResponse']['Ret'] == 0
def webwxgetcontact(self):
    """Download the full member list and split it into friends, groups,
    public/service accounts and special accounts.

    Iterates the copy backwards so removals don't skip entries.
    NOTE(review): the local GroupList/PublicUsersList/SpecialUsersList
    copies are created but never used -- only the self.* lists are filled.

    Returns:
        bool: True on success (False only when the request returned '').
    """
    SpecialUsers = self.SpecialUsers
    url = self.base_uri + '/webwxgetcontact?pass_ticket=%s&skey=%s&r=%s' % (
        self.pass_ticket, self.skey, int(time.time()))
    dic = self._post(url, {})
    if dic == '':
        return False
    self.MemberCount = dic['MemberCount']
    self.MemberList = dic['MemberList']
    ContactList = self.MemberList[:]
    GroupList = self.GroupList[:]
    PublicUsersList = self.PublicUsersList[:]
    SpecialUsersList = self.SpecialUsersList[:]
    for i in xrange(len(ContactList) - 1, -1, -1):
        Contact = ContactList[i]
        if Contact['VerifyFlag'] & 8 != 0:  # public/service account
            ContactList.remove(Contact)
            self.PublicUsersList.append(Contact)
        elif Contact['UserName'] in SpecialUsers:  # special built-in account
            ContactList.remove(Contact)
            self.SpecialUsersList.append(Contact)
        elif '@@' in Contact['UserName']:  # group chat
            ContactList.remove(Contact)
            self.GroupList.append(Contact)
        elif Contact['UserName'] == self.User['UserName']:  # own account
            ContactList.remove(Contact)
    self.ContactList = ContactList
    return True
def webwxbatchgetcontact(self):
    """Fetch detailed info for every known group and collect all group
    members into ``self.GroupMemeberList``.

    Replaces ``self.GroupList`` with the detailed contact records.

    Returns:
        bool: True on success (False only when the request returned '').
    """
    url = self.base_uri + \
        '/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (
            int(time.time()), self.pass_ticket)
    params = {
        'BaseRequest': self.BaseRequest,
        "Count": len(self.GroupList),
        "List": [{"UserName": g['UserName'], "EncryChatRoomId":""} for g in self.GroupList]
    }
    dic = self._post(url, params)
    if dic == '':
        return False
    # blabla ...
    ContactList = dic['ContactList']
    ContactCount = dic['Count']
    self.GroupList = ContactList
    for i in xrange(len(ContactList) - 1, -1, -1):
        Contact = ContactList[i]
        MemberList = Contact['MemberList']
        for member in MemberList:
            self.GroupMemeberList.append(member)
    return True
def getNameById(self, id):
    """Look up a single contact record by its UserName.

    NOTE: the parameter shadows the builtin ``id`` (kept for interface
    compatibility).

    Returns:
        list or None: the server's ContactList for this id, or None when
        the request returned ''.
    """
    url = self.base_uri + \
        '/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (
            int(time.time()), self.pass_ticket)
    params = {
        'BaseRequest': self.BaseRequest,
        "Count": 1,
        "List": [{"UserName": id, "EncryChatRoomId": ""}]
    }
    dic = self._post(url, params)
    if dic == '':
        return None
    # blabla ...
    return dic['ContactList']
def testsynccheck(self):
    """Probe the candidate sync hosts and keep the first one that answers
    synccheck with retcode '0'.

    Returns:
        bool: True when a working host was found (stored in self.syncHost).
    """
    SyncHost = [
        'webpush.weixin.qq.com',
        #'webpush2.weixin.qq.com',
        'webpush.wechat.com',
        'webpush1.wechat.com',
        'webpush2.wechat.com',
        'webpush.wx.qq.com',
        'webpush2.wx.qq.com'
        # 'webpush.wechatapp.com'
    ]
    for host in SyncHost:
        self.syncHost = host
        [retcode, selector] = self.synccheck()
        if retcode == '0':
            return True
    return False
def synccheck(self):
    """Long-poll the current sync host for new events.

    Returns:
        list: [retcode, selector] as strings from the response, or
        [-1, -1] when the request returned ''.

    NOTE(review): ``pm`` may be None on a malformed response, which would
    raise AttributeError at ``pm.group(1)`` -- confirm upstream handling.
    """
    params = {
        'r': int(time.time()),
        'sid': self.sid,
        'uin': self.uin,
        'skey': self.skey,
        'deviceid': self.deviceId,
        'synckey': self.synckey,
        '_': int(time.time()),
    }
    url = 'https://' + self.syncHost + \
        '/cgi-bin/mmwebwx-bin/synccheck?' + urllib.urlencode(params)
    data = self._get(url)
    if data == '':
        return [-1,-1]
    pm = re.search(
        r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}', data)
    retcode = pm.group(1)
    selector = pm.group(2)
    return [retcode, selector]
def webwxsync(self):
    """Pull new messages/events and refresh the SyncKey.

    Returns:
        dict or None: the decoded sync response, or None when the request
        returned ''.

    NOTE: Python 2 code (print statement).
    """
    url = self.base_uri + \
        '/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
            self.sid, self.skey, self.pass_ticket)
    params = {
        'BaseRequest': self.BaseRequest,
        'SyncKey': self.SyncKey,
        'rr': ~int(time.time())  # bitwise-negated timestamp, as the web client sends
    }
    dic = self._post(url, params)
    if dic == '':
        return None
    if self.DEBUG:
        print json.dumps(dic, indent=4)
        # NOTE(review): the next line is a no-op expression (result discarded)
        (json.dumps(dic, indent=4))
    if dic['BaseResponse']['Ret'] == 0:
        # Server accepted the sync: adopt the new SyncKey and rebuild the
        # synccheck key string.
        self.SyncKey = dic['SyncKey']
        self.synckey = '|'.join(
            [str(keyVal['Key']) + '_' + str(keyVal['Val']) for keyVal in self.SyncKey['List']])
    return dic
def webwxsendmsg(self, word, to='filehelper'):
    """Send a plain-text message (Type 1).

    :arg word: message text (run through self._transcoding first).
    :arg to: recipient UserName; defaults to the 'filehelper' account.

    Returns:
        bool: True when the server reported Ret == 0.
    """
    url = self.base_uri + \
        '/webwxsendmsg?pass_ticket=%s' % (self.pass_ticket)
    # Client message id: millisecond timestamp + 4 random digits.
    clientMsgId = str(int(time.time() * 1000)) + \
        str(random.random())[:5].replace('.', '')
    params = {
        'BaseRequest': self.BaseRequest,
        'Msg': {
            "Type": 1,
            "Content": self._transcoding(word),
            "FromUserName": self.User['UserName'],
            "ToUserName": to,
            "LocalID": clientMsgId,
            "ClientMsgId": clientMsgId
        }
    }
    headers = {'content-type': 'application/json; charset=UTF-8'}
    data = json.dumps(params, ensure_ascii=False).encode('utf8')
    r = requests.post(url, data=data, headers=headers)
    dic = r.json()
    return dic['BaseResponse']['Ret'] == 0
def webwxuploadmedia(self, image_name):
    """Upload a media file via the multipart upload endpoint.

    :arg image_name: path of the file to upload.

    Returns:
        dict, None or str: the server response on success, None when the
        server reported a non-zero Ret, or an error string when the
        webwx_data_ticket cookie is missing.
    """
    url = 'https://file2.wx.qq.com/cgi-bin/mmwebwx-bin/webwxuploadmedia?f=json'
    # Upload counter (used for the WU_FILE_<n> form field).
    self.media_count = self.media_count + 1
    # File name
    file_name = image_name
    # MIME type
    # mime_type = application/pdf, image/jpeg, image/png, etc.
    mime_type = mimetypes.guess_type(image_name, strict=False)[0]
    # WeChat media category: 'pic' is rendered inline, anything else is
    # sent as a 'doc' file attachment.
    media_type = 'pic' if mime_type.split('/')[0] == 'image' else 'doc'
    # Last-modified date; hard-coded constant -- the server does not
    # appear to validate it (TODO confirm).
    lastModifieDate = 'Thu Mar 17 2016 00:55:10 GMT+0800 (CST)'
    # File size in bytes
    file_size = os.path.getsize(file_name)
    # PassTicket
    pass_ticket = self.pass_ticket
    # clientMediaId: millisecond timestamp + 4 random digits.
    client_media_id = str(int(time.time() * 1000)) + \
        str(random.random())[:5].replace('.', '')
    # webwx_data_ticket: pulled from the session cookie jar.
    webwx_data_ticket = ''
    for item in self.cookie:
        if item.name == 'webwx_data_ticket':
            webwx_data_ticket = item.value
            break
    if (webwx_data_ticket == ''):
        return "None Fuck Cookie"
    uploadmediarequest = json.dumps({
        "BaseRequest": self.BaseRequest,
        "ClientMediaId": client_media_id,
        "TotalLen": file_size,
        "StartPos": 0,
        "DataLen": file_size,
        "MediaType": 4
    }, ensure_ascii=False).encode('utf8')
    multipart_encoder = MultipartEncoder(
        fields={
            'id': 'WU_FILE_' + str(self.media_count),
            'name': file_name,
            'type': mime_type,
            'lastModifieDate': lastModifieDate,
            'size': str(file_size),
            'mediatype': media_type,
            'uploadmediarequest': uploadmediarequest,
            'webwx_data_ticket': webwx_data_ticket,
            'pass_ticket': pass_ticket,
            'filename': (file_name, open(file_name, 'rb'), mime_type.split('/')[1])
        },
        boundary='-----------------------------1575017231431605357584454111'
    )
    headers = {
        'Host': 'file2.wx.qq.com',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:42.0) Gecko/20100101 Firefox/42.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Referer': 'https://wx2.qq.com/',
        'Content-Type': multipart_encoder.content_type,
        'Origin': 'https://wx2.qq.com',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache'
    }
    r = requests.post(url, data=multipart_encoder, headers=headers)
    response_json = r.json()
    if response_json['BaseResponse']['Ret'] == 0:
        return response_json
    return None
def webwxsendmsgimg(self, user_id, media_id):
    """Send a previously uploaded image (``media_id``) to ``user_id``.

    Posts a MsgType-3 message through the async send endpoint and returns
    True when the server reports ``BaseResponse.Ret == 0``.
    """
    endpoint = ('https://wx2.qq.com/cgi-bin/mmwebwx-bin/webwxsendmsgimg'
                '?fun=async&f=json&pass_ticket=%s' % self.pass_ticket)
    # Client-side message id: epoch millis plus five random digits.
    local_id = str(int(time.time() * 1000)) + \
        str(random.random())[:5].replace('.', '')
    payload = json.dumps({
        "BaseRequest": self.BaseRequest,
        "Msg": {
            "Type": 3,
            "MediaId": media_id,
            "FromUserName": self.User['UserName'],
            "ToUserName": user_id,
            "LocalID": local_id,
            "ClientMsgId": local_id
        }
    }, ensure_ascii=False).encode('utf8')
    reply = requests.post(
        endpoint, data=payload,
        headers={'content-type': 'application/json; charset=UTF-8'})
    return reply.json()['BaseResponse']['Ret'] == 0
def webwxsendmsgemotion(self, user_id, media_id):
    # Send a previously uploaded emoticon (MsgType 47) to user_id.
    # Returns True when the server reports BaseResponse.Ret == 0.
    url = 'https://wx2.qq.com/cgi-bin/mmwebwx-bin/webwxsendemoticon?fun=sys&f=json&pass_ticket=%s' % self.pass_ticket
    # Client-side message id: epoch millis plus five random digits.
    clientMsgId = str(int(time.time() * 1000)) + \
        str(random.random())[:5].replace('.', '')
    data_json = {
        "BaseRequest": self.BaseRequest,
        "Msg": {
            "Type": 47,
            "EmojiFlag": 2,
            "MediaId": media_id,
            "FromUserName": self.User['UserName'],
            "ToUserName": user_id,
            "LocalID": clientMsgId,
            "ClientMsgId": clientMsgId
        }
    }
    headers = {'content-type': 'application/json; charset=UTF-8'}
    data = json.dumps(data_json, ensure_ascii=False).encode('utf8')
    r = requests.post(url, data=data, headers=headers)
    dic = r.json()
    if self.DEBUG:
        # Python 2 print statement: dump the full server reply when debugging.
        print json.dumps(dic, indent=4)
        logging.debug(json.dumps(dic, indent=4))
    return dic['BaseResponse']['Ret'] == 0
def _saveFile(self, filename, data, api=None):
    """Write a binary payload to disk and return the path written.

    When ``self.saveSubFolders[api]`` is truthy the file goes into
    ``saveFolder/saveSubFolders[api]`` (created on demand); otherwise it is
    written to ``filename`` as given.
    """
    fn = filename
    if self.saveSubFolders[api]:
        dirName = os.path.join(self.saveFolder, self.saveSubFolders[api])
        if not os.path.exists(dirName):
            os.makedirs(dirName)
        fn = os.path.join(dirName, filename)
    logging.debug('Saved file: %s' % fn)
    # FIX: the original called f.close() inside the with-block; the context
    # manager already closes the file, so the explicit close was redundant.
    with open(fn, 'wb') as f:
        f.write(data)
    return fn
def webwxgeticon(self, id):
    """Download a contact's small avatar; returns the saved path or ''."""
    url = self.base_uri + \
        '/webwxgeticon?username=%s&skey=%s' % (id, self.skey)
    payload = self._get(url)
    if payload == '':
        # Download failed (self._get signals errors with an empty string).
        return ''
    return self._saveFile('img_' + id + '.jpg', payload, 'webwxgeticon')
def webwxgetheadimg(self, id):
    """Download a contact's full-size avatar; returns the saved path or ''."""
    url = self.base_uri + \
        '/webwxgetheadimg?username=%s&skey=%s' % (id, self.skey)
    payload = self._get(url)
    if payload == '':
        return ''
    return self._saveFile('img_' + id + '.jpg', payload, 'webwxgetheadimg')
def webwxgetmsgimg(self, msgid):
    """Download the image attached to a message; returns the path or ''."""
    url = self.base_uri + \
        '/webwxgetmsgimg?MsgID=%s&skey=%s' % (msgid, self.skey)
    payload = self._get(url)
    if payload == '':
        return ''
    return self._saveFile('img_' + msgid + '.jpg', payload, 'webwxgetmsgimg')
# NOTE: the server did not support this endpoint when this was written.
def webwxgetvideo(self, msgid):
    """Download the video attached to a message; returns the path or ''."""
    url = self.base_uri + \
        '/webwxgetvideo?msgid=%s&skey=%s' % (msgid, self.skey)
    payload = self._get(url, api='webwxgetvideo')
    if payload == '':
        return ''
    return self._saveFile('video_' + msgid + '.mp4', payload, 'webwxgetvideo')
def webwxgetvoice(self, msgid):
    """Download the voice clip attached to a message; returns the path or ''."""
    url = self.base_uri + \
        '/webwxgetvoice?msgid=%s&skey=%s' % (msgid, self.skey)
    payload = self._get(url)
    if payload == '':
        return ''
    return self._saveFile('voice_' + msgid + '.mp3', payload, 'webwxgetvoice')
def getGroupName(self, id):
    """Resolve a group id to its nickname.

    Looks in the cached GroupList first; on a miss it queries the server via
    getNameById, merging the returned groups (and their members) into the
    local caches. Returns '未知群' when the id cannot be resolved at all.
    """
    unknown = '未知群'
    name = unknown
    for cached in self.GroupList:
        if cached['UserName'] == id:
            name = cached['NickName']
    if name == unknown:
        # Cache miss: fetch from the server and fold results into the caches.
        for group in self.getNameById(id):
            self.GroupList.append(group)
            if group['UserName'] == id:
                name = group['NickName']
                for member in group['MemberList']:
                    self.GroupMemeberList.append(member)
    return name
def getUserRemarkName(self, id):
    """Best-effort human-readable name for a user or group id.

    Returns our own nickname for ourselves, the group name for '@@' ids,
    and otherwise searches special users, public accounts, direct contacts
    and group members (later buckets override earlier hits, preserving the
    original lookup order). Falls back to '陌生人' / '未知群'.
    """
    if id == self.User['UserName']:
        return self.User['NickName']  # ourselves
    if id[:2] == '@@':
        # Group-chat ids start with '@@'.
        name = self.getGroupName(id)
    else:
        name = '陌生人'
        buckets = (
            (self.SpecialUsersList, 'RemarkName'),
            (self.PublicUsersList, 'RemarkName'),
            (self.ContactList, 'RemarkName'),
            (self.GroupMemeberList, 'DisplayName'),
        )
        for members, preferred in buckets:
            for member in members:
                if member['UserName'] == id:
                    name = member[preferred] if member[
                        preferred] else member['NickName']
    if name == '未知群' or name == '陌生人':
        logging.debug(id)
    return name
def getUSerID(self, name):
    """Reverse lookup: remark name or nickname -> UserName, or None."""
    matches = (member['UserName'] for member in self.MemberList
               if name in (member['RemarkName'], member['NickName']))
    return next(matches, None)
def _showMsg(self, message):
    # Pretty-print one message to stdout/log, resolving sender/recipient
    # names and rewriting special cases (locations, groups, red packets).
    # `message` is {'raw_msg': <server msg>, 'message': <optional override>}.
    srcName = None
    dstName = None
    groupName = None
    content = None
    msg = message
    logging.debug(msg)
    if msg['raw_msg']:
        srcName = self.getUserRemarkName(msg['raw_msg']['FromUserName'])
        dstName = self.getUserRemarkName(msg['raw_msg']['ToUserName'])
        content = msg['raw_msg']['Content'].replace(
            '<', '<').replace('>', '>')
        message_id = msg['raw_msg']['MsgId']
        if content.find('http://weixin.qq.com/cgi-bin/redirectforward?args=') != -1:
            # Location (geo) message: fetch the redirect page to recover the
            # place title and the static-map image URL.
            data = self._get(content)
            if data == '':
                return
            data.decode('gbk').encode('utf-8')
            pos = self._searchContent('title', data, 'xml')
            temp = self._get(content)
            if temp == '':
                return
            tree = html.fromstring(temp)
            url = tree.xpath('//html/body/div/img')[0].attrib['src']
            # The 'center' query parameter carries the coordinates.
            # NOTE(review): if no 'center' parameter exists, `loc` is unbound
            # below — confirm the endpoint always includes it.
            for item in urlparse(url).query.split('&'):
                if item.split('=')[0] == 'center':
                    loc = item.split('=')[-1:]
            content = '%s 发送了一个 位置消息 - 我在 [%s](%s) @ %s]' % (
                srcName, pos, url, loc)
        if msg['raw_msg']['ToUserName'] == 'filehelper':
            # Messages sent to the "file transfer helper" pseudo-contact.
            dstName = '文件传输助手'
        if msg['raw_msg']['FromUserName'][:2] == '@@':
            # Message arriving from a group chat.
            if ":<br/>" in content:
                # Group member messages are prefixed "sender_id:<br/>".
                [people, content] = content.split(':<br/>', 1)
                groupName = srcName
                srcName = self.getUserRemarkName(people)
                dstName = 'GROUP'
            else:
                # No sender prefix: a system notice inside the group.
                groupName = srcName
                srcName = 'SYSTEM'
        elif msg['raw_msg']['ToUserName'][:2] == '@@':
            # Our own message sent to a group.
            groupName = dstName
            dstName = 'GROUP'
        # Red packet received — surface the canned notice verbatim.
        if content == '收到红包,请在手机上查看':
            msg['message'] = content
        # A caller-provided 'message' overrides the raw content.
        if 'message' in msg.keys():
            content = msg['message']
        if groupName != None:
            print '%s |%s| %s -> %s: %s' % (message_id, groupName.strip(), srcName.strip(), dstName.strip(), content.replace('<br/>', '\n'))
            logging.info('%s |%s| %s -> %s: %s' % (message_id, groupName.strip(),
                                                   srcName.strip(), dstName.strip(), content.replace('<br/>', '\n')))
        else:
            print '%s %s -> %s: %s' % (message_id, srcName.strip(), dstName.strip(), content.replace('<br/>', '\n'))
            logging.info('%s %s -> %s: %s' % (message_id, srcName.strip(),
                                              dstName.strip(), content.replace('<br/>', '\n')))
def handleMsg(self, r):
    # Dispatch every new message in a webwxsync response by MsgType,
    # downloading attachments and echoing a human-readable line for each.
    for msg in r['AddMsgList']:
        print '[*] 你有新的消息,请注意查收'
        logging.debug('[*] 你有新的消息,请注意查收')
        if self.DEBUG:
            # Debug aid: persist the raw message JSON to a random file.
            fn = 'msg' + str(int(random.random() * 1000)) + '.json'
            with open(fn, 'w') as f:
                f.write(json.dumps(msg))
            print '[*] 该消息已储存到文件: ' + fn
            logging.debug('[*] 该消息已储存到文件: %s' % (fn))
        msgType = msg['MsgType']
        name = self.getUserRemarkName(msg['FromUserName'])
        content = msg['Content'].replace('<', '<').replace('>', '>')
        msgid = msg['MsgId']
        if msgType == 1:
            # Text message; optionally auto-reply via the chat bot.
            raw_msg = {'raw_msg': msg}
            self._showMsg(raw_msg)
            if self.autoReplyMode:
                ans = self._xiaodoubi(content) + '\n[微信机器人自动回复]'
                if self.webwxsendmsg(ans, msg['FromUserName']):
                    print '自动回复: ' + ans
                    logging.info('自动回复: ' + ans)
                else:
                    print '自动回复失败'
                    logging.info('自动回复失败')
        elif msgType == 3:
            # Image: download it and optionally open it locally.
            image = self.webwxgetmsgimg(msgid)
            raw_msg = {'raw_msg': msg,
                       'message': '%s 发送了一张图片: %s' % (name, image)}
            self._showMsg(raw_msg)
            self._safe_open(image)
        elif msgType == 34:
            # Voice clip.
            voice = self.webwxgetvoice(msgid)
            raw_msg = {'raw_msg': msg,
                       'message': '%s 发了一段语音: %s' % (name, voice)}
            self._showMsg(raw_msg)
            self._safe_open(voice)
        elif msgType == 42:
            # Name card: print the embedded contact details.
            info = msg['RecommendInfo']
            print '%s 发送了一张名片:' % name
            print '========================='
            print '= 昵称: %s' % info['NickName']
            print '= 微信号: %s' % info['Alias']
            print '= 地区: %s %s' % (info['Province'], info['City'])
            print '= 性别: %s' % ['未知', '男', '女'][info['Sex']]
            print '========================='
            raw_msg = {'raw_msg': msg, 'message': '%s 发送了一张名片: %s' % (
                name.strip(), json.dumps(info))}
            self._showMsg(raw_msg)
        elif msgType == 47:
            # Animated emoticon: extract its CDN URL from the content.
            url = self._searchContent('cdnurl', content)
            raw_msg = {'raw_msg': msg,
                       'message': '%s 发了一个动画表情,点击下面链接查看: %s' % (name, url)}
            self._showMsg(raw_msg)
            self._safe_open(url)
        elif msgType == 49:
            # App/link share (link, music, weibo, ...).
            appMsgType = defaultdict(lambda: "")
            appMsgType.update({5: '链接', 3: '音乐', 7: '微博'})
            print '%s 分享了一个%s:' % (name, appMsgType[msg['AppMsgType']])
            print '========================='
            print '= 标题: %s' % msg['FileName']
            print '= 描述: %s' % self._searchContent('des', content, 'xml')
            print '= 链接: %s' % msg['Url']
            print '= 来自: %s' % self._searchContent('appname', content, 'xml')
            print '========================='
            card = {
                'title': msg['FileName'],
                'description': self._searchContent('des', content, 'xml'),
                'url': msg['Url'],
                'appname': self._searchContent('appname', content, 'xml')
            }
            raw_msg = {'raw_msg': msg, 'message': '%s 分享了一个%s: %s' % (
                name, appMsgType[msg['AppMsgType']], json.dumps(card))}
            self._showMsg(raw_msg)
        elif msgType == 51:
            # Contact-info sync notification.
            raw_msg = {'raw_msg': msg, 'message': '[*] 成功获取联系人信息'}
            self._showMsg(raw_msg)
        elif msgType == 62:
            # Short video.
            video = self.webwxgetvideo(msgid)
            raw_msg = {'raw_msg': msg,
                       'message': '%s 发了一段小视频: %s' % (name, video)}
            self._showMsg(raw_msg)
            self._safe_open(video)
        elif msgType == 10002:
            # Message recall notification.
            raw_msg = {'raw_msg': msg, 'message': '%s 撤回了一条消息' % name}
            self._showMsg(raw_msg)
        else:
            # Unknown type: log the raw payload for later inspection.
            logging.debug('[*] 该消息类型为: %d,可能是表情,图片, 链接或红包: %s' %
                          (msg['MsgType'], json.dumps(msg)))
            raw_msg = {
                'raw_msg': msg, 'message': '[*] 该消息类型为: %d,可能是表情,图片, 链接或红包' % msg['MsgType']}
            self._showMsg(raw_msg)
def listenMsgMode(self):
    # Long-poll loop: synccheck tells us whether anything changed, webwxsync
    # fetches the actual payload. Exits on logout retcodes 1100/1101.
    print '[*] 进入消息监听模式 ... 成功'
    logging.debug('[*] 进入消息监听模式 ... 成功')
    self._run('[*] 进行同步线路测试 ... ', self.testsynccheck)
    playWeChat = 0
    redEnvelope = 0
    while True:
        self.lastCheckTs = time.time()
        [retcode, selector] = self.synccheck()
        if self.DEBUG:
            print 'retcode: %s, selector: %s' % (retcode, selector)
            logging.debug('retcode: %s, selector: %s' % (retcode, selector))
        if retcode == '1100':
            # Logged out from the phone.
            print '[*] 你在手机上登出了微信,债见'
            logging.debug('[*] 你在手机上登出了微信,债见')
            break
        if retcode == '1101':
            # Logged in to web WeChat somewhere else.
            print '[*] 你在其他地方登录了 WEB 版微信,债见'
            logging.debug('[*] 你在其他地方登录了 WEB 版微信,债见')
            break
        elif retcode == '0':
            if selector == '2':
                # New message available: pull and dispatch it.
                r = self.webwxsync()
                if r is not None:
                    self.handleMsg(r)
            elif selector == '6':
                # TODO
                # Suspected red-packet notification.
                redEnvelope += 1
                print '[*] 收到疑似红包消息 %d 次' % redEnvelope
                logging.debug('[*] 收到疑似红包消息 %d 次' % redEnvelope)
            elif selector == '7':
                # Activity on the phone client.
                playWeChat += 1
                print '[*] 你在手机上玩微信被我发现了 %d 次' % playWeChat
                logging.debug('[*] 你在手机上玩微信被我发现了 %d 次' % playWeChat)
                r = self.webwxsync()
            elif selector == '0':
                time.sleep(1)
        # NOTE(review): this sleeps for the *elapsed* time of the iteration,
        # not the remaining time of a 20 s window — it looks like it was
        # meant to be time.sleep(20 - (time.time() - self.lastCheckTs));
        # confirm the intent before changing.
        if (time.time() - self.lastCheckTs) <= 20:
            time.sleep(time.time() - self.lastCheckTs)
def sendMsg(self, name, word, isfile=False):
    # Send `word` to the contact called `name`. With isfile=True, `word` is
    # a path and each line of the file is sent as a separate message,
    # one second apart.
    id = self.getUSerID(name)
    if id:
        if isfile:
            with open(word, 'r') as f:
                for line in f.readlines():
                    line = line.replace('\n', '')
                    self._echo('-> ' + name + ': ' + line)
                    if self.webwxsendmsg(line, id):
                        print ' [成功]'
                    else:
                        print ' [失败]'
                    time.sleep(1)
        else:
            if self.webwxsendmsg(word, id):
                print '[*] 消息发送成功'
                logging.debug('[*] 消息发送成功')
            else:
                print '[*] 消息发送失败'
                logging.debug('[*] 消息发送失败')
    else:
        # Unknown contact name.
        print '[*] 此用户不存在'
        logging.debug('[*] 此用户不存在')
def sendMsgToAll(self, word):
    # Broadcast `word` to every direct contact, pausing one second between
    # sends (presumably to avoid server-side rate limiting — unverified).
    for contact in self.ContactList:
        name = contact['RemarkName'] if contact[
            'RemarkName'] else contact['NickName']
        id = contact['UserName']
        self._echo('-> ' + name + ': ' + word)
        if self.webwxsendmsg(word, id):
            print ' [成功]'
        else:
            print ' [失败]'
        time.sleep(1)
def sendImg(self, name, file_name):
    """Upload the image at ``file_name`` and send it to contact ``name``."""
    upload = self.webwxuploadmedia(file_name)
    # Empty media id when the upload failed; the send is attempted anyway,
    # matching the original behavior.
    media_id = upload['MediaId'] if upload is not None else ""
    self.webwxsendmsgimg(self.getUSerID(name), media_id)
def sendEmotion(self, name, file_name):
    """Upload the emoticon at ``file_name`` and send it to contact ``name``."""
    upload = self.webwxuploadmedia(file_name)
    # Empty media id when the upload failed; the send is attempted anyway,
    # matching the original behavior.
    media_id = upload['MediaId'] if upload is not None else ""
    self.webwxsendmsgemotion(self.getUSerID(name), media_id)
@catchKeyboardInterrupt
def start(self):
    # Full interactive client lifecycle: QR-code login loop, session init,
    # contact sync, then message listening plus a simple command REPL
    # ("->name:msg", "m->", "i->", "e->", "quit").
    self._echo('[*] 微信网页版 ... 开动')
    print
    logging.debug('[*] 微信网页版 ... 开动')
    while True:
        # Retry the whole QR handshake until both login phases succeed.
        self._run('[*] 正在获取 uuid ... ', self.getUUID)
        self._echo('[*] 正在获取二维码 ... 成功')
        print
        logging.debug('[*] 微信网页版 ... 开动')
        self.genQRCode()
        print '[*] 请使用微信扫描二维码以登录 ... '
        if not self.waitForLogin():
            continue
        print '[*] 请在手机上点击确认以登录 ... '
        if not self.waitForLogin(0):
            continue
        break
    self._run('[*] 正在登录 ... ', self.login)
    self._run('[*] 微信初始化 ... ', self.webwxinit)
    self._run('[*] 开启状态通知 ... ', self.webwxstatusnotify)
    self._run('[*] 获取联系人 ... ', self.webwxgetcontact)
    self._echo('[*] 应有 %s 个联系人,读取到联系人 %d 个' %
               (self.MemberCount, len(self.MemberList)))
    print
    self._echo('[*] 共有 %d 个群 | %d 个直接联系人 | %d 个特殊账号 | %d 公众号或服务号' % (len(self.GroupList),
                                                            len(self.ContactList), len(self.SpecialUsersList), len(self.PublicUsersList)))
    print
    self._run('[*] 获取群 ... ', self.webwxbatchgetcontact)
    logging.debug('[*] 微信网页版 ... 开动')
    if self.DEBUG:
        print self
        logging.debug(self)
    if self.interactive and raw_input('[*] 是否开启自动回复模式(y/n): ') == 'y':
        self.autoReplyMode = True
        print '[*] 自动回复模式 ... 开启'
        logging.debug('[*] 自动回复模式 ... 开启')
    else:
        print '[*] 自动回复模式 ... 关闭'
        logging.debug('[*] 自动回复模式 ... 关闭')
    if sys.platform.startswith('win'):
        import thread
        # NOTE(review): this CALLS listenMsgMode() immediately (blocking) and
        # passes its return value to start_new_thread, which also requires an
        # args tuple. It likely should be
        # thread.start_new_thread(self.listenMsgMode, ()) — confirm.
        thread.start_new_thread(self.listenMsgMode())
    else:
        # Listener runs in a child process; the parent handles the REPL.
        listenProcess = multiprocessing.Process(target=self.listenMsgMode)
        listenProcess.start()
        while True:
            text = raw_input('')
            if text == 'quit':
                listenProcess.terminate()
                print('[*] 退出微信')
                logging.debug('[*] 退出微信')
                exit()
            elif text[:2] == '->':
                # "->name:message" (name 'all' broadcasts).
                [name, word] = text[2:].split(':')
                if name == 'all':
                    self.sendMsgToAll(word)
                else:
                    self.sendMsg(name, word)
            elif text[:3] == 'm->':
                # "m->name:path" sends a file line-by-line.
                [name, file] = text[3:].split(':')
                self.sendMsg(name, file, True)
            elif text[:3] == 'f->':
                # File sending is not implemented; only logged.
                print '发送文件'
                logging.debug('发送文件')
            elif text[:3] == 'i->':
                # "i->name:path" sends an image.
                print '发送图片'
                [name, file_name] = text[3:].split(':')
                self.sendImg(name, file_name)
                logging.debug('发送图片')
            elif text[:3] == 'e->':
                # "e->name:path" sends an emoticon.
                print '发送表情'
                [name, file_name] = text[3:].split(':')
                self.sendEmotion(name, file_name)
                logging.debug('发送表情')
def _safe_open(self, path):
    """Open a downloaded file/URL with the platform opener when autoOpen is set.

    NOTE(review): `path` is interpolated into a shell command unquoted, so a
    value containing shell metacharacters would be unsafe — confirm inputs
    are always locally generated paths.
    """
    if not self.autoOpen:
        return
    opener = 'xdg-open' if platform.system() == "Linux" else 'open'
    os.system('%s %s &' % (opener, path))
def _run(self, str, func, *args):
    # Echo a progress label, run func(*args), and report success/failure.
    # A falsy return aborts the entire program via exit().
    self._echo(str)
    if func(*args):
        print '成功'
        logging.debug('%s... 成功' % (str))
    else:
        print('失败\n[*] 退出程序')
        logging.debug('%s... 失败' % (str))
        logging.debug('[*] 退出程序')
        exit()
def _echo(self, str):
    """Write `str` to stdout without a trailing newline and flush at once."""
    stream = sys.stdout
    stream.write(str)
    stream.flush()
def _printQR(self, mat):
    # Render a QR matrix row by row on a VT100 terminal: truthy modules as
    # black background cells, falsy ones as white.
    for i in mat:
        BLACK = '\033[40m \033[0m'
        WHITE = '\033[47m \033[0m'
        print ''.join([BLACK if j else WHITE for j in i])
def _str2qr(self, str):
    """Echo `str` and render it as an ASCII QR code on the terminal."""
    print(str)
    code = qrcode.QRCode()
    code.border = 1
    code.add_data(str)
    code.make()
    # Image-file and matrix-based renderers were considered but direct
    # ASCII output is used instead.
    code.print_ascii(invert=True)
def _transcoding(self, data):
    """Normalize Python-2 text: pass `unicode` through, decode utf-8 `str`.

    Falsy input is returned unchanged; any other type yields None.
    """
    if not data:
        return data
    if type(data) == unicode:
        return data
    if type(data) == str:
        return data.decode('utf-8')
    return None
def _get(self, url, api=None):
    # GET helper used for all media downloads. Returns the response bytes,
    # or '' on any failure (callers test for `data == ''`).
    request = urllib2.Request(url=url)
    request.add_header('Referer', 'https://wx.qq.com/')
    # Voice/video endpoints get a resume-style Range header.
    if api == 'webwxgetvoice':
        request.add_header('Range', 'bytes=0-')
    if api == 'webwxgetvideo':
        request.add_header('Range', 'bytes=0-')
    try:
        response = urllib2.urlopen(request)
        data = response.read()
        logging.debug(url)
        return data
    except urllib2.HTTPError, e:
        logging.error('HTTPError = ' + str(e.code))
    except urllib2.URLError, e:
        logging.error('URLError = ' + str(e.reason))
    except httplib.HTTPException, e:
        logging.error('HTTPException')
    except Exception:
        import traceback
        logging.error('generic exception: ' + traceback.format_exc())
    return ''
def _post(self, url, params, jsonfmt=True):
    # POST helper. With jsonfmt=True the params are sent as a JSON body and
    # the response is parsed as JSON (via _decode_dict); otherwise the params
    # are form-encoded and raw bytes are returned. '' on any failure.
    if jsonfmt:
        request = urllib2.Request(url=url, data=json.dumps(params))
        request.add_header(
            'ContentType', 'application/json; charset=UTF-8')
    else:
        request = urllib2.Request(url=url, data=urllib.urlencode(params))
    try:
        response = urllib2.urlopen(request)
        data = response.read()
        if jsonfmt:
            return json.loads(data, object_hook=_decode_dict)
        return data
    except urllib2.HTTPError, e:
        logging.error('HTTPError = ' + str(e.code))
    except urllib2.URLError, e:
        logging.error('URLError = ' + str(e.reason))
    except httplib.HTTPException, e:
        logging.error('HTTPException')
    except Exception:
        import traceback
        logging.error('generic exception: ' + traceback.format_exc())
    return ''
def _xiaodoubi(self, word):
    """Ask the xiaodoubi chat-bot for a reply to `word`.

    Best-effort: any failure (network error, bad response) yields a canned
    fallback reply instead of propagating.
    """
    url = 'http://www.xiaodoubi.com/bot/chat.php'
    try:
        r = requests.post(url, data={'chat': word})
        return r.content
    # FIX: narrowed the original bare `except:` so KeyboardInterrupt and
    # SystemExit are no longer swallowed; the best-effort fallback remains.
    except Exception:
        return "让我一个人静静 T_T..."
def _simsimi(self, word):
    """Query the SimSimi sandbox API; returns its reply or a canned fallback."""
    key = ''
    url = 'http://sandbox.api.simsimi.com/request.p?key=%s&lc=ch&ft=0.0&text=%s' % (
        key, word)
    ans = requests.get(url).json()
    # result == '100' signals a successful answer from the API.
    if ans['result'] == '100':
        return ans['response']
    return '你在说什么,风太大听不清列'
def _searchContent(self, key, content, fmat='attr'):
    """Extract the value named `key` from `content`.

    fmat='attr' matches key="value" attribute syntax; fmat='xml' matches
    <key>value</key> with a CDATA fallback. Returns '未知' on no match.
    """
    if fmat == 'attr':
        patterns = [key + '\s?=\s?"([^"<]+)"']
    elif fmat == 'xml':
        patterns = ['<{0}>([^<]+)</{0}>'.format(key),
                    '<{0}><\!\[CDATA\[(.*?)\]\]></{0}>'.format(key)]
    else:
        patterns = []
    # Try each pattern in order; first hit wins.
    for pattern in patterns:
        found = re.search(pattern, content)
        if found:
            return found.group(1)
    return '未知'
class UnicodeStreamFilter:
    """Python-2 stdout wrapper for narrow console encodings (e.g. cp936).

    Text written through it is round-tripped through the target stream's
    encoding with errors='replace', so unsupported characters become
    replacement marks instead of raising UnicodeEncodeError.
    """

    def __init__(self, target):
        self.target = target
        self.encoding = 'utf-8'
        self.errors = 'replace'
        self.encode_to = self.target.encoding

    def write(self, s):
        # Normalize py2 byte strings to unicode first, then re-encode for
        # the console, replacing anything it cannot represent.
        text = s.decode('utf-8') if type(s) == str else s
        self.target.write(
            text.encode(self.encode_to, self.errors).decode(self.encode_to))

    def flush(self):
        self.target.flush()
# On consoles using the GBK codepage (cp936), wrap stdout so characters
# outside that encoding are replaced instead of raising on write.
if sys.stdout.encoding == 'cp936':
    sys.stdout = UnicodeStreamFilter(sys.stdout)
if __name__ == '__main__':
    logger = logging.getLogger(__name__)
    # Colored log output is only installed on non-Windows platforms.
    if not sys.platform.startswith('win'):
        import coloredlogs
        coloredlogs.install(level='DEBUG')
    webwx = WebWeixin()
    webwx.start()
|
test_generator_api.py | import threading
import unittest
import pytest
import numpy
import cupy
from cupy import random
from cupy import testing
from cupy.testing import _condition
from cupy_tests.random_tests import common_distributions
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
                    reason='HIP does not support this')
class GeneratorTestCase(common_distributions.BaseGeneratorTestCase):
    """Base case wiring cupy's Generator (Philox-backed) against numpy's."""

    target_method = None

    def get_rng(self, xp, seed):
        # numpy gets its own Generator so distributions can be compared
        # across backends; cupy gets the new-style Generator API.
        if xp is not cupy:
            return numpy.random.Generator(numpy.random.MT19937(seed))
        bitgen = random._bit_generator.Philox4x3210(seed=seed)
        return cupy.random._generator_api.Generator(bitgen)

    def set_rng_seed(self, seed):
        self.rng.bit_generator = random._bit_generator.Philox4x3210(seed=seed)
class InvalidOutsMixin:
    """Shared checks that ``out=`` buffers with the wrong dtype, contiguity,
    or shape are rejected by the generator method under test."""

    def invalid_dtype_out(self, **kwargs):
        # A float32 out buffer must be rejected with TypeError.
        bad = cupy.zeros((3, 2), dtype=cupy.float32)
        with pytest.raises(TypeError):
            self.generate(size=(3, 2), out=bad, **kwargs)

    def invalid_contiguity(self, **kwargs):
        # A strided (non-contiguous) view is not a valid out buffer.
        bad = cupy.zeros((4, 6), dtype=cupy.float64)[0:3:, 0:2:]
        with pytest.raises(ValueError):
            self.generate(size=(3, 2), out=bad, **kwargs)

    def invalid_shape(self, **kwargs):
        # out shape must match the requested size exactly.
        bad = cupy.zeros((3, 3), dtype=cupy.float64)
        with pytest.raises(ValueError):
            self.generate(size=(3, 2), out=bad, **kwargs)

    def test_invalid_dtype_out(self):
        self.invalid_dtype_out()

    def test_invalid_contiguity(self):
        self.invalid_contiguity()

    def test_invalid_shape(self):
        self.invalid_shape()
@testing.parameterize(*common_distributions.exponential_params)
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.fix_random()
class TestExponential(
    common_distributions.Exponential,
    GeneratorTestCase
):
    """Exponential-distribution tests; all behavior comes from the mixins."""
    pass
@testing.parameterize(*common_distributions.poisson_params)
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.fix_random()
class TestPoisson(
    common_distributions.Poisson,
    GeneratorTestCase
):
    """Poisson-distribution tests; all behavior comes from the mixins."""
    pass
@testing.parameterize(*common_distributions.beta_params)
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.fix_random()
class TestBeta(
    common_distributions.Beta,
    GeneratorTestCase
):
    """Beta-distribution tests; all behavior comes from the mixins."""
    pass
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.fix_random()
class TestStandardExponential(
    InvalidOutsMixin,
    common_distributions.StandardExponential,
    GeneratorTestCase,
):
    """Standard-exponential tests plus the invalid-``out`` checks."""
    pass
@testing.parameterize(*common_distributions.gamma_params)
@testing.gpu
@testing.fix_random()
class TestGamma(
    common_distributions.Gamma,
    GeneratorTestCase,
):
    """Gamma-distribution tests; all behavior comes from the mixins."""
    pass
@testing.parameterize(*common_distributions.standard_gamma_params)
@testing.gpu
@testing.fix_random()
class TestStandardGamma(
    common_distributions.StandardGamma,
    GeneratorTestCase,
):
    """Standard-gamma tests; all behavior comes from the mixins."""
    pass
@testing.gpu
@testing.fix_random()
class TestStandardGammaInvalid(InvalidOutsMixin, GeneratorTestCase):
    """Invalid-argument checks for ``standard_gamma`` (out= and dtype=)."""

    target_method = 'standard_gamma'

    def test_invalid_dtype_out(self):
        self.invalid_dtype_out(shape=1.0)

    def test_invalid_contiguity(self):
        self.invalid_contiguity(shape=1.0)
        # Additionally: a Fortran-ordered buffer is also rejected.
        out = cupy.zeros((4, 6), order='F', dtype=cupy.float64)
        with pytest.raises(ValueError):
            self.generate(size=(4, 6), out=out, shape=1.0)

    def test_invalid_shape(self):
        self.invalid_shape(shape=1.0)

    def test_invalid_dtypes(self):
        # Integer/complex/half dtype codes must raise TypeError.
        for dtype in 'bhiqleFD':
            with pytest.raises(TypeError):
                self.generate(size=(3, 2), shape=1.0, dtype=dtype)
@testing.gpu
@testing.fix_random()
class TestStandardGammaEmpty(GeneratorTestCase):
    """Zero-size edge cases for ``standard_gamma``."""

    target_method = 'standard_gamma'

    def test_empty_shape(self):
        # An empty `shape` array yields an equally empty result.
        y = self.generate(shape=cupy.empty((1, 0)))
        assert y.shape == (1, 0)

    def test_empty_size(self):
        y = self.generate(1.0, size=(1, 0))
        assert y.shape == (1, 0)

    def test_empty_out(self):
        # An empty out buffer is returned as-is.
        out = cupy.empty((1, 0))
        y = self.generate(cupy.empty((1, 0)), out=out)
        assert y is out
        assert y.shape == (1, 0)
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.parameterize(*common_distributions.standard_normal_params)
@testing.fix_random()
class TestStandardNormal(
    common_distributions.StandardNormal,
    GeneratorTestCase
):
    """Standard-normal tests; all behavior comes from the mixins."""
    pass
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.fix_random()
class TestStandardNormalInvalid(InvalidOutsMixin, GeneratorTestCase):
    """Invalid-argument checks for ``standard_normal``."""

    target_method = 'standard_normal'

    def test_invalid_dtypes(self):
        # Integer/complex/half dtype codes must raise TypeError.
        for dtype in 'bhiqleFD':
            with pytest.raises(TypeError):
                self.generate(size=(3, 2), dtype=dtype)
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.fix_random()
class TestIntegers(GeneratorTestCase):
    """Tests for ``Generator.integers``: bound/size/dtype handling plus
    Kolmogorov-Smirnov uniformity checks over several ranges."""

    target_method = 'integers'

    def test_integers_1(self):
        self.generate(3)

    def test_integers_2(self):
        self.generate(3, 4, size=(3, 2))

    def test_integers_empty1(self):
        self.generate(3, 10, size=0)

    def test_integers_empty2(self):
        self.generate(3, size=(4, 0, 5))

    def test_integers_overflow(self):
        # Bounds given as narrow numpy scalar types must not wrap around.
        self.generate(numpy.int8(-100), numpy.int8(100))

    def test_integers_float1(self):
        self.generate(-1.2, 3.4, 5)

    def test_integers_float2(self):
        self.generate(6.7, size=(2, 3))

    def test_integers_int64_1(self):
        self.generate(2**34, 2**40, 3)

    @_condition.repeat_with_success_at_least(10, 3)
    def test_integers_ks(self):
        self.check_ks(0.05)(
            low=100, high=1000, size=2000)

    @_condition.repeat_with_success_at_least(10, 3)
    def test_integers_ks_low(self):
        self.check_ks(0.05)(
            low=100, size=2000)

    @_condition.repeat_with_success_at_least(10, 3)
    def test_integers_ks_large(self):
        # Exercise the 64-bit sampling path.
        self.check_ks(0.05)(
            low=2**34, high=2**40, size=2000)

    @_condition.repeat_with_success_at_least(10, 3)
    def test_integers_ks_large2(self):
        self.check_ks(0.05)(
            2**40, size=2000)
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.fix_random()
class TestRandom(InvalidOutsMixin, GeneratorTestCase):
    # TODO(niboshi):
    # Test soundness of distribution.
    # Currently only reproducibility is checked.
    target_method = 'random'

    def test_random(self):
        self.generate(3)

    @testing.for_dtypes('fd')
    @_condition.repeat_with_success_at_least(10, 3)
    def test_random_ks(self, dtype):
        # Kolmogorov-Smirnov check of uniformity for float32/float64.
        self.check_ks(0.05)(size=2000, dtype=dtype)
@testing.parameterize(*common_distributions.geometric_params)
@testing.with_requires('numpy>=1.17.0')
@testing.fix_random()
class TestGeometric(
    common_distributions.Geometric,
    GeneratorTestCase
):
    """Geometric-distribution tests; all behavior comes from the mixins."""
    pass
@testing.parameterize(*common_distributions.hypergeometric_params)
@testing.with_requires('numpy>=1.17.0')
@testing.fix_random()
class TestHypergeometric(
    common_distributions.Hypergeometric,
    GeneratorTestCase
):
    """Hypergeometric-distribution tests; all behavior comes from the mixins."""
    pass
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
                    reason='HIP does not support this')
class TestRandomStateThreadSafe(unittest.TestCase):
    """Concurrent ``default_rng`` construction must not corrupt RNG state."""

    def test_default_rng_thread_safe(self):
        def _f(func, args=()):
            cupy.cuda.Device().use()
            func(*args)

        seed = 10
        # FIX: `args` must be a tuple. The original wrote
        # args=(cupy.random.default_rng) — parentheses without a comma do not
        # make a tuple, so the function object itself was passed as `args`
        # and Thread raised TypeError when unpacking it with *args.
        threads = [
            threading.Thread(
                target=_f, args=(cupy.random.default_rng, (seed,))),
            threading.Thread(target=_f, args=(cupy.random.default_rng,)),
            threading.Thread(target=_f, args=(cupy.random.default_rng,)),
            threading.Thread(target=_f, args=(cupy.random.default_rng,)),
            threading.Thread(target=_f, args=(cupy.random.default_rng,)),
            threading.Thread(target=_f, args=(cupy.random.default_rng,)),
            threading.Thread(target=_f, args=(cupy.random.default_rng,)),
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        # After the concurrent churn, seeded streams must stay deterministic.
        actual = cupy.random.default_rng(seed).standard_exponential()
        expected = cupy.random.default_rng(seed).standard_exponential()
        assert actual == expected
|
run.py | #!/usr/bin/env python
"""MRIQC run script."""
from .. import config
def main():
    """Entry point.

    Runs the participant-level workflow (built in a subprocess to bound
    memory) and then the group-level report generation, according to
    ``config.workflow.analysis_level``.
    """
    import os
    import sys
    import gc
    from multiprocessing import Process, Manager
    from .parser import parse_args
    from ..utils.bids import write_derivative_description, write_bidsignore

    # Run parser
    parse_args()

    if config.execution.pdb:
        # Drop into a debugger on uncaught exceptions when --pdb was given.
        from mriqc.utils.debug import setup_exceptionhook
        setup_exceptionhook()

    # CRITICAL Save the config to a file. This is necessary because the execution graph
    # is built as a separate process to keep the memory footprint low. The most
    # straightforward way to communicate with the child process is via the filesystem.
    config_file = config.execution.work_dir / ".mriqc.toml"
    config.to_filename(config_file)

    # Set up participant level
    if "participant" in config.workflow.analysis_level:
        config.loggers.cli.log(
            25,
            f"""
    Running MRIQC version {config.environment.version}:
      * BIDS dataset path: {config.execution.bids_dir}.
      * Output folder: {config.execution.output_dir}.
      * Analysis levels: {config.workflow.analysis_level}.
    """,
        )
        # CRITICAL Call build_workflow(config_file, retval) in a subprocess.
        # Because Python on Linux does not ever free virtual memory (VM), running the
        # workflow construction jailed within a process preempts excessive VM buildup.
        with Manager() as mgr:
            from .workflow import build_workflow

            retval = mgr.dict()
            p = Process(target=build_workflow, args=(str(config_file), retval))
            p.start()
            p.join()

            mriqc_wf = retval.get("workflow", None)
            retcode = p.exitcode or retval.get("return_code", 0)

        # CRITICAL Load the config from the file. This is necessary because the ``build_workflow``
        # function executed constrained in a process may change the config (and thus the global
        # state of MRIQC).
        config.load(config_file)

        # A missing workflow is mapped onto EX_SOFTWARE unless an earlier
        # error code already exists.
        retcode = retcode or (mriqc_wf is None) * os.EX_SOFTWARE
        if retcode != 0:
            sys.exit(retcode)

        if mriqc_wf and config.execution.write_graph:
            mriqc_wf.write_graph(graph2use="colored", format="svg", simple_form=True)

        # Clean up master process before running workflow, which may create forks
        gc.collect()

        if not config.execution.dry_run:
            # Warn about submitting measures BEFORE
            if not config.execution.no_sub:
                config.loggers.cli.warning(config.DSA_MESSAGE)
            # run MRIQC
            mriqc_wf.run(**config.nipype.get_plugin())
            # Warn about submitting measures AFTER
            if not config.execution.no_sub:
                config.loggers.cli.warning(config.DSA_MESSAGE)
        config.loggers.cli.log(25, "Participant level finished successfully.")

    # Set up group level
    if "group" in config.workflow.analysis_level:
        from ..utils.bids import DEFAULT_TYPES
        from ..reports import group_html
        from ..utils.misc import generate_tsv  # , generate_pred

        config.loggers.cli.info("Group level started...")

        # Generate reports
        mod_group_reports = []
        for mod in config.execution.modalities or DEFAULT_TYPES:
            output_dir = config.execution.output_dir
            dataframe, out_tsv = generate_tsv(output_dir, mod)
            # If there are no iqm.json files, nothing to do.
            if dataframe is None:
                continue

            config.loggers.cli.info(
                f"Generated summary TSV table for the {mod} data ({out_tsv})"
            )

            # out_pred = generate_pred(derivatives_dir, settings['output_dir'], mod)
            # if out_pred is not None:
            #     log.info('Predicted QA CSV table for the %s data generated (%s)',
            #              mod, out_pred)

            out_html = output_dir / f"group_{mod}.html"
            group_html(
                out_tsv,
                mod,
                csv_failed=output_dir / f"group_variant-failed_{mod}.csv",
                out_file=out_html,
            )
            config.loggers.cli.info(f"Group-{mod} report generated ({out_html})")
            mod_group_reports.append(mod)

        if not mod_group_reports:
            raise Exception("No data found. No group level reports were generated.")

        config.loggers.cli.info("Group level finished successfully.")

    config.loggers.cli.info("Generating BIDS Derivatives metadata")
    write_derivative_description(config.execution.bids_dir, config.execution.output_dir)
    write_bidsignore(config.execution.output_dir)
    config.loggers.cli.info("MRIQC completed")
if __name__ == "__main__":
main()
|
utils.py | from __future__ import print_function
import sys
import time
import threading
import platform
import subprocess
import os
import numpy as np
import matplotlib.pyplot as plt
from fibre.utils import Event
import odrive.enums
from odrive.enums import *
# Terminal color support on Windows needs pywin32 + colorama; a missing
# dependency is non-fatal — features simply degrade.
try:
    if platform.system() == 'Windows':
        import win32console
        import colorama
        colorama.init()
except ImportError:
    print("Could not init terminal features.")
    print("Refer to install instructions at http://docs.odriverobotics.com/#downloading-and-installing-tools")
    sys.stdout.flush()
    pass

# Python 2 compatibility: make input() behave like Python 3's input().
if sys.version_info < (3, 0):
    input = raw_input

# ANSI VT100 escape sequences used for colored terminal output.
_VT100Colors = {
    'green': '\x1b[92;1m',
    'cyan': '\x1b[96;1m',
    'yellow': '\x1b[93;1m',
    'red': '\x1b[91;1m',
    'default': '\x1b[0m'
}
def calculate_thermistor_coeffs(degree, Rload, R_25, Beta, Tmin, Tmax, plot = False):
    """Fit a polynomial mapping normalized divider voltage -> temperature [C].

    Models an NTC thermistor with the Beta-parameter equation
    (https://en.wikipedia.org/wiki/Thermistor#B_or_%CE%B2_parameter_equation)
    in a voltage divider with ``Rload``, then least-squares fits a polynomial
    of the given ``degree`` over [Tmin, Tmax]. Returns the numpy.poly1d fit;
    with ``plot=True`` the actual and fitted curves are also displayed.
    """
    T_25 = 25 + 273.15  # 25 C expressed in Kelvin
    temp_c = np.linspace(Tmin, Tmax, 1000)
    temp_k = temp_c + 273.15
    # Thermistor resistance across the temperature sweep (Beta model).
    r_inf = R_25 * np.exp(-Beta / T_25)
    r_ntc = r_inf * np.exp(Beta / temp_k)
    # Normalized voltage at the divider midpoint.
    v_norm = Rload / (Rload + r_ntc)
    coeffs = np.polyfit(v_norm, temp_c, degree)
    poly = np.poly1d(coeffs)
    if plot:
        print(coeffs)
        plt.plot(v_norm, temp_c, label='actual')
        plt.plot(v_norm, poly(v_norm), label='fit')
        plt.xlabel('normalized voltage')
        plt.ylabel('Temp [C]')
        plt.legend(loc=0)
        plt.show()
    return poly
class OperationAbortedException(Exception):
    """Signals that an in-progress operation was aborted."""
    pass
def set_motor_thermistor_coeffs(axis, Rload, R_25, Beta, Tmin, TMax):
    """Fit a cubic thermistor polynomial and write it into the axis config.

    ``numpy.poly1d`` indexes coefficients by power (coeffs[k] is the x**k
    term), so poly_coefficient_0 receives the cubic term and
    poly_coefficient_3 the constant, matching the original mapping.
    """
    coeffs = calculate_thermistor_coeffs(3, Rload, R_25, Beta, Tmin, TMax)
    cfg = axis.motor_thermistor.config
    cfg.poly_coefficient_0 = float(coeffs[3])
    cfg.poly_coefficient_1 = float(coeffs[2])
    cfg.poly_coefficient_2 = float(coeffs[1])
    cfg.poly_coefficient_3 = float(coeffs[0])
def dump_errors(odrv, clear=False):
    """Print decoded error flags for every axis of an ODrive.

    Walks each axis and its submodules (motor, thermistors, encoder,
    controller), translating each error bitmask into the symbolic constant
    names from ``odrive.enums``. With ``clear=True`` the error registers are
    zeroed after being reported.
    """
    def _codes(prefix):
        # Error-code constants from odrive.enums that match the prefix.
        return {k: v for k, v in odrive.enums.__dict__.items()
                if k.startswith(prefix)}

    axes = sorted((name, axis)
                  for name, axis in odrv._remote_attributes.items()
                  if 'axis' in name)
    for axis_name, axis in axes:
        print(axis_name)
        # (label, remote object, {constant name: bit value})
        submodules = [
            ('axis', axis, _codes("AXIS_ERROR_")),
            ('motor', axis.motor, _codes("MOTOR_ERROR_")),
            ('fet_thermistor', axis.fet_thermistor,
             _codes("THERMISTOR_CURRENT_LIMITER_ERROR")),
            ('motor_thermistor', axis.motor_thermistor,
             _codes("THERMISTOR_CURRENT_LIMITER_ERROR")),
            ('encoder', axis.encoder, _codes("ENCODER_ERROR_")),
            ('controller', axis.controller, _codes("CONTROLLER_ERROR_")),
        ]
        for label, remote_obj, errorcodes in submodules:
            prefix = '  ' + label + ": "
            if remote_obj.error == 0:
                print(prefix + _VT100Colors['green'] + "no error" + _VT100Colors['default'])
                continue
            print(prefix + _VT100Colors['red'] + "Error(s):" + _VT100Colors['default'])
            foundError = False
            for codename, codeval in errorcodes.items():
                if 'ERROR_' in codename and remote_obj.error & codeval != 0:
                    foundError = True
                    print("    " + codename)
            if not foundError:
                print("    " + 'UNKNOWN ERROR!')
            if clear:
                remote_obj.error = 0
def oscilloscope_dump(odrv, num_vals, filename='oscilloscope.csv'):
    """Read *num_vals* oscilloscope samples from the device and write them,
    one value per line, to *filename*."""
    with open(filename, 'w') as out:
        out.writelines(f"{odrv.get_oscilloscope_val(i)}\n" for i in range(num_vals))
data_rate = 100  # liveplotter sampling rate [Hz]
plot_rate = 10  # liveplotter redraw rate [Hz]
num_samples = 1000  # rolling window of samples kept in memory
def start_liveplotter(get_var_callback):
    """
    Starts a liveplotter.
    The variable that is plotted is retrieved from get_var_callback.
    This function returns immediately and the liveplotter quits when
    the user closes it.
    """
    import matplotlib.pyplot as plt
    # NOTE(review): `Event` is assumed to be imported from threading at the
    # top of this module — confirm the file-level imports.
    cancellation_token = Event()
    # Shared sample buffer between the fetch and plot threads; guarded only
    # by the GIL (list append/slice are atomic enough for this UI use).
    global vals
    vals = []
    def fetch_data():
        # Producer thread: poll the callback at ~data_rate Hz.
        global vals
        while not cancellation_token.is_set():
            try:
                data = get_var_callback()
            except Exception as ex:
                # Device glitch: report, back off a second, keep going.
                print(str(ex))
                time.sleep(1)
                continue
            vals.append(data)
            # Keep only the most recent num_samples points.
            if len(vals) > num_samples:
                vals = vals[-num_samples:]
            time.sleep(1/data_rate)
    # TODO: use animation for better UI performance, see:
    # https://matplotlib.org/examples/animation/simple_anim.html
    def plot_data():
        # Consumer thread: redraw the figure at ~plot_rate Hz.
        global vals
        plt.ion()
        # Make sure the script terminates when the user closes the plotter
        def did_close(evt):
            cancellation_token.set()
        fig = plt.figure()
        fig.canvas.mpl_connect('close_event', did_close)
        while not cancellation_token.is_set():
            plt.clf()
            plt.plot(vals)
            plt.legend(list(range(len(vals))))
            fig.canvas.draw()
            fig.canvas.start_event_loop(1/plot_rate)
    fetch_t = threading.Thread(target=fetch_data)
    fetch_t.daemon = True
    fetch_t.start()
    plot_t = threading.Thread(target=plot_data)
    plot_t.daemon = True
    plot_t.start()
    # Caller can .set() this token to stop both threads.
    return cancellation_token;
    #plot_data()
class BulkCapture:
    '''
    Asynchronously captures a bulk set of data when instance is created.
    get_var_callback: a function that returns the data you want to collect (see the example below)
    data_rate: Rate in hz
    length: Length of time to capture in seconds
    Example Usage:
        capture = BulkCapture(lambda :[odrv0.axis0.encoder.pos_estimate, odrv0.axis0.controller.pos_setpoint])
        # Do stuff while capturing (like sending position commands)
        capture.event.wait() # When you're done doing stuff, wait for the capture to be completed.
        print(capture.data) # Do stuff with the data
        capture.plot_data() # Helper method to plot the data
    '''
    def __init__(self,
                 get_var_callback,
                 data_rate=500.0,
                 duration=2.0):
        from threading import Event, Thread
        import numpy as np
        self.get_var_callback = get_var_callback
        # Set when the background capture finishes; wait() on it.
        self.event = Event()
        def loop():
            # Collect [t, *values] rows until *duration* has elapsed.
            vals = []
            start_time = time.monotonic()
            period = 1.0 / data_rate
            while time.monotonic() - start_time < duration:
                try:
                    # get_var_callback is assumed to return a list of numbers
                    # (so it can be prepended with the timestamp) — TODO confirm.
                    data = get_var_callback()
                except Exception as ex:
                    print(str(ex))
                    print("Waiting 1 second before next data point")
                    time.sleep(1)
                    continue
                relative_time = time.monotonic() - start_time
                vals.append([relative_time] + data)
                time.sleep(period - (relative_time % period)) # this ensures consistently timed samples
            self.data = np.array(vals) # A lock is not really necessary due to the event
            print("Capture complete")
            achieved_data_rate = len(self.data) / self.data[-1, 0]
            if achieved_data_rate < (data_rate * 0.9):
                print("Achieved average data rate: {}Hz".format(achieved_data_rate))
                print("If this rate is significantly lower than what you specified, consider lowering it below the achieved value for more consistent sampling.")
            self.event.set() # tell the main thread that the bulk capture is complete
        Thread(target=loop, daemon=True).start()
    def plot(self):
        """Plot the captured columns against the timestamp column, using the
        source text of the capture callback as the figure title."""
        import matplotlib.pyplot as plt
        import inspect
        from textwrap import wrap
        plt.plot(self.data[:,0], self.data[:,1:])
        plt.xlabel("Time (seconds)")
        # Derive a title from the lambda's source; assumes the callback was
        # assigned with an " = " in its defining line — TODO confirm.
        title = (str(inspect.getsource(self.get_var_callback))
                .strip("['\\n']")
                .split(" = ")[1])
        plt.title("\n".join(wrap(title, 60)))
        plt.legend(range(self.data.shape[1]-1))
        plt.show()
def step_and_plot(  axis,
                    step_size=100.0,
                    settle_time=0.5,
                    data_rate=500.0,
                    ctrl_mode=CONTROL_MODE_POSITION_CONTROL):
    """Apply a setpoint step to *axis* while bulk-capturing the response,
    then plot it.

    Temporarily switches the axis into *ctrl_mode* (position or velocity)
    and closed-loop control, applies a relative step of *step_size* after an
    initial settle period, and restores the original control mode afterwards.
    """
    if ctrl_mode is CONTROL_MODE_POSITION_CONTROL:
        get_var_callback = lambda :[axis.encoder.pos_estimate, axis.controller.pos_setpoint]
        initial_setpoint = axis.encoder.pos_estimate
        def set_setpoint(setpoint):
            axis.controller.pos_setpoint = setpoint
    elif ctrl_mode is CONTROL_MODE_VELOCITY_CONTROL:
        get_var_callback = lambda :[axis.encoder.vel_estimate, axis.controller.vel_setpoint]
        initial_setpoint = 0
        def set_setpoint(setpoint):
            axis.controller.vel_setpoint = setpoint
    else:
        print("Invalid control mode")
        return
    initial_settle_time = 0.5
    initial_control_mode = axis.controller.config.control_mode # Set it back afterwards
    print(initial_control_mode)
    axis.controller.config.control_mode = ctrl_mode
    axis.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL
    capture = BulkCapture(get_var_callback,
                          data_rate=data_rate,
                          duration=initial_settle_time + settle_time)
    set_setpoint(initial_setpoint)
    time.sleep(initial_settle_time)
    set_setpoint(initial_setpoint + step_size) # relative/incremental movement
    capture.event.wait() # wait for Bulk Capture to be complete
    axis.requested_state = AXIS_STATE_IDLE
    axis.controller.config.control_mode = initial_control_mode
    capture.plot()
def print_drv_regs(name, motor):
    """Print the current DRV gate-driver fault code and register values for
    the specified motor, with binary renderings of each register."""
    gate = motor.gate_driver
    fault = gate.drv_fault
    status_1 = gate.status_reg_1
    status_2 = gate.status_reg_2
    ctrl_1 = gate.ctrl_reg_1
    ctrl_2 = gate.ctrl_reg_2
    print(f"{name}: {fault}")
    print(f"DRV Fault Code: {fault}")
    print(f"Status Reg 1: {status_1} ({status_1:#010b})")
    print(f"Status Reg 2: {status_2} ({status_2:#010b})")
    print(f"Control Reg 1: {ctrl_1} ({ctrl_1:#013b})")
    print(f"Control Reg 2: {ctrl_2} ({ctrl_2:#09b})")
def show_oscilloscope(odrv):
    """Fetch 18000 oscilloscope samples from the device and display them."""
    size = 18000
    samples = [odrv.get_oscilloscope_val(i) for i in range(size)]
    import matplotlib.pyplot as plt
    plt.plot(samples)
    plt.show()
def rate_test(device):
    """
    Tests how many integers per second can be transmitted
    """
    print("reading 10000 values...")
    frame_count = 10000
    samples = [device.axis0.loop_counter for _ in range(frame_count)]
    # Device control loop runs at 168 MHz / (6 * 3500) ticks -> loops/sec.
    loops_per_frame = (samples[-1] - samples[0]) / frame_count
    loops_per_sec = (168000000/(6*3500))
    print("Frames per second: " + str(loops_per_sec / loops_per_frame))
def usb_burn_in_test(get_var_callback, cancellation_token):
    """
    Starts a background thread that reads values from the USB device in a spin-loop.

    get_var_callback: zero-argument callable performing one device read.
    cancellation_token: threading.Event; set it to stop the loop.
    """
    def fetch_data():
        # Count consecutive successful reads; reset after any failure so the
        # progress message reflects the current uninterrupted streak.
        # (Removed an unused `global vals` left over from start_liveplotter.)
        i = 0
        while not cancellation_token.is_set():
            try:
                get_var_callback()
                i += 1
            except Exception as ex:
                print(str(ex))
                time.sleep(1)
                i = 0
                continue
            if i % 1000 == 0:
                print("read {} values".format(i))
    threading.Thread(target=fetch_data, daemon=True).start()
def yes_no_prompt(question, default=None):
    """Repeatedly prompt *question* until the user answers yes or no.

    Returns True/False for a yes/no answer; an empty answer returns
    *default* when one was given, otherwise the prompt repeats.
    """
    if default is None:
        suffix = " [y/n] "
    else:
        suffix = " [Y/n] " if default == True else " [y/N] "
    question += suffix
    while True:
        print(question, end='')
        answer = input().lower()
        if answer in {'yes', 'y'}:
            return True
        if answer in {'no', 'n'}:
            return False
        if answer == '' and default is not None:
            return default
|
pydnsproxy.py | #!/usr/bin/env python3
import json
import logging
import random
import re
import socket
import ssl
import struct
import urllib.request
from argparse import ArgumentParser
from copy import deepcopy
from datetime import datetime, timedelta
from enum import Enum
from logging import error, info
from socketserver import BaseRequestHandler, DatagramRequestHandler, ThreadingUDPServer
from threading import Thread
from time import sleep
from typing import Any, Callable, Dict, List, Set, \
Optional, Tuple, Union
##########
# Config #
##########
JsonObject = Dict[str, Any]
class ConfigError(Exception):
    """Raised when the JSON configuration is missing keys or has wrong types."""
    pass
def is_type(to_test: Any, type_: Any) -> None:
    """Raise ConfigError unless *to_test* is an instance of *type_*."""
    if isinstance(to_test, type_):
        return
    raise ConfigError(f"Expected {to_test} to be {str(type_)}. Was {str(type(to_test))}.")
def test_is_type():
    """Unit test: is_type passes on a match and raises ConfigError otherwise."""
    # pylint: disable=import-outside-toplevel
    import pytest #type: ignore
    is_type('hello world', str)
    with pytest.raises(ConfigError):
        is_type('hello world', int)
class ConfigBind:
    """Bind (listen) address section of the configuration."""
    host_name: str  # interface/IP to bind the UDP server to
    port: int  # UDP port to bind
    def to_host_tuple(self) -> Tuple[str, int]:
        """Return (host, port) in the form expected by socketserver."""
        return (self.host_name, self.port,)
    @staticmethod
    def validate_json(obj: JsonObject) -> None:
        """Raise ConfigError if *obj* is not a valid bind section."""
        try:
            is_type(obj['host'], str)
            is_type(obj['port'], int)
        except KeyError as err:
            # Renamed from `error`, which shadowed logging.error imported at
            # module level; chain the KeyError so the cause is preserved.
            raise ConfigError(f"Expected {err.args[0]} in bind section.") from err
    @staticmethod
    def from_json(obj: JsonObject) -> 'ConfigBind':
        """Construct a validated ConfigBind from a parsed JSON object."""
        ConfigBind.validate_json(obj)
        bind = ConfigBind()
        bind.host_name = obj['host']
        bind.port = obj['port']
        return bind
def test_config_bind():
    """Unit test: ConfigBind accepts a valid section and rejects an empty one."""
    # pylint: disable=import-outside-toplevel
    import pytest # type: ignore
    correct = {
        'host': '127.0.0.1',
        'port': 53535
    }
    incorrect = {}
    assert ConfigBind.from_json(correct)
    with pytest.raises(ConfigError):
        ConfigBind.from_json(incorrect)
class ResourceKind(Enum):
    """How a block list is retrieved: over HTTP or from a local file."""
    HTTP = 1
    FILE = 2
    @staticmethod
    def from_str(str_: str) -> 'ResourceKind':
        """Parse a (case-insensitive) config string into a ResourceKind."""
        lowered = str_.lower()
        if lowered == 'http':
            return ResourceKind.HTTP
        if lowered == 'file':
            return ResourceKind.FILE
        raise ConfigError(f"Unknown resource kind: {str_}")
def test_resource_kind():
    """Unit test: ResourceKind parses known kinds and rejects unknown ones."""
    # pylint: disable=import-outside-toplevel
    import pytest # type: ignore
    assert ResourceKind.from_str('http') == ResourceKind.HTTP
    assert ResourceKind.from_str('file') == ResourceKind.FILE
    with pytest.raises(ConfigError):
        ResourceKind.from_str('does not work')
class BlockListFormat(Enum):
    """Line format of a block list: bare hostnames or hosts(5)-style lines."""
    ONE_PER_LINE = 1
    HOSTS = 2
    @staticmethod
    def from_str(str_: str) -> 'BlockListFormat':
        """Parse a (case-insensitive) config string into a BlockListFormat."""
        lowered = str_.lower()
        if lowered == 'one per line':
            return BlockListFormat.ONE_PER_LINE
        if lowered == 'hosts':
            return BlockListFormat.HOSTS
        raise ConfigError(f"Unknown block list format: {str_}")
def test_blocklist_format():
    """Unit test: BlockListFormat parses known formats and rejects unknown ones."""
    # pylint: disable=import-outside-toplevel
    import pytest # type: ignore
    assert BlockListFormat.from_str('one per line') == BlockListFormat.ONE_PER_LINE
    assert BlockListFormat.from_str('hosts') == BlockListFormat.HOSTS
    with pytest.raises(ConfigError):
        BlockListFormat.from_str('should not work')
class ConfigBlockList:
    """Block-list source section: where a list lives and how to parse it."""
    resource_kind: ResourceKind  # HTTP or FILE
    location: str  # URL or filesystem path
    list_format: BlockListFormat  # ONE_PER_LINE or HOSTS
    @staticmethod
    def validate_json(obj: JsonObject) -> None:
        """Raise ConfigError if *obj* is not a valid block-list section."""
        try:
            is_type(ResourceKind.from_str(obj['kind']), ResourceKind)
            is_type(obj['location'], str)
            is_type(BlockListFormat.from_str(obj['format']), BlockListFormat)
        except KeyError as err:
            # Renamed from `error`, which shadowed logging.error imported at
            # module level; chain the KeyError so the cause is preserved.
            raise ConfigError(f"Expected {err.args[0]} in block list. Got {str(obj)}") from err
    @staticmethod
    def from_json(obj: JsonObject) -> 'ConfigBlockList':
        """Construct a validated ConfigBlockList from a parsed JSON object."""
        ConfigBlockList.validate_json(obj)
        blocklist = ConfigBlockList()
        blocklist.resource_kind = ResourceKind.from_str(obj['kind'])
        blocklist.location = obj['location']
        blocklist.list_format = BlockListFormat.from_str(obj['format'])
        return blocklist
def test_block_list():
    """Unit test: ConfigBlockList accepts a valid section and rejects an empty one."""
    # pylint: disable=import-outside-toplevel
    import pytest # type: ignore
    correct = {
        'kind': 'http',
        'location': 'https://127.0.0.1:8000/list.txt',
        'format': 'one per line'
    }
    incorrect = {}
    assert ConfigBlockList.from_json(correct)
    with pytest.raises(ConfigError):
        ConfigBlockList.from_json(incorrect)
class ConfigResolver:
    """Upstream DNS-over-TLS resolver section of the configuration."""
    host: str  # resolver IP address
    port: int  # resolver TLS port (conventionally 853)
    tls_hostname: str  # hostname used for TLS certificate validation
    @staticmethod
    def validate_json(obj: JsonObject) -> None:
        """Raise ConfigError if *obj* is not a valid resolver section."""
        try:
            is_type(obj['host'], str)
            is_type(obj['port'], int)
            is_type(obj['tls_hostname'], str)
        except KeyError as err:
            # Chain the KeyError so the original cause is preserved.
            raise ConfigError(f"Expected {err.args[0]} in resolver. Got {str(obj)}") from err
    @staticmethod
    def from_json(obj: JsonObject) -> 'ConfigResolver':
        """Construct a validated ConfigResolver from a parsed JSON object."""
        ConfigResolver.validate_json(obj)
        resolver = ConfigResolver()
        resolver.host = obj['host']
        resolver.port = obj['port']
        resolver.tls_hostname = obj['tls_hostname']
        return resolver
def test_config_resolver():
    """Unit test: ConfigResolver accepts a valid section and rejects an empty one."""
    # pylint: disable=import-outside-toplevel
    import pytest # type: ignore
    correct = {
        'host': '8.8.8.8',
        'port': 853,
        'tls_hostname': 'dns.google'
    }
    incorrect = {}
    assert ConfigResolver.from_json(correct)
    with pytest.raises(ConfigError):
        ConfigResolver.from_json(incorrect)
class Config:
    """Top-level configuration: bind address, block lists, upstream resolvers."""
    bind: ConfigBind
    block_lists: List[ConfigBlockList]
    upgrade_block_lists_minutes: int  # block-list refresh interval
    resolvers: List[ConfigResolver]
    @staticmethod
    def validate_json(obj: JsonObject) -> None:
        """Raise ConfigError if any required top-level section is missing."""
        try:
            is_type(obj['bind_config'], dict)
            is_type(obj['upgrade_block_lists_minutes'], int)
            is_type(obj['block_lists'], list)
            is_type(obj['resolvers'], list)
        except KeyError as err:
            # Renamed from `error`, which shadowed logging.error imported at
            # module level; chain the KeyError so the cause is preserved.
            raise ConfigError(f"Expected {err.args[0]} in config.") from err
    @staticmethod
    def from_json(obj: JsonObject) -> 'Config':
        """Build a Config from a parsed JSON object, validating each section."""
        # Validate root object
        Config.validate_json(obj)
        # Unpack child sections
        bind = ConfigBind.from_json(obj['bind_config'])
        block_lists = [ConfigBlockList.from_json(item) for item in obj['block_lists']]
        resolvers = [ConfigResolver.from_json(item) for item in obj['resolvers']]
        # Construct and return config object
        config = Config()
        config.bind = bind
        config.block_lists = block_lists
        config.upgrade_block_lists_minutes = obj['upgrade_block_lists_minutes']
        config.resolvers = resolvers
        return config
    @staticmethod
    def from_json_file(path: str) -> 'Config':
        """Load a JSON config file (UTF-8) and build a Config from it."""
        with open(path, 'r', encoding='utf-8') as _file:
            obj = json.load(_file)
            return Config.from_json(obj)
def test_config():
    """Unit test: a full, well-formed config parses and keeps both block lists."""
    correct = {
        'bind_config': {
            'host': '127.0.0.1',
            'port': 53535
        },
        'upgrade_block_lists_minutes': 1,
        'block_lists': [
            {
                'kind': 'http',
                'location': 'http://127.0.0.1:8000/list1.txt',
                'format': 'one per line'
            },
            {
                'kind': 'file',
                'location': '/tmp/list2.txt',
                'format': 'hosts'
            }
        ],
        'resolvers': [
            {
                'host': '8.8.8.8',
                'port': 853,
                'tls_hostname': 'dns.google'
            },
            {
                'host': '1.1.1.1',
                'port': 853,
                'tls_hostname': 'cloudflare-dns.com'
            }
        ]
    }
    config = Config.from_json(correct)
    assert config
    assert len(config.block_lists) == 2
########################################
# Block Lists (including auto-updates) #
########################################
class BlockListError(Exception):
    """Raised when a block list cannot be fetched or its kind is unknown."""
    pass
class BlockList:
    """Set of blocked hostnames, refreshable from HTTP/file sources.

    A background thread (see start_upgrade_thread) rebuilds the set
    periodically; readers see an atomic swap of `blocked_domains`.
    """
    def __init__(self):
        self.blocked_domains: Set[str] = set()
        # Strips a trailing "# comment" (optionally whitespace-preceded).
        self.comment_regex: re.Pattern = re.compile(r"((^|\s+)#(.|\s)+)$")
        # For hosts(5) format: strips the leading IP-address column.
        self.hosts_regex: re.Pattern = re.compile(r"^.+\s+")
        self.whitespace_regex: re.Pattern = re.compile(r"\s+")
        self.upgrade_thread: Optional[Thread] = None
    def _extract_hostname(self, input_line: Union[str, bytes],
                          format_: BlockListFormat) -> Optional[str]:
        """Return the hostname on *input_line*, or None for blank/comment lines."""
        # Decode from bytes to string if required
        if isinstance(input_line, bytes):
            line = input_line.decode()
        else:
            line = input_line
        # Firstly, strip comments
        line = self.comment_regex.sub("", line).strip()
        if line == "" or line == '#' or self.whitespace_regex.match(line):
            return None
        # If hosts file format, strip out the leading IP address
        if format_ == BlockListFormat.HOSTS:
            line = self.hosts_regex.sub("", line).strip()
        return line.strip()
    def _upgrade_http(self, url: str, set_: Set[str], format_: BlockListFormat) -> None:
        """Fetch *url* and add every hostname found to *set_*."""
        with urllib.request.urlopen(url) as _http:
            for line in _http:
                hostname = self._extract_hostname(line, format_)
                if hostname:
                    set_.add(hostname)
    def _upgrade_file(self, path: str, set_: Set[str], format_: BlockListFormat) -> None:
        """Read *path* and add every hostname found to *set_*."""
        with open(path, 'r', encoding='utf-8') as _file:
            for line in _file:
                hostname = self._extract_hostname(line, format_)
                if hostname:
                    set_.add(hostname)
    def upgrade(self, config: Config) -> None:
        """Rebuild the blocked-domain set from every configured source.

        Sources that fail are logged and skipped; the new set replaces the
        old one in a single assignment so readers never see a partial list.
        """
        new_block_list: Set[str] = set()
        info("Upgrading block lists...")
        for block_list in config.block_lists:
            try:
                if block_list.resource_kind == ResourceKind.HTTP:
                    self._upgrade_http(block_list.location, new_block_list, block_list.list_format)
                elif block_list.resource_kind == ResourceKind.FILE:
                    self._upgrade_file(block_list.location, new_block_list, block_list.list_format)
                else:
                    # Fixed: the two joined literals previously ran the words
                    # together ("format<kind>") — add the missing space.
                    raise BlockListError("Unknown block list format "
                                         f"{str(block_list.resource_kind)}")
                info(f"Upgraded {block_list.location}")
            except Exception as err: # pylint: disable=broad-except
                error(f"Couldn't update {block_list.location}. Error: {str(err)}")
                continue
        self.blocked_domains = new_block_list
    def start_upgrade_thread(self, config: Config) -> None:
        """Start a daemon thread that refreshes the lists on the configured interval."""
        def upgrader() -> None:
            try:
                info(f"Started block list upgrader thread. Will run every"
                     f" {config.upgrade_block_lists_minutes} minutes.")
                self.upgrade(config)
                next_run = datetime.now() + timedelta(minutes=config.upgrade_block_lists_minutes)
                while True:
                    if datetime.now() > next_run:
                        self.upgrade(config)
                        next_run = datetime.now() + \
                            timedelta(minutes=config.upgrade_block_lists_minutes)
                    else:
                        sleep(5)
            except Exception as err: # pylint: disable=broad-except
                error(f"Unexpected error in upgrader thread: {str(err)}")
        if len(config.block_lists) == 0:
            info("No block lists - not starting block list upgrader thread")
            return
        self.upgrade_thread = Thread(target=upgrader)
        self.upgrade_thread.daemon = True
        self.upgrade_thread.start()
    def is_blocked(self, to_check: str) -> bool:
        """Return True if *to_check* is on the block list.

        Uses set membership (O(1)); the original linearly scanned the set.
        """
        return to_check in self.blocked_domains
def test_block_lists():
    """Unit test: is_blocked reports membership of the blocked-domain set."""
    block_list = BlockList()
    block_list.blocked_domains.add('test.com')
    assert block_list.is_blocked('test.com') is True
    assert block_list.is_blocked('allowed.com') is False
def test_extract_hostname():
    """Unit test: _extract_hostname handles hosts lines, bare names, and comments."""
    block_list = BlockList()
    extract = block_list._extract_hostname # pylint: disable=protected-access
    assert extract("127.0.0.1 example.com", BlockListFormat.HOSTS) == "example.com"
    assert extract("example.com", BlockListFormat.ONE_PER_LINE) == "example.com"
    assert extract("# This is a comment", BlockListFormat.HOSTS) is None
    assert extract("127.0.0.1 example.com # comment", BlockListFormat.HOSTS) == "example.com"
def test_upgrade_file():
    """Unit test: _upgrade_file loads hostnames from a temp one-per-line file."""
    # pylint: disable=import-outside-toplevel,protected-access
    from tempfile import NamedTemporaryFile # type: ignore
    with NamedTemporaryFile() as temp:
        temp.write("google.com\nask.com".encode())
        temp.flush()
        lists = BlockList()
        set_: Set[str] = set()
        lists._upgrade_file(temp.name, set_, BlockListFormat.ONE_PER_LINE)
        assert 'google.com' in set_
        assert 'ask.com' in set_
        assert 'example.com' not in set_
def test_upgrade_http():
    """Unit test: _upgrade_http loads hostnames served by a local HTTP server."""
    # pylint: disable=import-outside-toplevel,protected-access,redefined-outer-name,reimported
    from http.server import HTTPServer, BaseHTTPRequestHandler # type: ignore
    from threading import Thread # type: ignore
    class Server(HTTPServer):
        # Ignore the connection-reset noise from closing wfile early.
        def handle_error(self, _a, _b):
            pass
    class Handler(BaseHTTPRequestHandler):
        # pylint: disable=invalid-name
        def do_GET(self):
            self.send_response(200)
            self.end_headers()
            self.wfile.write("google.com\nask.com\n".encode())
            self.wfile.close()
    # NOTE(review): binds a fixed port (8001); will fail if it is in use.
    httpd = Server(('127.0.0.1', 8001), Handler)
    runner = Thread(target=httpd.serve_forever)
    runner.start()
    lists = BlockList()
    set_: Set[str] = set()
    lists._upgrade_http("http://127.0.0.1:8001/", set_, BlockListFormat.ONE_PER_LINE)
    httpd.shutdown()
    runner.join()
    assert 'google.com' in set_
    assert 'ask.com' in set_
    assert 'example.com' not in set_
#######################
# DNS Message Parsing #
#######################
class DNSMessageError(Exception):
    """Raised when a DNS message is malformed or unsupported for parsing."""
    pass
class DNSMessage:
    """Minimal wrapper around a raw DNS query message (wire format)."""
    # pylint: disable=too-few-public-methods
    bytes_: bytes
    def __init__(self, bytes_: bytes):
        self.bytes_ = bytes_
    def questions(self) -> int:
        """Return the QDCOUNT header field (number of questions)."""
        return struct.unpack("!H", self.bytes_[4:6])[0]
    def to_nxdomain(self) -> bytes:
        """Return a copy of this message rewritten as an NXDOMAIN response.

        Overwrites the two flag bytes: QR=1/RD=1 and RA=1/RCODE=3.
        """
        bytes_ = bytearray(deepcopy(self.bytes_))
        bytes_[2] = 0x81
        bytes_[3] = 0x83
        return bytes(bytes_)
    def hostname(self):
        """Return the dotted hostname from the single question section.

        Raises DNSMessageError for multi-question or truncated messages.
        """
        try:
            if self.questions() != 1:
                raise DNSMessageError("Will only parse DNS messages that have one question")
            # Jump to first question
            hostname = ""
            offset = 12
            while True:
                # Get the current size. If zero, we've finished
                size = int(self.bytes_[offset]); offset += 1
                if size == 0:
                    break
                this = self.bytes_[offset:(offset + size)]; offset += size
                hostname += this.decode() + "."
            return hostname[:-1]
        except IndexError as err:
            # BUG FIX: the original literal lacked the f-prefix, so the
            # "{len(...)}" placeholder was printed verbatim; also chain the cause.
            raise DNSMessageError(f"Message was too small at {len(self.bytes_)} bytes") from err
def test_dns_hostname():
    """Unit test: hostname() decodes the question name from a real query."""
    from binascii import unhexlify
    msg_bytes = unhexlify("e47201200001000000000001046d61"
                          "696c06676f6f676c6503636f6d0000"
                          "0100010000291000000000000000")
    msg = DNSMessage(msg_bytes)
    assert msg.hostname() == 'mail.google.com'
def test_dns_questions():
    """Unit test: questions() reads QDCOUNT from a real query."""
    from binascii import unhexlify
    msg_bytes = unhexlify("e47201200001000000000001046d61"
                          "696c06676f6f676c6503636f6d0000"
                          "0100010000291000000000000000")
    msg = DNSMessage(msg_bytes)
    assert msg.questions() == 1
#######################
# DNS to DNS-over-TLS #
#######################
def proxy_request(req: bytes, upstream: ConfigResolver) -> bytes:
    """Forward one raw DNS query to *upstream* over DNS-over-TLS.

    Opens a TLS connection validated against upstream.tls_hostname, sends
    the query with the 2-byte big-endian length prefix used by DNS-over-TCP/
    TLS framing, and returns the response payload (without its prefix).
    """
    host = upstream.host
    port = upstream.port
    hostname = upstream.tls_hostname
    ctx = ssl.create_default_context()
    with socket.create_connection((host, port)) as sock:
        with ctx.wrap_socket(sock, server_hostname=hostname) as ssock:
            ssock.write(struct.pack("!H", len(req)))
            ssock.write(req)
            # Read the 2-byte length prefix, then exactly that many bytes.
            resp_size = struct.unpack("!H", ssock.read(len=2))[0]
            resp = ssock.read(len=resp_size)
            return resp
class ProxyHandler(DatagramRequestHandler):
    """Handles one UDP DNS query.

    Blocked hostnames are answered locally with NXDOMAIN; everything else
    (including unparseable messages) is forwarded to a randomly chosen
    upstream DoT resolver.
    """
    def handle(self):
        msg = self.rfile.read()
        # self.server is a ProxyServer carrying .config and .block_list.
        server = random.choice(self.server.config.resolvers)
        try:
            dns_msg = DNSMessage(msg)
            hostname = dns_msg.hostname()
            if self.server.block_list.is_blocked(hostname):
                # Answer locally with NXDOMAIN; no upstream round-trip.
                self.wfile.write(dns_msg.to_nxdomain())
                return
        except DNSMessageError:
            # Unparseable message: fall through and let the upstream decide.
            pass
        self.wfile.write(proxy_request(msg, server))
class ProxyServer(ThreadingUDPServer):
    """Threaded UDP server that exposes config and block_list to handlers."""
    config: Config
    def __init__(self, config: Config, block_list: BlockList,
                 handler: Callable[..., BaseRequestHandler]):
        # Attributes must be set before super().__init__, which may start
        # serving (handlers read self.server.config / self.server.block_list).
        self.config = config
        self.block_list = block_list
        super().__init__(config.bind.to_host_tuple(), handler)
################
# Entry Points #
################
def parse_args() -> Config:
    """Parse command-line arguments and load the referenced config file."""
    parser = ArgumentParser()
    parser.add_argument("-c", "--config", required=True,
                        help="Path to a configuration file")
    args = parser.parse_args()
    return Config.from_json_file(args.config)
def main() -> None:
    """Entry point: load config, start list upgrades, and serve until Ctrl-C."""
    logging.getLogger().setLevel(logging.INFO)
    config = parse_args()
    block_list = BlockList()
    block_list.start_upgrade_thread(config)
    try:
        server = ProxyServer(config, block_list, ProxyHandler)
        server.serve_forever()
    except KeyboardInterrupt:
        info("Done!")
# Script entry point.
if __name__ == '__main__':
    main()
|
pyglview.py | #!/usr/bin/env python3
import logging
import os
import signal
import sys
import threading
import time
import numpy as np
from easydict import EasyDict as edict
# Shared state for the C-style GLUT callbacks, which cannot carry instance
# state: last click position, window height, click-pending flag, and the
# pixel sample patch size used by glReadPixels.
mouse_x = 0
mouse_y = 0
height = 720
get_input = False
sample_size = (5 , 5)
try:
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import OpenGL.GLUT
AVAILABLE_OPENGL = True
except Exception as e:
print(e)
print("Error: Does not exist OpenGL library")
print(" > sudo apt install -y libxmu-dev libxi-dev # install before GPU driver")
print(" > pip3 install PyOpenGL PyOpenGL-accelerate")
AVAILABLE_OPENGL = False
logger = logging.getLogger(__name__)
def to_bool(s):
    """Interpret the int 1 and common truthy string spellings as True."""
    truthy = (1, 'True', 'TRUE', 'true', '1', 'yes', 'Yes', 'Y', 'y', 't')
    return s in truthy
def handler(signum, frame):
    # SIGINT (Ctrl-C) handler: terminate the process immediately.
    exit(0)
signal.signal(signal.SIGINT, handler)
# Default viewer settings; individual keys can be overridden via Viewer(**kwargs).
config = edict()
config.viewer = edict({"window_name": "Screen", "vsync": False, "double_buffer": False, "rgba_buffer": False, "fullscreen": False, "window_x": 100, "window_y": 100, "window_width": 1280, "window_height": 720, "cpu": False})
def get_config():
    """Return the configuration as a plain dict of section dicts.

    NOTE(review): `config` is an EasyDict (a dict subclass), which has no
    `.sections()` — that is a configparser API, so the original call raised
    AttributeError. Iterating the dict's keys yields the intended
    {section_name: {...}} mapping.
    """
    return {section: dict(config[section]) for section in config}
class Viewer:
    """Image viewer with three rendering back-ends, chosen at start():
    direct OpenGL/GLUT when a display and PyOpenGL are available, an
    iTerm2 (imgcat) renderer when headless, or an OpenCV window otherwise.

    Feed frames with set_image(); an optional idle callback (set_loop)
    is invoked once per render iteration.
    """
    def init(self, kargs):
        """Apply keyword overrides, then coerce config fields to bool/int
        (values loaded from config may arrive as strings)."""
        for k in kargs:
            setattr(self, k, kargs[k])
        def s_bool(s, k):
            setattr(s, k, to_bool(getattr(s, k)))
        def s_int(s, k):
            setattr(s, k, int(getattr(s, k)))
        s_bool(self, "vsync")
        s_bool(self, "double_buffer")
        s_bool(self, "rgba_buffer")
        s_bool(self, "fullscreen")
        s_int(self, "window_x")
        s_int(self, "window_y")
        s_int(self, "window_width")
        s_int(self, "window_height")
        s_bool(self, "cpu")
        logger.debug(f"Window:{self.window_width}")
    def __init__(self, **kargs):
        global config
        self.keyboard_listener = None
        self.cnt = 0          # frames rendered since last FPS report
        self.tm = 0           # timestamp of last FPS report
        self.cnt2 = 0         # idle iterations since last FPS report
        self.image_buffer = None
        self.destructor_function = None
        self.idle_function = None
        self.previous_time = time.time()
        # Defaults come from the module-level config, then kwargs override.
        cv = config.viewer
        for k in cv:
            setattr(self, k, cv[k])
        self.init(kargs)
    def set_window_name(self, name):
        """Set the window title used when the window is created."""
        self.window_name = name
    def set_sample_size(self, size):
        """Set the (w, h) pixel patch sampled by the click handler."""
        global sample_size
        sample_size = size
    def set_image(self, img):
        """Queue the next frame to display (consumed by the render loop)."""
        self.image_buffer = img
    def set_loop(self, func):
        """Register a callback invoked once per render iteration."""
        self.idle_function = func
    def set_sample_cb(self, func):
        """Register a callback receiving pixels sampled on mouse click."""
        self.sample_cb = func
    def set_destructor(self, func):
        """Register a callback invoked before exiting on quit keys."""
        self.destructor_function = func
    def enable_fullscreen(self):
        self.fullscreen = True
    def disable_fullscreen(self):
        # BUG FIX: previously assigned True, making it impossible to turn
        # fullscreen off again.
        self.fullscreen = False
    def enable_vsync(self):
        """Best-effort vsync via the CGL API; falls back to driver defaults."""
        # NOTE(review): CGL is a macOS API, yet this returns early on darwin —
        # looks inverted, but preserved as-is since behavior is best-effort.
        if "darwin" in sys.platform:
            return
        try:
            import ctypes
            import ctypes.util
            ogl = ctypes.cdll.LoadLibrary(ctypes.util.find_library("OpenGL"))
            v = ctypes.c_int(1)
            ogl.CGLGetCurrentContext.argtypes = []
            ogl.CGLGetCurrentContext.restype = ctypes.c_void_p
            ogl.CGLSetParameter.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
            ogl.CGLSetParameter.restype = ctypes.c_int
            context = ogl.CGLGetCurrentContext()
            ogl.CGLSetParameter(context, 222, ctypes.pointer(v))
            logger.debug("Enabled vsync")
        except Exception as e:
            logger.warning("Unable to set vsync mode, using driver defaults: {}".format(e))
    def start(self, **kargs):
        """Select a back-end and enter the (blocking) render loop."""
        global AVAILABLE_OPENGL
        self.init(kargs)
        # Decide the window type from platform + DISPLAY availability.
        window_type = "offscreen"
        if "linux" in sys.platform:
            if 'DISPLAY' in os.environ:
                logger.debug(f"DISPLAY: {os.environ['DISPLAY']}")
                if os.environ['DISPLAY'] == ':0':
                    window_type = "primary"
                else:
                    # Remote/virtual display: don't trust direct GL.
                    AVAILABLE_OPENGL = False
                    window_type = "virtual"
            else:
                AVAILABLE_OPENGL = False
        else:
            window_type = "primary"
        logger.debug(f"WindowType: {window_type}")
        logger.debug(f"Available OpenGL: {AVAILABLE_OPENGL}")
        logger.debug(f"GPU: {self.cpu is False}")
        if self.cpu is False and AVAILABLE_OPENGL:
            logger.info("")
            logger.info("---- Use GPU directly ----")
            logger.info("")
            args = []
            logger.debug(f"VSync: {self.vsync}")
            if self.vsync:
                args.append('-sync')
                self.enable_vsync()
            logger.debug(f"ARGS: {args}")
            w = self.window_width
            h = self.window_height
            x = self.window_x
            y = self.window_y
            glutInit(args)
            DB = GLUT_SINGLE
            CL = GLUT_RGB
            if self.double_buffer:
                DB = GLUT_DOUBLE
                logger.debug("Use double buffer")
            else:
                logger.debug("Use single buffer")
            if self.rgba_buffer:
                CL = GLUT_RGBA
                logger.debug("Use rgba buffer")
            else:
                logger.debug("Use rgb buffer")
            glutInitDisplayMode(CL | DB | GLUT_DEPTH)
            glutInitWindowSize(w, h)
            glutInitWindowPosition(x, y)
            glutCreateWindow(self.window_name)
            if self.fullscreen: glutFullScreen()
            glutDisplayFunc(self.__gl_draw)
            glutIdleFunc(self.__gl_draw)
            glutReshapeFunc(self.__gl_resize)
            #glutMouseFunc(self.__gl_mouse)
            glutKeyboardFunc(self.__gl_keyboard)
            glutSpecialFunc(self.__gl_keyboard)
            glClearColor(0.0, 0.0, 0.0, 1.0)
            glEnable(GL_DEPTH_TEST)
            glMatrixMode(GL_PROJECTION)
            glLoadIdentity()
            glOrtho(-1, 1, -1, 1, -1, 1)
            glutMainLoop()  # blocks until the GLUT loop exits
        else:
            if window_type == "offscreen":
                # Headless: push frames to an iTerm2 inline-image renderer
                # running in a separate process.
                #import cv2
                import imgcat
                import queue
                import multiprocessing
                def iterm2_renderer(q):
                    while True:
                        img = q.get()
                        print("\033[0;0f")
                        imgcat.imgcat(img)
                if True:
                    q = multiprocessing.Queue()
                    th = multiprocessing.Process(target=iterm2_renderer, args=(q, ), daemon=True)
                    th.start()
                else:
                    q = queue.Queue()
                    th = threading.Thread(target=iterm2_renderer, args=(q, ))
                    th.setDaemon(True)
                    th.start()
                logger.warning("@WARNING: No display.")
                logger.warning("---- No display: iTerm2 renderer will be used ----")
                while True:
                    if self.idle_function is not None:
                        try:
                            self.idle_function()
                        except Exception as e:
                            logger.error(e)
                            return
                    if self.image_buffer is not None:
                        try:
                            self.cnt += 1
                            #self.image_buffer = cv2.cvtColor(self.image_buffer,cv2.COLOR_BGR2RGB)
                            if time.time() - self.tm > 1.0:
                                #logger.info(f"\033[0KViewer[N/A]-FPS {self.cnt}\033[1A")
                                self.tm = time.time()
                                self.cnt = 0
                            # Drop frames when the renderer is behind.
                            if q.empty():
                                q.put(self.image_buffer)
                            #imgcat.imgcat(self.image_buffer)
                            time.sleep(0.008)
                        except Exception as e:
                            logger.error(e)
                            return
                        self.image_buffer = None
                    else:
                        time.sleep(0.008)
            else:
                # CPU fallback: render with an OpenCV window, letterboxing the
                # frame into the configured window size.
                import cv2
                if self.cpu is False: logger.warning("@WARNING: GPU or physical display is not available.")
                logger.warning("---- Use CPU(OpenCV) renderer ----")
                buffer = np.zeros(shape=(self.window_height, self.window_width, 3), dtype=np.uint8)
                if self.fullscreen:
                    cv2.namedWindow(self.window_name, cv2.WINDOW_NORMAL)
                    # cv2.namedWindow(self.window_name, cv2.WINDOW_OPENGL)
                    cv2.setWindowProperty(self.window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
                else:
                    cv2.namedWindow(self.window_name, cv2.WINDOW_AUTOSIZE | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL)
                    # cv2.namedWindow(self.window_name, cv2.WINDOW_NORMAL)
                    # pass
                    # cv2.namedWindow(self.window_name, cv2.WINDOW_GUI_NORMAL)
                while True:
                    if self.idle_function is not None:
                        try:
                            self.idle_function()
                        except:
                            cv2.waitKey(1)
                            cv2.destroyAllWindows()
                            cv2.waitKey(1)
                            return
                    if self.image_buffer is not None:
                        try:
                            self.cnt += 1
                            self.image_buffer = cv2.cvtColor(self.image_buffer, cv2.COLOR_BGR2RGB)
                            buffer.fill(0)
                            w = self.window_width
                            h = self.window_height
                            iw = self.image_buffer.shape[1]
                            ih = self.image_buffer.shape[0]
                            img = self.image_buffer
                            # Fit the image inside the window, preserving
                            # aspect ratio, and center it (letterbox).
                            r = w / h
                            ir = iw / ih
                            ratio = 1.0
                            if r > ir:
                                ratio = h / ih
                                img = cv2.resize(img, (int(img.shape[1] * ratio), int(img.shape[0] * ratio)))
                                hlf = int((w - img.shape[1]) / 2)
                                buffer[0:img.shape[0], hlf:img.shape[1] + hlf, ] = img
                            else:
                                ratio = w / iw
                                img = cv2.resize(img, (int(img.shape[1] * ratio), int(img.shape[0] * ratio)))
                                hlf = int((h - img.shape[0]) / 2)
                                buffer[hlf:img.shape[0] + hlf, 0:img.shape[1], ] = img
                            if time.time() - self.tm > 1.0:
                                logger.info(f"\033[0KViewer[CV2]-FPS {self.cnt}\033[1A")
                                self.tm = time.time()
                                self.cnt = 0
                            if self.fullscreen:
                                cv2.imshow(self.window_name, self.image_buffer)
                            else:
                                cv2.imshow(self.window_name, buffer)
                            # ESC quits.
                            if cv2.waitKey(8) & 0xFF == 27:
                                cv2.waitKey(1)
                                cv2.destroyAllWindows()
                                cv2.waitKey(1)
                                return
                        except Exception as e:
                            logger.warning(e)
                            cv2.waitKey(1)
                            cv2.destroyAllWindows()
                            cv2.waitKey(1)
                            return
                        self.image_buffer = None
                    else:
                        time.sleep(0.008)
    def __gl_resize(self, Width, Height): # Retina problem.
        # Query the actual framebuffer viewport instead of trusting the
        # callback arguments (HiDPI/Retina scaling mismatch).
        x, y, w, h = glGetIntegerv(GL_VIEWPORT)
        self.window_width = w
        self.window_height = h
        #glViewport(0, 0, w, h)
        # glViewport(0, 0, Width, Height)
        #glfwGetFramebufferSize(window, &width, &height);
        # glViewport(0, 0, int(self.window_width), int(self.window_height))
    def __gl_mouse(element, button, state, x, y):
        # NOTE(review): first parameter plays the role of `self`; currently
        # unregistered (the glutMouseFunc call in start() is commented out).
        global get_input, height, mouse_x, mouse_y
        if button == GLUT_LEFT_BUTTON and state == GLUT_DOWN:
            print("CLICK on %s, %s, %s, %s" % (button, state, x, y))
            #self.__mouse_cb(x, y)
            mouse_x = x
            # GL pixel origin is bottom-left; GLUT reports top-left.
            mouse_y = height - y
            get_input = True
    def __gl_keyboard(self, key, x, y):
        # Normal keys arrive as bytes; special keys as ints (offset to avoid
        # colliding with the byte-derived codes).
        if type(key) == bytes:
            key = ord(key)
        else:
            key = 0x0100 + key
        if self.keyboard_listener: self.keyboard_listener(key, x, y)
        # 113 == ord('q'); quit keys run the destructor then exit.
        if key == b'q' or key == 113 or key == b'\x1b' or key == b'\x03':
            if self.destructor_function is not None:
                logger.info("Call destructor function")
                self.destructor_function()
            exit(9)
            return
    def __gl_draw(self):
        """GLUT display/idle callback: upload the queued frame as a texture
        and draw it on an aspect-ratio-preserving quad."""
        global mouse_x, mouse_y, get_input
        self.cnt2 += 1
        if self.idle_function is not None: self.idle_function()
        if self.image_buffer is not None:
            try:
                self.cnt += 1
                if time.time() - self.tm > 1.0:
                    logger.info(f"\033[0KViewer[GPU]-FPS {self.cnt} Idle {self.cnt2}\033[1A")
                    self.tm = time.time()
                    self.cnt = 0
                    self.cnt2 = 0
                    # Exit the GL loop if the main thread has died.
                    for i in threading.enumerate():
                        if i.name == "MainThread":
                            if i.is_alive() is False:
                                exit(9)
                                return
                glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
                glColor3f(1.0, 1.0, 1.0)
                glMatrixMode(GL_PROJECTION)
                glLoadIdentity()
                glOrtho(-1, 1, -1, 1, -1, 1)
                glMatrixMode(GL_MODELVIEW)
                glLoadIdentity()
                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, self.image_buffer.shape[1], self.image_buffer.shape[0], 0, GL_RGB, GL_UNSIGNED_BYTE, self.image_buffer)
                glEnable(GL_TEXTURE_2D)
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
                glBegin(GL_QUADS)
                glTexCoord2d(0.0, 1.0)
                w = self.window_width
                h = self.window_height
                iw = self.image_buffer.shape[1]
                ih = self.image_buffer.shape[0]
                # Shrink one axis of the quad so the image keeps its aspect.
                r = w / h
                ir = iw / ih
                x_ratio = 1.0
                y_ratio = 1.0
                if r > ir:
                    x_ratio = ir / r
                else:
                    y_ratio = r / ir
                glVertex3d(-x_ratio, -y_ratio, 0.0)
                glTexCoord2d(1.0, 1.0)
                glVertex3d(x_ratio, -y_ratio, 0.0)
                glTexCoord2d(1.0, 0.0)
                glVertex3d(x_ratio, y_ratio, 0.0)
                glTexCoord2d(0.0, 0.0)
                glVertex3d(-x_ratio, y_ratio, 0.0)
                glEnd()
                glFlush()
                if get_input:
                    # Sample a patch at the last click and hand it to the
                    # registered sample callback.
                    c = glReadPixels(mouse_x, mouse_y, *sample_size, GL_RGB, GL_UNSIGNED_BYTE, None)
                    print(c.hex())
                    self.sample_cb(c)
                    get_input=False
                if self.double_buffer:
                    glutSwapBuffers()
            except Exception as e:
                logger.error(e)
                exit(9)
                return
            self.image_buffer = None
        # Simple frame limiter (~125 Hz max).
        if time.time() - self.previous_time < 0.008:
            time.sleep(0.005)
        self.previous_time = time.time()
if __name__ == '__main__':
    # Demo mode: play a video file through the Viewer.
    import cv2
    import argparse
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter("%(asctime)s [%(filename)s:%(lineno)d] %(message)s"))
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    # NOTE(review): this clears the handler just added above, leaving only
    # coloredlogs' root handler in effect — confirm intent.
    logger.handlers.clear()
    import coloredlogs
    coloredlogs.install()
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('input', type=str, help='')
    parser.add_argument('--codec', type=str, default="libx265", help='')
    parser.add_argument('--quality', type=int, default=None, help='')
    parser.add_argument('--quality_adjust', type=int, default=None, help='+6=low, +3=middle, high=0')
    parser.add_argument('--quality_test', action='store_true')
    parser.add_argument('--resolution', type=int, default=0, help='0/1280/1920')
    parser.add_argument('--test', action='store_true')
    parser.add_argument('--animation', action='store_true')
    parser.add_argument('--volume', type=str, default=None, help='')
    parser.add_argument('--disable_two_pass', action='store_true')
    parser.add_argument('--generate', action='store_true')
    parser.add_argument('--disable_audio_normalize', action='store_true')
    args = parser.parse_args()
    viewer = Viewer(cpu=False, fullscreen=False)
    # viewer = Viewer(opengl_direct=False)
    # viewer = Viewer(window_width=512,window_height=512,fullscreen=True,opengl_direct=True)
    # viewer = Viewer(window_width=512,window_height=512,fullscreen=True,opengl_direct=False)
    cap = cv2.VideoCapture(os.path.expanduser(args.input))
    if cap is None:
        logger.debug("Could not detect capture fd")
        exit(9)
    def loop():
        # Per-iteration callback: read one frame and queue it for display.
        check, frame = cap.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        if check:
            viewer.set_image(frame)
    viewer.set_loop(loop)
    viewer.start()
    logger.debug("Main thread ended")
else:
    logger.addHandler(logging.NullHandler())
|
face5.py | import cv2
import requests,json
import threading,time
import numpy as np
from insightface.app import FaceAnalysis
# Open the default webcam via DirectShow and request 720p @ 30 FPS.
video = cv2.VideoCapture(0,cv2.CAP_DSHOW)
video.set(cv2.CAP_PROP_FPS,30)
video.set(cv2.CAP_PROP_FRAME_WIDTH,1280)
video.set(cv2.CAP_PROP_FRAME_HEIGHT,720)
# InsightFace face detector; ctx_id=0 selects the first compute device.
app=FaceAnalysis()
app.prepare(ctx_id=0,det_size=(640,640))
faces="0"
nowtime=0
# 10 seconds between POSTs
waittime=10
timestate=False
# 0: wait 1: active
fps=0
fpsc=0
fpstime=time.time()
def POST(Data):
    """POST the detected people count as JSON to the configured endpoint.

    Runs on a background thread, so failures are only printed, never raised.

    Args:
        Data: number of detected people, sent as ``numPeople``.
    """
    # POST destination
    # test endpoint
    url ="https://httpbin.org/post"
    # production endpoint
    #url="http://133.167.122.196:8080/api/posts"
    try:
        # Fix: a timeout keeps the worker thread from hanging forever when the
        # server is unreachable; RequestException covers connection errors too.
        response=requests.post(url,json={"numPeople":Data},timeout=10)
    except requests.RequestException as e:
        print("Post Failed :%s" % e)
        return
    if(response.status_code==200):
        print("POST Success")
    else:
        print("Post Failed :%d"%response.status_code)
        print(response.text)
# Main capture loop: detect faces each frame, overlay the counts, and POST
# the current head count every `waittime` seconds on a background thread.
while video.isOpened():
    fpsc=fpsc+1
    if(time.time()-fpstime>1):
        # Refresh the displayed FPS once per second.
        fps=fpsc
        fpsc=0
        fpstime=time.time()
    # Read one frame
    ret, frame = video.read()
    # Stop when no frame could be read (the stream has ended)
    if not ret: break
    height = frame.shape[0]
    width = frame.shape[1]
    faces = app.get(np.asarray(frame))
    # Number of recognized faces
    cv2.putText(frame,"Human:{0}".format(len(faces)),(10,100), cv2.FONT_HERSHEY_PLAIN, 3, (0,0,255), 2, cv2.LINE_AA)
    cv2.putText(frame,"FPS:{0}".format(fps),(10,200),cv2.FONT_HERSHEY_PLAIN,3,(255,0,0),2,cv2.LINE_AA)
    if not timestate:
        # Arm the timer for the next POST.
        timestate=True
        nowtime=time.time()
    elif time.time()-nowtime>waittime:
        # Fix: build the thread *now* so the posted count reflects the current
        # frame — the original created the thread when the timer was armed and
        # posted a 10-second-old count when it finally started it.
        Post_thread=threading.Thread(target=POST,args=(len(faces),))
        Post_thread.start()
        timestate=False
    # Draw the detections and show the frame
    frame=app.draw_on(frame, faces)
    cv2.imshow('frame', frame)
    # Press 'q' to quit
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'): break
# Release resources
video.release()
cv2.destroyAllWindows()
|
pipeline.py | from multiprocessing import Process,Queue
import multiprocessing as multiprocessing
import importlib
import hashlib
import random
import time
import os
import yaml
from signal import signal, SIGCHLD
import sys
def read_pipelines(fname):
    """Load pipeline definitions from the YAML file *fname*.

    Each pipeline maps a name to a list of component strings; any
    ``{thisdir}`` placeholder in a component is expanded to the absolute
    directory containing *fname*.

    Returns the dict of pipeline name -> list of expanded component strings.
    """
    base_dir = os.path.dirname(os.path.abspath(fname))
    with open(fname) as stream:
        pipelines = yaml.load(stream, Loader=yaml.BaseLoader)
    for name in pipelines:
        pipelines[name] = [component.format(thisdir=base_dir) for component in pipelines[name]]
    return pipelines
class Pipeline:
    """A chain of worker processes connected by bounded multiprocessing queues.

    Each step is a ``tnparser`` module run in its own process; items of the
    form ``(batch_id, text)`` flow from ``q_in`` through every step and are
    collected from ``q_out``.
    """
    def __init__(self, steps, extra_args=None):
        """Build the pipeline.

        steps: list of "module_name param param ..." strings, one per stage.
        extra_args: optional argparse.Namespace whose "module.argname"
        attributes are forwarded to the matching stage.
        """
        self.ctx=multiprocessing.get_context()
        self.job_counter=0   # batches submitted but not yet retrieved
        self.done_jobs={}    # batch_id -> result that finished out of order
        self.max_q_size=5    # bound the queues so producers block instead of growing memory
        self.q_in=self.ctx.Queue(self.max_q_size) #where to send data to the whole pipeline
        self.q_out=self.q_in #where to receive data from the whole pipeline
        self.modules = []
        self.processes=[]
        for mod_name_and_params in steps:
            self.add_step(mod_name_and_params, extra_args)
        try:
            signal(SIGCHLD, self.handle_sigchld)
        except ValueError:
            # signal() only works from the main thread; degrade gracefully.
            print(
                "Warning: could not install SIGCHLD handler. "
                "Pipeline will not terminate if children exit abnormally."
            )
    def handle_sigchld(self, signum, frame):
        """SIGCHLD handler: reap dead children; abort if a stage crashed."""
        while 1:
            pid, exitno = os.waitpid(0, os.WNOHANG)
            if pid == 0:
                # No more terminated children to reap.
                return
            if exitno == 0:
                # Clean exit; keep reaping.
                continue
            # Identify which stage died so the error message is useful.
            for module, process in zip(self.modules, self.processes):
                if process.pid != pid:
                    continue
                print(
                    f"Error: pipeline stage died with exit code {exitno}: {module}",
                    file=sys.stderr,
                    flush=True
                )
            sys.exit(-64)
    def join(self):
        """Block until every stage process has terminated."""
        for p in self.processes:
            p.join()
    def is_alive(self):
        """Return True only while every stage process is still running."""
        for p in self.processes:
            if not p.is_alive():
                return False
        return True
    def add_step(self,module_name_and_params, extra_args):
        """Append one stage: import tnparser.<module>, parse its parameters,
        and spawn its worker wired between the current tail queue and a fresh
        output queue."""
        config=module_name_and_params.split()
        module_name=config[0]
        params=config[1:]
        # collect extra arguments from command line meant for this particular module
        if extra_args is not None:
            for _name, _value in extra_args.__dict__.items():
                if _name.startswith(module_name):
                    _modname,_argname=_name.split(".",1) # for example lemmatizer_mod.gpu
                    params.append("--"+_argname)
                    params.append(str(_value))
        mod=importlib.import_module("tnparser."+module_name)
        step_in=self.q_out
        self.q_out=self.ctx.Queue(self.max_q_size) #new pipeline end
        args=mod.argparser.parse_args(params)
        # Daemonize so stray stages die with the parent process.
        process=self.ctx.Process(target=mod.launch,args=(args,step_in,self.q_out))
        process.daemon=True
        process.start()
        self.modules.append(module_name_and_params)
        self.processes.append(process)
    def send_final(self):
        """Send the end-of-input sentinel through the pipeline."""
        self.q_in.put(("FINAL",""))
    def put(self,txt,final=False):
        """Start parsing a job, return id which can be used to retrieve the result"""
        # Random salt makes the id unique even for identical input texts.
        batch_id=hashlib.md5((str(random.random())+txt).encode("utf-8")).hexdigest()
        self.q_in.put((batch_id,txt)) #first job of 1 total
        self.job_counter+=1
        if final:
            self.q_in.put(("FINAL",""))
        return batch_id
    def get(self,batch_id):
        """Fetch a finished batch.

        With ``batch_id`` None, return whichever batch finishes next.
        Otherwise return the requested batch if it is ready; any other batch
        that comes out meanwhile is stashed in ``done_jobs`` and None is
        returned so the caller retries.
        """
        if batch_id is None: #get any next batch, don't care about batch_id
            _,finished=self.q_out.get()
            self.job_counter-=1
            return finished
        elif batch_id in self.done_jobs:
            self.job_counter-=1
            return self.done_jobs.pop(batch_id)
        else:
            #get the next job, maybe it's the one?
            finished_id,finished=self.q_out.get()
            if finished_id==batch_id:
                self.job_counter-=1
                return finished
            else: #something got done, but it's not the right one
                self.done_jobs[finished_id]=finished
                return None #whoever asked will have to ask again
    def parse(self,txt):
        """Parse one text synchronously, polling get() until the result arrives."""
        job_id=self.put(txt)
        while True:
            res=self.get(job_id)
            if res is None:
                time.sleep(0.1)
            else:
                break
        return res
    def parse_batched(self,inp,):
        """inp: is a file-like object with input data
        yield_res:
        """
        # Not implemented yet.
        pass
|
test_ki.py | import outcome
import pytest
import sys
import os
import signal
import threading
import contextlib
import time
from async_generator import (
async_generator, yield_, isasyncgenfunction, asynccontextmanager
)
from ... import _core
from ...testing import wait_all_tasks_blocked
from ..._util import signal_raise, is_main_thread
from ..._timeouts import sleep
from .tutil import slow
def ki_self():
    """Deliver SIGINT to the current process, simulating a control-C."""
    signal_raise(signal.SIGINT)
def test_ki_self():
    # Sanity check: raising SIGINT in the main thread yields KeyboardInterrupt.
    with pytest.raises(KeyboardInterrupt):
        ki_self()
async def test_ki_enabled():
    """Check default KI-protection in various contexts and that the
    enable/disable decorators override it, including through start_soon()
    and on (sync) generators."""
    # Regular tasks aren't KI-protected
    assert not _core.currently_ki_protected()

    # Low-level call-soon callbacks are KI-protected
    token = _core.current_trio_token()
    record = []

    def check():
        record.append(_core.currently_ki_protected())

    token.run_sync_soon(check)
    await wait_all_tasks_blocked()
    assert record == [True]

    @_core.enable_ki_protection
    def protected():
        assert _core.currently_ki_protected()
        unprotected()

    @_core.disable_ki_protection
    def unprotected():
        assert not _core.currently_ki_protected()

    protected()

    @_core.enable_ki_protection
    async def aprotected():
        assert _core.currently_ki_protected()
        await aunprotected()

    @_core.disable_ki_protection
    async def aunprotected():
        assert not _core.currently_ki_protected()

    await aprotected()

    # make sure that the decorator here overrides the automatic manipulation
    # that start_soon() does:
    async with _core.open_nursery() as nursery:
        nursery.start_soon(aprotected)
        nursery.start_soon(aunprotected)

    @_core.enable_ki_protection
    def gen_protected():
        assert _core.currently_ki_protected()
        yield

    for _ in gen_protected():
        pass

    @_core.disable_ki_protection
    def gen_unprotected():
        assert not _core.currently_ki_protected()
        yield

    for _ in gen_unprotected():
        pass
# This used to be broken due to
#
# https://bugs.python.org/issue29590
#
# Specifically, after a coroutine is resumed with .throw(), then the stack
# makes it look like the immediate caller is the function that called
# .throw(), not the actual caller. So child() here would have a caller deep in
# the guts of the run loop, and always be protected, even when it shouldn't
# have been. (Solution: we don't use .throw() anymore.)
async def test_ki_enabled_after_yield_briefly():
    """KI-protection state must be preserved across a checkpoint.

    Regression test for https://bugs.python.org/issue29590: resuming a
    coroutine with ``.throw()`` used to make the immediate caller look like
    the thrower, so the protection lookup walked the wrong stack.
    (Fix in this edit: removed leftover debug ``traceback.print_stack()``
    calls that polluted the test output.)
    """
    @_core.enable_ki_protection
    async def protected():
        await child(True)

    @_core.disable_ki_protection
    async def unprotected():
        await child(False)

    async def child(expected):
        # Check both before and after a checkpoint: the resume-after-yield
        # path is where the stack used to get confused.
        assert _core.currently_ki_protected() == expected
        await _core.checkpoint()
        assert _core.currently_ki_protected() == expected

    await protected()
    await unprotected()
# This also used to be broken due to
# https://bugs.python.org/issue29590
async def test_generator_based_context_manager_throw():
    """KI protection must be restored correctly when a @contextmanager
    generator is resumed via .throw() (i.e. the body raised)."""
    @contextlib.contextmanager
    @_core.enable_ki_protection
    def protected_manager():
        assert _core.currently_ki_protected()
        try:
            yield
        finally:
            # Must still be protected when resumed — even via .throw().
            assert _core.currently_ki_protected()

    with protected_manager():
        assert not _core.currently_ki_protected()

    with pytest.raises(KeyError):
        # This is the one that used to fail
        with protected_manager():
            raise KeyError
async def test_agen_protection():
    """The KI decorators must work on async generators in either stacking
    order with @async_generator, and the protection must not leak out into
    the code that iterates the generator."""
    @_core.enable_ki_protection
    @async_generator
    async def agen_protected1():
        assert _core.currently_ki_protected()
        try:
            await yield_()
        finally:
            # Still protected when resumed for cleanup.
            assert _core.currently_ki_protected()

    @_core.disable_ki_protection
    @async_generator
    async def agen_unprotected1():
        assert not _core.currently_ki_protected()
        try:
            await yield_()
        finally:
            assert not _core.currently_ki_protected()

    # Swap the order of the decorators:
    @async_generator
    @_core.enable_ki_protection
    async def agen_protected2():
        assert _core.currently_ki_protected()
        try:
            await yield_()
        finally:
            assert _core.currently_ki_protected()

    @async_generator
    @_core.disable_ki_protection
    async def agen_unprotected2():
        assert not _core.currently_ki_protected()
        try:
            await yield_()
        finally:
            assert not _core.currently_ki_protected()

    for agen_fn in [
        agen_protected1,
        agen_protected2,
        agen_unprotected1,
        agen_unprotected2,
    ]:
        async for _ in agen_fn():  # noqa
            # The iterating code itself must remain unprotected.
            assert not _core.currently_ki_protected()

        # asynccontextmanager insists that the function passed must itself be an
        # async gen function, not a wrapper around one
        if isasyncgenfunction(agen_fn):
            async with asynccontextmanager(agen_fn)():
                assert not _core.currently_ki_protected()

            # Another case that's tricky due to:
            # https://bugs.python.org/issue29590
            with pytest.raises(KeyError):
                async with asynccontextmanager(agen_fn)():
                    raise KeyError
# Test the case where there's no magic local anywhere in the call stack
def test_ki_enabled_out_of_context():
    # Outside of any Trio run, code is unprotected by default.
    assert not _core.currently_ki_protected()
def test_ki_disabled_in_del():
    """A frame named ``__del__`` is treated as KI-protected (finalizers can
    run anywhere), and that protection must not leak into its callers."""
    def nestedfunction():
        return _core.currently_ki_protected()

    def __del__():
        # Inside a (simulated) __del__, protection is forced on...
        assert _core.currently_ki_protected()
        # ...and calls made from it inherit the protection.
        assert nestedfunction()

    __del__()
    # Back outside, the default (unprotected) applies again.
    assert not nestedfunction()
def test_ki_protection_works():
async def sleeper(name, record):
try:
while True:
await _core.checkpoint()
except _core.Cancelled:
record.add((name + " ok"))
async def raiser(name, record):
try:
# os.kill runs signal handlers before returning, so we don't need
# to worry that the handler will be delayed
print("killing, protection =", _core.currently_ki_protected())
ki_self()
except KeyboardInterrupt:
print("raised!")
# Make sure we aren't getting cancelled as well as siginted
await _core.checkpoint()
record.add((name + " raise ok"))
raise
else:
print("didn't raise!")
# If we didn't raise (b/c protected), then we *should* get
# cancelled at the next opportunity
try:
await _core.wait_task_rescheduled(
lambda _: _core.Abort.SUCCEEDED
)
except _core.Cancelled:
record.add((name + " cancel ok"))
# simulated control-C during raiser, which is *unprotected*
print("check 1")
record = set()
async def check_unprotected_kill():
async with _core.open_nursery() as nursery:
nursery.start_soon(sleeper, "s1", record)
nursery.start_soon(sleeper, "s2", record)
nursery.start_soon(raiser, "r1", record)
with pytest.raises(KeyboardInterrupt):
_core.run(check_unprotected_kill)
assert record == {"s1 ok", "s2 ok", "r1 raise ok"}
# simulated control-C during raiser, which is *protected*, so the KI gets
# delivered to the main task instead
print("check 2")
record = set()
async def check_protected_kill():
async with _core.open_nursery() as nursery:
nursery.start_soon(sleeper, "s1", record)
nursery.start_soon(sleeper, "s2", record)
nursery.start_soon(
_core.enable_ki_protection(raiser), "r1", record
)
# __aexit__ blocks, and then receives the KI
with pytest.raises(KeyboardInterrupt):
_core.run(check_protected_kill)
assert record == {"s1 ok", "s2 ok", "r1 cancel ok"}
# kill at last moment still raises (run_sync_soon until it raises an
# error, then kill)
print("check 3")
async def check_kill_during_shutdown():
token = _core.current_trio_token()
def kill_during_shutdown():
assert _core.currently_ki_protected()
try:
token.run_sync_soon(kill_during_shutdown)
except _core.RunFinishedError:
# it's too late for regular handling! handle this!
print("kill! kill!")
ki_self()
token.run_sync_soon(kill_during_shutdown)
with pytest.raises(KeyboardInterrupt):
_core.run(check_kill_during_shutdown)
# KI arrives very early, before main is even spawned
print("check 4")
class InstrumentOfDeath:
def before_run(self):
ki_self()
async def main():
await _core.checkpoint()
with pytest.raises(KeyboardInterrupt):
_core.run(main, instruments=[InstrumentOfDeath()])
# checkpoint_if_cancelled notices pending KI
print("check 5")
@_core.enable_ki_protection
async def main():
assert _core.currently_ki_protected()
ki_self()
with pytest.raises(KeyboardInterrupt):
await _core.checkpoint_if_cancelled()
_core.run(main)
# KI arrives while main task is not abortable, b/c already scheduled
print("check 6")
@_core.enable_ki_protection
async def main():
assert _core.currently_ki_protected()
ki_self()
await _core.cancel_shielded_checkpoint()
await _core.cancel_shielded_checkpoint()
await _core.cancel_shielded_checkpoint()
with pytest.raises(KeyboardInterrupt):
await _core.checkpoint()
_core.run(main)
# KI arrives while main task is not abortable, b/c refuses to be aborted
print("check 7")
@_core.enable_ki_protection
async def main():
assert _core.currently_ki_protected()
ki_self()
task = _core.current_task()
def abort(_):
_core.reschedule(task, outcome.Value(1))
return _core.Abort.FAILED
assert await _core.wait_task_rescheduled(abort) == 1
with pytest.raises(KeyboardInterrupt):
await _core.checkpoint()
_core.run(main)
# KI delivered via slow abort
print("check 8")
@_core.enable_ki_protection
async def main():
assert _core.currently_ki_protected()
ki_self()
task = _core.current_task()
def abort(raise_cancel):
result = outcome.capture(raise_cancel)
_core.reschedule(task, result)
return _core.Abort.FAILED
with pytest.raises(KeyboardInterrupt):
assert await _core.wait_task_rescheduled(abort)
await _core.checkpoint()
_core.run(main)
# KI arrives just before main task exits, so the run_sync_soon machinery
# is still functioning and will accept the callback to deliver the KI, but
# by the time the callback is actually run, main has exited and can't be
# aborted.
print("check 9")
@_core.enable_ki_protection
async def main():
ki_self()
with pytest.raises(KeyboardInterrupt):
_core.run(main)
print("check 10")
# KI in unprotected code, with
# restrict_keyboard_interrupt_to_checkpoints=True
record = []
async def main():
# We're not KI protected...
assert not _core.currently_ki_protected()
ki_self()
# ...but even after the KI, we keep running uninterrupted...
record.append("ok")
# ...until we hit a checkpoint:
with pytest.raises(KeyboardInterrupt):
await sleep(10)
_core.run(main, restrict_keyboard_interrupt_to_checkpoints=True)
assert record == ["ok"]
record = []
# Exact same code raises KI early if we leave off the argument, doesn't
# even reach the record.append call:
with pytest.raises(KeyboardInterrupt):
_core.run(main)
assert record == []
# KI arrives while main task is inside a cancelled cancellation scope
# the KeyboardInterrupt should take priority
print("check 11")
@_core.enable_ki_protection
async def main():
assert _core.currently_ki_protected()
with _core.CancelScope() as cancel_scope:
cancel_scope.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
ki_self()
with pytest.raises(KeyboardInterrupt):
await _core.checkpoint()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
_core.run(main)
def test_ki_is_good_neighbor():
    # in the unlikely event someone overwrites our signal handler, we leave
    # the overwritten one be
    try:
        orig = signal.getsignal(signal.SIGINT)

        def my_handler(signum, frame):  # pragma: no cover
            pass

        async def main():
            # Replace the SIGINT handler from inside the run...
            signal.signal(signal.SIGINT, my_handler)

        _core.run(main)
        # ...and Trio's teardown must not have restored its own handler over it.
        assert signal.getsignal(signal.SIGINT) is my_handler
    finally:
        signal.signal(signal.SIGINT, orig)
# Regression test for #461
def test_ki_with_broken_threads():
    """Trio must still install its SIGINT handler even when the main thread
    is missing from threading._active (regression test for gh-461)."""
    thread = threading.main_thread()
    # scary!
    original = threading._active[thread.ident]
    # put this in a try finally so we don't have a chance of cascading a
    # breakage down to everything else
    try:
        del threading._active[thread.ident]

        @_core.enable_ki_protection
        async def inner():
            # Trio should have swapped out the default handler despite the
            # broken threading bookkeeping.
            assert signal.getsignal(
                signal.SIGINT
            ) != signal.default_int_handler

        _core.run(inner)
    finally:
        threading._active[thread.ident] = original
# For details on why this test is non-trivial, see:
# https://github.com/python-trio/trio/issues/42
# https://github.com/python-trio/trio/issues/109
# To make it an even better test, we should try doing
# pthread_kill(pthread_self, SIGINT)
# in the child thread, to make sure signals in non-main threads also wake up
# the main loop... but currently that test would fail (see gh-109 again).
@slow
def test_ki_wakes_us_up():
assert is_main_thread()
# This test is flaky due to a race condition on Windows; see:
# https://github.com/python-trio/trio/issues/119
# https://bugs.python.org/issue30038
# I think the only fix is to wait for fixed CPython to be released, so in
# the mean time, on affected versions we send two signals (equivalent to
# hitting control-C twice). This works because the problem is that the C
# level signal handler does
#
# write-to-fd -> set-flags
#
# and we need
#
# set-flags -> write-to-fd
#
# so running the C level signal handler twice does
#
# write-to-fd -> set-flags -> write-to-fd -> set-flags
#
# which contains the desired sequence.
#
# Affected version of CPython include:
# - all versions of 3.5 (fix will not be backported)
# - 3.6.1 and earlier
# It's fixed in 3.6.2 and 3.7+
#
# PyPy was never affected.
#
# The problem technically occurs on Unix as well, if a signal is delivered
# to a non-main thread, and if we were relying on the wakeup fd to wake
# us. Currently we don't use the wakeup fd on Unix anyway, though (see
# gh-109).
#
# There's also this theoretical problem, but hopefully it won't actually
# bite us in practice:
# https://bugs.python.org/issue31119
# https://bitbucket.org/pypy/pypy/issues/2623
import platform
buggy_wakeup_fd = (
os.name == "nt" and platform.python_implementation() == "CPython"
and sys.version_info < (3, 6, 2)
)
# lock is only needed to avoid an annoying race condition where the
# *second* ki_self() call arrives *after* the first one woke us up and its
# KeyboardInterrupt was caught, and then generates a second
# KeyboardInterrupt that aborts the test run. The kill_soon thread holds
# the lock while doing the calls to ki_self, which means that it holds it
# while the C-level signal handler is running. Then in the main thread,
# when we're woken up we know that ki_self() has been run at least once;
# if we then take the lock it guaranteeds that ki_self() has been run
# twice, so if a second KeyboardInterrupt is going to arrive it should
# arrive by the time we've acquired the lock. This lets us force it to
# happen inside the pytest.raises block.
#
# It will be very nice when the buggy_wakeup_fd bug is fixed.
lock = threading.Lock()
def kill_soon():
# We want the signal to be raised after the main thread has entered
# the IO manager blocking primitive. There really is no way to
# deterministically interlock with that, so we have to use sleep and
# hope it's long enough.
time.sleep(1.1)
with lock:
print("thread doing ki_self()")
ki_self()
if buggy_wakeup_fd:
print("buggy_wakeup_fd =", buggy_wakeup_fd)
ki_self()
async def main():
thread = threading.Thread(target=kill_soon)
print("Starting thread")
thread.start()
try:
with pytest.raises(KeyboardInterrupt):
# To limit the damage on CI if this does get broken (as
# compared to sleep_forever())
print("Going to sleep")
try:
await sleep(20)
print("Woke without raising?!") # pragma: no cover
# The only purpose of this finally: block is to soak up the
# second KeyboardInterrupt that might arrive on
# buggy_wakeup_fd platforms. So it might get aborted at any
# moment randomly on some runs, so pragma: no cover avoids
# coverage flapping:
finally: # pragma: no cover
print("waiting for lock")
with lock:
print("got lock")
# And then we want to force a PyErr_CheckSignals. Which is
# not so easy on Windows. Weird kluge: builtin_repr calls
# PyObject_Repr, which does an unconditional
# PyErr_CheckSignals for some reason.
print(repr(None))
# And finally, it's possible that the signal was delivered
# but at a moment when we had KI protection enabled, so we
# need to execute a checkpoint to ensure it's delivered
# before we exit main().
await _core.checkpoint()
finally:
print("joining thread", sys.exc_info())
thread.join()
start = time.perf_counter()
try:
_core.run(main)
finally:
end = time.perf_counter()
print("duration", end - start)
print("sys.exc_info", sys.exc_info())
assert 1.0 <= (end - start) < 2
|
run_local_test.py | """run local test in starting kit"""
# pylint: disable=logging-fstring-interpolation
import argparse
import logging
import os
from os.path import join, isdir
import shutil
from multiprocessing import Process
VERBOSITY_LEVEL = 'WARNING'
logging.basicConfig(
level=getattr(logging, VERBOSITY_LEVEL),
format='%(asctime)s %(levelname)s %(filename)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
def _here(*args):
here = os.path.dirname(os.path.realpath(__file__))
return os.path.join(here, *args)
def _ingestion_program(starting_kit_dir):
return join(starting_kit_dir, 'ingestion', 'ingestion.py')
def _scoring_program(starting_kit_dir):
return join(starting_kit_dir, 'scoring', 'score.py')
def remove_dir(output_dir):
    """Delete *output_dir* recursively if it exists.

    Used to clean the output of the previous local-test run; a missing
    directory is silently ignored.
    """
    if not isdir(output_dir):
        return
    logging.info(
        f"Cleaning existing output directory of last run: {output_dir}")
    shutil.rmtree(output_dir)
def _clean(starting_kit_dir):
    """Remove the output directories left behind by a previous local run."""
    for leftover in ('sample_result_submission', 'scoring_output'):
        remove_dir(join(starting_kit_dir, leftover))
def run(dataset_dir, code_dir):
    """Launch ingestion and scoring as two concurrent shell-command processes.

    dataset_dir: directory with the dataset (and solution) files.
    code_dir: directory with the submission code (`model.py` etc.).
    """
    # Current directory containing this script
    starting_kit_dir = _here()
    path_ingestion = _ingestion_program(starting_kit_dir)
    path_scoring = _scoring_program(starting_kit_dir)

    # Run ingestion and scoring at the same time
    command_ingestion = (
        'python '
        # f'{path_ingestion} --dataset_dir={dataset_dir}/data '
        f'{path_ingestion} --dataset_dir={dataset_dir}/train.data'
        f' --code_dir={code_dir}')
    command_scoring = (
        # f'python {path_scoring} --solution_dir={dataset_dir}/solution')
        f'python {path_scoring} --solution_dir={dataset_dir}')

    def run_ingestion():
        os.system(command_ingestion)

    def run_scoring():
        os.system(command_scoring)

    ingestion_process = Process(name='ingestion', target=run_ingestion)
    scoring_process = Process(name='scoring', target=run_scoring)
    # Wipe outputs of any previous run before starting a fresh one.
    _clean(starting_kit_dir)
    ingestion_process.start()
    scoring_process.start()
    # NOTE(review): the processes are deliberately not join()ed — this
    # function returns while ingestion/scoring are still running.
def _parse_args():
    """Build the CLI parser and return the parsed arguments."""
    kit_dir = _here()
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--dataset_dir', type=str,
                            default=join(kit_dir, 'data', '15'),
                            help="Directory storing the dataset, should contain"
                            "'data' and 'solution'")
    arg_parser.add_argument('--code_dir', type=str,
                            default=join(kit_dir, 'code_submission'),
                            help="Directory storing the submission code "
                            "`model.py` and other necessary packages.")
    return arg_parser.parse_args()
def main():
    """Entry point: parse the CLI arguments and start the local test."""
    args = _parse_args()
    logging.info("#" * 50)
    logging.info("Begin running local test using")
    logging.info(f"code_dir = {args.code_dir}")
    logging.info(f"dataset_dir = {args.dataset_dir}")
    logging.info("#" * 50)
    run(args.dataset_dir, args.code_dir)
if __name__ == '__main__':
main()
|
node.py | #!./env python
import xmlrpc.client
import random
from functools import reduce
from multiprocessing import Manager, Process
from timer_utils import *
class AppendEntriesRequest:
    """Raft AppendEntries RPC request (doubles as a heartbeat when empty)."""

    @classmethod
    def from_json(cls, json):
        """Rebuild a request from its JSON/dict form (as received over RPC)."""
        return cls(**json)

    def __init__(self, term, serverid, prevLogIndex=0, prevLogTerm=0, commitIndex=0, entries=None):
        self.term = term
        self.serverid = serverid
        self.prevLogIndex = prevLogIndex
        self.prevLogTerm = prevLogTerm
        self.commitIndex = commitIndex
        # Fix: entries defaulted to a shared mutable [] — every request built
        # with the default would alias (and could corrupt) the same list.
        if entries is None:
            entries = []
        # Entries may arrive as plain dicts over XML-RPC; normalize to Entry.
        self.entries = [Entry.from_json(entry) if isinstance(entry, dict) else entry for entry in entries]

    def __repr__(self):
        return '[term]{}: [id]{}: [prevIndex]{}: [prevTerm]{}: [commitIndex]{}: [entries]{}'.format(self.term, self.serverid, self.prevLogIndex, self.prevLogTerm, self.commitIndex, self.entries)
class AppendEntriesReply:
    """Raft AppendEntries RPC reply from a follower."""

    @classmethod
    def from_json(cls, json):
        """Rebuild a reply from its JSON/dict form."""
        return cls(**json)

    def __init__(self, term, success, matchIndex=0):
        """Record the follower's term and whether the append succeeded."""
        # matchIndex is related to log roll back; currently unavailable.
        self.term, self.success, self.matchIndex = term, success, matchIndex
class RequestVoteRequest:
    """Raft RequestVote RPC request sent by a candidate."""

    @classmethod
    def from_json(cls, json):
        """Rebuild a request from its JSON/dict form."""
        return cls(**json)

    def __init__(self, term, serverid, lastLogIndex=0, lastLogTerm=0):
        """Record the candidate's term, id, and last log position."""
        self.term, self.serverid = term, serverid
        self.lastLogIndex, self.lastLogTerm = lastLogIndex, lastLogTerm
class RequestVoteReply:
    """Raft RequestVote RPC reply: the voter's term and whether it granted."""

    @classmethod
    def from_json(cls, json):
        """Rebuild a reply from its JSON/dict form."""
        return cls(**json)

    def __init__(self, term, success):
        self.term, self.success = term, success
class Entry:
    """One replicated-log entry: a (filename, version, hashlist) update."""

    @classmethod
    def from_json(cls, json):
        """Rebuild an entry from its JSON/dict form."""
        return cls(**json)

    def __init__(self, index=0, term=0, filename='', version='', hashlist=None):
        self.index = index
        self.term = term
        self.filename = filename
        self.version = version
        # Fix: hashlist defaulted to a shared mutable [] — all entries built
        # with the default aliased the same list.
        self.hashlist = [] if hashlist is None else hashlist

    def __eq__(self, other):
        # Field-wise equality; used by `entry in log` membership checks.
        return (self.index, self.term, self.filename, self.version, self.hashlist) == (other.index, other.term, other.filename, other.version, other.hashlist)

    def run(self, file_info_map):
        """Apply this entry to the state machine: filename -> [version, hashlist]."""
        # file_info_map is a state machine
        file_info_map[self.filename] = [self.version, self.hashlist]

    def __repr__(self):
        # Fix: the original template used literal "[]" instead of {}
        # placeholders and passed hashlist under the [version] label, so
        # filename/version never appeared in the output.
        return '[index]: {} [term]: {} [filename]: {} [version]: {}'.format(self.index, self.term, self.filename, self.version)
class Node:
# __electionTimeout = (4, 5) # 150 - 300 ms
# __broadcastTimeout = 1 # 50 ms
__electionTimeout = (0.5, 1)
# __electionTimeout = (0.15, 0.3) # 150 - 300 ms
__broadcastTimeout = 0.05 # 50 ms
__commitTimeout = 0.001
__states = {'Follower', 'Leader', 'Candidate'}
def __init__(self, serverid, serverlist):
self.serverid = serverid
self.serverlist = serverlist
self.clients = dict()
self.num_servers = len(serverlist) + 1
self.state = None
self.is_crashed = False
self.term = 0
self.voteFor = None
self.log = [Entry()] # log # initial an empty entry to pass the consistency check at the very first
# self.file_info_map = dict() # state machine
self.file_info_map = {} # {'test.txt': [-1, []]}
self.commitIndex = 0
self.lastApplied = 0
# only for leader
self.nextIndex = []
self.matchIndex = []
self._append_entry = None
self._request_vote = None
self._election_timer = None
# commit timer, whenever possible, commit changes
self._commit = RepeatingTimer(self.term, self.__commitTimeout, self.commit)
self.switch('Candidate')
def add(self, entry):
# ensure index consistency # incase mixed appendentry
entry.index = len(self.log)
self.log.append(entry)
print('add entry at %i' % entry.index)
def commit(self):
if not self.commitIndex > self.lastApplied:
return
print('last committed: %i; commit to %i' % (self.lastApplied, self.commitIndex))
while not self.commitIndex <= self.lastApplied:
self.lastApplied += 1
self.log[self.lastApplied].run(self.file_info_map)
print('last committed: %i' % self.lastApplied)
def clear_timer(self):
if self._append_entry and self._append_entry.is_alive():
self._append_entry.cancel()
if self._request_vote and self._request_vote.is_alive():
self._request_vote.cancel()
if self._election_timer and self._election_timer.is_alive():
print('{} timer {}'.format(self.serverid, self._election_timer.interval))
self._election_timer.cancel()
def switch(self, state, term=None):
assert state in self.__states, 'State {} invalid!'.format(state)
print(self.serverid, 'switch from {} to {}'.format(self.state, state))
self.clear_timer()
self.state = state
if self.is_crashed:
return
if state == 'Follower':
if term:
self.term = term
self.voteFor = None
self._election_timer = OnceTimer(self.term, self.__electionTimeout, self.switch, ['Candidate'])
elif state == 'Leader':
self.nextIndex = dict([(hostport, self.log[-1].index+1) for hostport in self.serverlist])
self.matchIndex = dict([(hostport, 0) for hostport in self.serverlist])
self._append_entry = RepeatingTimer(self.term, self.__broadcastTimeout, self.append_entry)
elif state == 'Candidate':
self._request_vote = RepeatingTimer(self.term, self.__electionTimeout, self.request_vote)
else:
raise KeyError(state)
def is_leader(self):
if self.is_crashed:
return False
else:
return self.state == 'Leader'
# def call(self, replies, host_port, func, args, kwargs):
# if host_port not in self.clients:
# self.clients[host_port] = xmlrpc.client.ServerProxy('http://{}'.format(host_port))
# try:
# replies.append(getattr(self.clients[host_port].surfstore, func)(*args, **kwargs))
# except Exception as e:
# print('[{}] Fail to call {} {}'.format(e, func, host_port))
# pass
#
# def call_all(self, func, *args, **kwargs):
# replies = Manager().list()
# workers = list()
# for host_port in self.serverlist:
# p = Process(target=self.call, args=(replies, host_port, func, args, kwargs))
# p.start()
# workers.append(p)
# for p in workers:
# p.join()
# return replies
def call(self, replies, host_port, func, request):
if host_port not in self.clients:
self.clients[host_port] = xmlrpc.client.ServerProxy('http://{}'.format(host_port))
try:
# replies.append(getattr(self.clients[host_port].surfstore, func)(request))
replies[host_port] = getattr(self.clients[host_port].surfstore, func)(request)
except Exception as e:
print('[{}] Fail to call {} {}'.format(e, func, host_port))
pass
def call_all(self, func, requests=[]):
replies = Manager().dict() # list()
workers = list()
if not requests:
requests = [''] * len(self.serverlist)
for host_port, request in zip(self.serverlist, requests):
p = Process(target=self.call, args=(replies, host_port, func, request))
p.start()
workers.append(p)
for p in workers:
p.join()
return replies
def majority_of_nodes_working(self):
num = self.num_servers - sum(list(self.call_all('isCrashed').values()))
return num > self.num_servers / 2
def marshal_appendEntry(self):
requests = []
for hostport in self.serverlist:
request = AppendEntriesRequest(self.term, self.serverid)
if self.log[-1].index >= self.nextIndex[hostport]:
request.prevLogIndex = self.nextIndex[hostport] - 1
request.prevLogTerm = self.log[request.prevLogIndex].term
request.entries = self.log[self.nextIndex[hostport]:]
request.commitIndex = self.commitIndex
else:
request.prevLogIndex = self.log[-1].index
request.prevLogTerm = self.log[request.prevLogIndex].term
request.entries = []
request.commitIndex = self.commitIndex
requests.append(request)
return requests
def marshal_vote(self):
request = RequestVoteRequest(self.term, self.serverid)
request.lastLogIndex = self.log[-1].index
request.lastLogTerm = self.log[-1].term
return [request] * len(self.serverlist)
    def handle_appendEntry(self, request):
        """RPC handler: follower side of AppendEntries (heartbeat/replication).

        Performs the Raft term check, log-consistency check, conflict
        rollback, entry append, and commit-index advance. Returns an
        AppendEntriesReply; raises when this node is simulated-crashed so
        the leader observes the failure.
        """
        # crash check
        if self.is_crashed:
            raise Exception('Im crashed!')
        # instanize
        request = AppendEntriesRequest.from_json(request)
        print('[AppendEntriesRequest handle] %s ' % request)
        # term check: reject stale leaders; adopt any newer term immediately.
        if request.term < self.term:
            return AppendEntriesReply(self.term, False)
        if request.term > self.term:
            self.switch('Follower', request.term)
        '''
        not sure if should success here
        '''
        # return AppendEntriesReply(request.term, True)
        # log consistency check: our entry at prevLogIndex must carry the
        # leader's prevLogTerm, otherwise the leader must back nextIndex off.
        # NOTE(review): a log shorter than prevLogIndex falls through to
        # success here; textbook Raft would reject that case too — confirm.
        print('consistency check')
        if self.log[-1].index >= request.prevLogIndex and self.log[request.prevLogIndex].term != request.prevLogTerm:
            return AppendEntriesReply(self.term, False)
        # append entries to log
        print('appending entries')
        if request.entries:
            for entry in request.entries:
                print('entry')
                # Conflict: an existing entry at this index has a different
                # term -- truncate our log from that index to the end.
                if self.log[-1].index >= entry.index and self.log[entry.index].term != entry.term:
                    print('entry roll back')
                    index = self.log[-1].index
                    while index >= entry.index:
                        self.log.pop()
                        index -= 1
                '''
                not very sure what does 'already in' mean
                '''
                print('check entry in log')
                if entry not in self.log:
                    print('log appended')
                    self.add(entry)
        # change will be committed later some time
        # this must be done after entries are appended to log
        if request.commitIndex > self.commitIndex:
            if request.entries:
                self.commitIndex = min(request.commitIndex, request.entries[-1].index)
            else:
                self.commitIndex = request.commitIndex
            # self.commitIndex = min(request.commitIndex, self.log[-1].index)
        print('switch follower')
        self.switch('Follower', request.term)
        return AppendEntriesReply(self.term, True, request.prevLogIndex)
    def handle_vote(self, request):
        """RPC handler: process a RequestVote from a candidate.

        Grants the vote only when we have not already voted for another
        server and the candidate's log is at least as up-to-date as ours
        (compared by last log term, then last log index).
        """
        # crash check
        if self.is_crashed:
            raise Exception('Im crashed!')
        request = RequestVoteRequest.from_json(request)
        # term check: refuse candidates from older terms; step down on newer.
        if request.term < self.term:
            return RequestVoteReply(self.term, False)
        if request.term > self.term:
            self.switch('Follower', request.term)
            # return RequestVoteReply(request.term, True)
        if self.voteFor is None or self.voteFor == request.serverid:
            # at least as up-to-date as in terms of last log's term and index
            if (request.lastLogTerm > self.log[-1].term) or \
               (request.lastLogTerm == self.log[-1].term and request.lastLogIndex >= self.log[-1].index):
                self.voteFor = request.serverid
                return RequestVoteReply(self.term, True)
        return RequestVoteReply(self.term, False)
def check_marjority(self, replies):
# num_vote = sum(list(map(lambda x: x.success, replies))) + 1
num_vote = sum([replies[hostport].success for hostport in replies]) + 1
if num_vote > self.num_servers / 2:
return True
return False
def check_term(self, replies):
max_term = max([replies[hostport].term for hostport in replies])
if max_term > self.term and not self.is_crashed:
self.switch('Follower', max_term)
return False
return True
def check_nextIndex(self, replies):
for hostport in replies:
reply = replies[hostport]
if reply.success:
self.matchIndex[hostport] = reply.matchIndex # self.nextIndex[hostport]
self.nextIndex[hostport] = self.log[-1].index + 1
else:
'''
will there be the case this nextindex keeps decrementing to negative infinity?
not possible.
appendentry will only return false when the follower's term is higher
at this point the leader will already transit to follower
'''
self.nextIndex[hostport] -= 1
    def append_entry(self):
        """Leader heartbeat: broadcast AppendEntries and advance commitIndex.

        Marshals one request per peer, broadcasts in parallel, steps down if
        a newer term is observed, updates per-peer next/match indices, and —
        on a majority of successes — commits the newest entry of the current
        term that is replicated on a majority of servers.
        """
        requests = self.marshal_appendEntry()
        for request in requests:
            print('[appendEntries marshal] %s' % request)
        print('[nextIndex:]', self.nextIndex)
        print('[matchIndex:]', self.matchIndex)
        print(self.log)
        replies = self.call_all('appendEntries', requests)
        print('Node {} Term {} heart beat {}'.format(self.serverid, self.term, replies))
        # Deserialize each raw JSON reply into an AppendEntriesReply object.
        replies = dict([(hostport, AppendEntriesReply.from_json(replies[hostport])) for hostport in replies])
        # replies = [ if replies[hostport] for hostport in self.serverlist] # instanize
        if not replies: return
        if not self.check_term(replies): return
        self.check_nextIndex(replies)
        if self.check_marjority(replies):
            '''
            simple version, didn't consider roll back
            not sure about the relation between commitIndex and lastApplied here
            '''
            # Scan from the newest entry down to the current commitIndex and
            # commit the first index replicated on a majority in our own term
            # (Raft only commits entries from the leader's current term).
            for n in range(self.log[-1].index, self.commitIndex, -1):
                if sum([self.matchIndex[hostport] >= n for hostport in self.serverlist])+1 > self.num_servers/2 and \
                   self.log[n].term == self.term:
                    self.commitIndex = n
                    break
    def request_vote(self):
        """Candidate election round: bump term, vote for self, solicit votes.

        Becomes Leader when a strict majority (counting our own vote) grants
        the request; abandons the round if we are no longer a Candidate or a
        newer term forces a step-down.
        """
        self.term += 1
        self.voteFor = self.serverid
        requests = self.marshal_vote()
        replies = self.call_all('requestVote', requests)
        print('Node {} Term {} request vote {}'.format(self.serverid, self.term, replies))
        # replies = [RequestVoteReply.from_json(reply) for reply in replies] # instanize
        # Deserialize each raw JSON reply into a RequestVoteReply object.
        replies = dict([(hostport, RequestVoteReply.from_json(replies[hostport])) for hostport in replies])
        if not replies or self.state != 'Candidate': return
        if not self.check_term(replies): return
        if self.check_marjority(replies):
            self.switch('Leader')
|
ms.py | """
The SocketServer module does not provide support for multicast. This module
provides a subclass of SocketServer.UDPServer that can listen on multicast
addresses.
This only supports IPv4
"""
import SocketServer
import sys,socket, struct, platform, threading, signal
discoveryServer = None
discoveryThread = None
cgsyncServer = None
cgsyncThread = None
class ServerDiscoveryServer(SocketServer.UDPServer):
    """Extends UDPServer to join multicast groups and bind
    the local interface properly
    """

    def __init__(self, multicast_address, RequestHandlerClass, listen_interfaces = None):
        # multicast_address: (group_ip, port) tuple, e.g. ("239.5.0.5", 1505).
        # listen_interfaces: optional list of local interface IPs to join the
        # group on; None joins via INADDR_ANY (the default interface).
        #to receive multicast packets, must bind the port,
        #set bind_and_active to True.
        #Note: some hosts don't allow bind()'ing to a multicast address,
        #so bind to INADDR_ANY
        SocketServer.UDPServer.__init__(self, ('', multicast_address[1]),
                                        RequestHandlerClass, True)
        #Note: struct ip_mreq { struct in_addr (multicast addr), struct in_addr
        #(local interface) }
        if listen_interfaces is None:
            mreq = struct.pack("4sI", socket.inet_aton(multicast_address[0]),
                               socket.INADDR_ANY)
            self.socket.setsockopt(socket.IPPROTO_IP,
                                   socket.IP_ADD_MEMBERSHIP, mreq)
        else:
            # Join the group once per requested local interface.
            for interface in listen_interfaces:
                mreq = socket.inet_aton(
                    multicast_address[0]) + socket.inet_aton(interface)
                self.socket.setsockopt(socket.IPPROTO_IP,
                                       socket.IP_ADD_MEMBERSHIP, mreq)

    def server_close(self):
        #TODO: leave the multicast groups... (IP_DROP_MEMBERSHIP per joined group)
        print("SERVER CLOSE")
        pass
class ServerDiscoveryHandler(SocketServer.BaseRequestHandler):
    """Replies to a multicast discovery probe with this host's node name."""

    def handle(self):
        # For a UDPServer, self.request is (datagram_bytes, server_socket).
        data = self.request[0]
        # Renamed the local from `socket` to `sock`: the old name shadowed
        # the imported socket module inside this method.
        sock = self.request[1]
        print("Discovery server: ", self.client_address, "-", data)
        sock.sendto("Hi from discovery server " + platform.node(), self.client_address)
class CGSyncHandler(SocketServer.BaseRequestHandler):
    """TCP handler: reads one request chunk and replies with a host-named greeting."""

    def handle(self):
        # For a TCPServer, self.request is the connected stream socket.
        data = self.request.recv(1024)
        print("CGSync server: ", self.client_address, "-", data)
        self.request.sendall("Hi from cgsync server " + platform.node())
def startThreadedServer(s, name):
    # Thread target (also callable inline): blocks in serve_forever() until
    # the server is shut down, logging start/stop with the given name.
    print(">>> Starting " + name)
    s.serve_forever()
    print(">>> Stopping " + name)
# Script entry: a UDP multicast discovery responder plus a plain TCP greeter.
HOST, PORT = "239.5.0.5", 1505
discoveryServer = ServerDiscoveryServer((HOST, PORT), ServerDiscoveryHandler)
# Discovery runs on a daemon thread so the process can exit with the main server.
discoveryThread = threading.Thread(target = startThreadedServer, args = (discoveryServer, "CGsync service discovery server"))
discoveryThread.daemon = True
discoveryThread.start()
cgsyncServer = SocketServer.TCPServer(("0.0.0.0", 2505), CGSyncHandler)
print(cgsyncServer.server_address)
# The TCP server runs in the foreground and blocks the main thread.
startThreadedServer(cgsyncServer, "CGSync server")
|
riotrunner.py | """
RIOT Runner class
"""
__author__ = "Bruno Chianca Ferreira"
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Bruno Chianca Ferreira"
__email__ = "brunobcf@gmail.com"
import traceback, os, logging, time, subprocess, threading
from classes.runner.runner import Runner
from core.nodes.base import CoreNode
from classes.mobility import mobility
class RIOTRunner(Runner):
    """Runner that emulates RIOT OS applications on CORE-managed network nodes."""

    def __init__(self, emulation):
        # `emulation` is the parsed scenario config; only the ['riot'] section is read.
        self.setup(emulation)
        self.nodes_digest = {}
        self.iosocket_semaphore = False

    def setup(self, emulation):
        # Copy settings from the 'riot' config section; boolean flags arrive
        # as the strings "True"/"False".
        self.topology = emulation['riot']['topology']
        self.number_of_nodes = emulation['riot']['number_of_nodes']
        self.core = True if emulation['riot']['core'] == "True" else False
        self.serial = True if emulation['riot']['serial'] == "True" else False
        self.disks = True if emulation['riot']['disks'] == "True" else False
        self.dump = True if emulation['riot']['dump'] == "True" else False
        self.mobility_model = emulation['riot']['mobility']
        self.app_dir = emulation['riot']['app_dir']
        self.app = emulation['riot']['app']
        self.Mobility = mobility.Mobility(self, self.mobility_model)

    def start(self):
        # Kept as a thin alias for the Runner interface.
        self.run()

    def run(self):
        """
        Runs the emulation of RIOT OS Applications
        """
        #start core
        if self.core:
            # core_topology()/configure_batman() come from the Runner base
            # class (defined outside this file).
            self.core_topology()
            self.configure_batman()
        #start dumps
        if self.dump:
            #get simdir
            simdir = str(time.localtime().tm_year) + "_" + str(time.localtime().tm_mon) + "_" + str(time.localtime().tm_mday) + "_" + str(time.localtime().tm_hour) + "_" + str(time.localtime().tm_min)
            #createDumps(number_of_nodes, "./reports/" + simdir + "/tracer")
            # NOTE(review): self.omnet is never assigned in setup(); this
            # branch raises AttributeError whenever dump is enabled — confirm
            # the intended config key.
            if self.omnet:
                self.tcpdump(self.number_of_nodes, "./reports/" + simdir + "/tracer")
            if self.core:
                self.tcpdump_core(self.number_of_nodes, "./reports/" + simdir + "/tracer")
        if self.core:
            #pass
            sthread = threading.Thread(target=self.server_thread, args=())
            sthread.start()
        #self.configure_bridge()
        self.configure_serial(self.number_of_nodes)
        riot_nodes = self.run_riot(self.session, self.number_of_nodes, self.app_dir, self.app)
        # Spins until externally interrupted; everything below is unreachable
        # in normal operation.
        while True:
            time.sleep(0.1)
        # shutdown session
        logging.info("Simulation finished. Killing all processes")
        if self.core:
            self.coreemu.shutdown()
        os.system("sudo killall xterm")
        # NOTE(review): `username` is undefined in this scope; this line would
        # raise NameError if it were ever reached.
        os.system("chown -R " + username + ":" + username + " ./reports")

    def configure_serial(self, number_of_nodes):
        # Placeholder: serial wiring is not implemented yet.
        pass

    def configure_bridge(self):
        # Creates a tap0+br0 bridge on every node and enslaves bat0 to it,
        # running the commands in a per-node xterm. (Currently unused; see
        # the commented-out call in run().)
        process = []
        for i in range(0,self.number_of_nodes):
            shell = self.session.get_node(i+1, CoreNode).termcmdstring(sh="/bin/bash")
            command = "ip tuntap add tap0 mode tap"
            command += " && ip link add br0 type bridge"
            command += " && ip link set br0 up"
            command += " && ip link set tap0 up"
            command += " && ip link set tap0 master br0"
            command += " && ip link set bat0 master br0"
            shell += " -c '" + command + "'"
            node = subprocess.Popen([
                "xterm",
                "-e",
                shell], stdin=subprocess.PIPE, shell=False)
            process.append(node)

    def run_riot(self, session, number_of_nodes, app_dir, app):
        # Launches the RIOT application in an xterm on each CORE node and
        # returns a dict of "drone<i>" -> Popen handle.
        print("Starting RIOT Application: " + app)
        nodes = {}
        for i in range(0,number_of_nodes):
            shell = session.get_node(i+1, CoreNode).termcmdstring(sh="/bin/bash")
            command = app_dir
            command += "/" + app
            shell += " -c '" + command + "'"
            node = subprocess.Popen([
                "xterm",
                "-e",
                shell], stdin=subprocess.PIPE, shell=False)
            nodes["drone" + str(i)] = node
        return nodes
|
supl-hack.py | #!/usr/bin/python3
from binascii import a2b_hex
import logging
from random import randint
import socket
import ssl
import struct
import threading
SUPL_HOST = "supl.google.com"
SUPL_PORT = 7275
MY_IMSI = "262011234567890"
logging.basicConfig(level=logging.INFO)
log = logging.getLogger
class closed(socket.error):
    """Raised internally when a peer closes its end of the proxied stream."""
    pass
def to_tbcd(imsi: str) -> bytes:
    """Encode a digit string as TBCD (nibble-swapped BCD, per 3GPP TS 29.002).

    Each pair of digits becomes one byte with the two nibbles swapped; an
    odd-length string is padded with the filler nibble 0xF.

    The original built the result by reversing the whole string and
    re-chunking it (and shadowed the builtin `reversed` in the process);
    this pairwise swap is equivalent and direct.
    """
    imsi += "F" * (len(imsi) % 2)
    # Swap the two digits of every pair: "1234" -> "2143" -> bytes 21 43.
    swapped = "".join(imsi[i + 1] + imsi[i] for i in range(0, len(imsi), 2))
    return a2b_hex(swapped)
def to_bitstring(data: bytes) -> str:
    """Render bytes as a '0'/'1' string, 8 bits per byte, MSB first."""
    return "".join(format(byte, "08b") for byte in data)
def from_bitstring(data: str) -> bytes:
    """Inverse of to_bitstring: pack a '0'/'1' string (length multiple of 8) into bytes."""
    return bytes(int(data[pos:pos + 8], 2) for pos in range(0, len(data), 8))
def forward_packet(fd, srv, orig, replacement):
    """Relay one length-prefixed SUPL PDU from `fd` to `srv`, rewriting the IMSI.

    The first two bytes are a big-endian total PDU length (header included).
    The PDU is scanned as a bitstring; if the bit pattern `orig` occurs it is
    replaced by `replacement` before forwarding. Raises `closed` when the
    sending side has shut down.
    """
    def _recv_exact(n):
        # Bug fix: socket.recv(n) may legally return fewer than n bytes, so a
        # single call could truncate the PDU mid-stream. Loop until the full
        # amount arrives, raising `closed` on EOF.
        buf = b""
        while len(buf) < n:
            chunk = fd.recv(n - len(buf))
            if not chunk:
                raise closed()
            buf += chunk
        return buf

    data = _recv_exact(2)
    length = struct.unpack(">H", data)[0]
    data += _recv_exact(length - 2)
    bs = to_bitstring(data)
    if orig in bs:
        log(__name__).info("Imsi replaced")
        bs = bs.replace(orig, replacement)
        data = from_bitstring(bs)
    srv.send(data)
def handle_connection(fd, peer):
    """Proxy one accepted client to the real SUPL server, faking the IMSI.

    Forwards PDUs in both directions: the real IMSI is replaced by a random
    one on the way out and mapped back on the way in. Returns when either
    side times out or closes.
    """
    log(__name__).info("Connection from %s:%d accepted", *peer)
    my_imsi = to_bitstring(to_tbcd(MY_IMSI))
    fake = to_bitstring(to_tbcd("26201%10d" % randint(1011111111, 9999999999)))
    # Bug fix: `srv` was referenced in the finally block even when socket
    # setup failed before its assignment (NameError); initialize it first.
    srv = None
    try:
        fd.settimeout(1.0)
        raw_srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # ssl.wrap_socket() was deprecated and removed in Python 3.12. Build
        # an equivalent non-verifying client context explicitly, matching the
        # old wrap_socket() defaults (no cert verification, no hostname check).
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        srv = ctx.wrap_socket(raw_srv)
        srv.connect((SUPL_HOST, SUPL_PORT))
        while True:
            forward_packet(fd, srv, my_imsi, fake)
            forward_packet(srv, fd, fake, my_imsi)
    except socket.timeout:
        pass
    except closed:
        pass
    finally:
        fd.close()
        if srv is not None:
            srv.close()
    log(__name__).info("Connection to %s:%d closed", *peer)
def main(port):
    """Accept SUPL clients on `port`, handing each to a daemon proxy thread."""
    a_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    a_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # uncomment this for TLS support
    # a_sock = ssl.wrap_socket(a_sock,
    #                          "KEYFILE.pem",
    #                          "CERTFILE.pem",
    #                          server_side=True)
    a_sock.bind(("0.0.0.0", port))
    a_sock.listen(5)
    log(__name__).info("Listening on port %d", port)
    while True:
        fd, peer = a_sock.accept()
        # One daemon thread per client; they die with the main thread.
        t = threading.Thread(target=handle_connection, args=(fd, peer),
                             daemon=True)
        t.start()

# NOTE(review): runs on import; consider an `if __name__ == "__main__":` guard.
main(7275)
|
test_wsgi_compliance.py | #!/usr/bin/env python
import os
import time
from multiprocessing import Process
from wsgiref.validate import validator
try:
from http import client as httplib
except ImportError: # Py 2
import httplib
import bjoern
@validator
def _app(environ, start_response):
start_response("200 OK", [("Content-Type", "text/plain")])
return [b"Hello World"]
def _start_server():
    """Blocking child-process entry point: serve `_app` with bjoern on :8080."""
    bjoern.run(_app, 'localhost', 8080)
def test_compliance():
    """Boot bjoern in a child process and validate one GET round-trip.

    The WSGI validator inside `_app` raises on any spec violation, which
    surfaces here as a failed request/assertion.
    """
    p = Process(target=_start_server)
    p.start()
    time.sleep(3)  # Should be enough for the server to start
    try:
        h = httplib.HTTPConnection('localhost', 8080)
        try:
            h.request("GET", "/")
            response = h.getresponse()
        finally:
            # Bug fix: the connection was previously leaked.
            h.close()
    finally:
        p.terminate()
    assert response.reason == "OK"
if __name__ == "__main__":
    # Script mode: report pass/fail explicitly instead of relying on pytest.
    try:
        test_compliance()
    except AssertionError:
        raise SystemExit("Test failed")
    else:
        print("Test successful")
|
testinggamepad.py | from inputs import get_gamepad
import time
import threading
import serial
from numpy import interp
#def rangeMap(num):
# return int(interp(num,[-32768,32768],[0,255]))
# NOTE(review): `global` at module level is a no-op; harmless but unnecessary.
global ser
# Serial link to the microcontroller (Windows COM port; Linux device below).
#ser = serial.Serial('/dev/ttyACM0', 9600)
ser = serial.Serial('COM9', 9600)
def readGP():
    """Thread target: pump gamepad events into the BTN/state/types globals."""
    while 1:
        global BTN,state,types
        events = get_gamepad()  # blocks until at least one event arrives
        for event in events:
            BTNc=event.code
            if BTNc!='SYN_REPORT':
                # Publish the latest non-sync event for monitor() to consume.
                types=event.ev_type
                BTN=event.code
                state=event.state
        #time.sleep(1)
def monitor():
    """Thread target: poll the globals written by readGP() and drive the serial link.

    D-pad (Absolute axes) map to 'U'/'D'/'L'/'R'; face buttons (Key events)
    map to 'N'/'S'/'W'/'E' and the TR shoulder to 's'. When no button is
    held, '0' is sent.
    """
    while 1:
        if(state!=0):
            if(types=='Absolute'):
                if(BTN=='ABS_HAT0Y' and state==-1):
                    print("UP Button pressed")
                    ser.write('U'.encode())
                elif(BTN=='ABS_HAT0Y' and state==1):
                    print("DOWN Button pressed")
                    ser.write("D".encode())
                elif(BTN=='ABS_HAT0X' and state==-1):
                    print("LEFT Button pressed")
                    ser.write("L".encode())
                elif(BTN=='ABS_HAT0X' and state==1):
                    print("RIGHT Button pressed")
                    ser.write("R".encode())
                else:
                    print(BTN,state)
            elif(types=='Key'):
                print(BTN)
                # Bug fix: the original compared strings with `is`, which
                # tests object identity rather than equality and only worked
                # by accident of CPython string interning (SyntaxWarning on
                # Python 3.8+). `==` is the correct comparison.
                if BTN == 'BTN_NORTH':
                    ser.write('N'.encode())
                if BTN == 'BTN_SOUTH':
                    ser.write('S'.encode())
                if BTN == 'BTN_WEST':
                    ser.write('W'.encode())
                if BTN == 'BTN_EAST':
                    ser.write('E'.encode())
                if BTN == 'BTN_TR':
                    ser.write('s'.encode())
        else:
            # NOTE: prints the bytes repr (b'...'), preserved from the original.
            print("Button Not pressed".encode())
            ser.write("0".encode())
        #time.sleep(1)
# Shared state exchanged between the reader and monitor threads.
BTN='None'
state='None'
types='None'
key=0
t1 = threading.Thread(target = readGP)
t2 = threading.Thread(target = monitor)
# Bug fix: the original set `t1.daemond` / `t2.daemond` (a typo), which only
# created an unused attribute and left both threads non-daemonic, so the
# process could never exit on its own.
t1.daemon = True
t2.daemon = True
t1.start()
t2.start()
|
training.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras training and evaluation routines.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import multiprocessing
import threading
import time
import warnings
import numpy as np
import six
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras import callbacks as cbks
from tensorflow.contrib.keras.python.keras import losses
from tensorflow.contrib.keras.python.keras import metrics as metrics_module
from tensorflow.contrib.keras.python.keras import optimizers
from tensorflow.contrib.keras.python.keras.engine.topology import Container
from tensorflow.contrib.keras.python.keras.utils.generic_utils import Progbar
# pylint: disable=g-import-not-at-top
try:
import queue
except ImportError:
import Queue as queue
# pylint: enable=g-import-not-at-top
def _standardize_input_data(data,
                            names,
                            shapes=None,
                            check_batch_axis=True,
                            exception_prefix=''):
  """Normalizes inputs and targets provided by users.

  Users may pass data as a list of arrays, dictionary of arrays,
  or as a single array. We normalize this to an ordered list of
  arrays (same order as `names`), while checking that the provided
  arrays have shapes that match the network's expectations.

  Arguments:
      data: User-provided input data (polymorphic).
      names: List of expected array names.
      shapes: Optional list of expected array shapes.
      check_batch_axis: Boolean; whether to check that
          the batch axis of the arrays matches the expected
          value found in `shapes`.
      exception_prefix: String prefix used for exception formatting.

  Returns:
      List of standardized input arrays (one array per model input).

  Raises:
      ValueError: in case of improperly formatted user-provided data.
  """
  if data is None:
    return [None for _ in range(len(names))]
  if isinstance(data, dict):
    # Dict input: reorder values into the model's canonical input order.
    arrays = []
    for name in names:
      if name not in data:
        raise ValueError('No data provided for "' + name +
                         '". Need data for each key in: ' + str(names))
      arrays.append(data[name])
  elif isinstance(data, list):
    if len(data) != len(names):
      if data and hasattr(data[0], 'shape'):
        raise ValueError('Error when checking ' + exception_prefix +
                         ': the list of Numpy arrays '
                         'that you are passing to your model '
                         'is not the size the model expected. '
                         'Expected to see ' + str(len(
                             names)) + ' arrays but instead got '
                         'the following list of ' + str(len(
                             data)) + ' arrays: ' + str(data)[:200] + '...')
      else:
        # A raw list for a single-input model is treated as one array.
        if len(names) == 1:
          data = [np.asarray(data)]
        else:
          raise ValueError('Error when checking ' + exception_prefix +
                           ': you are passing a list as '
                           'input to your model, '
                           'but the model expects '
                           'a list of ' + str(len(
                               names)) + ' Numpy arrays instead. '
                           'The list you passed was: ' + str(data)[:200])
    arrays = data
  else:
    if not hasattr(data, 'shape'):
      raise TypeError('Error when checking ' + exception_prefix +
                      ': data should be a Numpy array, '
                      'or list/dict of Numpy arrays. '
                      'Found: ' + str(data)[:200] + '...')
    if len(names) != 1:
      # Case: model expects multiple inputs but only received
      # a single Numpy array.
      raise ValueError('The model expects ' + str(len(names)) +
                       ' input arrays, but only received one array. '
                       'Found: array with shape ' + str(data.shape))
    arrays = [data]

  # Make arrays at least 2D.
  for i in range(len(names)):
    array = arrays[i]
    if len(array.shape) == 1:
      # Promote vectors to column matrices of shape (batch, 1).
      array = np.expand_dims(array, 1)
      arrays[i] = array

  # Check shapes compatibility.
  if shapes:
    for i in range(len(names)):
      if shapes[i] is None:
        continue
      array = arrays[i]
      if len(array.shape) != len(shapes[i]):
        raise ValueError(
            'Error when checking ' + exception_prefix + ': expected ' + names[
                i] + ' to have ' + str(len(shapes[i])) +
            ' dimensions, but got array with shape ' + str(array.shape))
      for j, (dim, ref_dim) in enumerate(zip(array.shape, shapes[i])):
        if not j and not check_batch_axis:
          # skip the first axis
          continue
        if ref_dim:
          # A falsy (None/0) reference dim means "any size is acceptable".
          if ref_dim != dim:
            raise ValueError('Error when checking ' + exception_prefix +
                             ': expected ' + names[i] + ' to have shape ' + str(
                                 shapes[i]) + ' but got array with shape ' +
                             str(array.shape))
  return arrays
def _standardize_sample_or_class_weights(x_weight, output_names, weight_type):
"""Maps `sample_weight` or `class_weight` to model outputs.
Arguments:
x_weight: User-provided `sample_weight` or `class_weight` argument.
output_names: List of output names (strings) in the model.
weight_type: A string used purely for exception printing.
Returns:
A list of `sample_weight` or `class_weight` where there are exactly
one element per model output.
Raises:
ValueError: In case of invalid user-provided argument.
"""
if x_weight is None or len(x_weight) == 0: # pylint: disable=g-explicit-length-test
return [None for _ in output_names]
if len(output_names) == 1:
if isinstance(x_weight, list) and len(x_weight) == 1:
return x_weight
if isinstance(x_weight, dict) and output_names[0] in x_weight:
return [x_weight[output_names[0]]]
else:
return [x_weight]
if isinstance(x_weight, list):
if len(x_weight) != len(output_names):
raise ValueError('Provided `' + weight_type + '` was a list of ' + str(
len(x_weight)) + ' elements, but the model has ' + str(
len(output_names)) + ' outputs. '
'You should provide one `' + weight_type + '`'
'array per model output.')
return x_weight
if isinstance(x_weight, dict):
x_weights = []
for name in output_names:
x_weights.append(x_weight.get(name))
return x_weights
else:
raise TypeError('The model has multiple outputs, so `' + weight_type + '` '
'should be either a list of a dict. '
'Provided `' + weight_type + '` type not understood: ' +
str(x_weight))
def _standardize_class_weights(class_weight, output_names):
  # Thin wrapper: normalize `class_weight` to one entry per model output.
  return _standardize_sample_or_class_weights(class_weight, output_names,
                                              'class_weight')
def _standardize_sample_weights(sample_weight, output_names):
  # Thin wrapper: normalize `sample_weight` to one entry per model output.
  return _standardize_sample_or_class_weights(sample_weight, output_names,
                                              'sample_weight')
def _check_array_lengths(inputs, targets, weights):
  """Does user input validation for numpy arrays.

  Arguments:
      inputs: list of Numpy arrays of inputs.
      targets: list of Numpy arrays of targets.
      weights: list of Numpy arrays of sample weights.

  Raises:
      ValueError: in case of incorrectly formatted data.
  """
  x_lengths = [x.shape[0] for x in inputs]
  y_lengths = [y.shape[0] for y in targets]
  w_lengths = [w.shape[0] for w in weights]
  # Each family of arrays must first agree internally on sample count...
  set_x = set(x_lengths)
  if len(set_x) > 1:
    raise ValueError('All input arrays (x) should have '
                     'the same number of samples. Got array shapes: ' + str(
                         [x.shape for x in inputs]))
  set_y = set(y_lengths)
  if len(set_y) > 1:
    raise ValueError('All target arrays (y) should have '
                     'the same number of samples. Got array shapes: ' + str(
                         [y.shape for y in targets]))
  set_w = set(w_lengths)
  if len(set_w) > 1:
    raise ValueError('All sample_weight arrays should have '
                     'the same number of samples. Got array shapes: ' + str(
                         [w.shape for w in weights]))
  # ...and then the families must agree with each other (x vs y, y vs w).
  if set_x and set_y and list(set_x)[0] != list(set_y)[0]:
    raise ValueError('Input arrays should have '
                     'the same number of samples as target arrays. '
                     'Found ' + str(list(set_x)[0]) + ' input samples '
                     'and ' + str(list(set_y)[0]) + ' target samples.')
  if set_y and set_w and list(set_y)[0] != list(set_w)[0]:
    raise ValueError('Sample_weight arrays should have '
                     'the same number of samples as target arrays. Got ' + str(
                         list(set_y)[0]) + ' input samples and ' + str(
                             list(set_w)[0]) + ' target samples.')
def _check_loss_and_target_compatibility(targets, loss_fns, output_shapes):
"""Does validation on the compatiblity of targets and loss functions.
This helps prevent users from using loss functions incorrectly.
Arguments:
targets: list of Numpy arrays of targets.
loss_fns: list of loss functions.
output_shapes: list of shapes of model outputs.
Raises:
ValueError: if a loss function or target array
is incompatible with an output.
"""
key_losses = {
'mean_square_error', 'binary_crossentropy', 'categorical_crossentropy'
}
for y, loss, shape in zip(targets, loss_fns, output_shapes):
if loss is None:
continue
if loss.__name__ == 'categorical_crossentropy':
if y.shape[-1] == 1:
raise ValueError('You are passing a target array of shape ' + str(
y.shape) + ' while using as loss `categorical_crossentropy`. '
'`categorical_crossentropy` expects '
'targets to be binary matrices (1s and 0s) '
'of shape (samples, classes). '
'If your targets are integer classes, '
'you can convert them to the expected format via:\n'
'```\n'
'from keras.utils.np_utils import to_categorical\n'
'y_binary = to_categorical(y_int)\n'
'```\n'
'\n'
'Alternatively, you can use the loss function '
'`sparse_categorical_crossentropy` instead, '
'which does expect integer targets.')
if loss.__name__ in key_losses:
for target_dim, out_dim in zip(y.shape[1:], shape[1:]):
if out_dim is not None and target_dim != out_dim:
raise ValueError('A target array with shape ' + str(
y.shape) + ' was passed for an output of shape ' + str(shape) +
' while using as loss `' + loss.__name__ + '`. '
'This loss expects '
'targets to have the same shape '
'as the output.')
def _collect_metrics(metrics, output_names):
"""Maps metric functions to model outputs.
Arguments:
metrics: a list or dict of metric functions.
output_names: a list of the names (strings) of model outputs.
Returns:
A list (one entry per model output) of lists of metric functions.
For instance, if the model has 2 outputs, and for the first output
we want to compute "binary_accuracy" and "binary_crossentropy",
and just "binary_accuracy" for the second output,
the list would look like:
`[[binary_accuracy, binary_crossentropy], [binary_accuracy]]`
Raises:
TypeError: if an incorrect type is passed for the `metrics` argument.
"""
if not metrics:
return [[] for _ in output_names]
if isinstance(metrics, list):
# we then apply all metrics to all outputs.
return [copy.copy(metrics) for _ in output_names]
elif isinstance(metrics, dict):
nested_metrics = []
for name in output_names:
output_metrics = metrics.get(name, [])
if not isinstance(output_metrics, list):
output_metrics = [output_metrics]
nested_metrics.append(output_metrics)
return nested_metrics
else:
raise TypeError('Type of `metrics` argument not understood. '
'Expected a list or dictionary, found: ' + str(metrics))
def _batch_shuffle(index_array, batch_size):
"""Shuffles an array in a batch-wise fashion.
Useful for shuffling HDF5 arrays
(where one cannot access arbitrary indices).
Arguments:
index_array: array of indices to be shuffled.
batch_size: integer.
Returns:
The `index_array` array, shuffled in a batch-wise fashion.
"""
batch_count = int(len(index_array) / batch_size)
# to reshape we need to be cleanly divisible by batch size
# we stash extra items and reappend them after shuffling
last_batch = index_array[batch_count * batch_size:]
index_array = index_array[:batch_count * batch_size]
index_array = index_array.reshape((batch_count, batch_size))
np.random.shuffle(index_array)
index_array = index_array.flatten()
return np.append(index_array, last_batch)
def _make_batches(size, batch_size):
"""Returns a list of batch indices (tuples of indices).
Arguments:
size: Integer, total size of the data to slice into batches.
batch_size: Integer, batch size.
Returns:
A list of tuples of array indices.
"""
num_batches = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(0, num_batches)]
def _slice_arrays(arrays, start=None, stop=None):
"""Slice an array or list of arrays.
This takes an array-like, or a list of
array-likes, and outputs:
- arrays[start:stop] if `arrays` is an array-like
- [x[start:stop] for x in arrays] if `arrays` is a list
Can also work on list/array of indices: `_slice_arrays(x, indices)`
Arguments:
arrays: Single array or list of arrays.
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
Returns:
A slice of the array(s).
"""
if isinstance(arrays, list):
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [x[start] for x in arrays]
else:
return [x[start:stop] for x in arrays]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return arrays[start]
else:
return arrays[start:stop]
def _weighted_masked_objective(fn):
  """Adds support for masking and sample-weighting to an objective function.

  It transforms an objective function `fn(y_true, y_pred)`
  into a sample-weighted, cost-masked objective function
  `fn(y_true, y_pred, weights, mask)`.

  Arguments:
      fn: The objective function to wrap,
          with signature `fn(y_true, y_pred)`.

  Returns:
      A function with signature `fn(y_true, y_pred, weights, mask)`,
      or None when `fn` is None.
  """
  if fn is None:
    return None

  def weighted(y_true, y_pred, weights, mask=None):
    """Wrapper function.

    Arguments:
        y_true: `y_true` argument of `fn`.
        y_pred: `y_pred` argument of `fn`.
        weights: Weights tensor.
        mask: Mask tensor.

    Returns:
        Scalar tensor.
    """
    # score_array has ndim >= 2
    score_array = fn(y_true, y_pred)
    if mask is not None:
      # Cast the (boolean) mask to the backend float type before arithmetic.
      mask = K.cast(mask, K.floatx())
      # mask should have the same shape as score_array
      score_array *= mask
      # the loss per batch should be proportional
      # to the number of unmasked samples.
      score_array /= K.mean(mask)

    # reduce score_array to same ndim as weight array
    ndim = K.ndim(score_array)
    weight_ndim = K.ndim(weights)
    score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))

    # apply sample weighting
    if weights is not None:
      score_array *= weights
      # Normalize by the fraction of non-zero weights so the mean is taken
      # over weighted samples only.
      score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
    return K.mean(score_array)

  return weighted
def _masked_objective(fn):
  """Adds support for masking to an objective function.

  It transforms an objective function `fn(y_true, y_pred)`
  into a cost-masked objective function
  `fn(y_true, y_pred, mask)`.

  Arguments:
      fn: The objective function to wrap,
          with signature `fn(y_true, y_pred)`.

  Returns:
      A function with signature `fn(y_true, y_pred, mask)`.
  """

  def masked(y_true, y_pred, mask=None):
    """Wrapper function.

    Arguments:
        y_true: `y_true` argument of `fn`.
        y_pred: `y_pred` argument of `fn`.
        mask: Mask tensor.

    Returns:
        Scalar tensor.
    """
    # score_array has ndim >= 2
    score_array = fn(y_true, y_pred)
    if mask is not None:
      # Cast the (boolean) mask to the backend float type before arithmetic.
      mask = K.cast(mask, K.floatx())
      # mask should have the same shape as score_array
      score_array *= mask
      # the loss per batch should be proportional
      # to the number of unmasked samples.
      score_array /= K.mean(mask)
    return K.mean(score_array)

  return masked
def _standardize_weights(y,
sample_weight=None,
class_weight=None,
sample_weight_mode=None):
"""Performs sample weight validation and standardization.
Everything gets normalized to a single sample-wise (or timestep-wise)
weight array.
Arguments:
y: Numpy array of model targets to be weighted.
sample_weight: User-provided `sample_weight` argument.
class_weight: User-provided `class_weight` argument.
sample_weight_mode: One of `None` or `"temporal"`.
`"temporal"` indicated that we expect 2D weight data
that will be applied to the last 2 dimensions of
the targets (i.e. we are weighting timesteps, not samples).
Returns:
A numpy array of target weights, one entry per sample to weight.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
if sample_weight_mode is not None:
if sample_weight_mode != 'temporal':
raise ValueError('"sample_weight_mode '
'should be None or "temporal". '
'Found: ' + str(sample_weight_mode))
if len(y.shape) < 3:
raise ValueError('Found a sample_weight array for '
'an input with shape ' + str(y.shape) + '. '
'Timestep-wise sample weighting (use of '
'sample_weight_mode="temporal") is restricted to '
'outputs that are at least 3D, i.e. that have '
'a time dimension.')
if sample_weight is not None and len(sample_weight.shape) != 2:
raise ValueError('Found a sample_weight array with shape ' + str(
sample_weight.shape) + '. '
'In order to use timestep-wise sample weighting, '
'you should pass a 2D sample_weight array.')
else:
if sample_weight is not None and len(sample_weight.shape) != 1:
raise ValueError('Found a sample_weight array with shape ' + str(
sample_weight.shape) + '. '
'In order to use timestep-wise sample weights, '
'you should specify '
'sample_weight_mode="temporal" '
'in compile(). If you just mean to use '
'sample-wise weights, make sure your '
'sample_weight array is 1D.')
if sample_weight is not None:
if len(sample_weight.shape) > len(y.shape):
raise ValueError('Found a sample_weight with shape' + str(
sample_weight.shape) + '.'
'Expected sample_weight with rank '
'less than or equal to ' + str(len(y.shape)))
if y.shape[:sample_weight.ndim] != sample_weight.shape:
raise ValueError('Found a sample_weight array with shape ' + str(
sample_weight.shape) + ' for an input with shape ' + str(y.shape) +
'. '
'sample_weight cannot be broadcast.')
return sample_weight
elif isinstance(class_weight, dict):
if len(y.shape) > 2:
raise ValueError('class_weight not supported for '
'3+ dimensional targets.')
if y.shape[1] > 1:
y_classes = y.argmax(axis=1)
elif y.shape[1] == 1:
y_classes = np.reshape(y, y.shape[0])
else:
y_classes = y
weights = np.asarray([class_weight[cls] for cls in y_classes])
return weights
else:
if sample_weight_mode is None:
return np.ones((y.shape[0],), dtype=K.floatx())
else:
return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())
class GeneratorEnqueuer(object):
  """Builds a queue out of a data generator.

  Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

  Arguments:
      generator: a generator function which endlessly yields data
      pickle_safe: use multiprocessing if True, otherwise threading
  """

  def __init__(self, generator, pickle_safe=False):
    # NOTE: with `pickle_safe=True` workers are processes, so the generator
    # output must be picklable; with False, plain threads share the generator.
    self._generator = generator
    self._pickle_safe = pickle_safe
    self._threads = []  # Worker Thread/Process handles.
    self._stop_event = None  # Event set to request shutdown; None until start().
    self.queue = None  # Filled with generator output by the workers.

  def start(self, workers=1, max_q_size=10, wait_time=0.05):
    """Kicks off threads which add data from the generator into the queue.

    Arguments:
        workers: number of worker threads
        max_q_size: queue size (when full, threads could block on put())
        wait_time: time to sleep in-between calls to put()
    """

    def data_generator_task():
      # Worker loop: pull items from the shared generator and enqueue them
      # until stop() is requested or the generator raises.
      while not self._stop_event.is_set():
        try:
          if self._pickle_safe or self.queue.qsize() < max_q_size:
            generator_output = next(self._generator)
            self.queue.put(generator_output)
          else:
            # Threaded queue is over the soft limit: back off briefly.
            time.sleep(wait_time)
        except Exception:
          # Tell every worker to stop before propagating the error.
          self._stop_event.set()
          raise

    try:
      if self._pickle_safe:
        self.queue = multiprocessing.Queue(maxsize=max_q_size)
        self._stop_event = multiprocessing.Event()
      else:
        # NOTE(review): this queue is unbounded; max_q_size is only enforced
        # via the qsize() check inside the worker loop above.
        self.queue = queue.Queue()
        self._stop_event = threading.Event()
      for _ in range(workers):
        if self._pickle_safe:
          # Reset random seed else all children processes
          # share the same seed
          np.random.seed()
          thread = multiprocessing.Process(target=data_generator_task)
          thread.daemon = True
        else:
          thread = threading.Thread(target=data_generator_task)
        self._threads.append(thread)
        thread.start()
    except:
      # Best-effort cleanup of any workers already started, then re-raise.
      self.stop()
      raise

  def is_running(self):
    # Running means start() has been called and no stop was signaled.
    return self._stop_event is not None and not self._stop_event.is_set()

  def stop(self, timeout=None):
    """Stop running threads and wait for them to exit, if necessary.

    Should be called by the same thread which called start().

    Arguments:
        timeout: maximum time to wait on thread.join()
    """
    if self.is_running():
      self._stop_event.set()
    for thread in self._threads:
      if thread.is_alive():
        if self._pickle_safe:
          # Processes may be blocked on a full queue; terminate instead
          # of joining.
          thread.terminate()
        else:
          thread.join(timeout)
    if self._pickle_safe:
      if self.queue is not None:
        self.queue.close()
    # Reset to the pre-start() state so the enqueuer can be restarted.
    self._threads = []
    self._stop_event = None
    self.queue = None
class Model(Container):
"""The `Model` class adds training & evaluation routines to a `Container`.
"""
  def compile(self,
              optimizer,
              loss,
              metrics=None,
              loss_weights=None,
              sample_weight_mode=None):
    """Configures the model for training.

    Arguments:
        optimizer: str (name of optimizer) or optimizer object.
            See [optimizers](/optimizers).
        loss: str (name of objective function) or objective function.
            See [losses](/losses).
            If the model has multiple outputs, you can use a different loss
            on each output by passing a dictionary or a list of losses.
        metrics: list of metrics to be evaluated by the model
            during training and testing.
            Typically you will use `metrics=['accuracy']`.
            To specify different metrics for different outputs of a
            multi-output model, you could also pass a dictionary,
            such as `metrics={'output_a': 'accuracy'}`.
        loss_weights: Optional list or dictionary specifying scalar
            coefficients (Python floats) to weight the loss contributions
            of different model outputs.
            If a list, it is expected to have a 1:1 mapping
            to the model's outputs. If a tensor, it is expected to map
            output names (strings) to scalar coefficients.
        sample_weight_mode: if you need to do timestep-wise
            sample weighting (2D weights), set this to `"temporal"`.
            `None` defaults to sample-wise weights (1D).
            If the model has multiple outputs, you can use a different
            `sample_weight_mode` on each output by passing a
            dictionary or a list of modes.

    Raises:
        ValueError: In case of invalid arguments for
            `optimizer`, `loss`, `metrics` or `sample_weight_mode`.
        RuntimeError: If the model has no loss to optimize.
    """
    # A falsy `loss` (e.g. None) is normalized to an empty dict so the
    # dict branch below handles it; such outputs end up in skip_indices.
    loss = loss or {}
    self.optimizer = optimizers.get(optimizer)
    self.sample_weight_mode = sample_weight_mode
    self.loss = loss
    self.loss_weights = loss_weights

    # Prepare loss functions, one per model output.
    if isinstance(loss, dict):
      for name in loss:
        if name not in self.output_names:
          raise ValueError('Unknown entry in loss '
                           'dictionary: "' + name + '". '
                           'Only expected the following keys: ' + str(
                               self.output_names))
      loss_functions = []
      for name in self.output_names:
        if name not in loss:
          warnings.warn(
              'Output "' + name + '" missing from loss dictionary. '
              'We assume this was done on purpose, '
              'and we will not be expecting '
              'any data to be passed to "' + name + '" during training.',
              stacklevel=2)
        loss_functions.append(losses.get(loss.get(name)))
    elif isinstance(loss, list):
      if len(loss) != len(self.outputs):
        raise ValueError('When passing a list as loss, '
                         'it should have one entry per model outputs. '
                         'The model has ' + str(len(self.outputs)) +
                         ' outputs, but you passed loss=' + str(loss))
      loss_functions = [losses.get(l) for l in loss]
    else:
      # Single loss shared by every output.
      loss_function = losses.get(loss)
      loss_functions = [loss_function for _ in range(len(self.outputs))]
    self.loss_functions = loss_functions
    weighted_losses = [_weighted_masked_objective(fn) for fn in loss_functions]
    # Indices of outputs with no loss function: they are excluded from the
    # feeds, targets, sample weights and the total loss below.
    skip_indices = []
    self._feed_outputs = []
    self._feed_output_names = []
    self._feed_output_shapes = []
    self._feed_loss_fns = []
    for i in range(len(weighted_losses)):
      if weighted_losses[i] is None:
        skip_indices.append(i)
      else:
        self._feed_outputs.append(self.outputs[i])
        self._feed_output_names.append(self.output_names[i])
        self._feed_output_shapes.append(self.internal_output_shapes[i])
        self._feed_loss_fns.append(self.loss_functions[i])

    # Prepare output masks.
    masks = self.compute_mask(self.inputs, mask=None)
    if masks is None:
      masks = [None for _ in self.outputs]
    if not isinstance(masks, list):
      masks = [masks]

    # Prepare loss weights (scalar coefficient per output loss).
    if loss_weights is None:
      loss_weights_list = [1. for _ in range(len(self.outputs))]
    elif isinstance(loss_weights, dict):
      for name in loss_weights:
        if name not in self.output_names:
          raise ValueError('Unknown entry in loss_weights '
                           'dictionary: "' + name + '". '
                           'Only expected the following keys: ' + str(
                               self.output_names))
      loss_weights_list = []
      for name in self.output_names:
        loss_weights_list.append(loss_weights.get(name, 1.))
    elif isinstance(loss_weights, list):
      if len(loss_weights) != len(self.outputs):
        raise ValueError('When passing a list as loss_weights, '
                         'it should have one entry per model outputs. '
                         'The model has ' + str(len(self.outputs)) +
                         ' outputs, but you passed loss_weights=' + str(
                             loss_weights))
      loss_weights_list = loss_weights
    else:
      raise TypeError('Could not interpret loss_weights argument: ' + str(
          loss_weights) + ' - expected a list of dicts.')

    # Prepare sample weights: one placeholder (1D sample-wise or
    # 2D timestep-wise) per output that has a loss.
    sample_weights = []
    sample_weight_modes = []
    if isinstance(sample_weight_mode, dict):
      for name in sample_weight_mode:
        if name not in self.output_names:
          raise ValueError('Unknown entry in '
                           'sample_weight_mode dictionary: "' + name + '". '
                           'Only expected the following keys: ' + str(
                               self.output_names))
      for i, name in enumerate(self.output_names):
        if i in skip_indices:
          weight = None
          sample_weight_modes.append(None)
        else:
          if name not in sample_weight_mode:
            raise ValueError('Output "' + name +
                             '" missing from sample_weight_modes '
                             'dictionary')
          if sample_weight_mode.get(name) == 'temporal':
            weight = K.placeholder(ndim=2, name=name + '_sample_weights')
            sample_weight_modes.append('temporal')
          else:
            weight = K.placeholder(ndim=1, name=name + '_sample_weights')
            sample_weight_modes.append(None)
        sample_weights.append(weight)
    elif isinstance(sample_weight_mode, list):
      if len(sample_weight_mode) != len(self.outputs):
        raise ValueError('When passing a list as sample_weight_mode, '
                         'it should have one entry per model outputs. '
                         'The model has ' + str(len(self.outputs)) +
                         ' outputs, but you passed '
                         'sample_weight_mode=' + str(sample_weight_mode))
      for i in range(len(self.output_names)):
        if i in skip_indices:
          weight = None
          sample_weight_modes.append(None)
        else:
          mode = sample_weight_mode[i]
          name = self.output_names[i]
          if mode == 'temporal':
            weight = K.placeholder(ndim=2, name=name + '_sample_weights')
            sample_weight_modes.append('temporal')
          else:
            weight = K.placeholder(ndim=1, name=name + '_sample_weights')
            sample_weight_modes.append(None)
        sample_weights.append(weight)
    else:
      # Single mode (None or 'temporal') applied to every output.
      for i, name in enumerate(self.output_names):
        if i in skip_indices:
          sample_weight_modes.append(None)
          sample_weights.append(None)
        else:
          if sample_weight_mode == 'temporal':
            sample_weights.append(
                K.placeholder(ndim=2, name=name + '_sample_weights'))
            sample_weight_modes.append('temporal')
          else:
            sample_weights.append(
                K.placeholder(ndim=1, name=name + '_sample_weights'))
            sample_weight_modes.append(None)
    self.sample_weight_modes = sample_weight_modes
    self._feed_sample_weight_modes = []
    for i in range(len(self.outputs)):
      if i not in skip_indices:
        self._feed_sample_weight_modes.append(self.sample_weight_modes[i])

    # Prepare targets of model: one placeholder per output with a loss,
    # matching the output's rank, sparsity and dtype.
    self.targets = []
    self._feed_targets = []
    for i in range(len(self.outputs)):
      if i in skip_indices:
        self.targets.append(None)
      else:
        shape = self.internal_output_shapes[i]
        name = self.output_names[i]
        target = K.placeholder(
            ndim=len(shape),
            name=name + '_target',
            sparse=K.is_sparse(self.outputs[i]),
            dtype=K.dtype(self.outputs[i]))
        self.targets.append(target)
        self._feed_targets.append(target)

    # Prepare metrics.
    self.metrics = metrics
    self.metrics_names = ['loss']
    self.metrics_tensors = []

    # Compute total loss as the loss_weight-weighted sum of per-output losses.
    total_loss = None
    for i in range(len(self.outputs)):
      if i in skip_indices:
        continue
      y_true = self.targets[i]
      y_pred = self.outputs[i]
      weighted_loss = weighted_losses[i]
      sample_weight = sample_weights[i]
      mask = masks[i]
      loss_weight = loss_weights_list[i]
      output_loss = weighted_loss(y_true, y_pred, sample_weight, mask)
      if len(self.outputs) > 1:
        # Also expose each per-output loss as a metric.
        self.metrics_tensors.append(output_loss)
        self.metrics_names.append(self.output_names[i] + '_loss')
      if total_loss is None:
        total_loss = loss_weight * output_loss
      else:
        total_loss += loss_weight * output_loss
    if total_loss is None:
      if not self.losses:
        raise RuntimeError('The model cannot be compiled '
                           'because it has no loss to optimize.')
      else:
        total_loss = 0.

    # Add regularization penalties
    # and other layer-specific losses.
    for loss_tensor in self.losses:
      total_loss += loss_tensor

    # List of same size as output_names.
    # contains tuples (metrics for output, names of metrics).
    nested_metrics = _collect_metrics(metrics, self.output_names)

    def append_metric(layer_num, metric_name, metric_tensor):
      """Helper function used in loop below."""
      if len(self.output_names) > 1:
        # Prefix with the output layer's name to disambiguate.
        metric_name = self.output_layers[layer_num].name + '_' + metric_name
      self.metrics_names.append(metric_name)
      self.metrics_tensors.append(metric_tensor)

    for i in range(len(self.outputs)):
      if i in skip_indices:
        continue
      y_true = self.targets[i]
      y_pred = self.outputs[i]
      output_metrics = nested_metrics[i]
      for metric in output_metrics:
        if metric == 'accuracy' or metric == 'acc':
          # custom handling of accuracy
          # (because of class mode duality)
          output_shape = self.internal_output_shapes[i]
          acc_fn = None
          if output_shape[-1] == 1 or self.loss_functions[
              i] == losses.binary_crossentropy:
            # case: binary accuracy
            acc_fn = metrics_module.binary_accuracy
          elif self.loss_functions[i] == losses.sparse_categorical_crossentropy:
            # case: categorical accuracy with sparse targets
            acc_fn = metrics_module.sparse_categorical_accuracy
          else:
            acc_fn = metrics_module.categorical_accuracy

          masked_fn = _masked_objective(acc_fn)
          append_metric(i, 'acc', masked_fn(y_true, y_pred, mask=masks[i]))
        else:
          metric_fn = metrics_module.get(metric)
          masked_metric_fn = _masked_objective(metric_fn)
          metric_result = masked_metric_fn(y_true, y_pred, mask=masks[i])
          metric_result = {metric_fn.__name__: metric_result}
          for name, tensor in six.iteritems(metric_result):
            append_metric(i, name, tensor)

    # Prepare gradient updates and state updates.
    self.total_loss = total_loss
    self.sample_weights = sample_weights
    self._feed_sample_weights = []
    for i in range(len(self.sample_weights)):
      if i not in skip_indices:
        self._feed_sample_weights.append(sample_weights[i])

    # Functions for train, test and predict will
    # be compiled lazily when required.
    # This saves time when the user is not using all functions.
    self.train_function = None
    self.test_function = None
    self.predict_function = None

    # Collected trainable weights and sort them deterministically.
    trainable_weights = self.trainable_weights
    # Sort weights by name.
    if trainable_weights:
      trainable_weights.sort(key=lambda x: x.name)
    self._collected_trainable_weights = trainable_weights
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise RuntimeError('You must compile your model before using it.')
if self.train_function is None:
inputs = (
self._feed_inputs + self._feed_targets + self._feed_sample_weights)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
training_updates = self.optimizer.get_updates(
self._collected_trainable_weights, self.constraints, self.total_loss)
updates = self.updates + training_updates
# Gets loss and metrics. Updates weights at each call.
self.train_function = K.function(
inputs, [self.total_loss] + self.metrics_tensors, updates=updates)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise RuntimeError('You must compile your model before using it.')
if self.test_function is None:
inputs = (
self._feed_inputs + self._feed_targets + self._feed_sample_weights)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
# Return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(
inputs, [self.total_loss] + self.metrics_tensors,
updates=self.state_updates)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs = self._feed_inputs + [K.learning_phase()]
else:
inputs = self._feed_inputs
# Gets network outputs. Does not update weights.
# Does update the network states.
self.predict_function = K.function(
inputs, self.outputs, updates=self.state_updates)
  def _fit_loop(self,
                f,
                ins,
                out_labels=None,
                batch_size=32,
                epochs=100,
                verbose=1,
                callbacks=None,
                val_f=None,
                val_ins=None,
                shuffle=True,
                callback_metrics=None,
                initial_epoch=0):
    """Abstract fit function for `f(ins)`.

    Assume that f returns a list, labeled by out_labels.

    Arguments:
        f: Keras function returning a list of tensors
        ins: list of tensors to be fed to `f`
        out_labels: list of strings, display names of
            the outputs of `f`
        batch_size: integer batch size
        epochs: number of times to iterate over the data
        verbose: verbosity mode, 0, 1 or 2
        callbacks: list of callbacks to be called during training
        val_f: Keras function to call for validation
        val_ins: list of tensors to be fed to `val_f`
        shuffle: whether to shuffle the data at the beginning of each epoch
        callback_metrics: list of strings, the display names of the metrics
            passed to the callbacks. They should be the
            concatenation of list the display names of the outputs of
            `f` and the list of display names of the outputs of `f_val`.
        initial_epoch: epoch at which to start training
            (useful for resuming a previous training run)

    Returns:
        `History` object.
    """
    do_validation = False
    if val_f and val_ins:
      do_validation = True
      if verbose:
        print('Train on %d samples, validate on %d samples' %
              (ins[0].shape[0], val_ins[0].shape[0]))
    if ins and hasattr(ins[0], 'shape'):
      num_train_samples = ins[0].shape[0]
    else:
      # May happen if we are running `fit` without Numpy input data,
      # i.e. if all inputs to the models are data tensors
      # instead of placeholders.
      # In that case we will run `fit` over a single batch.
      num_train_samples = batch_size
      verbose = 2
    index_array = np.arange(num_train_samples)

    # Always wrap user callbacks between BaseLogger and the History
    # object that is returned to the caller.
    self.history = cbks.History()
    callbacks = [cbks.BaseLogger()] + (callbacks or []) + [self.history]
    if verbose:
      callbacks += [cbks.ProgbarLogger()]
    callbacks = cbks.CallbackList(callbacks)
    out_labels = out_labels or []

    # it's possible to callback a different model than self
    # (used by Sequential models)
    if hasattr(self, 'callback_model') and self.callback_model:
      callback_model = self.callback_model
    else:
      callback_model = self

    callbacks.set_model(callback_model)
    callbacks.set_params({
        'batch_size': batch_size,
        'epochs': epochs,
        'samples': num_train_samples,
        'verbose': verbose,
        'do_validation': do_validation,
        'metrics': callback_metrics or [],
    })
    callbacks.on_train_begin()
    # Callbacks may set this flag to True to stop training early.
    callback_model.stop_training = False
    for cbk in callbacks:
      cbk.validation_data = val_ins

    for epoch in range(initial_epoch, epochs):
      callbacks.on_epoch_begin(epoch)
      if shuffle == 'batch':
        # Shuffle in batch-sized chunks (see _batch_shuffle) —
        # presumably HDF5-friendly; plain shuffle is per-sample.
        index_array = _batch_shuffle(index_array, batch_size)
      elif shuffle:
        np.random.shuffle(index_array)

      batches = _make_batches(num_train_samples, batch_size)
      epoch_logs = {}
      for batch_index, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]
        try:
          if isinstance(ins[-1], float):
            # do not slice the training phase flag
            ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
          else:
            ins_batch = _slice_arrays(ins, batch_ids)
        except TypeError:
          raise TypeError('TypeError while preparing batch. '
                          'If using HDF5 input data, '
                          'pass shuffle="batch".')
        batch_logs = {}
        batch_logs['batch'] = batch_index
        batch_logs['size'] = len(batch_ids)
        callbacks.on_batch_begin(batch_index, batch_logs)
        outs = f(ins_batch)
        if not isinstance(outs, list):
          outs = [outs]
        for l, o in zip(out_labels, outs):
          batch_logs[l] = o

        callbacks.on_batch_end(batch_index, batch_logs)

        if batch_index == len(batches) - 1:  # last batch
          # validation
          if do_validation:
            # replace with self._evaluate
            val_outs = self._test_loop(
                val_f, val_ins, batch_size=batch_size, verbose=0)
            if not isinstance(val_outs, list):
              val_outs = [val_outs]
            # same labels assumed
            for l, o in zip(out_labels, val_outs):
              epoch_logs['val_' + l] = o
      callbacks.on_epoch_end(epoch, epoch_logs)
      if callback_model.stop_training:
        break
    callbacks.on_train_end()
    return self.history
  def _predict_loop(self, f, ins, batch_size=32, verbose=0):
    """Abstract method to loop over some data in batches.

    Arguments:
        f: Keras function returning a list of tensors.
        ins: list of tensors to be fed to `f`.
        batch_size: integer batch size.
        verbose: verbosity mode.

    Returns:
        Array of predictions (if the model has a single output)
        or list of arrays of predictions
        (if the model has multiple outputs).
    """
    if ins and hasattr(ins[0], 'shape'):
      samples = ins[0].shape[0]
    else:
      # May happen if we are running `predict` without Numpy input data,
      # i.e. if all inputs to the models are data tensors
      # instead of placeholders.
      # In that case we will run `predict` over a single batch.
      samples = batch_size
      verbose = 2
    outs = []
    if verbose == 1:
      progbar = Progbar(target=samples)
    batches = _make_batches(samples, batch_size)
    index_array = np.arange(samples)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
      batch_ids = index_array[batch_start:batch_end]
      if ins and isinstance(ins[-1], float):
        # do not slice the training phase flag
        ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
      else:
        ins_batch = _slice_arrays(ins, batch_ids)
      batch_outs = f(ins_batch)
      if not isinstance(batch_outs, list):
        batch_outs = [batch_outs]
      if batch_index == 0:
        # Pre-allocate the full output arrays from the first batch's
        # shapes and dtypes; later batches are written in place below.
        for batch_out in batch_outs:
          shape = (samples,) + batch_out.shape[1:]
          outs.append(np.zeros(shape, dtype=batch_out.dtype))
      for i, batch_out in enumerate(batch_outs):
        outs[i][batch_start:batch_end] = batch_out
      if verbose == 1:
        progbar.update(batch_end)
    if len(outs) == 1:
      return outs[0]
    return outs
def _test_loop(self, f, ins, batch_size=32, verbose=0):
"""Abstract method to loop over some data in batches.
Arguments:
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
if ins and hasattr(ins[0], 'shape'):
samples = ins[0].shape[0]
else:
# May happen if we are running `evaluate` without Numpy input data,
# i.e. if all inputs to the models are data tensors
# instead of placeholders.
# In that case we will run `evaluate` over a single batch.
samples = batch_size
verbose = 2
outs = []
if verbose == 1:
progbar = Progbar(target=samples)
batches = _make_batches(samples, batch_size)
index_array = np.arange(samples)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if isinstance(ins[-1], float):
# do not slice the training phase flag
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
batch_outs = f(ins_batch)
if isinstance(batch_outs, list):
if batch_index == 0:
for batch_out in enumerate(batch_outs):
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i in range(len(outs)):
outs[i] /= samples
if len(outs) == 1:
return outs[0]
return outs
  def _standardize_user_data(self,
                             x,
                             y,
                             sample_weight=None,
                             class_weight=None,
                             check_batch_axis=True,
                             batch_size=None):
    """Standardizes user-provided inputs, targets and sample weights.

    Arguments:
        x: input data (array, list of arrays, or dict mapping input
            names to arrays).
        y: target data (array, list of arrays, or dict mapping output
            names to arrays).
        sample_weight: user-provided `sample_weight` argument.
        class_weight: user-provided `class_weight` argument.
        batch_size: batch size, used to validate stateful models.

    Returns:
        Tuple `(x, y, sample_weights)` of lists of numpy arrays aligned
        with the model's feed inputs/outputs.

    Raises:
        RuntimeError: if the model was never compiled.
        ValueError: in case of invalid user data.
    """
    if not hasattr(self, 'optimizer'):
      raise RuntimeError('You must compile a model before '
                         'training/testing. '
                         'Use `model.compile(optimizer, loss)`.')

    # Determine the expected target shape per output.
    output_shapes = []
    for output_shape, loss_fn in zip(self._feed_output_shapes,
                                     self._feed_loss_fns):
      if loss_fn.__name__ == 'sparse_categorical_crossentropy':
        # Sparse targets carry a single class index per sample instead
        # of a one-hot vector: expected last dimension is 1.
        output_shapes.append(output_shape[:-1] + (1,))
      elif getattr(losses, loss_fn.__name__, None) is None:
        # Custom (non-built-in) loss: target shape cannot be inferred.
        output_shapes.append(None)
      else:
        output_shapes.append(output_shape)
    # NOTE(review): `check_batch_axis` is currently not forwarded; both
    # calls below hard-code False.
    x = _standardize_input_data(
        x,
        self._feed_input_names,
        self._feed_input_shapes,
        check_batch_axis=False,
        exception_prefix='model input')
    y = _standardize_input_data(
        y,
        self._feed_output_names,
        output_shapes,
        check_batch_axis=False,
        exception_prefix='model target')
    sample_weights = _standardize_sample_weights(sample_weight,
                                                 self._feed_output_names)
    class_weights = _standardize_class_weights(class_weight,
                                               self._feed_output_names)
    # One standardized weight array per fed output.
    sample_weights = [
        _standardize_weights(ref, sw, cw, mode)
        for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
                                       self._feed_sample_weight_modes)
    ]
    _check_array_lengths(x, y, sample_weights)
    _check_loss_and_target_compatibility(y, self._feed_loss_fns,
                                         self._feed_output_shapes)
    if self.stateful and batch_size:
      if x[0].shape[0] % batch_size != 0:
        raise ValueError('In a stateful network, '
                         'you should only pass inputs with '
                         'a number of samples that can be '
                         'divided by the batch size. Found: ' + str(
                             x[0].shape[0]) + ' samples')
    return x, y, sample_weights
  def fit(self,
          x=None,
          y=None,
          batch_size=32,
          epochs=1,
          verbose=1,
          callbacks=None,
          validation_split=0.,
          validation_data=None,
          shuffle=True,
          class_weight=None,
          sample_weight=None,
          initial_epoch=0):
    """Trains the model for a fixed number of epochs (iterations on a dataset).

    Arguments:
        x: Numpy array of training data,
            or list of Numpy arrays if the model has multiple inputs.
            If all inputs in the model are named,
            you can also pass a dictionary
            mapping input names to Numpy arrays.
        y: Numpy array of target data,
            or list of Numpy arrays if the model has multiple outputs.
            If all outputs in the model are named,
            you can also pass a dictionary
            mapping output names to Numpy arrays.
        batch_size: integer. Number of samples per gradient update.
        epochs: integer, the number of times to iterate
            over the training data arrays.
        verbose: 0, 1, or 2. Verbosity mode.
            0 = silent, 1 = verbose, 2 = one log line per epoch.
        callbacks: list of callbacks to be called during training.
            See [callbacks](/callbacks).
        validation_split: float between 0 and 1:
            fraction of the training data to be used as validation data.
            The model will set apart this fraction of the training data,
            will not train on it, and will evaluate
            the loss and any model metrics
            on this data at the end of each epoch.
        validation_data: data on which to evaluate
            the loss and any model metrics
            at the end of each epoch. The model will not
            be trained on this data.
            This could be a tuple (x_val, y_val)
            or a tuple (x_val, y_val, val_sample_weights).
        shuffle: boolean, whether to shuffle the training data
            before each epoch.
        class_weight: optional dictionary mapping
            class indices (integers) to
            a weight (float) to apply to the model's loss for the samples
            from this class during training.
            This can be useful to tell the model to "pay more attention" to
            samples from an under-represented class.
        sample_weight: optional array of the same length as x, containing
            weights to apply to the model's loss for each sample.
            In the case of temporal data, you can pass a 2D array
            with shape (samples, sequence_length),
            to apply a different weight to every timestep of every sample.
            In this case you should make sure to specify
            sample_weight_mode="temporal" in compile().
        initial_epoch: epoch at which to start training
            (useful for resuming a previous training run)

    Returns:
        A `History` instance. Its `history` attribute contains
        all information collected during training.

    Raises:
        ValueError: In case of mismatch between the provided input data
        and what the model expects.
    """
    # validate user data
    x, y, sample_weights = self._standardize_user_data(
        x,
        y,
        sample_weight=sample_weight,
        class_weight=class_weight,
        check_batch_axis=False,
        batch_size=batch_size)
    # prepare validation data
    # (explicit `validation_data` takes precedence over `validation_split`)
    if validation_data:
      do_validation = True
      if len(validation_data) == 2:
        val_x, val_y = validation_data  # pylint: disable=unpacking-non-sequence
        val_sample_weight = None
      elif len(validation_data) == 3:
        val_x, val_y, val_sample_weight = validation_data  # pylint: disable=unpacking-non-sequence
      else:
        raise ValueError('When passing validation_data, '
                         'it must contain 2 (x_val, y_val) '
                         'or 3 (x_val, y_val, val_sample_weights) '
                         'items, however it contains %d items' %
                         len(validation_data))

      val_x, val_y, val_sample_weights = self._standardize_user_data(
          val_x,
          val_y,
          sample_weight=val_sample_weight,
          check_batch_axis=False,
          batch_size=batch_size)
      self._make_test_function()
      val_f = self.test_function
      # Validation feeds get learning phase 0. (test mode) appended when
      # the backend treats the learning phase as a regular input.
      if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
        val_ins = val_x + val_y + val_sample_weights + [0.]
      else:
        val_ins = val_x + val_y + val_sample_weights

    elif validation_split and 0. < validation_split < 1.:
      do_validation = True
      # Hold out the LAST fraction of the (already standardized) data.
      split_at = int(len(x[0]) * (1. - validation_split))
      x, val_x = (_slice_arrays(x, 0, split_at), _slice_arrays(x, split_at))
      y, val_y = (_slice_arrays(y, 0, split_at), _slice_arrays(y, split_at))
      sample_weights, val_sample_weights = (_slice_arrays(
          sample_weights, 0, split_at), _slice_arrays(sample_weights, split_at))
      self._make_test_function()
      val_f = self.test_function
      if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
        val_ins = val_x + val_y + val_sample_weights + [0.]
      else:
        val_ins = val_x + val_y + val_sample_weights

    else:
      do_validation = False
      val_f = None
      val_ins = None

    # prepare input arrays and training function
    # (training feeds get learning phase 1. = train mode)
    if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
      ins = x + y + sample_weights + [1.]
    else:
      ins = x + y + sample_weights
    self._make_train_function()
    f = self.train_function

    # prepare display labels
    out_labels = self.metrics_names

    # rename duplicated metrics name
    # (can happen with an output layer shared among multiple dataflows)
    deduped_out_labels = []
    for i, label in enumerate(out_labels):
      new_label = label
      if out_labels.count(label) > 1:
        dup_idx = out_labels[:i].count(label)
        new_label += '_' + str(dup_idx + 1)
      deduped_out_labels.append(new_label)
    out_labels = deduped_out_labels

    if do_validation:
      callback_metrics = copy.copy(out_labels) + [
          'val_' + n for n in out_labels
      ]
    else:
      callback_metrics = copy.copy(out_labels)

    # delegate logic to _fit_loop
    return self._fit_loop(
        f,
        ins,
        out_labels=out_labels,
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=callbacks,
        val_f=val_f,
        val_ins=val_ins,
        shuffle=shuffle,
        callback_metrics=callback_metrics,
        initial_epoch=initial_epoch)
def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches.
Arguments:
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
batch_size: integer. Number of samples per gradient update.
verbose: verbosity mode, 0 or 1.
sample_weight: Array of weights to weight the contribution
of different samples to the loss and metrics.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
# validate user data
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
check_batch_axis=False,
batch_size=batch_size)
# prepare inputs, delegate logic to _test_loop
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
f = self.test_function
return self._test_loop(f, ins, batch_size=batch_size, verbose=verbose)
def predict(self, x, batch_size=32, verbose=0):
    """Generates output predictions for the input samples, in batches.

    Arguments:
        x: input data as a Numpy array (or list of Numpy arrays when the
            model has multiple inputs).
        batch_size: integer, number of samples per batch.
        verbose: verbosity mode, 0 or 1.

    Returns:
        Numpy array(s) of predictions.

    Raises:
        ValueError: if the provided input data does not match the
            model's expectations, or if a stateful model receives a
            sample count that is not a multiple of the batch size.
    """
    # Validate/normalize the inputs against the model's feed specs.
    x = _standardize_input_data(
        x,
        self._feed_input_names,
        self._feed_input_shapes,
        check_batch_axis=False)
    if self.stateful:
        n_samples = x[0].shape[0]
        # Stateful models carry state across batches, so partial final
        # batches are not allowed.
        if n_samples > batch_size and n_samples % batch_size != 0:
            raise ValueError('In a stateful network, '
                             'you should only pass inputs with '
                             'a number of samples that can be '
                             'divided by the batch size. Found: ' +
                             str(n_samples) + ' samples. '
                             'Batch size: ' + str(batch_size) + '.')
    # Learning phase 0 == inference mode, fed only when symbolic.
    if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
        ins = x + [0.]
    else:
        ins = x
    self._make_predict_function()
    return self._predict_loop(self.predict_function, ins,
                              batch_size=batch_size, verbose=verbose)
def train_on_batch(self, x, y, sample_weight=None, class_weight=None):
    """Runs one gradient update on a single batch of data.

    Arguments:
        x: Numpy array (or list/dict of arrays) of training data.
        y: Numpy array (or list/dict of arrays) of target data.
        sample_weight: optional array of per-sample loss weights; a 2D
            (samples, sequence_length) array weights every timestep when
            the model was compiled with sample_weight_mode="temporal".
        class_weight: optional dict mapping class indices to a loss
            weight, e.g. to emphasise under-represented classes.

    Returns:
        A scalar training loss, or a list of scalars when the model has
        several outputs and/or metrics.  `model.metrics_names` gives the
        display labels for the scalar outputs.
    """
    x, y, sample_weights = self._standardize_user_data(
        x,
        y,
        sample_weight=sample_weight,
        class_weight=class_weight,
        check_batch_axis=True)
    # Learning phase 1 == training mode, fed only when symbolic.
    ins = x + y + sample_weights
    if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
        ins = ins + [1.]
    self._make_train_function()
    outputs = self.train_function(ins)
    # Unwrap a single-element result for caller convenience.
    return outputs[0] if len(outputs) == 1 else outputs
def test_on_batch(self, x, y, sample_weight=None):
    """Tests the model on a single batch of samples.

    Arguments:
        x: Numpy array (or list/dict of arrays) of test data.
        y: Numpy array (or list/dict of arrays) of target data.
        sample_weight: optional array of per-sample loss weights; a 2D
            (samples, sequence_length) array weights every timestep when
            the model was compiled with sample_weight_mode="temporal".

    Returns:
        A scalar test loss, or a list of scalars when the model has
        several outputs and/or metrics.  `model.metrics_names` gives the
        display labels for the scalar outputs.
    """
    x, y, sample_weights = self._standardize_user_data(
        x, y, sample_weight=sample_weight, check_batch_axis=True)
    # Learning phase 0 == test mode, fed only when symbolic.
    ins = x + y + sample_weights
    if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
        ins = ins + [0.]
    self._make_test_function()
    outputs = self.test_function(ins)
    # Unwrap a single-element result for caller convenience.
    return outputs[0] if len(outputs) == 1 else outputs
def predict_on_batch(self, x):
    """Returns predictions for a single batch of samples.

    Arguments:
        x: input samples, as a Numpy array.

    Returns:
        Numpy array(s) of predictions.
    """
    inputs = _standardize_input_data(x, self._feed_input_names,
                                     self._feed_input_shapes)
    # Learning phase 0 == inference mode, fed only when symbolic.
    if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
        inputs = inputs + [0.]
    self._make_predict_function()
    outputs = self.predict_function(inputs)
    # Unwrap a single-element result for caller convenience.
    return outputs[0] if len(outputs) == 1 else outputs
def fit_generator(self,
generator,
steps_per_epoch,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_q_size=10,
workers=1,
pickle_safe=False,
initial_epoch=0):
"""Fits the model on data yielded batch-by-batch by a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
Arguments:
generator: a generator.
The output of the generator must be either
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
All arrays should contain the same number of samples.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
samples have been seen by the model.
steps_per_epoch: Total number of steps (batches of samples)
to yield from `generator` before declaring one epoch
finished and starting the next epoch. It should typically
be equal to the number of unique samples if your dataset
divided by the batch size.
epochs: integer, total number of iterations on the data.
verbose: verbosity mode, 0, 1, or 2.
callbacks: list of callbacks to be called during training.
validation_data: this can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
validation_steps: Only relevant if `validation_data`
is a generator. Total number of steps (batches of samples)
to yield from `generator` before stopping.
class_weight: dictionary mapping class indices to a weight
for the class.
max_q_size: maximum size for the generator queue
workers: maximum number of processes to spin up
when using process based threading
pickle_safe: if True, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
initial_epoch: epoch at which to start training
(useful for resuming a previous training run)
Returns:
A `History` object.
Example:
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=10000, epochs=10)
```
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
wait_time = 0.01 # in seconds
epoch = initial_epoch
do_validation = bool(validation_data)
self._make_train_function()
if do_validation:
self._make_test_function()
# python 2 has 'next', 3 has '__next__'
# avoid any explicit version checks
val_gen = (hasattr(validation_data, 'next') or
hasattr(validation_data, '__next__'))
if val_gen and not validation_steps:
raise ValueError('When using a generator for validation data, '
'you must specify a value for '
'`validation_steps`.')
# Metric labels reported to callbacks: train metrics plus 'val_' twins.
out_labels = self.metrics_names
callback_metrics = out_labels + ['val_' + n for n in out_labels]
# prepare callbacks
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + (callbacks or []) + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger(count_mode='steps')]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self:
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks.set_model(callback_model)
callbacks.set_params({
'epochs': epochs,
'steps': steps_per_epoch,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
# Array-based (non-generator) validation data is standardized once here.
if do_validation and not val_gen:
if len(validation_data) == 2:
val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
else:
raise ValueError('validation_data should be a tuple '
'`(val_x, val_y, val_sample_weight)` '
'or `(val_x, val_y)`. Found: ' + str(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x, val_y, val_sample_weight)
for cbk in callbacks:
cbk.validation_data = val_x + [val_y, val_sample_weights]
# The enqueuer runs `generator` on background workers; it is always
# shut down in the `finally` block below.
enqueuer = None
try:
enqueuer = GeneratorEnqueuer(generator, pickle_safe=pickle_safe)
enqueuer.start(max_q_size=max_q_size, workers=workers)
callback_model.stop_training = False
while epoch < epochs:
callbacks.on_epoch_begin(epoch)
steps_done = 0
batch_index = 0
while steps_done < steps_per_epoch:
generator_output = None
# Poll the queue (with short sleeps) for the next batch.
while enqueuer.is_running():
if not enqueuer.queue.empty():
generator_output = enqueuer.queue.get()
break
else:
time.sleep(wait_time)
# NOTE(review): if the enqueuer stopped early (e.g. the generator
# raised), generator_output is still None here and surfaces as the
# ValueError below - confirm that is the intended failure mode.
if not hasattr(generator_output, '__len__'):
raise ValueError('output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output # pylint: disable=unpacking-non-sequence
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output # pylint: disable=unpacking-non-sequence
else:
raise ValueError('output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' + str(generator_output))
# build batch logs
batch_logs = {}
if isinstance(x, list):
batch_size = x[0].shape[0]
elif isinstance(x, dict):
batch_size = list(x.values())[0].shape[0]
else:
batch_size = x.shape[0]
batch_logs['batch'] = batch_index
batch_logs['size'] = batch_size
callbacks.on_batch_begin(batch_index, batch_logs)
outs = self.train_on_batch(
x, y, sample_weight=sample_weight, class_weight=class_weight)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
# Construct epoch logs.
epoch_logs = {}
batch_index += 1
steps_done += 1
# Epoch finished.
if steps_done >= steps_per_epoch and do_validation:
if val_gen:
val_outs = self.evaluate_generator(
validation_data,
validation_steps,
max_q_size=max_q_size,
workers=workers,
pickle_safe=pickle_safe)
else:
# No need for try/except because
# data has already been validated.
# NOTE(review): `batch_size` here is the size of the *last*
# training batch, reused for the whole validation pass.
val_outs = self.evaluate(
val_x,
val_y,
batch_size=batch_size,
sample_weight=val_sample_weights,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
epoch += 1
if callback_model.stop_training:
break
finally:
if enqueuer is not None:
enqueuer.stop()
callbacks.on_train_end()
return self.history
def evaluate_generator(self,
generator,
steps,
max_q_size=10,
workers=1,
pickle_safe=False):
"""Evaluates the model on a data generator.
The generator should return the same kind of data
as accepted by `test_on_batch`.
Arguments:
generator: Generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
max_q_size: maximum size for the generator queue
workers: maximum number of processes to spin up
when using process based threading
pickle_safe: if True, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
self._make_test_function()
steps_done = 0
wait_time = 0.01
all_outs = []
batch_sizes = []
enqueuer = None
try:
# Background workers feed batches from `generator` into enqueuer.queue.
enqueuer = GeneratorEnqueuer(generator, pickle_safe=pickle_safe)
enqueuer.start(workers=workers, max_q_size=max_q_size)
while steps_done < steps:
generator_output = None
# Poll the queue (with short sleeps) for the next batch.
while enqueuer.is_running():
if not enqueuer.queue.empty():
generator_output = enqueuer.queue.get()
break
else:
time.sleep(wait_time)
# NOTE(review): generator_output stays None when the enqueuer stops
# early, which is reported via the ValueError below.
if not hasattr(generator_output, '__len__'):
raise ValueError('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output # pylint: disable=unpacking-non-sequence
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output # pylint: disable=unpacking-non-sequence
else:
raise ValueError('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
outs = self.test_on_batch(x, y, sample_weight=sample_weight)
# Track each batch's size so the final average is sample-weighted.
if isinstance(x, list):
batch_size = len(x[0])
elif isinstance(x, dict):
batch_size = len(list(x.values())[0])
else:
batch_size = len(x)
all_outs.append(outs)
steps_done += 1
batch_sizes.append(batch_size)
finally:
if enqueuer is not None:
enqueuer.stop()
# NOTE(review): `outs` below is the *last* batch's result; if `steps`
# is 0 (or the first batch raised) it is unbound and this raises
# NameError. Its type decides scalar-vs-per-output averaging.
if not isinstance(outs, list):
return np.average(np.asarray(all_outs), weights=batch_sizes)
else:
averages = []
for i in range(len(outs)):
averages.append(
np.average([out[i] for out in all_outs], weights=batch_sizes))
return averages
def predict_generator(self,
generator,
steps,
max_q_size=10,
workers=1,
pickle_safe=False,
verbose=0):
"""Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
Arguments:
generator: Generator yielding batches of input samples.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
max_q_size: Maximum size for the generator queue.
workers: Maximum number of processes to spin up
when using process based threading
pickle_safe: If `True`, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
verbose: verbosity mode, 0 or 1.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
self._make_predict_function()
steps_done = 0
wait_time = 0.01
all_outs = []
enqueuer = None
try:
# Background workers feed batches from `generator` into enqueuer.queue.
enqueuer = GeneratorEnqueuer(generator, pickle_safe=pickle_safe)
enqueuer.start(workers=workers, max_q_size=max_q_size)
if verbose == 1:
progbar = Progbar(target=steps)
while steps_done < steps:
generator_output = None
# Poll the queue (with short sleeps) for the next batch.
while enqueuer.is_running():
if not enqueuer.queue.empty():
generator_output = enqueuer.queue.get()
break
else:
time.sleep(wait_time)
# NOTE(review): if the enqueuer stopped early, generator_output is
# still None here and only fails later inside predict_on_batch.
if isinstance(generator_output, tuple):
# Compatibility with the generators
# used for training.
if len(generator_output) == 2:
x, _ = generator_output # pylint: disable=unpacking-non-sequence
elif len(generator_output) == 3:
x, _, _ = generator_output # pylint: disable=unpacking-non-sequence
else:
raise ValueError('output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' + str(generator_output))
else:
# Assumes a generator that only
# yields inputs (not targets and sample weights).
x = generator_output
outs = self.predict_on_batch(x)
if not isinstance(outs, list):
outs = [outs]
# all_outs[i] collects the per-batch arrays for model output i.
if not all_outs:
for out in outs:
all_outs.append([])
for i, out in enumerate(outs):
all_outs[i].append(out)
steps_done += 1
if verbose == 1:
progbar.update(steps_done)
finally:
if enqueuer is not None:
enqueuer.stop()
if len(all_outs) == 1:
if steps_done == 1:
return all_outs[0][0]
else:
return np.concatenate(all_outs[0])
if steps_done == 1:
# NOTE(review): for a multi-output model with a single step this
# returns a list of one-element lists (not bare arrays) - confirm
# callers expect that shape.
return [out for out in all_outs]
else:
return [np.concatenate(out) for out in all_outs]
|
gfile_cache_test.py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from jax.experimental.compilation_cache.gfile_cache import GFileCache
import jax._src.test_util as jtu
import tempfile
import threading
class FileSystemCacheTest(jtu.JaxTestCase):
  """Exercises GFileCache against a temporary on-disk directory."""

  def test_get_nonexistent_key(self):
    # get() on a missing key returns None rather than raising.
    with tempfile.TemporaryDirectory() as tmpdir:
      cache = GFileCache(tmpdir)
      self.assertEqual(cache.get("nonExistentKey"), None)

  def test_put_and_get_key(self):
    with tempfile.TemporaryDirectory() as tmpdir:
      cache = GFileCache(tmpdir)
      cache.put("foo", b"bar")
      self.assertEqual(cache.get("foo"), b"bar")

  def test_existing_cache_path(self):
    # A second cache instance over the same directory sees entries
    # written by the first: the cache is purely disk-backed.
    with tempfile.TemporaryDirectory() as tmpdir:
      cache1 = GFileCache(tmpdir)
      cache1.put("foo", b"bar")
      del cache1
      cache2 = GFileCache(tmpdir)
      self.assertEqual(cache2.get("foo"), b"bar")

  def test_empty_value_put(self):
    with tempfile.TemporaryDirectory() as tmpdir:
      cache = GFileCache(tmpdir)
      cache.put("foo", b"")
      self.assertEqual(cache.get("foo"), b"")

  def test_empty_key_put(self):
    with tempfile.TemporaryDirectory() as tmpdir:
      cache = GFileCache(tmpdir)
      with self.assertRaisesRegex(ValueError, r"key cannot be empty"):
        cache.put("", b"bar")

  def test_empty_key_get(self):
    with tempfile.TemporaryDirectory() as tmpdir:
      cache = GFileCache(tmpdir)
      with self.assertRaisesRegex(ValueError, r"key cannot be empty"):
        cache.get("")

  def test_threads(self):
    """Concurrent writers must leave the cache holding a complete value."""
    value1 = ("1" * (65536 + 1)).encode('utf-8')
    value2 = ("2" * (65536 + 1)).encode('utf-8')

    def call_multiple_puts_and_gets(cache):
      for _ in range(50):
        cache.put("foo", value1)
        cache.put("foo", value2)
        # Another thread may interleave its own put between ours, so the
        # value read back is only guaranteed to be one of the two.
        self.assertIn(cache.get("foo"), (value1, value2))

    with tempfile.TemporaryDirectory() as tmpdir:
      cache = GFileCache(tmpdir)
      # Bug fix: the target was previously *called* inline
      # (target=call_multiple_puts_and_gets(cache)), which ran all the
      # work serially on the main thread and started 50 threads with
      # target=None.  Pass the callable and its argument instead so the
      # threads actually run concurrently.
      threads = [
          threading.Thread(target=call_multiple_puts_and_gets, args=(cache,))
          for _ in range(50)
      ]
      for t in threads:
        t.start()
      for t in threads:
        t.join()
      # Every thread's final write is value2, so after joining the cache
      # must hold value2 regardless of interleaving.
      self.assertEqual(cache.get("foo"), value2)
# Run the suite with JAX's custom test loader when executed directly.
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
cf_contests.py | import os
from threading import Thread
from prettytable import PrettyTable
from .cf_utils import read_data_from_file, write_data_to_file, Obj, obj_to_dict
from .codeforces import CodeforcesAPI
codeforces = CodeforcesAPI()
cache_loc = os.path.join(os.environ['HOME'], '.cache', 'cf_submit')
contests_loc = os.path.join(cache_loc, 'contests.json')
def load_contests(pretty_off):
"""Print Codeforces contests, refreshing the on-disk cache as needed.
On a cache miss the contest list is fetched and persisted synchronously;
otherwise the cached copy is shown and refreshed in a background thread.
pretty_off: when truthy, print only the contest ids on one line instead
of a pretty table of the 20 most recent contests.
"""
contests = [Obj(contest) for contest in read_data_from_file(contests_loc) or []]
if len(contests) == 0:
# Cache miss: fetch, persist, then re-read so `contests` is populated.
write_data_to_file(obj_to_dict(codeforces.contestList()), contests_loc)
contests = [Obj(contest) for contest in read_data_from_file(contests_loc) or []]
else:
# NOTE(review): the synchronous path above wraps the API result in
# obj_to_dict() before persisting, but this background refresh does
# not - confirm write_data_to_file accepts both shapes.
Thread(target=write_data_to_file, args=(codeforces.contestList(), contests_loc)).start()
contests.sort(key=lambda contest: contest.id, reverse=True)
if pretty_off:
print(' '.join(map(str, map(lambda x: x.id, contests))))
else:
print_pretty(contests[0:20])
def print_pretty(contests):
    """Render the given contests as an aligned two-column table,
    sorted by id with the newest contest first."""
    table = PrettyTable()
    table.field_names = ['Id', 'Name']
    for contest in contests:
        table.add_row([contest.id, contest.name])
    # Horizontal rules between rows; left-align the name column.
    table.hrules = True
    table.align['Name'] = 'l'
    print(table.get_string(sortby='Id', reversesort=True))
|
PyxelChip8.py | # PyxelChip8 v0.1: A CHIP8 emulator in Pyxel/Python
# Copyright (c) 2022 Kumogata Computing Laboratory.
# All Rights Reserved.
import pyxel
import threading
from System import *
class PyxelChip8:
"""CHIP-8 emulator front end: Pyxel draws the 64x32 display and polls
the keyboard while the CHIP-8 system runs on a worker thread."""
# Constants
width = 64
height = 32
pixel = 4
# 0: white 1: yellow 2: blue 3: green
# 4: red 5: default 6: pink 7: pale green
theme = 8
# References
_Sys = None
# ------------------------------------------------------------
# Main Routine
# ------------------------------------------------------------
# Constructor
def __init__( self ):
pyxel.init( self.width*self.pixel, self.height*self.pixel ,
title="PyxelChip8 v0.1", fps=20)
pyxel.load( "PyxelChip8.pyxres")
# Create Chip8's System
self._Sys = System()
# Initialize Chip8's System
# NOTE(review): `sys` is not imported in this file directly -
# presumably re-exported by `from System import *`; confirm.
if ( len( sys.argv ) < 2 or
self._Sys.Init( self, sys.argv[ 1 ] ) < 0 ) :
# Failed
print ("Usage: python " + sys.argv[ 0 ] + " <ROM file name>")
sys.exit()
# Start Chip8's System
threading.Thread(target=self._Sys.Run, args=()).start()
# Start Pyxel's System
# pyxel.run() blocks here until the window closes.
pyxel.run(self.update,self.draw)
def update( self ):
# Key Events
self.update_key_press()
self.update_key_release()
def draw( self ):
pyxel.cls(0)
# Each CHIP-8 pixel is drawn as a pixel*pixel sprite from the image
# bank; the row used is selected by `theme`.
for _y in range( self.height ) :
for _x in range( self.width ) :
if ( self._Sys._PPU.PPU_GetPixel( _x, _y ) ) :
# Draw Rectangle
pyxel.blt( _x*self.pixel, _y*self.pixel,
0, 0, self.theme*self.pixel,
self.pixel, self.pixel)
else :
# Draw None
pyxel.blt( _x*self.pixel, _y*self.pixel,
0, self.pixel, self.theme*self.pixel,
self.pixel, self.pixel)
# Original |1|2|3|C| Mapping to |1|2|3|4|
# |4|5|6|D| |Q|W|E|R|
# |7|8|9|E| |A|S|D|F|
# |A|0|B|F| |Z|X|C|V|
# Key Pressed
def update_key_press( self ):
# Each freshly pressed key sets one bit of the 16-bit keypad mask
# shared with the CHIP-8 system via self._Sys._IO.Key.
if pyxel.btnp( pyxel.KEY_X ) :
self._Sys._IO.Key |= ( 1 << 0 )
if pyxel.btnp( pyxel.KEY_1 ) :
self._Sys._IO.Key |= ( 1 << 1 )
if pyxel.btnp( pyxel.KEY_2 ) :
self._Sys._IO.Key |= ( 1 << 2 )
if pyxel.btnp( pyxel.KEY_3 ) :
self._Sys._IO.Key |= ( 1 << 3 )
if pyxel.btnp( pyxel.KEY_Q ) :
self._Sys._IO.Key |= ( 1 << 4 )
if pyxel.btnp( pyxel.KEY_W ) :
self._Sys._IO.Key |= ( 1 << 5 )
if pyxel.btnp( pyxel.KEY_E ) :
self._Sys._IO.Key |= ( 1 << 6 )
if pyxel.btnp( pyxel.KEY_A ) :
self._Sys._IO.Key |= ( 1 << 7 )
if pyxel.btnp( pyxel.KEY_S ) :
self._Sys._IO.Key |= ( 1 << 8 )
if pyxel.btnp( pyxel.KEY_D ) :
self._Sys._IO.Key |= ( 1 << 9 )
if pyxel.btnp( pyxel.KEY_Z ) :
self._Sys._IO.Key |= ( 1 << 10 )
if pyxel.btnp( pyxel.KEY_C ) :
self._Sys._IO.Key |= ( 1 << 11 )
if pyxel.btnp( pyxel.KEY_4 ) :
self._Sys._IO.Key |= ( 1 << 12 )
if pyxel.btnp( pyxel.KEY_R ) :
self._Sys._IO.Key |= ( 1 << 13 )
if pyxel.btnp( pyxel.KEY_F ) :
self._Sys._IO.Key |= ( 1 << 14 )
if pyxel.btnp( pyxel.KEY_V ) :
self._Sys._IO.Key |= ( 1 << 15 )
# Key Released
def update_key_release( self ):
# Each released key clears its bit of the 16-bit keypad mask.
if pyxel.btnr( pyxel.KEY_X ) :
self._Sys._IO.Key &= ~( 1 << 0 )
if pyxel.btnr( pyxel.KEY_1 ) :
self._Sys._IO.Key &= ~( 1 << 1 )
if pyxel.btnr( pyxel.KEY_2 ) :
self._Sys._IO.Key &= ~( 1 << 2 )
if pyxel.btnr( pyxel.KEY_3 ) :
self._Sys._IO.Key &= ~( 1 << 3 )
if pyxel.btnr( pyxel.KEY_Q ) :
self._Sys._IO.Key &= ~( 1 << 4 )
if pyxel.btnr( pyxel.KEY_W ) :
self._Sys._IO.Key &= ~( 1 << 5 )
if pyxel.btnr( pyxel.KEY_E ) :
self._Sys._IO.Key &= ~( 1 << 6 )
if pyxel.btnr( pyxel.KEY_A ) :
self._Sys._IO.Key &= ~( 1 << 7 )
if pyxel.btnr( pyxel.KEY_S ) :
self._Sys._IO.Key &= ~( 1 << 8 )
if pyxel.btnr( pyxel.KEY_D ) :
self._Sys._IO.Key &= ~( 1 << 9 )
if pyxel.btnr( pyxel.KEY_Z ) :
self._Sys._IO.Key &= ~( 1 << 10 )
if pyxel.btnr( pyxel.KEY_C ) :
self._Sys._IO.Key &= ~( 1 << 11 )
if pyxel.btnr( pyxel.KEY_4 ) :
self._Sys._IO.Key &= ~( 1 << 12 )
if pyxel.btnr( pyxel.KEY_R ) :
self._Sys._IO.Key &= ~( 1 << 13 )
if pyxel.btnr( pyxel.KEY_F ) :
self._Sys._IO.Key &= ~( 1 << 14 )
if pyxel.btnr( pyxel.KEY_V ) :
self._Sys._IO.Key &= ~( 1 << 15 )
# Main
# Constructing PyxelChip8 runs the whole emulator: __init__ blocks in
# pyxel.run() until the window is closed.
PyxelChip8()
|
reachability_parralel_bfs.py | import sys
from queue import Queue
import threading
class MazeGraph:
    """Graph explorer used for a two-ended parallel BFS reachability test.

    The class-level attributes are deliberately shared by every instance:
    `s` accumulates all vertices seen by any explorer, and `result` flips
    to 1 as soon as one explorer lands on a vertex another has already
    claimed (i.e. the two endpoints are connected).
    """

    result = 0
    s = set()
    lock = threading.Lock()

    def __init__(self, n, adj):
        self.adj = adj
        self.visited = [False] * n
        self.enqueued = [False] * n
        self.q = Queue(n)

    def visit(self, v):
        """Mark v visited; return True when this explorer should stop."""
        self.visited[v] = True
        if v in MazeGraph.s:
            # The other explorer already claimed v: the endpoints meet.
            with MazeGraph.lock:
                MazeGraph.result = 1
            return True
        with MazeGraph.lock:
            MazeGraph.s.add(v)
        # Stop as well if the other explorer has already found a meeting.
        return MazeGraph.result == 1

    def isVisited(self, v, visited):
        return visited[v]

    def enqueue(self, v):
        # Each vertex enters the queue at most once.
        if not self.enqueued[v]:
            self.q.put(v)
            self.enqueued[v] = True

    # Time Complexity: O(|V| + |E|)
    # Space Complexity: O(|V|)
    def explore(self, u):
        """BFS from u; return 1 if a shared vertex was found, else 0."""
        self.enqueue(u)
        while not self.q.empty():
            current = self.q.get()
            if self.visit(current):
                return 1
            for neighbour in self.adj[current]:
                self.enqueue(neighbour)
        return 0
def reach(n, edges, u, v):
    """Return 1 when vertices u and v are connected, else 0.

    Builds an undirected adjacency list from 1-based edge pairs, then
    launches one BFS explorer from each endpoint on its own thread; the
    explorers meet (via MazeGraph's shared state) iff u and v connect.
    """
    adj = [[] for _ in range(n)]
    for a, b in edges:
        adj[a - 1].append(b - 1)
        adj[b - 1].append(a - 1)
    explorer_u = MazeGraph(n, adj)
    explorer_v = MazeGraph(n, adj)
    workers = [
        threading.Thread(target=explorer_u.explore, args=(u,)),
        threading.Thread(target=explorer_v.explore, args=(v,)),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    return MazeGraph.result
if __name__ == '__main__':
    # Read the whole problem input at once: n, m, m edge pairs, then the
    # query pair (all 1-based).
    # Bug fix: the local was previously named `input`, shadowing the builtin.
    raw = sys.stdin.read()
    data = list(map(int, raw.split()))
    n, m = data[0:2]
    data = data[2:]
    edges = list(zip(data[0:(2 * m):2], data[1:(2 * m):2]))
    # Convert the query endpoints to 0-based vertex indices.
    x, y = data[2 * m:]
    x, y = x - 1, y - 1
    print(reach(n, edges, x, y))
|
panel.py | print("Loaded pi_control panel module")
import os
import threading
import time
import yaml
import pi_control.__init__
import pi_control.device
"""
2022-01-01 Added option to read from a config file.
2022-01-02 Added monitoring for devices without events.
2022-01-08 Separated expanders, outputs, and inputs in the config.
To do:
Separate actions into class
Should take_action() run in a thread?
"""
"""
import pi_control.panel
"""
debug = False
class Panel:
    """
    A control panel composed of expander, output, and input devices.

        panel = pi_control.panel.Panel(name, config_filename || devices_dict)

    The device layout comes either from a dict or from a YAML config file
    with top-level 'expanders', 'outputs', and 'inputs' sections.  Input
    devices whose hardware cannot raise events are polled by a background
    monitoring thread (see monitor_devices).
    """

    def __init__(self, panel_name, devices={}, debug_pref=False):
        """
        :param panel_name: display name for this panel.
        :param devices: device-description dict, or a path to a YAML file.
        :param debug_pref: when True, enable module-wide debug printing.
        :raises TypeError: if devices is neither a dict nor a config path.
        :raises AttributeError: for missing device types or bad
            source_device references.
        :raises ValueError: for unknown device types.
        """
        global debug
        if debug_pref:
            debug = True
        self._name = str(panel_name)
        # A string is taken as the path of a YAML config file.
        if type(devices) is str:
            devices = self.read_conf(devices)
        if type(devices) is not dict:
            raise TypeError("Invalid devices dictionary")
        self._polling_interval = 2.5  # seconds between monitoring passes
        if debug:
            print("devices:", devices)
        self._expanders = {}
        self._outputs = {}
        self._inputs = {}
        # Expanders come first: outputs/inputs may name one of them as
        # their 'source_device'.
        if 'expanders' in devices:
            for name, device_info in devices['expanders'].items():
                self._require_type(name, device_info, "Expander")
                device_info['panel'] = self
                self._expanders[name] = pi_control.device.ExpanderDevice(
                    name, device_info, debug)
        # Dispatch tables: config 'type' string -> device class.
        output_types = {
            'led': pi_control.device.LED,
            'haptic': pi_control.device.Haptic,
            'http': pi_control.device.HTTP,
            'message': pi_control.device.Message,
            'sound': pi_control.device.Sound,
        }
        input_types = {
            'button': pi_control.device.Button,
            'potentiometer': pi_control.device.Potentiometer,
            'rotary_encoder': pi_control.device.RotaryEncoder,
        }
        if 'outputs' in devices:
            for name, device_info in devices['outputs'].items():
                self._require_type(name, device_info, "Output")
                device_info['panel'] = self
                self._resolve_source_device(name, device_info)
                if device_info['type'] not in output_types:
                    raise ValueError(
                        "Device type {} not found".format(device_info['type']))
                self._outputs[name] = output_types[device_info['type']](
                    name, device_info, debug)
        # Inputs come last; remember whether any of them needs polling.
        needs_monitoring = False
        if 'inputs' in devices:
            for name, device_info in devices['inputs'].items():
                self._require_type(name, device_info, "Input")
                device_info['panel'] = self
                self._resolve_source_device(name, device_info)
                if device_info['type'] not in input_types:
                    raise ValueError(
                        "Device type {} not found".format(device_info['type']))
                device = input_types[device_info['type']](
                    name, device_info, debug)
                if device._needs_monitoring:
                    needs_monitoring = True
                if pi_control.is_method(device, 'update_status'):
                    # True marks the startup pass (init-only actions).
                    device.update_status(True)
                self._inputs[name] = device
        # Start the polling thread only when some input requires it.
        if needs_monitoring:
            if debug:
                print("Starting monitoring")
            self._monitor_stop = False
            self._monitor_thread = threading.Thread(
                target=self.monitor_devices,
                args=(lambda: self._monitor_stop,))
            self._monitor_thread.start()

    @staticmethod
    def _require_type(name, device_info, kind):
        """Raise AttributeError unless device_info carries a string 'type'."""
        if 'type' not in device_info or type(device_info['type']) is not str:
            raise AttributeError(
                "{} {} is missing a valid type".format(kind, name))

    def _resolve_source_device(self, name, device_info):
        """
        Replace a 'source_device' name in device_info (if present) with
        the corresponding expander object, validating the reference.
        """
        if 'source_device' not in device_info:
            return
        source = device_info['source_device']
        if source not in self._expanders:
            raise AttributeError(
                "Source device {} for {} not found".format(source, name))
        if not self._expanders[source]._chip:
            raise AttributeError(
                "Source device {} for {} must be an expander device".format(
                    source, name))
        device_info['source_device'] = self._expanders[source]

    @property
    def name(self):
        """The panel's display name."""
        return self._name

    @property
    def expanders(self):
        """dict of expander devices, keyed by config name."""
        return self._expanders

    @property
    def outputs(self):
        """dict of output devices, keyed by config name."""
        return self._outputs

    @property
    def inputs(self):
        """dict of input devices, keyed by config name."""
        return self._inputs

    def get_expander(self, device_name):
        """Return the named expander device, or None if unknown."""
        return self.expanders.get(device_name)

    def get_output(self, device_name):
        """Return the named output device, or None if unknown."""
        return self.outputs.get(device_name)

    def get_input(self, device_name):
        """Return the named input device, or None if unknown."""
        return self.inputs.get(device_name)

    def take_action(self, input_device, action_name, startup=False):
        """
        Run the output actions configured on input_device for action_name.

        :param startup: when True, only run actions flagged with 'init'.
        :raises KeyError: if an action entry has no 'name'.
        :raises ValueError: if an action names an unknown output device.
        """
        actions = input_device.get_actions(action_name)
        cnt = 0
        for action in actions:
            if 'name' not in action:
                # Bug fix: this message previously referenced an undefined
                # `device_name`, so the intended KeyError surfaced as a
                # NameError instead.
                raise KeyError(
                    "Name is required in action {} in action for {}.{}".format(
                        cnt, input_device.name, action_name))
            cnt += 1
            # On startup, only actions explicitly flagged 'init' are run.
            if startup and ('init' not in action or not action['init']):
                continue
            if action['name'] not in self._outputs:
                # Same undefined-`device_name` fix as above.
                raise ValueError(
                    "Output {} in action for {}.{} not found".format(
                        action['name'], input_device.name, action_name))
            device = self._outputs[action['name']]
            if pi_control.is_method(device, 'action'):
                device.action(action)

    def monitor_devices(self, stop_function=lambda: True):
        """
        Poll event-less input devices until stop_function() returns True.

        Runs on the monitoring thread started by __init__ and sleeps
        self._polling_interval seconds between passes.
        """
        while True:
            for device in self._inputs.values():
                if not device._needs_monitoring:
                    continue
                if pi_control.is_method(device, 'update_status'):
                    device.update_status()
                # Bail out promptly if asked to stop mid-pass.
                if stop_function():
                    break
            if stop_function():
                return
            time.sleep(self._polling_interval)

    def read_conf(self, path):
        """
        Load and return a device-description dict from a YAML file.

        :raises FileNotFoundError: if path does not exist.
        :raises ValueError: if the file exists but holds no data.
        """
        if not os.path.exists(path):
            print("Config file not found at '{}'".format(path))
            # Bug fix: bare `raise FileNotFoundError;` now carries a message.
            raise FileNotFoundError(
                "Config file not found at '{}'".format(path))
        with open(path) as file:
            data = yaml.load(file, Loader=yaml.FullLoader)
        # `not data` also covers an empty file, where yaml.load returns
        # None and the old `len(data)` check raised TypeError instead.
        if not data:
            print("Config file is empty.")
            raise ValueError("Config file is empty.")
        return data
|
serial_handler.py | """
SerialHandler class for PyGPSClient application
This handles all the serial i/o , threaded read process and direction to
the appropriate protocol handler
Created on 16 Sep 2020
:author: semuadmin
:copyright: SEMU Consulting © 2020
:license: BSD 3-Clause
"""
import logging
from io import BufferedReader
from threading import Thread
from serial import Serial, SerialException, SerialTimeoutException
from pynmeagps import NMEAReader, NMEAParseError
from pyubx2 import UBXReader, UBXParseError, protocol
import pyubx2.ubxtypes_core as ubt
from pygpsclient.globals import (
CONNECTED,
CONNECTED_FILE,
DISCONNECTED,
CRLF,
)
from pygpsclient.strings import NOTCONN, SEROPENERROR, ENDOFFILE
LOGGING = logging.WARNING
class SerialHandler:
    """
    Serial handler class.

    Owns the serial/file connection lifecycle, the background reader
    threads, and the routing of raw UBX/NMEA bytes to the appropriate
    protocol handlers and console/log widgets of the main application.
    """

    def __init__(self, app):
        """
        Constructor.
        :param Frame app: reference to main tkinter application
        """
        self.__app = app  # Reference to main application class
        self.__master = self.__app.get_master()  # Reference to root class (Tk)
        self._serial_object = None   # Serial or binary file object being read
        self._serial_buffer = None   # BufferedReader wrapped around it
        self._serial_thread = None   # serial-port reader thread
        self._file_thread = None     # datalog-file reader thread
        self._connected = False      # connection state flag
        self._reading = False        # reader-thread run flag
        logging.basicConfig(
            format="%(asctime)-15s [%(levelname)s] %(funcName)s: %(message)s",
            level=LOGGING,
        )

    def __del__(self):
        """
        Destructor.
        """
        # Stop the reader thread (if any) before closing the connection.
        if self._serial_thread is not None:
            self._reading = False
            self._serial_thread = None
        self.disconnect()

    def connect(self):
        """
        Open serial connection.
        """
        # pylint: disable=consider-using-with
        serial_settings = self.__app.frm_settings.serial_settings()
        if serial_settings.status == 3:  # NOPORTS
            return
        try:
            self._serial_object = Serial(
                serial_settings.port,
                serial_settings.bpsrate,
                bytesize=serial_settings.databits,
                stopbits=serial_settings.stopbits,
                parity=serial_settings.parity,
                xonxoff=serial_settings.xonxoff,
                rtscts=serial_settings.rtscts,
                timeout=serial_settings.timeout,
            )
            self._serial_buffer = BufferedReader(self._serial_object)
            self.__app.frm_banner.update_conn_status(CONNECTED)
            self.__app.set_connection(
                (
                    f"{serial_settings.port}:{serial_settings.port_desc} "
                    + f"@ {str(serial_settings.bpsrate)}"
                ),
                "green",
            )
            self.__app.frm_settings.enable_controls(CONNECTED)
            self._connected = True
            self.start_read_thread()
            if self.__app.frm_settings.datalogging:
                self.__app.file_handler.open_logfile()
            if self.__app.frm_settings.record_track:
                self.__app.file_handler.open_trackfile()
            self.__app.set_status("Connected", "blue")
        except (IOError, SerialException, SerialTimeoutException) as err:
            # Surface the failure on the UI and roll back connection state.
            self._connected = False
            self.__app.set_connection(
                (
                    f"{serial_settings.port}:{serial_settings.port_desc} "
                    + f"@ {str(serial_settings.bpsrate)}"
                ),
                "red",
            )
            self.__app.set_status(SEROPENERROR.format(err), "red")
            self.__app.frm_banner.update_conn_status(DISCONNECTED)
            self.__app.frm_settings.enable_controls(DISCONNECTED)

    def connect_file(self):
        """
        Open binary data file connection.
        """
        # pylint: disable=consider-using-with
        in_filepath = self.__app.frm_settings.infilepath
        if in_filepath is None:
            return
        try:
            # A binary file stands in for the serial port; the same buffered
            # parsing path is used for both.
            self._serial_object = open(in_filepath, "rb")
            self._serial_buffer = BufferedReader(self._serial_object)
            self.__app.frm_banner.update_conn_status(CONNECTED_FILE)
            self.__app.set_connection(f"{in_filepath}", "blue")
            self.__app.frm_settings.enable_controls(CONNECTED_FILE)
            self._connected = True
            self.start_readfile_thread()
            if self.__app.frm_settings.datalogging:
                self.__app.file_handler.open_logfile()
            if self.__app.frm_settings.record_track:
                self.__app.file_handler.open_trackfile()
        except (IOError, SerialException, SerialTimeoutException) as err:
            self._connected = False
            self.__app.set_connection(f"{in_filepath}", "red")
            self.__app.set_status(SEROPENERROR.format(err), "red")
            self.__app.frm_banner.update_conn_status(DISCONNECTED)
            self.__app.frm_settings.enable_controls(DISCONNECTED)

    def disconnect(self):
        """
        Close serial connection.
        """
        if self._connected:
            try:
                # Stop the reader before closing the underlying stream.
                self._reading = False
                self._serial_object.close()
                self.__app.frm_banner.update_conn_status(DISCONNECTED)
                self.__app.set_connection(NOTCONN, "red")
                self.__app.set_status("", "blue")
                if self.__app.frm_settings.datalogging:
                    self.__app.file_handler.close_logfile()
                if self.__app.frm_settings.record_track:
                    self.__app.file_handler.close_trackfile()
            except (SerialException, SerialTimeoutException):
                # Best-effort close; errors on teardown are ignored.
                pass
        self._connected = False
        self.__app.frm_settings.enable_controls(self._connected)

    @property
    def port(self):
        """
        Getter for port
        """
        return self.__app.frm_settings.serial_settings().port

    @property
    def connected(self):
        """
        Getter for connection status
        """
        return self._connected

    @property
    def serial(self):
        """
        Getter for serial object
        """
        return self._serial_object

    @property
    def buffer(self):
        """
        Getter for serial buffer
        """
        return self._serial_buffer

    @property
    def thread(self):
        """
        Getter for serial thread
        """
        return self._serial_thread

    def serial_write(self, data: bytes):
        """
        Write binary data to serial port.
        :param bytes data: data to write to stream
        """
        try:
            self._serial_object.write(data)
        except (SerialException, SerialTimeoutException) as err:
            print(f"Error writing to serial port {err}")

    def start_read_thread(self):
        """
        Start the serial reader thread.
        """
        if self._connected:
            self._reading = True
            self.__app.frm_mapview.reset_map_refresh()
            # Daemon thread so an open port never blocks app exit.
            self._serial_thread = Thread(target=self._read_thread, daemon=True)
            self._serial_thread.start()

    def start_readfile_thread(self):
        """
        Start the file reader thread.
        """
        if self._connected:
            self._reading = True
            self.__app.frm_mapview.reset_map_refresh()
            self._file_thread = Thread(target=self._readfile_thread, daemon=True)
            self._file_thread.start()

    def stop_read_thread(self):
        """
        Stop serial reader thread.
        """
        if self._serial_thread is not None:
            # Clearing the flag lets the thread loop exit on its own.
            self._reading = False
            self._serial_thread = None
            # self.__app.set_status(STOPDATA, "red")

    def stop_readfile_thread(self):
        """
        Stop file reader thread.
        """
        if self._file_thread is not None:
            self._reading = False
            self._file_thread = None
            # self.__app.set_status(STOPDATA, "red")

    def _read_thread(self):
        """
        THREADED PROCESS
        Reads binary data from serial port and generates virtual event to
        trigger data parsing and widget updates.
        """
        try:
            while self._reading and self._serial_object:
                if self._serial_object.in_waiting:
                    # Hand off to the Tk main loop; parsing happens in on_read.
                    self.__master.event_generate("<<ubx_read>>")
        except SerialException as err:
            self.__app.set_status(f"Error in read thread {err}", "red")
        # spurious errors as thread shuts down after serial disconnection
        except (TypeError, OSError):
            pass

    def _readfile_thread(self):
        """
        THREADED PROCESS
        Reads binary data from datalog file and generates virtual event to
        trigger data parsing and widget updates.
        """
        while self._reading and self._serial_object:
            self.__master.event_generate("<<ubx_readfile>>")

    def on_read(self, event):  # pylint: disable=unused-argument
        """
        Action on <<ubx_read>> event - read any data in the buffer.
        :param event event: read event
        """
        if self._reading and self._serial_object is not None:
            try:
                self._parse_data(self._serial_buffer)
            except SerialException as err:
                self.__app.set_status(f"Error {err}", "red")

    def on_eof(self, event):  # pylint: disable=unused-argument
        """
        Action on end of file
        :param event event: eof event
        """
        self.disconnect()
        self.__app.set_status(ENDOFFILE, "blue")

    def _parse_data(self, stream: object):
        """
        Read the binary data and direct to the appropriate
        UBX and/or NMEA protocol handler, depending on which protocols
        are filtered.
        :param Serial ser: serial port
        """
        parsing = True
        raw_data = None
        parsed_data = None
        protfilter = self.__app.frm_settings.protocol
        try:
            while parsing:  # loop until end of valid UBX/NMEA message or EOF
                byte1 = stream.read(1)  # read first byte to determine protocol
                if len(byte1) < 1:
                    raise EOFError
                if byte1 not in (
                    b"\xb5",
                    b"\x24",
                ):  # not UBX or NMEA, discard and continue
                    continue
                byte2 = stream.read(1)
                if len(byte2) < 1:
                    raise EOFError
                # if it's a UBX message (b'\b5\x62')
                bytehdr = byte1 + byte2
                if bytehdr == ubt.UBX_HDR:
                    # UBX layout: hdr(2) class(1) id(1) len(2, little-endian)
                    # payload(len) checksum(2).
                    byten = stream.read(4)
                    if len(byten) < 4:
                        raise EOFError
                    clsid = byten[0:1]
                    msgid = byten[1:2]
                    lenb = byten[2:4]
                    leni = int.from_bytes(lenb, "little", signed=False)
                    byten = stream.read(leni + 2)
                    if len(byten) < leni + 2:
                        raise EOFError
                    plb = byten[0:leni]
                    cksum = byten[leni : leni + 2]
                    raw_data = bytehdr + clsid + msgid + lenb + plb + cksum
                    parsed_data = UBXReader.parse(raw_data)
                    parsing = False
                # if it's an NMEA GNSS message ('$G' or '$P')
                elif bytehdr in ubt.NMEA_HDR:
                    # NMEA sentences are line-oriented and must end in CRLF.
                    byten = stream.readline()
                    if byten[-2:] != CRLF:
                        raise EOFError
                    raw_data = bytehdr + byten
                    parsed_data = NMEAReader.parse(raw_data)
                    parsing = False
                # else drop it like it's hot
                else:
                    parsing = False
        except EOFError:
            self.__master.event_generate("<<ubx_eof>>")
            return
        except (UBXParseError, NMEAParseError) as err:
            # log errors to console, then continue
            self.__app.frm_console.update_console(bytes(str(err), "utf-8"), err)
            return
        logging.debug("raw: %s parsed: %s", raw_data, parsed_data)
        if raw_data is None or parsed_data is None:
            return
        # Route by detected protocol, honouring the user's protocol filter
        # (bitmask: UBX and/or NMEA).
        msgprot = protocol(raw_data)
        if msgprot == ubt.UBX_PROTOCOL and msgprot & protfilter:
            self.__app.frm_console.update_console(raw_data, parsed_data)
            self.__app.ubx_handler.process_data(raw_data, parsed_data)
        elif msgprot == ubt.NMEA_PROTOCOL and msgprot & protfilter:
            self.__app.frm_console.update_console(raw_data, parsed_data)
            self.__app.nmea_handler.process_data(raw_data, parsed_data)
        elif msgprot == 0 and protfilter == 3:
            # log unknown protocol headers to console, then continue
            self.__app.frm_console.update_console(raw_data, parsed_data)
        # if datalogging, write to log file
        if self.__app.frm_settings.datalogging:
            self.__app.file_handler.write_logfile(raw_data, parsed_data)

    def flush(self):
        """
        Flush input buffer
        """
        if self._serial_buffer is not None:
            self._serial_buffer.flush()
        if self._serial_object is not None:
            # NOTE(review): flushInput() is the deprecated pyserial <3.0 name
            # for reset_input_buffer() — confirm before upgrading pyserial.
            self._serial_object.flushInput()
|
test_signal.py | import enum
import errno
import inspect
import os
import random
import signal
import socket
import statistics
import subprocess
import sys
import threading
import time
import unittest
from test import support
from test.support import os_helper
from test.support.script_helper import assert_python_ok, spawn_python
from test.support import threading_helper
try:
import _testcapi
except ImportError:
_testcapi = None
class GenericTests(unittest.TestCase):
    """Platform-independent sanity checks on the signal module's enums."""

    def test_enums(self):
        # Every SIG* constant must belong to the appropriate enum family.
        for name in dir(signal):
            sig = getattr(signal, name)
            if name in {'SIG_DFL', 'SIG_IGN'}:
                self.assertIsInstance(sig, signal.Handlers)
            elif name in {'SIG_BLOCK', 'SIG_UNBLOCK', 'SIG_SETMASK'}:
                self.assertIsInstance(sig, signal.Sigmasks)
            elif name.startswith('SIG') and not name.startswith('SIG_'):
                self.assertIsInstance(sig, signal.Signals)
            elif name.startswith('CTRL_'):
                # CTRL_* events only exist on Windows.
                self.assertIsInstance(sig, signal.Signals)
                self.assertEqual(sys.platform, "win32")

        # Cross-check each enum against the legacy conversion helper.
        CheckedSignals = enum._old_convert_(
            enum.IntEnum, 'Signals', 'signal',
            lambda name:
                name.isupper()
                and (name.startswith('SIG') and not name.startswith('SIG_'))
                or name.startswith('CTRL_'),
            source=signal,
        )
        enum._test_simple_enum(CheckedSignals, signal.Signals)

        CheckedHandlers = enum._old_convert_(
            enum.IntEnum, 'Handlers', 'signal',
            lambda name: name in ('SIG_DFL', 'SIG_IGN'),
            source=signal,
        )
        enum._test_simple_enum(CheckedHandlers, signal.Handlers)

        # Sigmasks is only defined where pthread_sigmask is available.
        Sigmasks = getattr(signal, 'Sigmasks', None)
        if Sigmasks is not None:
            CheckedSigmasks = enum._old_convert_(
                enum.IntEnum, 'Sigmasks', 'signal',
                lambda name: name in ('SIG_BLOCK', 'SIG_UNBLOCK', 'SIG_SETMASK'),
                source=signal,
            )
            enum._test_simple_enum(CheckedSigmasks, Sigmasks)

    def test_functions_module_attr(self):
        # Issue #27718: If __all__ is not defined all non-builtin functions
        # should have correct __module__ to be displayed by pydoc.
        for name in dir(signal):
            value = getattr(signal, name)
            if inspect.isroutine(value) and not inspect.isbuiltin(value):
                self.assertEqual(value.__module__, 'signal')
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class PosixTests(unittest.TestCase):
    """POSIX-only behaviour of the signal module."""

    def trivial_signal_handler(self, *args):
        # Minimal handler used where only installability matters.
        pass

    def test_out_of_range_signal_number_raises_error(self):
        self.assertRaises(ValueError, signal.getsignal, 4242)

        self.assertRaises(ValueError, signal.signal, 4242,
                          self.trivial_signal_handler)

        self.assertRaises(ValueError, signal.strsignal, 4242)

    def test_setting_signal_handler_to_none_raises_error(self):
        self.assertRaises(TypeError, signal.signal,
                          signal.SIGUSR1, None)

    def test_getsignal(self):
        # Installing a handler returns the previous one, and getsignal
        # round-trips both directions.
        hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler)
        self.assertIsInstance(hup, signal.Handlers)
        self.assertEqual(signal.getsignal(signal.SIGHUP),
                         self.trivial_signal_handler)
        signal.signal(signal.SIGHUP, hup)
        self.assertEqual(signal.getsignal(signal.SIGHUP), hup)

    def test_strsignal(self):
        self.assertIn("Interrupt", signal.strsignal(signal.SIGINT))
        self.assertIn("Terminated", signal.strsignal(signal.SIGTERM))
        self.assertIn("Hangup", signal.strsignal(signal.SIGHUP))

    # Issue 3864, unknown if this affects earlier versions of freebsd also
    def test_interprocess_signal(self):
        dirname = os.path.dirname(__file__)
        script = os.path.join(dirname, 'signalinterproctester.py')
        assert_python_ok(script)

    def test_valid_signals(self):
        s = signal.valid_signals()
        self.assertIsInstance(s, set)
        self.assertIn(signal.Signals.SIGINT, s)
        self.assertIn(signal.Signals.SIGALRM, s)
        self.assertNotIn(0, s)
        self.assertNotIn(signal.NSIG, s)
        self.assertLess(len(s), signal.NSIG)

        # gh-91145: Make sure that all SIGxxx constants exposed by the Python
        # signal module have a number in the [0; signal.NSIG-1] range.
        for name in dir(signal):
            if not name.startswith("SIG"):
                continue
            with self.subTest(name=name):
                signum = getattr(signal, name)
                self.assertGreaterEqual(signum, 0)
                self.assertLess(signum, signal.NSIG)

    @unittest.skipUnless(sys.executable, "sys.executable required.")
    @support.requires_subprocess()
    def test_keyboard_interrupt_exit_code(self):
        """KeyboardInterrupt triggers exit via SIGINT."""
        process = subprocess.run(
            [sys.executable, "-c",
             "import os, signal, time\n"
             "os.kill(os.getpid(), signal.SIGINT)\n"
             "for _ in range(999): time.sleep(0.01)"],
            stderr=subprocess.PIPE)
        self.assertIn(b"KeyboardInterrupt", process.stderr)
        self.assertEqual(process.returncode, -signal.SIGINT)
        # Caveat: The exit code is insufficient to guarantee we actually died
        # via a signal.  POSIX shells do more than look at the 8 bit value.
        # Writing an automation friendly test of an interactive shell
        # to confirm that our process died via a SIGINT proved too complex.
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
class WindowsSignalTests(unittest.TestCase):
    """Windows-only behaviour of the signal module."""

    def test_valid_signals(self):
        s = signal.valid_signals()
        self.assertIsInstance(s, set)
        self.assertGreaterEqual(len(s), 6)
        self.assertIn(signal.Signals.SIGINT, s)
        self.assertNotIn(0, s)
        self.assertNotIn(signal.NSIG, s)
        self.assertLess(len(s), signal.NSIG)

    def test_issue9324(self):
        # Updated for issue #10003, adding SIGBREAK
        handler = lambda x, y: None
        checked = set()
        for sig in (signal.SIGABRT, signal.SIGBREAK, signal.SIGFPE,
                    signal.SIGILL, signal.SIGINT, signal.SIGSEGV,
                    signal.SIGTERM):
            # Set and then reset a handler for signals that work on windows.
            # Issue #18396, only for signals without a C-level handler.
            if signal.getsignal(sig) is not None:
                signal.signal(sig, signal.signal(sig, handler))
                checked.add(sig)
        # Issue #18396: Ensure the above loop at least tested *something*
        self.assertTrue(checked)

        with self.assertRaises(ValueError):
            signal.signal(-1, handler)

        with self.assertRaises(ValueError):
            signal.signal(7, handler)

    @unittest.skipUnless(sys.executable, "sys.executable required.")
    @support.requires_subprocess()
    def test_keyboard_interrupt_exit_code(self):
        """KeyboardInterrupt triggers an exit using STATUS_CONTROL_C_EXIT."""
        # We don't test via os.kill(os.getpid(), signal.CTRL_C_EVENT) here
        # as that requires setting up a console control handler in a child
        # in its own process group.  Doable, but quite complicated.  (see
        # @eryksun on https://github.com/python/cpython/pull/11862)
        process = subprocess.run(
            [sys.executable, "-c", "raise KeyboardInterrupt"],
            stderr=subprocess.PIPE)
        self.assertIn(b"KeyboardInterrupt", process.stderr)
        STATUS_CONTROL_C_EXIT = 0xC000013A
        self.assertEqual(process.returncode, STATUS_CONTROL_C_EXIT)
class WakeupFDTests(unittest.TestCase):
    """Argument validation and result semantics of signal.set_wakeup_fd()."""

    def test_invalid_call(self):
        # First parameter is positional-only
        with self.assertRaises(TypeError):
            signal.set_wakeup_fd(signum=signal.SIGINT)

        # warn_on_full_buffer is a keyword-only parameter
        with self.assertRaises(TypeError):
            signal.set_wakeup_fd(signal.SIGINT, False)

    def test_invalid_fd(self):
        fd = os_helper.make_bad_fd()
        self.assertRaises((ValueError, OSError),
                          signal.set_wakeup_fd, fd)

    def test_invalid_socket(self):
        sock = socket.socket()
        fd = sock.fileno()
        sock.close()
        self.assertRaises((ValueError, OSError),
                          signal.set_wakeup_fd, fd)

    # Emscripten does not support fstat on pipes yet.
    # https://github.com/emscripten-core/emscripten/issues/16414
    @unittest.skipIf(support.is_emscripten, "Emscripten cannot fstat pipes.")
    def test_set_wakeup_fd_result(self):
        # set_wakeup_fd returns the previously-set fd (-1 when none).
        r1, w1 = os.pipe()
        self.addCleanup(os.close, r1)
        self.addCleanup(os.close, w1)
        r2, w2 = os.pipe()
        self.addCleanup(os.close, r2)
        self.addCleanup(os.close, w2)

        if hasattr(os, 'set_blocking'):
            os.set_blocking(w1, False)
            os.set_blocking(w2, False)

        signal.set_wakeup_fd(w1)
        self.assertEqual(signal.set_wakeup_fd(w2), w1)
        self.assertEqual(signal.set_wakeup_fd(-1), w2)
        self.assertEqual(signal.set_wakeup_fd(-1), -1)

    @unittest.skipIf(support.is_emscripten, "Emscripten cannot fstat pipes.")
    def test_set_wakeup_fd_socket_result(self):
        # Same round-trip semantics with socket file descriptors.
        sock1 = socket.socket()
        self.addCleanup(sock1.close)
        sock1.setblocking(False)
        fd1 = sock1.fileno()

        sock2 = socket.socket()
        self.addCleanup(sock2.close)
        sock2.setblocking(False)
        fd2 = sock2.fileno()

        signal.set_wakeup_fd(fd1)
        self.assertEqual(signal.set_wakeup_fd(fd2), fd1)
        self.assertEqual(signal.set_wakeup_fd(-1), fd2)
        self.assertEqual(signal.set_wakeup_fd(-1), -1)

    # On Windows, files are always blocking and Windows does not provide a
    # function to test if a socket is in non-blocking mode.
    @unittest.skipIf(sys.platform == "win32", "tests specific to POSIX")
    @unittest.skipIf(support.is_emscripten, "Emscripten cannot fstat pipes.")
    def test_set_wakeup_fd_blocking(self):
        rfd, wfd = os.pipe()
        self.addCleanup(os.close, rfd)
        self.addCleanup(os.close, wfd)

        # fd must be non-blocking
        os.set_blocking(wfd, True)
        with self.assertRaises(ValueError) as cm:
            signal.set_wakeup_fd(wfd)
        self.assertEqual(str(cm.exception),
                         "the fd %s must be in non-blocking mode" % wfd)

        # non-blocking is ok
        os.set_blocking(wfd, False)
        signal.set_wakeup_fd(wfd)
        signal.set_wakeup_fd(-1)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class WakeupSignalTests(unittest.TestCase):
    """Tests that signal numbers are written to the wakeup fd (pipe-based)."""

    @unittest.skipIf(_testcapi is None, 'need _testcapi')
    def check_wakeup(self, test_body, *signals, ordered=True):
        """Run *test_body* in a subprocess and verify the expected signal
        numbers were written to the wakeup pipe (as a set when not ordered)."""
        # use a subprocess to have only one thread
        code = """if 1:
        import _testcapi
        import os
        import signal
        import struct

        signals = {!r}

        def handler(signum, frame):
            pass

        def check_signum(signals):
            data = os.read(read, len(signals)+1)
            raised = struct.unpack('%uB' % len(data), data)
            if not {!r}:
                raised = set(raised)
                signals = set(signals)
            if raised != signals:
                raise Exception("%r != %r" % (raised, signals))

        {}

        signal.signal(signal.SIGALRM, handler)
        read, write = os.pipe()
        os.set_blocking(write, False)
        signal.set_wakeup_fd(write)

        test()
        check_signum(signals)

        os.close(read)
        os.close(write)
        """.format(tuple(map(int, signals)), ordered, test_body)

        assert_python_ok('-c', code)

    @unittest.skipIf(_testcapi is None, 'need _testcapi')
    def test_wakeup_write_error(self):
        # Issue #16105: write() errors in the C signal handler should not
        # pass silently.
        # Use a subprocess to have only one thread.
        code = """if 1:
        import _testcapi
        import errno
        import os
        import signal
        import sys
        from test.support import captured_stderr

        def handler(signum, frame):
            1/0

        signal.signal(signal.SIGALRM, handler)
        r, w = os.pipe()
        os.set_blocking(r, False)

        # Set wakeup_fd a read-only file descriptor to trigger the error
        signal.set_wakeup_fd(r)
        try:
            with captured_stderr() as err:
                signal.raise_signal(signal.SIGALRM)
        except ZeroDivisionError:
            # An ignored exception should have been printed out on stderr
            err = err.getvalue()
            if ('Exception ignored when trying to write to the signal wakeup fd'
                not in err):
                raise AssertionError(err)
            if ('OSError: [Errno %d]' % errno.EBADF) not in err:
                raise AssertionError(err)
        else:
            raise AssertionError("ZeroDivisionError not raised")

        os.close(r)
        os.close(w)
        """
        # Skip if this OS doesn't report write() errors on the read end.
        r, w = os.pipe()
        try:
            os.write(r, b'x')
        except OSError:
            pass
        else:
            self.skipTest("OS doesn't report write() error on the read end of a pipe")
        finally:
            os.close(r)
            os.close(w)

        assert_python_ok('-c', code)

    def test_wakeup_fd_early(self):
        self.check_wakeup("""def test():
            import select
            import time

            TIMEOUT_FULL = 10
            TIMEOUT_HALF = 5

            class InterruptSelect(Exception):
                pass

            def handler(signum, frame):
                raise InterruptSelect
            signal.signal(signal.SIGALRM, handler)

            signal.alarm(1)

            # We attempt to get a signal during the sleep,
            # before select is called
            try:
                select.select([], [], [], TIMEOUT_FULL)
            except InterruptSelect:
                pass
            else:
                raise Exception("select() was not interrupted")

            before_time = time.monotonic()
            select.select([read], [], [], TIMEOUT_FULL)
            after_time = time.monotonic()
            dt = after_time - before_time
            if dt >= TIMEOUT_HALF:
                raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
        """, signal.SIGALRM)

    def test_wakeup_fd_during(self):
        self.check_wakeup("""def test():
            import select
            import time

            TIMEOUT_FULL = 10
            TIMEOUT_HALF = 5

            class InterruptSelect(Exception):
                pass

            def handler(signum, frame):
                raise InterruptSelect
            signal.signal(signal.SIGALRM, handler)

            signal.alarm(1)
            before_time = time.monotonic()
            # We attempt to get a signal during the select call
            try:
                select.select([read], [], [], TIMEOUT_FULL)
            except InterruptSelect:
                pass
            else:
                raise Exception("select() was not interrupted")
            after_time = time.monotonic()
            dt = after_time - before_time
            if dt >= TIMEOUT_HALF:
                raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
        """, signal.SIGALRM)

    def test_signum(self):
        self.check_wakeup("""def test():
            signal.signal(signal.SIGUSR1, handler)
            signal.raise_signal(signal.SIGUSR1)
            signal.raise_signal(signal.SIGALRM)
        """, signal.SIGUSR1, signal.SIGALRM)

    @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
                         'need signal.pthread_sigmask()')
    def test_pending(self):
        self.check_wakeup("""def test():
            signum1 = signal.SIGUSR1
            signum2 = signal.SIGUSR2

            signal.signal(signum1, handler)
            signal.signal(signum2, handler)

            signal.pthread_sigmask(signal.SIG_BLOCK, (signum1, signum2))
            signal.raise_signal(signum1)
            signal.raise_signal(signum2)
            # Unblocking the 2 signals calls the C signal handler twice
            signal.pthread_sigmask(signal.SIG_UNBLOCK, (signum1, signum2))
        """, signal.SIGUSR1, signal.SIGUSR2, ordered=False)
@unittest.skipUnless(hasattr(socket, 'socketpair'), 'need socket.socketpair')
class WakeupSocketSignalTests(unittest.TestCase):
    """Tests for socket-based wakeup fds (send() path, buffer-full warnings)."""

    @unittest.skipIf(_testcapi is None, 'need _testcapi')
    def test_socket(self):
        # use a subprocess to have only one thread
        code = """if 1:
        import signal
        import socket
        import struct
        import _testcapi

        signum = signal.SIGINT
        signals = (signum,)

        def handler(signum, frame):
            pass

        signal.signal(signum, handler)

        read, write = socket.socketpair()
        write.setblocking(False)
        signal.set_wakeup_fd(write.fileno())

        signal.raise_signal(signum)

        data = read.recv(1)
        if not data:
            raise Exception("no signum written")
        raised = struct.unpack('B', data)
        if raised != signals:
            raise Exception("%r != %r" % (raised, signals))

        read.close()
        write.close()
        """

        assert_python_ok('-c', code)

    @unittest.skipIf(_testcapi is None, 'need _testcapi')
    def test_send_error(self):
        # Use a subprocess to have only one thread.
        # The C handler uses send() on Windows and write() elsewhere,
        # and the warning message differs accordingly.
        if os.name == 'nt':
            action = 'send'
        else:
            action = 'write'
        code = """if 1:
        import errno
        import signal
        import socket
        import sys
        import time
        import _testcapi
        from test.support import captured_stderr

        signum = signal.SIGINT

        def handler(signum, frame):
            pass

        signal.signal(signum, handler)

        read, write = socket.socketpair()
        read.setblocking(False)
        write.setblocking(False)

        signal.set_wakeup_fd(write.fileno())

        # Close sockets: send() will fail
        read.close()
        write.close()

        with captured_stderr() as err:
            signal.raise_signal(signum)

        err = err.getvalue()
        if ('Exception ignored when trying to {action} to the signal wakeup fd'
            not in err):
            raise AssertionError(err)
        """.format(action=action)
        assert_python_ok('-c', code)

    @unittest.skipIf(_testcapi is None, 'need _testcapi')
    def test_warn_on_full_buffer(self):
        # Use a subprocess to have only one thread.
        if os.name == 'nt':
            action = 'send'
        else:
            action = 'write'
        code = """if 1:
        import errno
        import signal
        import socket
        import sys
        import time
        import _testcapi
        from test.support import captured_stderr

        signum = signal.SIGINT

        # This handler will be called, but we intentionally won't read from
        # the wakeup fd.
        def handler(signum, frame):
            pass

        signal.signal(signum, handler)

        read, write = socket.socketpair()

        # Fill the socketpair buffer
        if sys.platform == 'win32':
            # bpo-34130: On Windows, sometimes non-blocking send fails to fill
            # the full socketpair buffer, so use a timeout of 50 ms instead.
            write.settimeout(0.050)
        else:
            write.setblocking(False)

        written = 0
        if sys.platform == "vxworks":
            CHUNK_SIZES = (1,)
        else:
            # Start with large chunk size to reduce the
            # number of send needed to fill the buffer.
            CHUNK_SIZES = (2 ** 16, 2 ** 8, 1)
        for chunk_size in CHUNK_SIZES:
            chunk = b"x" * chunk_size
            try:
                while True:
                    write.send(chunk)
                    written += chunk_size
            except (BlockingIOError, TimeoutError):
                pass

        print(f"%s bytes written into the socketpair" % written, flush=True)

        write.setblocking(False)
        try:
            write.send(b"x")
        except BlockingIOError:
            # The socketpair buffer seems full
            pass
        else:
            raise AssertionError("%s bytes failed to fill the socketpair "
                                 "buffer" % written)

        # By default, we get a warning when a signal arrives
        msg = ('Exception ignored when trying to {action} '
               'to the signal wakeup fd')
        signal.set_wakeup_fd(write.fileno())

        with captured_stderr() as err:
            signal.raise_signal(signum)

        err = err.getvalue()
        if msg not in err:
            raise AssertionError("first set_wakeup_fd() test failed, "
                                 "stderr: %r" % err)

        # And also if warn_on_full_buffer=True
        signal.set_wakeup_fd(write.fileno(), warn_on_full_buffer=True)

        with captured_stderr() as err:
            signal.raise_signal(signum)

        err = err.getvalue()
        if msg not in err:
            raise AssertionError("set_wakeup_fd(warn_on_full_buffer=True) "
                                 "test failed, stderr: %r" % err)

        # But not if warn_on_full_buffer=False
        signal.set_wakeup_fd(write.fileno(), warn_on_full_buffer=False)

        with captured_stderr() as err:
            signal.raise_signal(signum)

        err = err.getvalue()
        if err != "":
            raise AssertionError("set_wakeup_fd(warn_on_full_buffer=False) "
                                 "test failed, stderr: %r" % err)

        # And then check the default again, to make sure warn_on_full_buffer
        # settings don't leak across calls.
        signal.set_wakeup_fd(write.fileno())

        with captured_stderr() as err:
            signal.raise_signal(signum)

        err = err.getvalue()
        if msg not in err:
            raise AssertionError("second set_wakeup_fd() test failed, "
                                 "stderr: %r" % err)
        """.format(action=action)
        assert_python_ok('-c', code)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
@unittest.skipUnless(hasattr(signal, 'siginterrupt'), "needs signal.siginterrupt()")
@support.requires_subprocess()
class SiginterruptTest(unittest.TestCase):
    """Tests for signal.siginterrupt() syscall-restart semantics."""

    def readpipe_interrupted(self, interrupt):
        """Perform a read during which a signal will arrive.  Return True if the
        read is interrupted by the signal and raises an exception.  Return False
        if it returns normally.
        """
        # use a subprocess to have only one thread, to have a timeout on the
        # blocking read and to not touch signal handling in this process
        code = """if 1:
            import errno
            import os
            import signal
            import sys

            interrupt = %r
            r, w = os.pipe()

            def handler(signum, frame):
                1 / 0

            signal.signal(signal.SIGALRM, handler)
            if interrupt is not None:
                signal.siginterrupt(signal.SIGALRM, interrupt)

            print("ready")
            sys.stdout.flush()

            # run the test twice
            try:
                for loop in range(2):
                    # send a SIGALRM in a second (during the read)
                    signal.alarm(1)
                    try:
                        # blocking call: read from a pipe without data
                        os.read(r, 1)
                    except ZeroDivisionError:
                        pass
                    else:
                        sys.exit(2)
                sys.exit(3)
            finally:
                os.close(r)
                os.close(w)
        """ % (interrupt,)
        with spawn_python('-c', code) as process:
            try:
                # wait until the child process is loaded and has started
                first_line = process.stdout.readline()

                stdout, stderr = process.communicate(timeout=support.SHORT_TIMEOUT)
            except subprocess.TimeoutExpired:
                process.kill()
                return False
            else:
                stdout = first_line + stdout
                exitcode = process.wait()
                # Exit code 3 means the read was interrupted both times;
                # exit code 2 means a read returned normally.
                if exitcode not in (2, 3):
                    raise Exception("Child error (exit code %s): %r"
                                    % (exitcode, stdout))
                return (exitcode == 3)

    def test_without_siginterrupt(self):
        # If a signal handler is installed and siginterrupt is not called
        # at all, when that signal arrives, it interrupts a syscall that's in
        # progress.
        interrupted = self.readpipe_interrupted(None)
        self.assertTrue(interrupted)

    def test_siginterrupt_on(self):
        # If a signal handler is installed and siginterrupt is called with
        # a true value for the second argument, when that signal arrives, it
        # interrupts a syscall that's in progress.
        interrupted = self.readpipe_interrupted(True)
        self.assertTrue(interrupted)

    def test_siginterrupt_off(self):
        # If a signal handler is installed and siginterrupt is called with
        # a false value for the second argument, when that signal arrives, it
        # does not interrupt a syscall that's in progress.
        interrupted = self.readpipe_interrupted(False)
        self.assertFalse(interrupted)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
@unittest.skipUnless(hasattr(signal, 'getitimer') and hasattr(signal, 'setitimer'),
                     "needs signal.getitimer() and signal.setitimer()")
class ItimerTest(unittest.TestCase):
    """Tests for the interval timers (ITIMER_REAL/VIRTUAL/PROF)."""

    def setUp(self):
        self.hndl_called = False
        self.hndl_count = 0
        self.itimer = None
        # Save the previous SIGALRM handler so tearDown can restore it.
        self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm)

    def tearDown(self):
        signal.signal(signal.SIGALRM, self.old_alarm)
        if self.itimer is not None:  # test_itimer_exc doesn't change this attr
            # just ensure that itimer is stopped
            signal.setitimer(self.itimer, 0)

    def sig_alrm(self, *args):
        self.hndl_called = True

    def sig_vtalrm(self, *args):
        self.hndl_called = True

        if self.hndl_count > 3:
            # it shouldn't be here, because it should have been disabled.
            raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL "
                                     "timer.")
        elif self.hndl_count == 3:
            # disable ITIMER_VIRTUAL, this function shouldn't be called anymore
            signal.setitimer(signal.ITIMER_VIRTUAL, 0)

        self.hndl_count += 1

    def sig_prof(self, *args):
        self.hndl_called = True
        signal.setitimer(signal.ITIMER_PROF, 0)

    def test_itimer_exc(self):
        # XXX I'm assuming -1 is an invalid itimer, but maybe some platform
        # defines it ?
        self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0)
        # Negative times are treated as zero on some platforms.
        if 0:
            self.assertRaises(signal.ItimerError,
                              signal.setitimer, signal.ITIMER_REAL, -1)

    def test_itimer_real(self):
        self.itimer = signal.ITIMER_REAL
        signal.setitimer(self.itimer, 1.0)
        signal.pause()
        self.assertEqual(self.hndl_called, True)

    # Issue 3864, unknown if this affects earlier versions of freebsd also
    @unittest.skipIf(sys.platform in ('netbsd5',),
        'itimer not reliable (does not mix well with threading) on some BSDs.')
    def test_itimer_virtual(self):
        self.itimer = signal.ITIMER_VIRTUAL
        signal.signal(signal.SIGVTALRM, self.sig_vtalrm)
        signal.setitimer(self.itimer, 0.3, 0.2)

        start_time = time.monotonic()
        while time.monotonic() - start_time < 60.0:
            # use up some virtual time by doing real work
            _ = pow(12345, 67890, 10000019)
            if signal.getitimer(self.itimer) == (0.0, 0.0):
                break  # sig_vtalrm handler stopped this itimer
        else:  # Issue 8424
            self.skipTest("timeout: likely cause: machine too slow or load too "
                          "high")

        # virtual itimer should be (0.0, 0.0) now
        self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
        # and the handler should have been called
        self.assertEqual(self.hndl_called, True)

    def test_itimer_prof(self):
        self.itimer = signal.ITIMER_PROF
        signal.signal(signal.SIGPROF, self.sig_prof)
        signal.setitimer(self.itimer, 0.2, 0.2)

        start_time = time.monotonic()
        while time.monotonic() - start_time < 60.0:
            # do some work
            _ = pow(12345, 67890, 10000019)
            if signal.getitimer(self.itimer) == (0.0, 0.0):
                break  # sig_prof handler stopped this itimer
        else:  # Issue 8424
            self.skipTest("timeout: likely cause: machine too slow or load too "
                          "high")

        # profiling itimer should be (0.0, 0.0) now
        self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
        # and the handler should have been called
        self.assertEqual(self.hndl_called, True)

    def test_setitimer_tiny(self):
        # bpo-30807: C setitimer() takes a microsecond-resolution interval.
        # Check that float -> timeval conversion doesn't round
        # the interval down to zero, which would disable the timer.
        self.itimer = signal.ITIMER_REAL
        signal.setitimer(self.itimer, 1e-6)
        time.sleep(1)
        self.assertEqual(self.hndl_called, True)
class PendingSignalsTests(unittest.TestCase):
"""
Test pthread_sigmask(), pthread_kill(), sigpending() and sigwait()
functions.
"""
@unittest.skipUnless(hasattr(signal, 'sigpending'),
'need signal.sigpending()')
def test_sigpending_empty(self):
self.assertEqual(signal.sigpending(), set())
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
@unittest.skipUnless(hasattr(signal, 'sigpending'),
'need signal.sigpending()')
def test_sigpending(self):
code = """if 1:
import os
import signal
def handler(signum, frame):
1/0
signum = signal.SIGUSR1
signal.signal(signum, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
os.kill(os.getpid(), signum)
pending = signal.sigpending()
for sig in pending:
assert isinstance(sig, signal.Signals), repr(pending)
if pending != {signum}:
raise Exception('%s != {%s}' % (pending, signum))
try:
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
"""
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'pthread_kill'),
'need signal.pthread_kill()')
@threading_helper.requires_working_threading()
def test_pthread_kill(self):
code = """if 1:
import signal
import threading
import sys
signum = signal.SIGUSR1
def handler(signum, frame):
1/0
signal.signal(signum, handler)
tid = threading.get_ident()
try:
signal.pthread_kill(tid, signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
"""
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def wait_helper(self, blocked, test):
"""
test: body of the "def test(signum):" function.
blocked: number of the blocked signal
"""
code = '''if 1:
import signal
import sys
from signal import Signals
def handler(signum, frame):
1/0
%s
blocked = %s
signum = signal.SIGALRM
# child: block and wait the signal
try:
signal.signal(signum, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, [blocked])
# Do the tests
test(signum)
# The handler must not be called on unblock
try:
signal.pthread_sigmask(signal.SIG_UNBLOCK, [blocked])
except ZeroDivisionError:
print("the signal handler has been called",
file=sys.stderr)
sys.exit(1)
except BaseException as err:
print("error: {}".format(err), file=sys.stderr)
sys.stderr.flush()
sys.exit(1)
''' % (test.strip(), blocked)
# sig*wait* must be called with the signal blocked: since the current
# process might have several threads running, use a subprocess to have
# a single thread.
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'sigwait'),
'need signal.sigwait()')
def test_sigwait(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
received = signal.sigwait([signum])
assert isinstance(received, signal.Signals), received
if received != signum:
raise Exception('received %s, not %s' % (received, signum))
''')
@unittest.skipUnless(hasattr(signal, 'sigwaitinfo'),
'need signal.sigwaitinfo()')
def test_sigwaitinfo(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
info = signal.sigwaitinfo([signum])
if info.si_signo != signum:
raise Exception("info.si_signo != %s" % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
info = signal.sigtimedwait([signum], 10.1000)
if info.si_signo != signum:
raise Exception('info.si_signo != %s' % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_poll(self):
# check that polling with sigtimedwait works
self.wait_helper(signal.SIGALRM, '''
def test(signum):
import os
os.kill(os.getpid(), signum)
info = signal.sigtimedwait([signum], 0)
if info.si_signo != signum:
raise Exception('info.si_signo != %s' % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_timeout(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
received = signal.sigtimedwait([signum], 1.0)
if received is not None:
raise Exception("received=%r" % (received,))
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_negative_timeout(self):
signum = signal.SIGALRM
self.assertRaises(ValueError, signal.sigtimedwait, [signum], -1.0)
@unittest.skipUnless(hasattr(signal, 'sigwait'),
'need signal.sigwait()')
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
@threading_helper.requires_working_threading()
def test_sigwait_thread(self):
# Check that calling sigwait() from a thread doesn't suspend the whole
# process. A new interpreter is spawned to avoid problems when mixing
# threads and fork(): only async-safe functions are allowed between
# fork() and exec().
assert_python_ok("-c", """if True:
import os, threading, sys, time, signal
# the default handler terminates the process
signum = signal.SIGUSR1
def kill_later():
# wait until the main thread is waiting in sigwait()
time.sleep(1)
os.kill(os.getpid(), signum)
# the signal must be blocked by all the threads
signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
killer = threading.Thread(target=kill_later)
killer.start()
received = signal.sigwait([signum])
if received != signum:
print("sigwait() received %s, not %s" % (received, signum),
file=sys.stderr)
sys.exit(1)
killer.join()
# unblock the signal, which should have been cleared by sigwait()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
""")
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pthread_sigmask_arguments(self):
self.assertRaises(TypeError, signal.pthread_sigmask)
self.assertRaises(TypeError, signal.pthread_sigmask, 1)
self.assertRaises(TypeError, signal.pthread_sigmask, 1, 2, 3)
self.assertRaises(OSError, signal.pthread_sigmask, 1700, [])
with self.assertRaises(ValueError):
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.NSIG])
with self.assertRaises(ValueError):
signal.pthread_sigmask(signal.SIG_BLOCK, [0])
with self.assertRaises(ValueError):
signal.pthread_sigmask(signal.SIG_BLOCK, [1<<1000])
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pthread_sigmask_valid_signals(self):
s = signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
self.addCleanup(signal.pthread_sigmask, signal.SIG_SETMASK, s)
# Get current blocked set
s = signal.pthread_sigmask(signal.SIG_UNBLOCK, signal.valid_signals())
self.assertLessEqual(s, signal.valid_signals())
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
@threading_helper.requires_working_threading()
def test_pthread_sigmask(self):
code = """if 1:
import signal
import os; import threading
def handler(signum, frame):
1/0
def kill(signum):
os.kill(os.getpid(), signum)
def check_mask(mask):
for sig in mask:
assert isinstance(sig, signal.Signals), repr(sig)
def read_sigmask():
sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, [])
check_mask(sigmask)
return sigmask
signum = signal.SIGUSR1
# Install our signal handler
old_handler = signal.signal(signum, handler)
# Unblock SIGUSR1 (and copy the old mask) to test our signal handler
old_mask = signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
check_mask(old_mask)
try:
kill(signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
# Block and then raise SIGUSR1. The signal is blocked: the signal
# handler is not called, and the signal is now pending
mask = signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
check_mask(mask)
kill(signum)
# Check the new mask
blocked = read_sigmask()
check_mask(blocked)
if signum not in blocked:
raise Exception("%s not in %s" % (signum, blocked))
if old_mask ^ blocked != {signum}:
raise Exception("%s ^ %s != {%s}" % (old_mask, blocked, signum))
# Unblock SIGUSR1
try:
# unblock the pending signal calls immediately the signal handler
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
try:
kill(signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
# Check the new mask
unblocked = read_sigmask()
if signum in unblocked:
raise Exception("%s in %s" % (signum, unblocked))
if blocked ^ unblocked != {signum}:
raise Exception("%s ^ %s != {%s}" % (blocked, unblocked, signum))
if old_mask != unblocked:
raise Exception("%s != %s" % (old_mask, unblocked))
"""
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'pthread_kill'),
'need signal.pthread_kill()')
@threading_helper.requires_working_threading()
def test_pthread_kill_main_thread(self):
# Test that a signal can be sent to the main thread with pthread_kill()
# before any other thread has been created (see issue #12392).
code = """if True:
import threading
import signal
import sys
def handler(signum, frame):
sys.exit(3)
signal.signal(signal.SIGUSR1, handler)
signal.pthread_kill(threading.get_ident(), signal.SIGUSR1)
sys.exit(2)
"""
with spawn_python('-c', code) as process:
stdout, stderr = process.communicate()
exitcode = process.wait()
if exitcode != 3:
raise Exception("Child error (exit code %s): %s" %
(exitcode, stdout))
class StressTest(unittest.TestCase):
"""
Stress signal delivery, especially when a signal arrives in
the middle of recomputing the signal state or executing
previously tripped signal handlers.
"""
def setsig(self, signum, handler):
old_handler = signal.signal(signum, handler)
self.addCleanup(signal.signal, signum, old_handler)
def measure_itimer_resolution(self):
N = 20
times = []
def handler(signum=None, frame=None):
if len(times) < N:
times.append(time.perf_counter())
# 1 µs is the smallest possible timer interval,
# we want to measure what the concrete duration
# will be on this platform
signal.setitimer(signal.ITIMER_REAL, 1e-6)
self.addCleanup(signal.setitimer, signal.ITIMER_REAL, 0)
self.setsig(signal.SIGALRM, handler)
handler()
while len(times) < N:
time.sleep(1e-3)
durations = [times[i+1] - times[i] for i in range(len(times) - 1)]
med = statistics.median(durations)
if support.verbose:
print("detected median itimer() resolution: %.6f s." % (med,))
return med
def decide_itimer_count(self):
# Some systems have poor setitimer() resolution (for example
# measured around 20 ms. on FreeBSD 9), so decide on a reasonable
# number of sequential timers based on that.
reso = self.measure_itimer_resolution()
if reso <= 1e-4:
return 10000
elif reso <= 1e-2:
return 100
else:
self.skipTest("detected itimer resolution (%.3f s.) too high "
"(> 10 ms.) on this platform (or system too busy)"
% (reso,))
@unittest.skipUnless(hasattr(signal, "setitimer"),
"test needs setitimer()")
def test_stress_delivery_dependent(self):
"""
This test uses dependent signal handlers.
"""
N = self.decide_itimer_count()
sigs = []
def first_handler(signum, frame):
# 1e-6 is the minimum non-zero value for `setitimer()`.
# Choose a random delay so as to improve chances of
# triggering a race condition. Ideally the signal is received
# when inside critical signal-handling routines such as
# Py_MakePendingCalls().
signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5)
def second_handler(signum=None, frame=None):
sigs.append(signum)
# Here on Linux, SIGPROF > SIGALRM > SIGUSR1. By using both
# ascending and descending sequences (SIGUSR1 then SIGALRM,
# SIGPROF then SIGALRM), we maximize chances of hitting a bug.
self.setsig(signal.SIGPROF, first_handler)
self.setsig(signal.SIGUSR1, first_handler)
self.setsig(signal.SIGALRM, second_handler) # for ITIMER_REAL
expected_sigs = 0
deadline = time.monotonic() + support.SHORT_TIMEOUT
while expected_sigs < N:
os.kill(os.getpid(), signal.SIGPROF)
expected_sigs += 1
# Wait for handlers to run to avoid signal coalescing
while len(sigs) < expected_sigs and time.monotonic() < deadline:
time.sleep(1e-5)
os.kill(os.getpid(), signal.SIGUSR1)
expected_sigs += 1
while len(sigs) < expected_sigs and time.monotonic() < deadline:
time.sleep(1e-5)
# All ITIMER_REAL signals should have been delivered to the
# Python handler
self.assertEqual(len(sigs), N, "Some signals were lost")
@unittest.skipUnless(hasattr(signal, "setitimer"),
"test needs setitimer()")
def test_stress_delivery_simultaneous(self):
"""
This test uses simultaneous signal handlers.
"""
N = self.decide_itimer_count()
sigs = []
def handler(signum, frame):
sigs.append(signum)
self.setsig(signal.SIGUSR1, handler)
self.setsig(signal.SIGALRM, handler) # for ITIMER_REAL
expected_sigs = 0
deadline = time.monotonic() + support.SHORT_TIMEOUT
while expected_sigs < N:
# Hopefully the SIGALRM will be received somewhere during
# initial processing of SIGUSR1.
signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5)
os.kill(os.getpid(), signal.SIGUSR1)
expected_sigs += 2
# Wait for handlers to run to avoid signal coalescing
while len(sigs) < expected_sigs and time.monotonic() < deadline:
time.sleep(1e-5)
# All ITIMER_REAL signals should have been delivered to the
# Python handler
self.assertEqual(len(sigs), N, "Some signals were lost")
@unittest.skipUnless(hasattr(signal, "SIGUSR1"),
"test needs SIGUSR1")
@threading_helper.requires_working_threading()
def test_stress_modifying_handlers(self):
# bpo-43406: race condition between trip_signal() and signal.signal
signum = signal.SIGUSR1
num_sent_signals = 0
num_received_signals = 0
do_stop = False
def custom_handler(signum, frame):
nonlocal num_received_signals
num_received_signals += 1
def set_interrupts():
nonlocal num_sent_signals
while not do_stop:
signal.raise_signal(signum)
num_sent_signals += 1
def cycle_handlers():
while num_sent_signals < 100:
for i in range(20000):
# Cycle between a Python-defined and a non-Python handler
for handler in [custom_handler, signal.SIG_IGN]:
signal.signal(signum, handler)
old_handler = signal.signal(signum, custom_handler)
self.addCleanup(signal.signal, signum, old_handler)
t = threading.Thread(target=set_interrupts)
try:
ignored = False
with support.catch_unraisable_exception() as cm:
t.start()
cycle_handlers()
do_stop = True
t.join()
if cm.unraisable is not None:
# An unraisable exception may be printed out when
# a signal is ignored due to the aforementioned
# race condition, check it.
self.assertIsInstance(cm.unraisable.exc_value, OSError)
self.assertIn(
f"Signal {signum:d} ignored due to race condition",
str(cm.unraisable.exc_value))
ignored = True
# bpo-43406: Even if it is unlikely, it's technically possible that
# all signals were ignored because of race conditions.
if not ignored:
# Sanity check that some signals were received, but not all
self.assertGreater(num_received_signals, 0)
self.assertLess(num_received_signals, num_sent_signals)
finally:
do_stop = True
t.join()
class RaiseSignalTest(unittest.TestCase):
def test_sigint(self):
with self.assertRaises(KeyboardInterrupt):
signal.raise_signal(signal.SIGINT)
@unittest.skipIf(sys.platform != "win32", "Windows specific test")
def test_invalid_argument(self):
try:
SIGHUP = 1 # not supported on win32
signal.raise_signal(SIGHUP)
self.fail("OSError (Invalid argument) expected")
except OSError as e:
if e.errno == errno.EINVAL:
pass
else:
raise
def test_handler(self):
is_ok = False
def handler(a, b):
nonlocal is_ok
is_ok = True
old_signal = signal.signal(signal.SIGINT, handler)
self.addCleanup(signal.signal, signal.SIGINT, old_signal)
signal.raise_signal(signal.SIGINT)
self.assertTrue(is_ok)
class PidfdSignalTest(unittest.TestCase):
@unittest.skipUnless(
hasattr(signal, "pidfd_send_signal"),
"pidfd support not built in",
)
def test_pidfd_send_signal(self):
with self.assertRaises(OSError) as cm:
signal.pidfd_send_signal(0, signal.SIGINT)
if cm.exception.errno == errno.ENOSYS:
self.skipTest("kernel does not support pidfds")
elif cm.exception.errno == errno.EPERM:
self.skipTest("Not enough privileges to use pidfs")
self.assertEqual(cm.exception.errno, errno.EBADF)
my_pidfd = os.open(f'/proc/{os.getpid()}', os.O_DIRECTORY)
self.addCleanup(os.close, my_pidfd)
with self.assertRaisesRegex(TypeError, "^siginfo must be None$"):
signal.pidfd_send_signal(my_pidfd, signal.SIGINT, object(), 0)
with self.assertRaises(KeyboardInterrupt):
signal.pidfd_send_signal(my_pidfd, signal.SIGINT)
def tearDownModule():
support.reap_children()
if __name__ == "__main__":
unittest.main()
|
web.py | import socket
import threading
import queue
import time
import re
import json
import sys
import kafka
from loguru import logger
from rich.console import Console
from kafka import KafkaProducer
console = Console()
config = {
"handlers": [
{"sink": sys.stdout, "level": "INFO"} # , format="{time:YYYY-MM-DD at HH:mm:ss} [{level}] {message}"},
]
}
logger.configure(**config)
TMP_DIR = '/tmp/kafka.api/'
PREFIX_TMP_NAME = 'kfk-'
KAFKA_TOPIC = 'proxy.py'
class InvalidRequest(Exception):
pass
def HTTP_parser(raw):
temp = [i.strip() for i in raw.splitlines()]
if -1 == temp[0].find('HTTP'):
raise InvalidRequest('Incorrect Protocol')
method, path, protocol = [i.strip() for i in temp[0].split()]
headers = {}
if ('GET' == method) or ('POST' == method):
raw_headers = list(
filter(
lambda h: re.findall(r"[\w+|-]{1,20}:(.*)", h),
temp[1:-1]
)
)
for k, v in [i.split(':', 1) for i in raw_headers]:
headers[k.strip()] = v.strip()
else:
raise InvalidRequest('Only accepts GET requests')
try:
body = json.loads(temp[-1])
except json.JSONDecoder:
body = temp[-1]
return method, path, protocol, headers, body
class WEBServer:
def __init__(
self,
host='0.0.0.0',
port=8000,
bootstrap_servers: list or str = None) -> None:
self.host = host
self.port = port
self.socket = None
self.q = queue.Queue(maxsize=100)
self.bootstrap_servers = bootstrap_servers if bootstrap_servers is not None else ['kafka:9092']
self.kafka_enable = False
self.CONNECTION_COUNT = 100
self.PACKET_SIZE = 1024
def up(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.socket.bind((self.host, self.port))
logger.info("server started at -- {0}:{1}".format(self.host, self.port))
except Exception as err:
logger.critical(err)
exit(1)
threading.Thread(target=self.__worker).start()
self.__listen()
def __worker(self):
time.sleep(30)
logger.info('worker is running...')
try:
producer = KafkaProducer(bootstrap_servers=self.bootstrap_servers)
except kafka.errors.NoBrokersAvailable as err:
logger.critical('No Broker Available in %s' % self.bootstrap_servers)
return
else:
self.kafka_enable = True
while True:
try:
body = self.q.get(timeout=3)
except queue.Empty:
time.sleep(1)
else:
try:
future = producer.send(KAFKA_TOPIC, json.dumps(body).encode())
record_metadata = future.get(timeout=10)
except kafka.errors.KafkaTimeoutError as err:
logger.critical('[%s] %s' % (body, err))
except kafka.errors.KafkaError as err:
logger.critical('ERR:%s:%s' % (err, body))
else:
logger.info('successful [%s]' % body)
def __listen(self):
self.socket.listen(self.CONNECTION_COUNT)
while True:
try:
sock_cli, sock_addr = self.socket.accept()
sock_cli.settimeout(10)
threading.Thread(target=self.__handleClient, args=(sock_cli, sock_addr)).start()
except KeyboardInterrupt:
logger.info('shutdown... bye bye')
def __handleClient(self, socket_cli, socket_address):
while True:
# raw_data = socket_cli.recv(self.PACKET_SIZE).decode()
buff_size = 4096
raw_data = b''
while True:
part = socket_cli.recv(buff_size)
raw_data += part
if len(part) < buff_size:
break
if not raw_data:
break
raw_data = raw_data.decode('utf-8', errors='ignore')
try:
response = ("HTTP/1.1 200 OK\n"
"Content-Type: text/html\n"
"Server: Python\n"
"Connection: close\n\n"
"{\"code\": 200}")
parser = HTTP_parser(raw_data)
body = parser[-1]
body = {} if not isinstance(body, dict) else body
logger.info(
'%s%s "%s %s%s %s %d" "-" "%s"' % (
'' if not body.get('ip') else '%s|' % (
body.get('ip')[0] if len(body.get('ip')) > 1 else body.get('ip')
),
socket_address[0],
parser[0] if not body.get('method') else body.get('method'),
'' if not body.get('host') else body.get('host'),
parser[1] if not body.get('path') else body.get('path'), parser[2],
len(raw_data),
parser[3].get('User-Agent')
)
)
if self.kafka_enable:
self.q.put(parser[-1])
socket_cli.send(response.encode())
except Exception as err:
logger.error('handler client: %s' % err)
socket_cli.close()
break
def shutdown(self):
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
|
myserver.py | import socket
from queue import Queue
import threading
import random
import os
import pygame
import sys
import select
import time
os.chdir("/Users/yuanzhendong/desktop/tp3")
sys.path.append("/Users/yuanzhendong/desktop/tp3")
from client.myclient import *
class Struct(object): pass
data1 = Struct()
##############credit to Rohan for his tutorial
def init(data1):
data1.start=True
data1.elapsed=0
BACKLOG=4
host =""
port =50003
init(data1)
server=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server.bind((host,port))
server.listen(BACKLOG)
print('wait for connection')
class gameServer():
def __init__(self):
self.bullet=[]
self.players=dict()
self.serverCoin=[]
self.moreCoin=[]
self.mines=[]
self.val=[10,20,30,50,100,500]
self.leftClient=[]
# generate coin for all players
def coinGen(self):
self.moreCoin=[]
playerNum=len(clients)
coins=playerNum*25
if (len(self.serverCoin)==coins):
pass
else:
lacking=coins-len(self.serverCoin)
if (lacking>0):
for eachLack in range(lacking):
randomX,randomY=random.randint(200,2800),random.randint(200,2800)
randVal=random.randint(0,100)
if (randVal<35): type=0
elif (randVal<=60): type=1
elif (randVal<=75): type=2
elif (randVal<=90): type=3
elif (randVal<=98): type=4
else: type=5
self.moreCoin.append((type,randomX,randomY))
for clientID in clients:
sendMsg="coinCome"+" "+str(self.moreCoin)+"\n"
clients[clientID].send(bytes(sendMsg,"UTF-8"))
self.serverCoin+=self.moreCoin
#central position and condition update for each player
def centralPos(self):
pygame.time.delay(55)
for eachID in self.players:
player=self.players[eachID]
tempX,tempY,tempAngle=player.posX,player.posY,player.angle
player.posY-=player.curSpeed*math.sin(conv(player.angle))
player.posX+=player.curSpeed*math.cos(conv(player.angle))
flag=True
for x in range(-40,41,1):
for y in range(-40,41,1):
posX,posY=round(player.posX+y),round(player.posY+x)
if ((posX,posY) in data.map2):
player.blood-=3
flag=False
if not (39<=player.posY<=2961 and 39<=player.posX<=2961):
flag=False
for eachID2 in self.players:
player2=self.players[eachID2]
if (eachID2!=eachID and heruistics(player.posX,player.posY,player2.posX,player2.posY)<80):
flag=False
if (flag==False):
player.posY=tempY
player.posX=tempX
player.curSpeed=4
if (len(self.bullet)>0):
for eachBul in self.bullet:
remainingMove=eachBul[2]
if (remainingMove>0):
bulPixelX=round(eachBul[0][0])
bulPixelY=round(eachBul[0][1])
direction=eachBul[1]
eachBul[0][0]+=25*math.cos(direction)
eachBul[0][1]-=25*math.sin(direction)
eachBul[2]-=1
if (bulPixelX>3000 or bulPixelY>3000 or bulPixelX<0 or bulPixelY<0 or
eachBul[2]<=0 or (bulPixelX,bulPixelY) in data.map2 or remainingMove==0):
self.bullet.remove(eachBul)
def centralCondition(self):
pygame.time.delay(55)
for eachID in self.players:
player=self.players[eachID]
if (player.blood+player.recover>=player.Hull):
player.blood=player.Hull
else:
player.blood+=player.recover
if (player.left==True):
player.angle+=player.turn
elif (player.right==True):
player.angle-=player.turn
if (player.accler==True):
if(player.curSpeed +2 <player.speed):
player.curSpeed+=2
else:
player.curSpeed=player.speed
if (player.decler==True) :
if (player.curSpeed-2 >data.minSpeed):
player.curSpeed-=2
else:
player.curSpeed=data.minSpeed
if (player.blood<=0):
del self.players[eachID]
for clientID in clients:
sendMsg="die"+" "+str(player.name)+"\n"
clients[clientID].send(bytes(sendMsg,"UTF-8"))
def collisionCheck(self):
# if not divine shield
pygame.time.delay(30)
for eachCoin in self.serverCoin:
for eachID in self.players:
eachPlayer=self.players[eachID]
xpos,ypos=eachPlayer.posX,eachPlayer.posY
if (heruistics(eachCoin[1],eachCoin[2],xpos,ypos)<52):
eachPlayer.score+=self.val[eachCoin[0]]
for clientID in clients:
sendMsg="score"+" "+str(eachID)+" "+"%d %d %d"%(eachCoin[0],eachCoin[1],eachCoin[2])+"\n"
clients[clientID].send(bytes(sendMsg,"UTF-8"))
self.serverCoin.remove(eachCoin)
for eachBul in self.bullet:
bulX,bulY,dmg=eachBul[0][0],eachBul[0][1],eachBul[3]
for eachID in self.players:
eachPlayer=self.players[eachID]
xpos,ypos=eachPlayer.posX,eachPlayer.posY
if (heruistics(xpos,ypos,bulX,bulY)<52):
eachPlayer.blood-=eachBul[3]
for clientID in clients:
sendMsg="lessHealth"+" "+str(eachID)+" "+str(eachPlayer.blood)+"\n"
clients[clientID].send(bytes(sendMsg,"UTF-8"))
self.bullet.remove(eachBul)
for eachMine in self.mines:
mX,mY=eachMine[0][0],eachMine[0][1]
for eachID in self.players:
eachPlayer=self.players[eachID]
xpos,ypos=eachPlayer.posX,eachPlayer.posY
if (heruistics(mX,mY,xpos,ypos)<52):
dmg=eachMine[2]
eachPlayer.blood-=dmg
for clientID in clients:
sendMsg="lessHealth"+" "+str(eachID)+" "+str(eachPlayer.blood)+"\n"
clients[clientID].send(bytes(sendMsg,"UTF-8"))
self.mines.remove(eachMine)
#
# for eachID in self.players:
# player1=self.players[eachID]
# for eachID2 in self.players:
# if (eachID2!=eachID):
# player2=self.players[eachID]
# if (heuristics(player1.posX,player1.posY,player2.posX,player.posY)<80):
# player2.health-=player1.collision
# for client in clients:
# sendMsg="lessHealth"+" "+str(eachID2)+" "+str(eachPlayer.health)+"\n"
# clients[clientID].send(bytes(sendMsg,"UTF-8"))
#
#receive the messsage from the client and process to send back
def threaded_client(self,client,channel,clientID,clients):
msg=""
client.setblocking(1)
while True:
rlist,wlist,xlist=select.select([client],[],[])
if (len(rlist)>0):
try:
msg+=client.recv(512).decode('UTF-8')
actualMsg=msg.split("\n")
while (len(actualMsg)>1):
readyMsg=actualMsg[0]
msg="\n".join(actualMsg[1:])
serverChannel.put(str(clientID)+"_"+readyMsg)
actualMsg=msg.split("\n")
except:
clients.pop(clientID)
return
def sendToAll(self):
posMsg=""
for eachID in self.players:
player=self.players[eachID]
posMsg+="pos %d"%eachID+" "+"%d %d %d"%(player.posX,player.posY,player.angle)+"\n"
for clientID in clients:
clients[clientID].send(bytes(posMsg,"UTF-8"))
def serverThread(self,clients,serverChannel):
while True:
msg=serverChannel.get(True,None)
msgProcess=msg.split("_")
senderID=int(msgProcess[0])
useless=len(msgProcess[0])+1
realMsg=msg[useless:]
self.leftClient=[]
if (msg):
for clientID in clients:
if (clientID!=senderID):
if (data1.start==True):
if (realMsg.startswith("readyState")):
sendMsg="state" +" "+str(senderID)+" " +realMsg[11:]+"\n"
clients[clientID].send(bytes(sendMsg,"UTF-8"))
elif (realMsg.startswith("ship")):
sendMsg="inf" +" "+str(senderID)+" " +realMsg[5:]+"\n"
clients[clientID].send(bytes(sendMsg,"UTF-8"))
# info=realMsg.split(" ")
# name,posX,posY,image=str(info[1]),int(info[2]),int(info[3]),str(info[4])
# self.players[senderID]=playerShip(name,posX,posY,image)
elif (realMsg.startswith("game")):
data1.start=False
else:
if (realMsg.startswith("Start")):
real=realMsg[6:]
sendMsg="start" +" "+str(senderID)+" " +real+"\n"
clients[clientID].send(bytes(sendMsg,"UTF-8"))
# player=self.players[senderID]
# real=real.split()
# action=str(real[0])
# if (action=="Acc"):
#
# player.accler=True
# player.decler=False
#
# elif (action=="Dec"):
# player.accler=False
# player.decler=True
# elif (action=="Right"):
#
# player.right=True
# player.left=False
#
# elif (action=="Left"):
# player.left=True
# player.right=False
elif (realMsg.startswith("Stop")):
real=realMsg[5:]
sendMsg="stop" +" "+str(senderID)+" " +real+"\n"
clients[clientID].send(bytes(sendMsg,"UTF-8"))
# player=self.players[senderID]
# real=real.split()
# action=str(real[0])
# # if (action=="Acc"):
#
# player.accler=False
# elif (action=="Dec"): player.decler=False
# elif (action=="Right"): player.right=False
# elif (action=="Left"): player.left=False
#
elif (realMsg.startswith("Upgrade")):
real=realMsg[8:]
sendMsg="upg" +" "+str(senderID)+" " +real+"\n"
clients[clientID].send(bytes(sendMsg,"UTF-8"))
# player=self.players[senderID]
# real=real.split()
# upgrade=str(real[0])
# if (upgrade=="Hull"): player.upgradeHull()
# elif (upgrade=="Range"): player.upgradeRange()
# elif (upgrade=="Turn"): player.upgradeTurn()
# elif (upgrade=="Speed"): player.upgradeSpeed()
# elif(upgrade=="Collision"): player.upgradeCollision()
# elif(upgrade=="Reload"): player.upgradeReload()
# elif(upgrade=="Recover"): player.upgradeRecover()
# elif (upgrade=="Damage"): player.upgradeDamage()
#
elif (realMsg.startswith("Shoot")):
sendMsg = "sho "+" "+str(senderID) + " " + realMsg[6:] + "\n"
clients[clientID].send(bytes(sendMsg,"UTF-8"))
realMsg=realMsg.split()
# pbulX,pbulY=int(realMsg[1]),int(realMsg[2])
# pbulDir=int(realMsg[3])
# pbulMove=int(realMsg[4])
# pbulDmg=int(realMsg[5])
# pName=str(realMsg[6])
# buldata1=[[pbulX,pbulY],pbulDir,pbulMove,pbulDmg,pName]
# self.bullet.append(buldata1)
elif (realMsg.startswith("chat")):
sendMsg="msg"+" "+ str(senderID) + " "+realMsg[5:]+"\n"
clients[clientID].send(bytes(sendMsg,"UTF-8"))
elif (realMsg.startswith("Mine")):
sendMsg = "mn "+" "+str(senderID) + " " + realMsg[5:] + "\n"
clients[clientID].send(bytes(sendMsg,"UTF-8"))
realMsg=realMsg.split()
# pMineX=int(realMsg[1])
# pMineY=int(realMsg[2])
# pName=str(realMsg[3])
# pDmg=int(realMsg[4])
# mndata1=((pMineX,pMineY),pName,pDmg)
# self.mines.append(mndata1)
elif (realMsg.startswith("leave")):
sendMsg = "leave" +" " +str(senderID) + " "+realMsg[5:]+"\n"
clients[clientID].send(bytes(sendMsg,"UTF-8"))
realMsg=realMsg.split()
self.leftClient.append(int(senderID))
for each in self.leftClient:
del clients[each]
serverChannel.task_done()
myserver=gameServer()
clients={}
curID=0
serverChannel=Queue(100)
threading.Thread(target = myserver.serverThread,args = (clients, serverChannel)).start()
# get player information
def clientThread(curID,clients,serverChannel):
while True:
client,addr=server.accept()
for clientID in clients:
client.send(("newPlayer %d\n"%clientID).encode())
clients[clientID].send(("newPlayer %d\n"%curID).encode())
clients[curID]=client
threading.Thread(target = myserver.threaded_client, args =
(client ,serverChannel, curID, clients)).start()
curID+=1
threading.Thread(target = clientThread,args = (curID,clients,serverChannel)).start()
def main():
while True:
clock.tick(60)
if (data1.start==False):
#game server events
data1.elapsed+=1
# myserver.sendToAll()
myserver.coinGen()
# myserver.centralPos()
# myserver.centralCondition()
# myserver.collisionCheck()
if __name__ == '__main__':
main()
|
server backup.py | import tkinter as tk
from tkinter import *
from PIL import Image, ImageTk
import ttk
import tkMessageBox
import socket
import time
import threading
import activeWindows
def server(port, host_username, password, room_name):
    """Launch the chat-room server GUI plus its worker threads.

    Builds a Tk window (Details / Actions / Log tabs), then starts a socket
    server thread accepting clients on `port` and a timer thread showing
    uptime.  Python-2 style code: plain `str` is sent over sockets unencoded.
    """
    def test():
        # placeholder callback for not-yet-implemented toolbar entries
        return

    def server_thread():
        """Accept clients, validate them, and spawn one thread per client."""
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind(("", int(port)))
        clients_list = {}  # username -> socket
        mute_list = []  # usernames whose chat messages are dropped
        deaf_list = []  # usernames that receive no broadcasts
        messages = []  # chat history replayed to new joiners
        print("server_thread is running")
        server.listen(5)
def validate_client(connection):
try:
client_password = connection.recv(2000)
print(client_password)
if client_password == password:
# print("passwords match")
connection.send("1")
else:
# print("passwords do not match")
connection.send("0")
return False
except:
# print("Unable to validate password")
connection.send("0")
connection.close()
return False
try:
client_username = connection.recv(2000)
# print(client_username)
# print("username validated")
if client_username not in clients_list:
connection.send("1")
else:
connection.send("0")
return True
except:
# print("unable to validate username")
connection.send("0")
connection.close()
return False
        def client_thread(client, username):
            """Per-client receive loop: replay history, then relay messages.

            Messages beginning with "***" are control commands handled by
            action_handler; muted users' messages are silently dropped.
            NOTE(review): the bare `except: continue` spins forever once the
            socket errors out -- confirm this is intended.
            """
            print("Running client_thread")
            client.send(room_name)
            # replay the chat history to the new joiner
            for message in messages:
                print(message)
                client.send(message)
            broadcast_message("", username + " has entered the chat \n")
            while True:
                try:
                    message = client.recv(2000)
                    print("message received")
                    if message:
                        if message[0: 3] == "***":
                            print("command received")
                            action_handler(message[3:])
                        elif username not in mute_list:
                            broadcast_message(username, message)
                    else:
                        # empty recv => peer closed the connection
                        client.close()
                        remove(username)
                except:
                    continue
def action_handler(action):
# print("running action handler")
# print(action + " - action")
# print("printing sdfasdfsd")
action_split = action.split(" ", 1)
for action in action_split:
print(action)
command = action_split[0]
parameter = action_split[1]
# print(command)
# print(parameter)
if command == "d":
disconnect(parameter)
def broadcast_message(sender, message):
for client in clients_list:
if client not in deaf_list:
if sender != "":
full_message = sender + ": " + message + "\n"
else:
full_message = message
clients_list[client].send(full_message)
messages.append(full_message)
print("Message broadcasted")
# print(message)
        def remove(connection):
            """Drop `connection` (a username) from the roster and both listboxes."""
            # print(connection)
            if connection in clients_list:
                clients_list.pop(connection, None)
            # print(clients_list)
            # rebuild the two GUI listboxes without the removed user
            details_clients = list(details_server_connections_listbox.get(0, END))
            action_clients = list(actions_server_connections_listbox.get(0, END))
            for client in details_clients:
                if client == connection:
                    details_clients.remove(client)
            for client in action_clients:
                if client == connection:
                    action_clients.remove(client)
            details_server_connections_listbox.delete(0, END)
            actions_server_connections_listbox.delete(0, END)
            for client in details_clients:
                details_server_connections_listbox.insert(END, client)
            for client in action_clients:
                actions_server_connections_listbox.insert(END, client)
            print("Connection removed")

        def change_room_name():
            """Rename the room from the Actions entry and notify all clients."""
            new_room_name = actions_edit_room_name_entry.get()
            actions_edit_room_name_entry.delete(0, END)
            details_chatroom_name_label.config(text=new_room_name)
            # "***rnc <name>" is the client-side room-name-change command
            broadcast_message("", "***rnc " + new_room_name)

        def shutdown():
            """Notify clients ("***sd"), close the socket, destroy the window."""
            try:
                print("shutting down")
                for client in clients_list:
                    clients_list[client].send("***sd")
                # give the sends a moment to flush before tearing down
                time.sleep(1)
                clients_list.clear()
                server.close()
            except:
                print("unable to shut down")
            activeWindows.server_window_isactive = False
            server_window.destroy()

        def disconnect(user):
            """Disconnect `user` ("***d") and announce it."""
            try:
                clients_list[user].send("***d")
                remove(user)
                print("user has disconnected")
                broadcast_message("", user + " has disconnected \n")
            except:
                print("unable to disconnect user")

        def kick():
            """Kick the user selected in the Actions listbox ("***k")."""
            try:
                print("getting selected user")
                user = actions_server_connections_listbox.get(actions_server_connections_listbox.curselection())
                clients_list[user].send("***k")
                remove(user)
                print("user has been kicked")
                broadcast_message("", user + " has been kicked from the chat \n")
            except:
                print("unable to get selected user")

        def mute():
            """Toggle mute for the selected user ("***m"/"***um")."""
            try:
                user = actions_server_connections_listbox.get(actions_server_connections_listbox.curselection())
                if user in mute_list:
                    clients_list[user].send("***um")
                    broadcast_message("", user + " has been unmuted \n")
                    mute_list.remove(user)
                else:
                    clients_list[user].send("***m")
                    broadcast_message("", user + " has been muted \n")
                    mute_list.append(user)
                print("user has been muted/unmuted")
            except:
                print("unable to get selected user")

        def deafen():
            """Toggle deafen for the selected user ("***df"/"***udf")."""
            print("printing user about to be deafened")
            try:
                user = actions_server_connections_listbox.get(actions_server_connections_listbox.curselection())
                if user not in deaf_list:
                    clients_list[user].send("***df")
                    broadcast_message("", user + " has been deafened \n")
                    deaf_list.append(user)
                else:
                    clients_list[user].send("***udf")
                    broadcast_message("", user + " has been undeafened \n")
                    deaf_list.remove(user)
                print("user has been deafened/undeafened")
                print(str(deaf_list))
            except:
                print("unable to get selected user for deafening")

        # wire the Actions-tab buttons to the closures above
        actions_edit_room_name_button.config(command=change_room_name)
        actions_mute_user_button.config(command=mute)
        actions_deafen_user_button.config(command=deafen)
        actions_kick_user_button.config(command=kick)
        actions_shutdown_server_button.config(command=shutdown)
        server_window.protocol("WM_DELETE_WINDOW", shutdown)
        # print("Button has been bound")
        # accept loop: the client's first payload says whether it considers
        # itself already validated ("1"); otherwise run the password/username
        # handshake via validate_client
        while True:
            clientsocket, address = server.accept()
            try:
                validated = clientsocket.recv(2000)
                print("validated: " + validated)
                clientsocket.send("1")
                if bool(int(validated)):
                    client_username = clientsocket.recv(2000)
                    clients_list[client_username] = clientsocket
                    details_server_connections_listbox.insert(END, client_username)
                    actions_server_connections_listbox.insert(END, client_username)
                    clientsocket.send("1")
                    ct = threading.Thread(target=client_thread, args=(clientsocket, client_username))
                    ct.daemon = True
                    ct.start()
                elif not validate_client(clientsocket):
                    clientsocket.close()
            except:
                # accept/recv fails once shutdown() closes the server socket
                break
        print("broke out of loop")
def timer():
timer_counter = [0, 0, 0]
timer_pattern = '{0:02d}:{1:02d}:{2:02d}'
while True:
timer_counter[2] += 1
if timer_counter[2] >= 60:
timer_counter[2] = 0
timer_counter[1] += 1
if timer_counter[1] >= 60:
timer_counter[1] = 0
timer_counter[0] += 1
time_string = timer_pattern.format(timer_counter[0], timer_counter[1], timer_counter[2])
try:
details_server_time_running.config(text=time_string)
except:
pass
time.sleep(1)
    def display_server_details():
        """Fill the Details tab with this host's IP, port, and password."""
        details_server_ip_address.config(text=socket.gethostbyname(socket.gethostname()))
        details_server_port.config(text=str(port))
        details_server_password.config(text=password)

    def alert_popup(title, reason, description):
        """Modal error popup with a single OK button."""
        popup = tk.Toplevel()
        popup.title(title)
        title = Label(popup, text=title + ":")
        description = Label(popup, text=reason + ": " + description)
        ok_button = Button(popup, text="OK", command=popup.destroy)
        title.pack()
        description.pack()
        ok_button.pack()
        popup.mainloop()

    # --- parameter validation: abort with a popup on bad input ---
    if port == "" or host_username == "" or room_name == "":
        alert_popup("Error", "Parameter Error", "One or more of your parameters are either invalid or blank.")
        exit()
    try:
        int(port)
    except:
        alert_popup("Error", "Parameter Error", "Your port number is invalid.")
        exit()
    ### Window, Notebook, Toolbar, and Frames
    server_window = tk.Toplevel()
    server_window.title(room_name)
    server_window.geometry("350x500")
    server_window.iconbitmap("images//logo.ico")
    server_window.resizable(False, False)
    toolbar = Menu(server_window)
    server_window.config(menu=toolbar)
    toolbar.add_command(label="Exit", command=server_window.destroy)
    toolbar.add_command(label="Help", command=test)
    toolbar.add_command(label="Test", command=test)
    notebook = ttk.Notebook(server_window)
    details_frame = Frame(notebook)
    action_frame = Frame(notebook)
    logs_frame = Frame(notebook)
    notebook.pack(fill="both", expand=True)
    details_frame.pack(fill="both", expand=True)
    action_frame.pack(fill="both", expand=True)
    logs_frame.pack(fill="both", expand=True)
    ##### Widgets and Images #####
    # keep references to PhotoImages on locals so Tk does not GC them
    details_main_logo = ImageTk.PhotoImage(Image.open("images//logo.png").resize((30, 30)))
    actions_main_logo = ImageTk.PhotoImage(Image.open("images//logo.png").resize((75, 75)))
    chat_main_logo = ImageTk.PhotoImage(Image.open("images//logo.png").resize((100, 100)))
    ### Details
    # Labels
    details_chatroom_name_label = Label(details_frame, text=room_name, font=("Helvetica", 15))
    details_title_label = Label(details_frame, text="Server Details:", font=("Helvetica", 14))
    details_server_room_name_label = Label(details_frame, text="Room Name: ", font=("Helvetica", 10))
    details_server_time_running_label = Label(details_frame, text="Time Active: ", font=("Helvetica", 10))
    details_server_ip_address_label = Label(details_frame, text="Server IPV4 Address: ", font=("Helvetica", 10))
    details_server_port_label = Label(details_frame, text="Server Port: ", font=("Helvetica", 10))
    details_server_password_label = Label(details_frame, text="Server Password: ", font=("Helvetica", 10))
    details_server_connections_label = Label(details_frame, text="Connected Clients: ", font=("Helvetica", 10))
    # value labels below are placeholders; display_server_details()/timer() fill them in
    details_server_room_name = Label(details_frame, text=room_name)
    details_server_time_running = Label(details_frame, text="00:00")
    details_server_ip_address = Label(details_frame, text="XXX.XXX.X.XXX")
    details_server_port = Label(details_frame, text="1234")
    details_server_password = Label(details_frame, text="abcdefg123")
    details_logo_label = Label(details_frame, image=details_main_logo)
    # Listboxes
    details_server_connections_listbox = Listbox(details_frame, width=33, height=8, borderwidth=5, font=("Courier", 13))
    ### Actions
    # Labels
    actions_title_label = Label(action_frame, text="Server Actions", font=("Helvetica", 15))
    actions_logo_label = Label(action_frame, image=actions_main_logo)
    # Buttons (commands are attached later inside server_thread)
    actions_edit_room_name_button = Button(action_frame, text="Change Room Name", borderwidth=3)
    actions_kick_user_button = Button(action_frame, text="Kick User", borderwidth=3)
    actions_mute_user_button = Button(action_frame, text="Mute/Unmute User", borderwidth=3)
    actions_deafen_user_button = Button(action_frame, text="Deafen/Undeafen User", borderwidth=3)
    actions_shutdown_server_button = Button(action_frame, text="Shut down Server", borderwidth=3)
    # Entries
    actions_edit_room_name_entry = Entry(action_frame, borderwidth=5)
    # Listboxes
    actions_server_connections_listbox = Listbox(action_frame, width=33, height=8, borderwidth=5, font=("Courier", 13))
    ##### Grids #####
    ### Details
    # Labels
    details_chatroom_name_label.grid(row=0, column=0, pady=3, columnspan=2)
    details_title_label.grid(row=1, column=0, pady=1, columnspan=2)
    details_server_room_name_label.grid(row=2, column=0, pady=1)
    details_server_time_running_label.grid(row=3, column=0, pady=1)
    details_server_ip_address_label.grid(row=4, column=0, pady=1)
    details_server_port_label.grid(row=5, column=0, pady=1)
    details_server_password_label.grid(row=6, column=0, pady=1)
    details_server_connections_label.grid(row=7, column=0, pady=1)
    details_server_room_name.grid(row=2, column=1, pady=1)
    details_server_time_running.grid(row=3, column=1)
    details_server_ip_address.grid(row=4, column=1)
    details_server_port.grid(row=5, column=1)
    details_server_password.grid(row=6, column=1)
    details_logo_label.grid(row=9, column=0, columnspan=2)
    # Listboxes
    details_server_connections_listbox.grid(row=8, column=0, columnspan=2, pady=7)
    ### Logs
    ### Actions
    # Labels
    actions_title_label.grid(row=0, column=0, columnspan=2, pady=5)
    actions_logo_label.grid(row=6, column=0, columnspan=2)
    # Buttons
    actions_edit_room_name_button.grid(row=1, column=0, pady=5)
    actions_mute_user_button.grid(row=2, column=1, pady=5)
    actions_deafen_user_button.grid(row=2, column=0, pady=5)
    actions_kick_user_button.grid(row=4, column=0, pady=5, columnspan=2)
    actions_shutdown_server_button.grid(row=5, column=0, pady=5, columnspan=2)
    # Entries
    actions_edit_room_name_entry.grid(row=1, column=1, pady=5)
    # Listboxes
    actions_server_connections_listbox.grid(row=3, column=0, columnspan=2, pady=5)
    ### Frames
    notebook.add(details_frame, text="Details")
    notebook.add(action_frame, text="Actions")
    notebook.add(logs_frame, text="Log")
    ### Commands to be run
    display_server_details()
    # daemon threads so they die with the GUI process
    st = threading.Thread(target=server_thread)
    st.daemon = True
    st.start()
    tt = threading.Thread(target=timer)
    tt.daemon = True
    tt.start()
    ### Mainloop
    server_window.mainloop()
|
test_redis_backend.py | from dogpile.cache.api import CachedValue, NO_VALUE
from dogpile.cache.region import _backend_loader
from dogpile.cache.region import value_version
from ._fixtures import _GenericBackendTest, _GenericMutexTest, _GenericBackendFixture
from . import eq_, assert_raises_message
from threading import Thread, Lock
from unittest import TestCase
import os
import pdb
import time
import unittest
import sys
from mock import patch, Mock
import msgpack
import pytest
REDIS_HOST = "127.0.0.1"
REDIS_PORT = int(os.getenv("DOGPILE_REDIS_PORT", "6379"))
COMPAT_PY3 = True if (sys.version_info > (3, 0)) else False
# import to register the plugin
import dogpile_backend_redis_advanced
"""
ABOUT THESE TESTS
Compatibility Tests
===
Tests that have `_Compatibility_` in the name are pegged to upstream tests. They
ensure compatibility with the core dogpile cache routines
* RedisAdvanced_Compatibility_Test
* RedisAdvanced_Compatibility_DistributedMutexTest
* RedisAdvanced_Compatibility_ConnectionTest
* RedisAdvancedHstore_Compatibility_Test
* RedisAdvancedHstore_Compatibility_DistributedMutexTest
* RedisAdvancedHstore_Compatibility_ConnectionTest
SerializedAlternate_Test
===
these tests use msgpack to test different serializers
HstoreTests
===
These test advanced support for hstore
tox -e py27 -- tests/cache/test_redis_backend.py::RedisAdvanced_SerializedAlternate_Test
"""
class _TestRedisConn(object):
    """Mixin: skip the test class entirely when no redis server is reachable."""

    @classmethod
    def _check_backend_available(cls, backend):
        # round-trip a sentinel key; any failure (connection refused, auth,
        # etc.) means redis is unusable, so skip instead of erroring
        try:
            client = backend._create_client()
            client.set("x", "y")
            # on py3k it appears to return b"y"
            assert client.get("x").decode("ascii") == "y"
            client.delete("x")
        except:
            pytest.skip(
                "redis is not running or " "otherwise not functioning correctly"
            )
# ==============================================================================


class _Compatibility_Test(_TestRedisConn, _GenericBackendTest):
    """Upstream-pegged generic backend test.

    "foo" is an extra argument -- presumably exercising that unknown config
    keys are tolerated; confirm against the backend's __init__.
    """

    config_args = {
        "arguments": {"host": REDIS_HOST, "port": REDIS_PORT, "db": 0, "foo": "barf"}
    }


class RedisAdvanced_Compatibility_Test(_Compatibility_Test):
    backend = "dogpile_backend_redis_advanced"


class RedisAdvancedHstore_Compatibility_Test(_Compatibility_Test):
    backend = "dogpile_backend_redis_advanced_hstore"


# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -


class _Compatibility_DistributedMutexTest(_TestRedisConn, _GenericMutexTest):
    """Upstream-pegged mutex test run with the redis distributed lock."""

    config_args = {
        "arguments": {
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "distributed_lock": True,
        }
    }


class RedisAdvanced_Compatibility_DistributedMutexTest(
    _Compatibility_DistributedMutexTest
):
    backend = "dogpile_backend_redis_advanced"


class RedisAdvancedHstore_Compatibility_DistributedMutexTest(
    _Compatibility_DistributedMutexTest
):
    backend = "dogpile_backend_redis_advanced_hstore"


# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -


@patch("redis.StrictRedis", autospec=True)
class _Compatibility_ConnectionTest(TestCase):
    """Assert the exact kwargs each config is translated into for StrictRedis."""

    @classmethod
    def setup_class(cls):
        cls.backend_cls = _backend_loader.load(cls.backend)
        try:
            cls.backend_cls({})
        except ImportError:
            pytest.skip("Backend %s not installed" % cls.backend)

    def _test_helper(self, mock_obj, expected_args, connection_args=None):
        # when connection_args is omitted, the config should pass through 1:1
        if connection_args is None:
            connection_args = expected_args
        self.backend_cls(connection_args)
        mock_obj.assert_called_once_with(**expected_args)

    def test_connect_with_defaults(self, MockStrictRedis):
        # The defaults, used if keys are missing from the arguments dict.
        arguments = {"host": "localhost", "password": None, "port": 6379, "db": 0}
        self._test_helper(MockStrictRedis, arguments, {})

    def test_connect_with_basics(self, MockStrictRedis):
        arguments = {"host": "127.0.0.1", "password": None, "port": 6379, "db": 0}
        self._test_helper(MockStrictRedis, arguments)

    def test_connect_with_password(self, MockStrictRedis):
        arguments = {
            "host": "127.0.0.1",
            "password": "some password",
            "port": 6379,
            "db": 0,
        }
        self._test_helper(MockStrictRedis, arguments)

    def test_connect_with_socket_timeout(self, MockStrictRedis):
        arguments = {
            "host": "127.0.0.1",
            "port": 6379,
            "socket_timeout": 0.5,
            "password": None,
            "db": 0,
        }
        self._test_helper(MockStrictRedis, arguments)

    def test_connect_with_connection_pool(self, MockStrictRedis):
        # with an explicit pool, host/timeout options must be dropped
        pool = Mock()
        arguments = {"connection_pool": pool, "socket_timeout": 0.5}
        expected_args = {"connection_pool": pool}
        self._test_helper(MockStrictRedis, expected_args, connection_args=arguments)

    def test_connect_with_url(self, MockStrictRedis):
        # url-style config goes through StrictRedis.from_url instead
        arguments = {"url": "redis://redis:password@127.0.0.1:6379/0"}
        self._test_helper(MockStrictRedis.from_url, arguments)


class RedisAdvanced_Compatibility_ConnectionTest(_Compatibility_ConnectionTest):
    backend = "dogpile_backend_redis_advanced"


class RedisAdvancedHstore_Compatibility_ConnectionTest(_Compatibility_ConnectionTest):
    backend = "dogpile_backend_redis_advanced_hstore"
# ==============================================================================
def my_loads(value):
    """Deserialize a msgpack payload read back from redis.

    Tuples are rehydrated into dogpile ``CachedValue`` records.  Anything
    else (e.g. the raw strings written by the upstream set/get unit tests)
    is returned untouched -- we could disable that test, but this workaround
    supports it.
    """
    if COMPAT_PY3:
        # raw=False keeps the historical (py2-era) string behavior
        unpacked = msgpack.unpackb(value, use_list=False, raw=False)
    else:
        unpacked = msgpack.unpackb(value, use_list=False)
    return CachedValue(*unpacked) if isinstance(unpacked, tuple) else unpacked
class _SerializedAlternate_Test(_TestRedisConn, _GenericBackendTest):
    """Generic backend test run with a msgpack loads/dumps pair."""

    config_args = {
        "arguments": {
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "foo": "barf",
            "loads": my_loads,
            "dumps": msgpack.packb,
        }
    }


class RedisAdvanced_SerializedAlternate_Test(_SerializedAlternate_Test):
    backend = "dogpile_backend_redis_advanced"


class RedisAdvancedHstore_SerializedAlternate_Test(_SerializedAlternate_Test):
    backend = "dogpile_backend_redis_advanced_hstore"


# ==============================================================================


def raw_loads(value):
    """Unpack a raw msgpack payload and wrap it in a fresh CachedValue.

    Unlike my_loads, the stored value carries no dogpile metadata, so a new
    metadata dict (current time + value_version) is synthesized here.
    """
    if COMPAT_PY3:
        # this is True for backward compatibility
        value = msgpack.unpackb(value, use_list=False, raw=False)
    else:
        value = msgpack.unpackb(value, use_list=False)
    return CachedValue(value, {"ct": time.time(), "v": value_version})
def raw_dumps(value):
    """Serialize for redis: strip a CachedValue to its payload, then msgpack it."""
    payload = value.payload if isinstance(value, CachedValue) else value
    return msgpack.packb(payload)
class _SerializedRaw_Test(_TestRedisConn, _GenericBackendTest):
    """Backend test storing raw (metadata-free) payloads; redis owns expiry."""

    config_args = {
        "arguments": {
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "foo": "barf",
            "loads": raw_loads,
            "dumps": raw_dumps,
            "redis_expiration_time": 1,
        }
    }

    @unittest.skip("do not test get/set of raw value")
    def test_backend_set_get_value(self):
        pass

    @unittest.skip("do not test region expiry, we defer expiry to the cloud")
    def test_region_expire(self):
        pass

    def test_threaded_dogpile(self):
        """
        this is modified version of the upstream fixture test
        1. adjusted the sleep
        2. removed the region arguments
        """
        # run a basic dogpile concurrency test.
        # note the concurrency of dogpile itself
        # is intensively tested as part of dogpile.
        reg = self._region()
        lock = Lock()
        canary = []

        def creator():
            # record whether this thread won the (non-blocking) local lock;
            # only one creator at a time should get ack=True
            ack = lock.acquire(False)
            canary.append(ack)
            time.sleep(1)
            if ack:
                lock.release()
            return "some value"

        def f():
            for x in range(5):
                reg.get_or_create("some key", creator)
                time.sleep(1.25)

        threads = [Thread(target=f) for i in range(10)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        assert len(canary) > 2
        # with no lock timeout, the dogpile lock fully serializes creators
        if not reg.backend.has_lock_timeout():
            assert False not in canary
        else:
            assert False in canary


class RedisAdvanced_SerializedRaw_Test(_SerializedRaw_Test):
    backend = "dogpile_backend_redis_advanced"


class RedisAdvancedHstore_SerializedRaw_Test(_SerializedRaw_Test):
    backend = "dogpile_backend_redis_advanced_hstore"


# ==============================================================================

# make this simple
# fixtures: a plain string key, an hstore (name, field) key, and a mix of both
key_string = "some_key"
key_hash = ("some_key", "h1")
cloud_value = "some value"
keys_mixed = [
    1,
    2,
    3,
    4,
    5,
    6,
    7,
    8,
    9,
    10,
    ("a", 10),
    ("a", 30),
    ("a", 20),
    ("b", 9),
    ("c", 8),
    ("d", 7),
    ("e", 6),
    ("f", 5),
    ("g", 4),
    ("h", 3),
    ("i", 2),
    ("j", 1),
    11,
    12,
    13,
    14,
    15,
    16,
    17,
    18,
    19,
    20,
]
def keys_multiplier(x):
    """Return the doubled value stored as the payload for key *x*."""
    return x + x
# Expected (key, value) pairs for keys_mixed: an hstore tuple key stores the
# doubled second element; a plain key stores its own doubled value.
# (Replaces the manual append loop with an equivalent comprehension.)
mixed_generated = [
    (k, keys_multiplier(k[1] if isinstance(k, tuple) else k)) for k in keys_mixed
]
class HstoreTest(_TestRedisConn, _GenericBackendFixture, TestCase):
    # tox -e py27 -- tests/cache/test_redis_backend.py::HstoreTest
    """Exercise the hstore backend with both string and (name, field) keys."""

    backend = "dogpile_backend_redis_advanced_hstore"
    config_args = {
        "arguments": {
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "redis_expiration_time": 3,
        }
    }

    def test_backend_set_get_delete(self):
        """
        this tests
        * get
        * set
        * delete
        """
        backend = self._backend()
        # strings
        backend.set(key_string, cloud_value)
        eq_(backend.get(key_string), cloud_value)
        backend.delete(key_string)
        eq_(backend.get(key_string), NO_VALUE)
        # make sure we delete above. otherwise the test will fail by trying to
        # use a hmset on a normal key
        # hstore
        backend.set(key_hash, cloud_value)
        eq_(backend.get(key_hash), cloud_value)
        backend.delete(key_hash)
        eq_(backend.get(key_hash), NO_VALUE)

    def test_mixed_keys(self):
        """
        this tests
        * get_multi
        * set_multi
        * delete_multi
        """
        backend = self._backend()
        # set up the mapping
        mixed_mapping = dict(mixed_generated)
        # upload the mapping
        backend.set_multi(mixed_mapping)
        # grab the results
        results = backend.get_multi(keys_mixed)
        # enumerate the results, match their order to the ordered array
        for idx, result in enumerate(results):
            eq_(result, mixed_generated[idx][1])
        # delete them all
        backend.delete_multi(keys_mixed)
        # grab the results
        results = backend.get_multi(keys_mixed)
        # ensure they're all misses
        for _result in results:
            eq_(_result, NO_VALUE)
class HstoreTest_Expires_Hash(HstoreTest):
    """TTL behaviour for hstore keys; subclasses vary redis_expiration_time_hash."""

    redis_expiration_time_hash = None
    config_args = {
        "arguments": {
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "redis_expiration_time": 3,
            "redis_expiration_time_hash": redis_expiration_time_hash,
        }
    }

    def test_expires(self):
        """single-key set/get: the hash key should carry a TTL (unless False)."""
        backend = self._backend()
        # hstore
        backend.set(key_hash, cloud_value)
        eq_(backend.get(key_hash), cloud_value)
        # we don't set ttl on `redis_expiration_time_hash = False`
        if self.redis_expiration_time_hash is not False:
            ttl = backend.client.ttl(key_hash[0])
            assert ttl >= 1, "ttl should be larger"
        backend.delete(key_hash)
        eq_(backend.get(key_hash), NO_VALUE)

    def test_expires_multi(self):
        """multi-key set/get: every uploaded key should carry a TTL (unless False)."""
        backend = self._backend()
        # hstore
        mixed_mapping = dict(mixed_generated)
        backend.set_multi(mixed_mapping)
        # grab the results
        results = backend.get_multi(keys_mixed)
        # enumerate the results, match their order to the ordered array
        for idx, result in enumerate(results):
            eq_(result, mixed_generated[idx][1])
        # we don't set ttl on `redis_expiration_time_hash = False`
        if self.redis_expiration_time_hash is not False:
            # make sure every key has an expiry!
            for k in keys_mixed:
                if isinstance(k, tuple):
                    k = k[0]
                ttl = backend.client.ttl(k)
                assert ttl >= 0, "ttl should be larger"
        # delete them all
        backend.delete_multi(keys_mixed)


class HstoreTest_Expires_HashTrue(HstoreTest_Expires_Hash):
    """redis_expiration_time_hash=True: the TTL is refreshed on every write."""

    redis_expiration_time_hash = True
    config_args = {
        "arguments": {
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "redis_expiration_time": 10,
            "redis_expiration_time_hash": redis_expiration_time_hash,
        }
    }

    def test_expires_tracked(self):
        """
        When redis_expiration_time_hash is True, we should be setting the
        expiry on every hash set.
        to test this, we're just going to loop this a few times
        the loop should reset the expiry to 10 seconds, then sleep 1s, so it will
        always be >= 9.
        """
        backend = self._backend()
        for i in range(0, 3):
            backend.set(key_hash, cloud_value)
            eq_(backend.get(key_hash), cloud_value)
            ttl = backend.client.ttl(key_hash[0])
            assert ttl >= 9, "ttl should be larger"
            time.sleep(1)
        backend.delete(key_hash)
        eq_(backend.get(key_hash), NO_VALUE)

    def test_expires_tracked_multi(self):
        # same refresh-on-every-write check, but via set_multi/get_multi
        backend = self._backend()
        # set up the mapping
        mixed_mapping = dict(mixed_generated)
        for i in range(0, 3):
            # upload the mapping
            backend.set_multi(mixed_mapping)
            # grab the results
            results = backend.get_multi(keys_mixed)
            # enumerate the results, match their order to the ordered array
            for idx, result in enumerate(results):
                eq_(result, mixed_generated[idx][1])
                key = keys_mixed[idx]
                if isinstance(key, tuple):
                    key = key[0]
                ttl = backend.client.ttl(key)
                assert ttl >= 9, "ttl should be larger"
            time.sleep(1)
        backend.delete_multi(keys_mixed)
class HstoreTest_Expires_HashNone(HstoreTest_Expires_Hash):
    """redis_expiration_time_hash=None: the TTL is set only on first write."""

    redis_expiration_time_hash = None
    config_args = {
        "arguments": {
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "redis_expiration_time": 10,
            "redis_expiration_time_hash": redis_expiration_time_hash,
        }
    }

    def test_expires_tracked(self):
        """
        When redis_expiration_time_hash is None, we should be setting the
        expiry only if the key doesn't exist.
        to test this, we loop a few times: the first write sets a 10s expiry,
        each iteration sleeps 1s, so after 5 writes the ttl is about 5.
        """
        backend = self._backend()
        for i in range(0, 5):
            backend.set(key_hash, cloud_value)
            eq_(backend.get(key_hash), cloud_value)
            time.sleep(1)
        ttl = backend.client.ttl(key_hash[0])
        assert ttl <= 6, "ttl should be <= 6"
        # BUG FIX: the failure message previously read "<= 4" for a ">= 4" check
        assert ttl >= 4, "ttl should be >= 4"
        backend.delete(key_hash)
        eq_(backend.get(key_hash), NO_VALUE)

    def test_expires_tracked_multi(self):
        # same first-write-only expiry check, but via set_multi/get_multi
        backend = self._backend()
        # set up the mapping
        mixed_mapping = dict(mixed_generated)
        # loop over this a bit setting and sleeping
        for i in range(0, 5):
            # upload the mapping
            backend.set_multi(mixed_mapping)
            # grab the results
            results = backend.get_multi(keys_mixed)
            # enumerate the results, match their order to the ordered array
            for idx, result in enumerate(results):
                eq_(result, mixed_generated[idx][1])
            time.sleep(1)
        # check the ttls. we should not have set them on the subsequent loops
        for key in keys_mixed:
            if isinstance(key, tuple):
                key = key[0]
            ttl = backend.client.ttl(key)
            assert ttl <= 6, "ttl should be <= 6"
            # BUG FIX: message previously said "<= 4" for a ">= 4" check
            assert ttl >= 4, "ttl should be >= 4"
        backend.delete_multi(keys_mixed)
class HstoreTest_Expires_HashFalse(HstoreTest_Expires_Hash):
    """redis_expiration_time_hash=False: hash keys never get a TTL (always -1)."""

    redis_expiration_time_hash = False
    config_args = {
        "arguments": {
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "redis_expiration_time": 2,
            "redis_expiration_time_hash": False,
        }
    }

    def test_expires_tracked(self):
        """
        When redis_expiration_time_hash is False, we should be ignoring hash
        expiry so it should always be -1.
        """
        backend = self._backend()
        for i in range(0, 3):
            backend.set(key_hash, cloud_value)
            eq_(backend.get(key_hash), cloud_value)
            ttl = backend.client.ttl(key_hash[0])
            assert ttl == -1, "ttl should be -1"
        backend.delete(key_hash)
        eq_(backend.get(key_hash), NO_VALUE)

    def test_expires_tracked_multi(self):
        # same no-TTL check, but via set_multi/get_multi
        backend = self._backend()
        # set up the mapping
        mixed_mapping = dict(mixed_generated)
        # loop over this a bit setting and sleeping
        for i in range(0, 3):
            # upload the mapping
            backend.set_multi(mixed_mapping)
            # grab the results
            results = backend.get_multi(keys_mixed)
            # enumerate the results, match their order to the ordered array
            for idx, result in enumerate(results):
                eq_(result, mixed_generated[idx][1])
            # and make sure we did not set the ttl
            for key in keys_mixed:
                if isinstance(key, tuple):
                    key = key[0]
                ttl = backend.client.ttl(key)
                assert ttl == -1, "ttl should be -1"
            time.sleep(1)
        backend.delete_multi(keys_mixed)


class RedisDistributedMutexCustomPrefixTest(_TestRedisConn, _GenericMutexTest):
    """Verify the distributed lock key honors a custom `lock_prefix`."""

    backend = "dogpile_backend_redis_advanced_hstore"
    config_args = {
        "arguments": {
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "distributed_lock": True,
            "lock_prefix": "_lk-",
        }
    }

    def test_prefix(self):
        """
        test the lock being set to the desired prefix by querying for a
        value of the prefix. since the value is not managed as a normal key,
        the test is performed using the backend client
        """
        reg = self._region()
        key = "creator"
        value = "creator value"

        def creator():
            # while the creator runs, the "<prefix><key>" lock must exist
            lock_key = self.config_args["arguments"]["lock_prefix"] + key
            locked = reg.backend.client.get(lock_key)
            assert locked and locked is not NO_VALUE
            return value

        assert reg.get_or_create(key, creator) == value
        # reset the region...
        reg.delete(key)
class RedisDistributedLockProxy(object):
    """Base lock wrapper used by the lock-policy tests.

    ``acquire`` is forwarded verbatim to the wrapped mutex; subclasses decide
    what a ``release`` failure means.
    """

    # the wrapped distributed-lock object
    mutex = None

    def __init__(self, mutex):
        self.mutex = mutex

    def acquire(self, *_args, **_kwargs):
        # straight delegation to the wrapped lock
        return self.mutex.acquire(*_args, **_kwargs)

    def release(self):
        # release policy is a subclass responsibility
        raise NotImplementedError()
class RedisDistributedLockProxySilent(RedisDistributedLockProxy):
    """Example lock wrapper that silently ignores lock-timeout errors.

    If redis already expired/released the distributed lock, ``release`` raises
    ``redis.exceptions.LockError``; this proxy treats that as benign.  Any
    other exception propagates unchanged.
    """

    def release(self):
        # defer imports until backend is used
        global redis
        import redis  # noqa

        try:
            self.mutex.release()
        except redis.exceptions.LockError:
            # log.debug("safe lock timeout")
            # Removed the original's redundant `except Exception as e: raise`
            # clause (it re-raised unchanged) and the unused `e` binding.
            pass
class RedisDistributedLockProxyFatal(RedisDistributedLockProxy):
    """example lock wrapper

    this will re-raise LockErrors but give a hook to log or retry
    """

    def release(self):
        # defer imports until backend is used
        global redis
        import redis  # noqa

        try:
            self.mutex.release()
        except redis.exceptions.LockError as e:
            # hook point: a subclass/edit could log or retry before re-raising
            raise
        except Exception as e:
            raise
class RedisDistributedMutexSilentLockTest(_TestRedisConn, _GenericMutexTest):
    """With the silent proxy, a timed-out lock release is swallowed."""

    backend = "dogpile_backend_redis_advanced_hstore"
    config_args = {
        "arguments": {
            "host": "127.0.0.1",
            "port": 6379,
            "db": 0,
            "distributed_lock": True,
            "lock_class": RedisDistributedLockProxySilent,
            "lock_timeout": 1,
            "redis_expiration_time": 1,
        }
    }

    def test_pass_lock_timeout__single(self):
        reg = self._region()

        # ensure this works instantly.
        def creator():
            return "creator value"

        assert reg.get_or_create("creator", creator) == "creator value"
        # reset the region...
        reg.delete("creator")

        # can this work on a timeout?
        # sleep for 1 second longer than the timeout, so redis must expire
        def creator_sleep():
            time.sleep(self.config_args["arguments"]["lock_timeout"] + 1)
            return "creator_sleep value"

        assert (
            reg.get_or_create("creator_sleep", creator_sleep) == "creator_sleep value"
        )
        # no need reset, the `creator_sleep` is timed out

    def test_pass_lock_timeout__multi(self):
        reg = self._region()

        def _creator_multi(*_creator_keys):
            # outlive the lock timeout, then return values keyed off the names
            time.sleep(self.config_args["arguments"]["lock_timeout"] + 1)
            # rval is an ordered list
            return [int(_k[-1]) for _k in _creator_keys]

        _values_expected = [1, 2, 3]
        _keys = [str("creator_sleep_multi-%s" % i) for i in _values_expected]
        _values = reg.get_or_create_multi(_keys, _creator_multi)
        assert _values == _values_expected
        # reset the region...
        for _k in _keys:
            reg.delete(_k)


class RedisDistributedMutexFatalLockTest(_TestRedisConn, _GenericMutexTest):
    """With the fatal proxy, a timed-out lock release raises LockError."""

    backend = "dogpile_backend_redis_advanced_hstore"
    config_args = {
        "arguments": {
            "host": "127.0.0.1",
            "port": 6379,
            "db": 0,
            "distributed_lock": True,
            "lock_class": RedisDistributedLockProxyFatal,
            "lock_timeout": 1,
            "redis_expiration_time": 1,
        }
    }

    def test_pass_lock_timeout__single(self):
        reg = self._region()

        # ensure this works instantly.
        def creator():
            return "creator value"

        assert reg.get_or_create("creator", creator) == "creator value"

        # can this work on a timeout?
        # sleep for 1 second longer than the timeout, so redis must expire
        def creator_sleep():
            time.sleep(self.config_args["arguments"]["lock_timeout"] + 1)
            return "creator_sleep value"

        # NOTE(review): the module never imports `redis` at top level; the
        # global is populated as a side effect of the proxy's release() --
        # so this name resolves only after the lock machinery has run.
        try:
            result = reg.get_or_create("creator_sleep", creator_sleep)
            raise ValueError("expected an error!")
        except redis.exceptions.LockError as e:
            pass

    def test_pass_lock_timeout__multi(self):
        reg = self._region()

        def _creator_multi(*_creator_keys):
            time.sleep(self.config_args["arguments"]["lock_timeout"] + 1)
            # rval is an ordered list
            return [int(_k[-1]) for _k in _creator_keys]

        _values_expected = [1, 2, 3]
        _keys = [str("creator_sleep_multi-%s" % i) for i in _values_expected]
        try:
            _values = reg.get_or_create_multi(_keys, _creator_multi)
            raise ValueError("expected an error!")
        except redis.exceptions.LockError as e:
            pass
|
emulation_server.py | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
from __future__ import print_function
import six.moves.socketserver
import select
import socket
import sys
from threading import Thread
class EmulationServer(object):
    """TCP server wrapper around an *emulator*.

    Two serving modes:
      * ``run``      -- socketserver.TCPServer.serve_forever; *emulator* is a
                        request-handler class.
      * ``run_link`` -- hand-rolled select() loop; *emulator* is an object
                        with a ``handle(data)`` method.
    """

    def __init__(self, host, port, emulator):
        self.host = host
        self.port = port
        self.emulator = emulator
        # Initialized here so stop() is safe even if no serving loop was
        # ever started (previously stop() raised AttributeError).
        self._alive = False
        self.server = None

    def start(self, join=True):
        """Start serving; runs in a background thread when join=False."""
        if not join:
            t = Thread(target=self.run)
            t.start()
        else:
            self.run()

    def stop(self):
        """Terminate the run_link loop and/or close the TCPServer."""
        self._alive = False
        if self.server is not None:
            self.server.server_close()

    def run(self, host=None, port=None):
        """Serve forever with socketserver.TCPServer (blocking)."""
        if host is None:
            host = self.host
        if port is None:
            port = self.port
        # nothing to do without both a host and a non-zero port
        if not (port and host):
            return

        print('serving on {}:{}'.format(host, port))
        server = six.moves.socketserver.TCPServer((host, port), self.emulator)
        server.allow_reuse_address = True
        self.server = server
        server.serve_forever()

    def run_link(self, host=None, port=None):
        """select()-based accept/serve loop; stdin input or stop() ends it."""
        if host is None:
            host = self.host
        if port is None:
            port = self.port
        if not (port and host):
            return

        self._alive = True
        c = self.emulator

        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.bind((host, port))
        server.listen(5)
        input_ = [server, sys.stdin]
        while self._alive:
            inputready, _outputready, _exceptready = select.select(input_, [], [], 0.05)
            for s in inputready:
                if s == server:
                    # handle the server socket
                    client, _address = server.accept()
                    input_.append(client)
                elif s == sys.stdin:
                    # handle standard input: any line terminates the loop.
                    # BUG FIX: previously assigned an unused local
                    # (``running = 0``) so the loop never exited.
                    _junk = sys.stdin.readline()
                    self._alive = False
                else:
                    # handle all other sockets
                    data = c.handle(s.recv(1024))
                    if data:
                        try:
                            s.send(data)
                        except socket.error:
                            pass
                    else:
                        # empty response means the client is done
                        s.close()
                        input_.remove(s)
        # previously leaked: close the listening socket on exit
        server.close()
# ============= EOF =============================================
|
optimize_crystal_energy_stability.py | """ Optimize the volume for the conducting ions
"""
import argparse
import logging
import math
import os
import time
import pandas as pd
# Apparently there's an issue with the latest version of pandas.
# Got this fix from here:
# https://github.com/pandas-profiling/pandas-profiling/issues/662#issuecomment-803673639
pd.set_option("display.max_columns", None)
import random
import ujson
import gzip
import pathlib
import tensorflow as tf
import nfp
from pymatgen.core import Structure
from rlmolecule.crystal.builder import CrystalBuilder
from rlmolecule.crystal.crystal_problem import CrystalTFAlphaZeroProblem
from rlmolecule.crystal.crystal_state import CrystalState
from rlmolecule.sql.run_config import RunConfig
# from rlmolecule.tree_search.reward import RankedRewardFactory
from rlmolecule.tree_search.reward import RankedRewardFactory
from rlmolecule.sql import Base, Session
from rlmolecule.sql.tables import GameStore
from scripts.nfp_extensions import RBFExpansion, CifPreprocessor
from scripts import nrelmatdbtaps
from scripts import stability
from scripts import ehull
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def read_structures_file(structures_file):
    """Load a gzipped-JSON file of pymatgen structures keyed by id."""
    logger.info(f"reading {structures_file}")
    with gzip.open(structures_file, 'r') as f:
        raw = ujson.loads(f.read().decode())
    # rebuild pymatgen Structure objects from their dict serializations
    structures = {key: Structure.from_dict(entry) for key, entry in raw.items()}
    logger.info(f"\t{len(structures)} structures read")
    return structures
def write_structures_file(structures_file, structures_dict):
    """Serialize *structures_dict* to a gzipped, indented JSON file."""
    logger.info(f"writing {structures_file}")
    payload = ujson.dumps(structures_dict, indent=2).encode()
    with gzip.open(structures_file, 'w') as out:
        out.write(payload)
def generate_decoration(state: CrystalState) -> Structure:
    """Decorate the prototype structure selected by *state* with its composition.

    The ``action_node`` string has the format:
        comp_type|prototype_structure|decoration_idx
    'comp_type|prototype_structure' keys the module-level ``structures`` dict
    (loaded at import time); the trailing index selects the decoration
    (1-based in the state string, converted to 0-based here).
    """
    structure_key = '|'.join(state.action_node.split('|')[:-1])
    icsd_prototype = structures[structure_key]
    decoration_idx = int(state.action_node.split('|')[-1]) - 1
    # stoich is unused here; only the decorated structure is returned
    decorated_structure, stoich = CrystalState.decorate_prototype_structure(
        icsd_prototype, state.composition, decoration_idx=decoration_idx)
    return decorated_structure
@tf.function(experimental_relax_shapes=True)
def predict(model: 'tf.keras.Model', inputs):
    """Graph-compiled single-batch prediction (relaxed shapes avoid
    retracing for every new structure size)."""
    return model.predict_step(inputs)
class CrystalEnergyStabilityOptProblem(CrystalTFAlphaZeroProblem):
    """AlphaZero problem that scores crystal decorations by hull stability.

    The reward for a terminal state is the negated energy-above-hull of the
    decorated structure (more stable -> larger reward), computed from a GNN
    total-energy prediction plus a convex-hull analysis against competing
    phases.
    """

    def __init__(self,
                 engine: 'sqlalchemy.engine.Engine',
                 energy_model: 'tf.keras.Model',
                 df_competing_phases: 'pd.DataFrame',
                 # initial_state: str,
                 **kwargs) -> None:
        """A class to estimate the suitability of a crystal structure as a
        solid state battery.

        :param engine: A sqlalchemy engine pointing to a suitable database backend
        :param energy_model: A tensorflow model to estimate the total energy of a structure
        :param df_competing_phases: competing phases (composition + DFT energy)
            from NRELMatDB, used by the convex-hull stability analysis
        """
        # self.initial_state = initial_state
        self.engine = engine
        self.energy_model = energy_model
        self.df_competing_phases = df_competing_phases
        # since the reward values can take positive or negative values,
        # centered around 0, set the default reward lower so that failed
        # runs have a smaller reward
        self.default_reward = -5
        super(CrystalEnergyStabilityOptProblem, self).__init__(engine, **kwargs)

    def get_reward(self, state: CrystalState) -> (float, {}):
        """Return (reward, info) for *state*; non-terminal states get the default."""
        if state.terminal:
            # skip this structure if it is too large for the model
            # TODO truncate the structure?
            structure_key = '|'.join(state.action_node.split('|')[:-1])
            icsd_prototype = structures[structure_key]
            if len(icsd_prototype.sites) > 150:
                return self.default_reward, {'terminal': True,
                                             'num_sites': len(icsd_prototype.sites),
                                             'state_repr': repr(state)}

            # generate the decoration for this state
            try:
                decorated_structure = generate_decoration(state)
            except AssertionError as e:
                print(f"AssertionError: {e}")
                return self.default_reward, {'terminal': True, 'state_repr': repr(state)}

            # Predict the total energy and stability of this decorated structure
            predicted_energy, hull_energy = self.calc_energy_stability(decorated_structure)
            # BUG FIX: float() replaces .astype(float) — the fallback hull
            # energy from calc_energy_stability is a plain int, which has no
            # .astype and previously crashed here.
            info = {
                'terminal': True,
                'predicted_energy': float(predicted_energy),
                'hull_energy': float(hull_energy),
                'num_sites': len(decorated_structure.sites),
                'state_repr': repr(state),
            }
            # hull energy is negated since more negative means more stable
            return -float(hull_energy), info
        return self.default_reward, {'terminal': False, 'state_repr': repr(state)}

    def get_model_inputs(self, structure) -> {}:
        """Featurize *structure* for the energy model (debug print retained)."""
        inputs = preprocessor.construct_feature_matrices(structure, train=False)
        print(inputs)
        # return {key: tf.constant(np.expand_dims(val, 0)) for key, val in inputs.items()}
        return inputs

    # @collect_metrics
    def calc_energy_stability(self, structure: Structure, state=None):
        """Predict the total energy of the structure using a GNN model
        (trained on unrelaxed structures) and its energy above the hull.

        Returns (predicted_energy, hull_energy); hull_energy falls back to a
        value slightly worse than the default reward when it cannot be
        computed.
        """
        # a single-structure dataset, padded to the model's fixed input sizes
        dataset = tf.data.Dataset.from_generator(
            lambda: (preprocessor.construct_feature_matrices(s, train=False) for s in [structure]),
            output_types=preprocessor.output_types,
            output_shapes=preprocessor.output_shapes) \
            .padded_batch(batch_size=32,
                          padded_shapes=preprocessor.padded_shapes(max_sites=256, max_bonds=2048),
                          padding_values=preprocessor.padding_values)
        predicted_energy = self.energy_model.predict(dataset)[0][0]

        hull_energy = self.convex_hull_stability(structure, predicted_energy)
        if hull_energy is None:
            # set the default hull energy as slightly bigger than the default energy
            hull_energy = -self.default_reward - 1
        return predicted_energy, hull_energy

    def convex_hull_stability(self, structure: Structure, predicted_energy):
        """Energy above/below the convex hull for *structure*, or None when
        the stability analysis returns an unrecognized state."""
        strc = structure
        # Add the new composition and the predicted energy to the competing
        # phases if a DFT energy is not already present
        comp = strc.composition.reduced_composition.alphabetical_formula.replace(' ', '')
        df = self.df_competing_phases
        if comp not in df.reduced_composition.tolist():
            # DataFrame.append is deprecated (removed in pandas 2.x);
            # pd.concat with a one-row frame is the equivalent.
            df = pd.concat(
                [self.df_competing_phases,
                 pd.DataFrame([{'sortedformula': comp,
                                'energyperatom': predicted_energy,
                                'reduced_composition': comp}])],
                ignore_index=True)

        # Create a list of elements in the composition
        ele = strc.composition.chemical_system.split('-')
        # Create input file for stability analysis
        inputs = nrelmatdbtaps.create_input_DFT(ele, df, chempot='ferev2')

        # Run stability function (args: input filename, composition)
        stable_state = stability.run_stability(inputs, comp)
        if stable_state == 'UNSTABLE':
            stoic = ehull.frac_stoic(comp)
            hull_nrg = ehull.unstable_nrg(stoic, comp, inputs)
            # print("energy above hull of this UNSTABLE phase is", hull_nrg, "eV/atom")
        elif stable_state == 'STABLE':
            stoic = ehull.frac_stoic(comp)
            hull_nrg = ehull.stable_nrg(stoic, comp, inputs)
            # print("energy above hull of this STABLE phase is", hull_nrg, "eV/atom")
        else:
            # BUG FIX: previously fell through and returned an undefined
            # hull_nrg (UnboundLocalError); return None so the caller
            # substitutes its default hull energy.
            print(f"ERR: unrecognized stable_state: '{stable_state}'")
            hull_nrg = None
        return hull_nrg
def create_problem():
    """Build the CrystalEnergyStabilityOptProblem from the global run config.

    Relies on module-level ``run_config``, ``engine``, ``energy_model`` and
    ``df_competing_phases`` being initialized in the ``__main__`` block.
    """
    prob_config = run_config.problem_config
    run_id = run_config.run_id
    train_config = run_config.train_config
    # ranked reward: a game's reward is compared against a rolling buffer
    reward_factory = RankedRewardFactory(engine=engine,
                                         run_id=run_id,
                                         reward_buffer_min_size=train_config.get('reward_buffer_min_size', 10),
                                         reward_buffer_max_size=train_config.get('reward_buffer_max_size', 50),
                                         ranked_reward_alpha=train_config.get('ranked_reward_alpha', 0.75))
    # reward_factory = LinearBoundedRewardFactory(min_reward=train_config.get('min_reward', 0),
    #                                             max_reward=train_config.get('max_reward', 1))
    problem = CrystalEnergyStabilityOptProblem(engine,
                                               energy_model,
                                               df_competing_phases,
                                               run_id=run_id,
                                               reward_class=reward_factory,
                                               features=train_config.get('features', 64),
                                               num_heads=train_config.get('num_heads', 4),
                                               num_messages=train_config.get('num_messages', 3),
                                               max_buffer_size=train_config.get('max_buffer_size', 200),
                                               min_buffer_size=train_config.get('min_buffer_size', 15),
                                               batch_size=train_config.get('batch_size', 32),
                                               policy_checkpoint_dir=train_config.get('policy_checkpoint_dir',
                                                                                      'policy_checkpoints'),
                                               actions_to_ignore=prob_config.get('actions_to_ignore', None),
                                               )
    return problem
def run_games():
    """Worker entry point: play AlphaZero self-play games forever.

    Each finished game is persisted via the problem's engine; this loop never
    returns and is intended to run in (many) rollout processes.
    """
    from rlmolecule.alphazero.alphazero import AlphaZero
    builder = CrystalBuilder()
    config = run_config.mcts_config
    game = AlphaZero(
        create_problem(),
        min_reward=config.get('min_reward', 0.0),
        pb_c_base=config.get('pb_c_base', 1.0),
        pb_c_init=config.get('pb_c_init', 1.25),
        dirichlet_noise=config.get('dirichlet_noise', True),
        dirichlet_alpha=config.get('dirichlet_alpha', 1.0),
        dirichlet_x=config.get('dirichlet_x', 0.25),
        # MCTS parameters
        ucb_constant=config.get('ucb_constant', math.sqrt(2)),
        state_builder=builder,
    )
    # game = MCTS(
    #     create_problem(),
    # )

    # i = 0
    while True:
        path, reward = game.run(
            num_mcts_samples=config.get('num_mcts_samples', 5),
            max_depth=config.get('max_depth', 1000000),
        )
        logger.info(f'Game Finished -- Reward {reward.raw_reward:.3f} -- Final state {path[-1]}')

        # i += 1
        # if i % 10 == 0:
        #     print(f"decoration_time: {decoration_time}, model_time: {model_time}")
        #     df = pd.DataFrame(volume_stats).T
        #     df.columns = ['conducting_ion_vol', 'total_vol', 'fraction', 'comp_type']
        #     df = df.sort_index()
        #     print(f"writing current stats to {out_file}")
        #     df.to_csv(out_file, sep='\t')
        #     write_structures_file(decorations_file, decorations)
def train_model():
    """GPU entry point: train the policy model on replayed games (blocking)."""
    config = run_config.train_config
    create_problem().train_policy_model(steps_per_epoch=config.get('steps_per_epoch', 100),
                                        lr=float(config.get('lr', 1E-3)),
                                        epochs=int(float(config.get('epochs', 1E4))),
                                        game_count_delay=config.get('game_count_delay', 20),
                                        verbose=config.get('verbose', 2))
# TODO copied from alphazero_problem.py
def iter_recent_games():
    """Iterate over randomly chosen positions in games from the replay buffer

    Considers only the 200 most recent games for this run_id; one random
    position is sampled per game.

    :returns: a generator of (serialized_parent, visit_probabilities, scaled_reward) pairs
    """
    recent_games = session.query(GameStore).filter_by(run_id=run_id) \
        .order_by(GameStore.time.desc()).limit(200)

    for game in recent_games:
        parent_state_string, visit_probabilities = random.choice(game.search_statistics)
        policy_digests, visit_probs = zip(*visit_probabilities)
        yield ([parent_state_string] + list(policy_digests), [game.scaled_reward] + list(visit_probs))
def monitor():
    """Poll the reward table every 5s and print the best reward so far."""
    from rlmolecule.sql.tables import RewardStore
    problem = create_problem()

    while True:
        # best_reward = problem.session.query(RewardStore) \
        #     .filter_by(run_id=problem.run_id) \
        best_reward = session.query(RewardStore) \
            .filter_by(run_id=run_id) \
            .order_by(RewardStore.reward.desc()).first()

        num_games = len(list(iter_recent_games()))
        print(best_reward, num_games)

        if best_reward:
            # NOTE(review): the 'smiles' key looks copied from the molecule
            # problem; crystal rewards likely store a different key — confirm.
            print(f"Best Reward: {best_reward.reward:.3f} for molecule "
                  f"{best_reward.data['smiles']} with {num_games} games played")

        time.sleep(5)
# load the icsd prototype structures
# https://pymatgen.org/usage.html#side-note-as-dict-from-dict
# NOTE: this runs at import time, so the file is read even when this module
# is merely imported (e.g. by worker processes).
icsd_prototypes_file = "../../rlmolecule/crystal/inputs/icsd_prototypes.json.gz"
structures = read_structures_file(icsd_prototypes_file)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Run the battery structure stable energy optimization. ' +
                                                 'Default is to run the script locally')
    parser.add_argument('--config', type=str, help='Configuration file')
    parser.add_argument('--train-policy', action="store_true", default=False,
                        help='Train the policy model only (on GPUs)')
    parser.add_argument('--rollout', action="store_true", default=False, help='Run the game simulations only (on CPUs)')
    parser.add_argument('--energy-model',
                        type=pathlib.Path,
                        required=True,
                        help='Model for predicting total energy of a battery system')

    args = parser.parse_args()
    run_config = RunConfig(args.config)
    run_id = run_config.run_id
    engine = run_config.start_engine()

    # Initialize the preprocessor class
    preprocessor = CifPreprocessor(num_neighbors=12)
    preprocessor.from_json('inputs/preprocessor.json')

    # keep track of how much time each part takes
    # model_time = 0
    # decoration_time = 0

    energy_model = tf.keras.models.load_model(args.energy_model,
                                              custom_objects={**nfp.custom_objects, **{'RBFExpansion': RBFExpansion}})

    # Dataframe containing competing phases from NRELMatDB
    print("Reading inputs/competing_phases.csv")
    df_competing_phases = pd.read_csv('inputs/competing_phases.csv')
    print(df_competing_phases.head(3))

    Base.metadata.create_all(engine, checkfirst=True)
    Session.configure(bind=engine)
    session = Session()

    # NOTE(review): --train-policy falls through into monitor() unless
    # --rollout is also given; an elif may have been intended — confirm.
    if args.train_policy:
        train_model()
    if args.rollout:
        # make sure the rollouts do not use the GPU
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
        run_games()
    else:
        monitor()

    # jobs = [multiprocessing.Process(target=monitor)]
    # jobs[0].start()
    # time.sleep(1)
    #
    # for i in range(5):
    #     jobs += [multiprocessing.Process(target=run_games)]
    #
    # jobs += [multiprocessing.Process(target=train_model)]
    #
    # for job in jobs[1:]:
    #     job.start()
    #
    # start = time.time()
    # while time.time() - start <= run_config.problem_config.get('timeout', 300):
    #     time.sleep(1)
    #
    # for j in jobs:
    #     j.terminate()
    #     j.join()
|
pipe_process.py | '''
Date: 2021.06.01 15:07
Description : Omit
LastEditors: Rustle Karl
LastEditTime: 2021.06.01 15:07
'''
import multiprocessing
def create_items(pipe):
    """Send the integers 0..9 down the write end of *pipe*, then close it."""
    sender = pipe[0]
    for value in range(10):
        sender.send(value)
    sender.close()
def multiply_items(pipe_1, pipe_2):
    """Square each item read from pipe_1 and forward it down pipe_2.

    Closes this process's copy of pipe_1's write end up front (so recv can
    see EOF once the producer closes its end), and closes pipe_2's write end
    when the input is exhausted.
    """
    unused_writer, reader = pipe_1
    unused_writer.close()
    out = pipe_2[0]
    try:
        while True:
            value = reader.recv()
            out.send(value * value)
    except EOFError:
        out.close()
if __name__ == '__main__':
    # First process: produce the numbers into pipe_1.
    pipe_1 = multiprocessing.Pipe(True)
    process_pipe_1 = multiprocessing.Process(target=create_items, args=(pipe_1,))
    process_pipe_1.start()

    # Second process: read numbers from pipe_1, square them into pipe_2.
    pipe_2 = multiprocessing.Pipe(True)
    process_pipe_2 = multiprocessing.Process(target=multiply_items, args=(pipe_1, pipe_2,))
    process_pipe_2.start()

    # Close the parent's copies of the write ends so recv() can see EOF.
    pipe_1[0].close()
    pipe_2[0].close()

    try:
        while True:
            print(pipe_2[1].recv())
    except EOFError:
        print("End")
|
python_lsp.py | # Copyright 2017-2020 Palantir Technologies, Inc.
# Copyright 2021- Python Language Server Contributors.
from functools import partial
import logging
import os
import socketserver
import threading
from pylsp_jsonrpc.dispatchers import MethodDispatcher
from pylsp_jsonrpc.endpoint import Endpoint
from pylsp_jsonrpc.streams import JsonRpcStreamReader, JsonRpcStreamWriter
from . import lsp, _utils, uris
from .config import config
from .workspace import Workspace
log = logging.getLogger(__name__)

LINT_DEBOUNCE_S = 0.5  # 500 ms
PARENT_PROCESS_WATCH_INTERVAL = 10  # 10 s
# thread pool size for the JSON-RPC endpoint
MAX_WORKERS = 64
PYTHON_FILE_EXTENSIONS = ('.py', '.pyi')
# lint-config files whose changes invalidate the cached settings
# NOTE(review): trailing lowercase 's' looks like a typo for CONFIG_FILES,
# but the name is referenced below, so it is left unchanged here.
CONFIG_FILEs = ('pycodestyle.cfg', 'setup.cfg', 'tox.ini', '.flake8')
class _StreamHandlerWrapper(socketserver.StreamRequestHandler):
    """A wrapper class that is used to construct a custom handler class.

    ``DELEGATE_CLASS`` and ``SHUTDOWN_CALL`` are injected via ``type()`` in
    start_tcp_lang_server; the delegate is a PythonLSPServer bound to this
    connection's read/write files.
    """

    delegate = None

    def setup(self):
        super().setup()
        self.delegate = self.DELEGATE_CLASS(self.rfile, self.wfile)

    def handle(self):
        try:
            self.delegate.start()
        except OSError as e:
            if os.name == 'nt':
                # Catch and pass on ConnectionResetError when parent process
                # dies
                # pylint: disable=no-member, undefined-variable
                if isinstance(e, WindowsError) and e.winerror == 10054:
                    pass

        # Unconditionally ask the server to shut down once the delegate's
        # stream loop ends (normally or via OSError).
        self.SHUTDOWN_CALL()
def start_tcp_lang_server(bind_addr, port, check_parent_process, handler_class):
    """Serve *handler_class* over TCP on (bind_addr, port); blocks until closed.

    :param handler_class: a PythonLSPServer subclass instantiated per connection
    :raises ValueError: if handler_class is not a PythonLSPServer subclass
    """
    if not issubclass(handler_class, PythonLSPServer):
        raise ValueError('Handler class must be an instance of PythonLSPServer')

    def shutdown_server(check_parent_process, *args):
        # pylint: disable=unused-argument
        # closes over ``server``, which is assigned below before any
        # connection (and therefore any shutdown call) can happen
        if check_parent_process:
            log.debug('Shutting down server')
            # Shutdown call must be done on a thread, to prevent deadlocks
            stop_thread = threading.Thread(target=server.shutdown)
            stop_thread.start()

    # Construct a custom wrapper class around the user's handler_class
    wrapper_class = type(
        handler_class.__name__ + 'Handler',
        (_StreamHandlerWrapper,),
        {'DELEGATE_CLASS': partial(handler_class,
                                   check_parent_process=check_parent_process),
         'SHUTDOWN_CALL': partial(shutdown_server, check_parent_process)}
    )

    # bind_and_activate=False so allow_reuse_address takes effect first
    server = socketserver.TCPServer((bind_addr, port), wrapper_class, bind_and_activate=False)
    server.allow_reuse_address = True

    try:
        server.server_bind()
        server.server_activate()
        log.info('Serving %s on (%s, %s)', handler_class.__name__, bind_addr, port)
        server.serve_forever()
    finally:
        log.info('Shutting down')
        server.server_close()
def start_io_lang_server(rfile, wfile, check_parent_process, handler_class):
    """Run *handler_class* over stream files (e.g. stdio); blocks until exit."""
    if not issubclass(handler_class, PythonLSPServer):
        raise ValueError('Handler class must be an instance of PythonLSPServer')
    log.info('Starting %s IO language server', handler_class.__name__)
    handler_class(rfile, wfile, check_parent_process).start()
class PythonLSPServer(MethodDispatcher):
""" Implementation of the Microsoft VSCode Language Server Protocol
https://github.com/Microsoft/language-server-protocol/blob/master/versions/protocol-1-x.md
"""
# pylint: disable=too-many-public-methods,redefined-builtin
def __init__(self, rx, tx, check_parent_process=False):
self.workspace = None
self.config = None
self.root_uri = None
self.watching_thread = None
self.workspaces = {}
self.uri_workspace_mapper = {}
self._jsonrpc_stream_reader = JsonRpcStreamReader(rx)
self._jsonrpc_stream_writer = JsonRpcStreamWriter(tx)
self._check_parent_process = check_parent_process
self._endpoint = Endpoint(self, self._jsonrpc_stream_writer.write, max_workers=MAX_WORKERS)
self._dispatchers = []
self._shutdown = False
def start(self):
"""Entry point for the server."""
self._jsonrpc_stream_reader.listen(self._endpoint.consume)
def __getitem__(self, item):
"""Override getitem to fallback through multiple dispatchers."""
if self._shutdown and item != 'exit':
# exit is the only allowed method during shutdown
log.debug("Ignoring non-exit method during shutdown: %s", item)
raise KeyError
try:
return super().__getitem__(item)
except KeyError:
# Fallback through extra dispatchers
for dispatcher in self._dispatchers:
try:
return dispatcher[item]
except KeyError:
continue
raise KeyError()
def m_shutdown(self, **_kwargs):
self._shutdown = True
def m_exit(self, **_kwargs):
self._endpoint.shutdown()
self._jsonrpc_stream_reader.close()
self._jsonrpc_stream_writer.close()
def _match_uri_to_workspace(self, uri):
workspace_uri = _utils.match_uri_to_workspace(uri, self.workspaces)
return self.workspaces.get(workspace_uri, self.workspace)
def _hook(self, hook_name, doc_uri=None, **kwargs):
"""Calls hook_name and returns a list of results from all registered handlers"""
workspace = self._match_uri_to_workspace(doc_uri)
doc = workspace.get_document(doc_uri) if doc_uri else None
hook_handlers = self.config.plugin_manager.subset_hook_caller(hook_name, self.config.disabled_plugins)
return hook_handlers(config=self.config, workspace=workspace, document=doc, **kwargs)
def capabilities(self):
server_capabilities = {
'codeActionProvider': True,
'codeLensProvider': {
'resolveProvider': False, # We may need to make this configurable
},
'completionProvider': {
'resolveProvider': False, # We know everything ahead of time
'triggerCharacters': ['.']
},
'documentFormattingProvider': True,
'documentHighlightProvider': True,
'documentRangeFormattingProvider': True,
'documentSymbolProvider': True,
'definitionProvider': True,
'executeCommandProvider': {
'commands': flatten(self._hook('pylsp_commands'))
},
'hoverProvider': True,
'referencesProvider': True,
'renameProvider': True,
'foldingRangeProvider': True,
'signatureHelpProvider': {
'triggerCharacters': ['(', ',', '=']
},
'textDocumentSync': {
'change': lsp.TextDocumentSyncKind.INCREMENTAL,
'save': {
'includeText': True,
},
'openClose': True,
},
'workspace': {
'workspaceFolders': {
'supported': True,
'changeNotifications': True
}
},
'experimental': merge(
self._hook('pylsp_experimental_capabilities'))
}
log.info('Server capabilities: %s', server_capabilities)
return server_capabilities
def m_initialize(self, processId=None, rootUri=None, rootPath=None, initializationOptions=None, **_kwargs):
log.debug('Language server initialized with %s %s %s %s', processId, rootUri, rootPath, initializationOptions)
if rootUri is None:
rootUri = uris.from_fs_path(rootPath) if rootPath is not None else ''
self.workspaces.pop(self.root_uri, None)
self.root_uri = rootUri
self.config = config.Config(rootUri, initializationOptions or {},
processId, _kwargs.get('capabilities', {}))
self.workspace = Workspace(rootUri, self._endpoint, self.config)
self.workspaces[rootUri] = self.workspace
self._dispatchers = self._hook('pylsp_dispatchers')
self._hook('pylsp_initialize')
if self._check_parent_process and processId is not None and self.watching_thread is None:
def watch_parent_process(pid):
# exit when the given pid is not alive
if not _utils.is_process_alive(pid):
log.info("parent process %s is not alive, exiting!", pid)
self.m_exit()
else:
threading.Timer(PARENT_PROCESS_WATCH_INTERVAL, watch_parent_process, args=[pid]).start()
self.watching_thread = threading.Thread(target=watch_parent_process, args=(processId,))
self.watching_thread.daemon = True
self.watching_thread.start()
# Get our capabilities
return {'capabilities': self.capabilities()}
def m_initialized(self, **_kwargs):
self._hook('pylsp_initialized')
def code_actions(self, doc_uri, range, context):
return flatten(self._hook('pylsp_code_actions', doc_uri, range=range, context=context))
def code_lens(self, doc_uri):
return flatten(self._hook('pylsp_code_lens', doc_uri))
def completions(self, doc_uri, position):
completions = self._hook('pylsp_completions', doc_uri, position=position)
return {
'isIncomplete': False,
'items': flatten(completions)
}
def definitions(self, doc_uri, position):
return flatten(self._hook('pylsp_definitions', doc_uri, position=position))
def document_symbols(self, doc_uri):
return flatten(self._hook('pylsp_document_symbols', doc_uri))
def execute_command(self, command, arguments):
return self._hook('pylsp_execute_command', command=command, arguments=arguments)
def format_document(self, doc_uri):
return self._hook('pylsp_format_document', doc_uri)
def format_range(self, doc_uri, range):
return self._hook('pylsp_format_range', doc_uri, range=range)
def highlight(self, doc_uri, position):
return flatten(self._hook('pylsp_document_highlight', doc_uri, position=position)) or None
def hover(self, doc_uri, position):
return self._hook('pylsp_hover', doc_uri, position=position) or {'contents': ''}
@_utils.debounce(LINT_DEBOUNCE_S, keyed_by='doc_uri')
def lint(self, doc_uri, is_saved):
# Since we're debounced, the document may no longer be open
workspace = self._match_uri_to_workspace(doc_uri)
if doc_uri in workspace.documents:
workspace.publish_diagnostics(
doc_uri,
flatten(self._hook('pylsp_lint', doc_uri, is_saved=is_saved))
)
def references(self, doc_uri, position, exclude_declaration):
return flatten(self._hook(
'pylsp_references', doc_uri, position=position,
exclude_declaration=exclude_declaration
))
def rename(self, doc_uri, position, new_name):
return self._hook('pylsp_rename', doc_uri, position=position, new_name=new_name)
def signature_help(self, doc_uri, position):
return self._hook('pylsp_signature_help', doc_uri, position=position)
def folding(self, doc_uri):
return flatten(self._hook('pylsp_folding_range', doc_uri))
def m_text_document__did_close(self, textDocument=None, **_kwargs):
workspace = self._match_uri_to_workspace(textDocument['uri'])
workspace.rm_document(textDocument['uri'])
def m_text_document__did_open(self, textDocument=None, **_kwargs):
workspace = self._match_uri_to_workspace(textDocument['uri'])
workspace.put_document(textDocument['uri'], textDocument['text'], version=textDocument.get('version'))
self._hook('pylsp_document_did_open', textDocument['uri'])
self.lint(textDocument['uri'], is_saved=True)
def m_text_document__did_change(self, contentChanges=None, textDocument=None, **_kwargs):
workspace = self._match_uri_to_workspace(textDocument['uri'])
for change in contentChanges:
workspace.update_document(
textDocument['uri'],
change,
version=textDocument.get('version')
)
self.lint(textDocument['uri'], is_saved=False)
def m_text_document__did_save(self, textDocument=None, **_kwargs):
self.lint(textDocument['uri'], is_saved=True)
def m_text_document__code_action(self, textDocument=None, range=None, context=None, **_kwargs):
return self.code_actions(textDocument['uri'], range, context)
def m_text_document__code_lens(self, textDocument=None, **_kwargs):
return self.code_lens(textDocument['uri'])
def m_text_document__completion(self, textDocument=None, position=None, **_kwargs):
return self.completions(textDocument['uri'], position)
def m_text_document__definition(self, textDocument=None, position=None, **_kwargs):
return self.definitions(textDocument['uri'], position)
def m_text_document__document_highlight(self, textDocument=None, position=None, **_kwargs):
return self.highlight(textDocument['uri'], position)
def m_text_document__hover(self, textDocument=None, position=None, **_kwargs):
return self.hover(textDocument['uri'], position)
def m_text_document__document_symbol(self, textDocument=None, **_kwargs):
return self.document_symbols(textDocument['uri'])
def m_text_document__formatting(self, textDocument=None, _options=None, **_kwargs):
# For now we're ignoring formatting options.
return self.format_document(textDocument['uri'])
def m_text_document__rename(self, textDocument=None, position=None, newName=None, **_kwargs):
return self.rename(textDocument['uri'], position, newName)
def m_text_document__folding_range(self, textDocument=None, **_kwargs):
return self.folding(textDocument['uri'])
def m_text_document__range_formatting(self, textDocument=None, range=None, _options=None, **_kwargs):
# Again, we'll ignore formatting options for now.
return self.format_range(textDocument['uri'], range)
def m_text_document__references(self, textDocument=None, position=None, context=None, **_kwargs):
exclude_declaration = not context['includeDeclaration']
return self.references(textDocument['uri'], position, exclude_declaration)
def m_text_document__signature_help(self, textDocument=None, position=None, **_kwargs):
return self.signature_help(textDocument['uri'], position)
def m_workspace__did_change_configuration(self, settings=None):
self.config.update((settings or {}).get('pylsp', {}))
for workspace_uri in self.workspaces:
workspace = self.workspaces[workspace_uri]
workspace.update_config(settings)
for doc_uri in workspace.documents:
self.lint(doc_uri, is_saved=False)
def m_workspace__did_change_workspace_folders(self, event=None, **_kwargs): # pylint: disable=too-many-locals
if event is None:
return
added = event.get('added', [])
removed = event.get('removed', [])
for removed_info in removed:
if 'uri' in removed_info:
removed_uri = removed_info['uri']
self.workspaces.pop(removed_uri, None)
for added_info in added:
if 'uri' in added_info:
added_uri = added_info['uri']
workspace_config = config.Config(
added_uri, self.config._init_opts,
self.config._process_id, self.config._capabilities)
workspace_config.update(self.config._settings)
self.workspaces[added_uri] = Workspace(
added_uri, self._endpoint, workspace_config)
root_workspace_removed = any(removed_info['uri'] == self.root_uri for removed_info in removed)
workspace_added = len(added) > 0 and 'uri' in added[0]
if root_workspace_removed and workspace_added:
added_uri = added[0]['uri']
self.root_uri = added_uri
new_root_workspace = self.workspaces[added_uri]
self.config = new_root_workspace._config
self.workspace = new_root_workspace
elif root_workspace_removed:
# NOTE: Removing the root workspace can only happen when the server
# is closed, thus the else condition of this if can never happen.
if self.workspaces:
log.debug('Root workspace deleted!')
available_workspaces = sorted(self.workspaces)
first_workspace = available_workspaces[0]
new_root_workspace = self.workspaces[first_workspace]
self.root_uri = first_workspace
self.config = new_root_workspace._config
self.workspace = new_root_workspace
# Migrate documents that are on the root workspace and have a better
# match now
doc_uris = list(self.workspace._docs.keys())
for uri in doc_uris:
doc = self.workspace._docs.pop(uri)
new_workspace = self._match_uri_to_workspace(uri)
new_workspace._docs[uri] = doc
def m_workspace__did_change_watched_files(self, changes=None, **_kwargs):
changed_py_files = set()
config_changed = False
for d in (changes or []):
if d['uri'].endswith(PYTHON_FILE_EXTENSIONS):
changed_py_files.add(d['uri'])
elif d['uri'].endswith(CONFIG_FILEs):
config_changed = True
if config_changed:
self.config.settings.cache_clear()
elif not changed_py_files:
# Only externally changed python files and lint configs may result in changed diagnostics.
return
for workspace_uri in self.workspaces:
workspace = self.workspaces[workspace_uri]
for doc_uri in workspace.documents:
# Changes in doc_uri are already handled by m_text_document__did_save
if doc_uri not in changed_py_files:
self.lint(doc_uri, is_saved=False)
def m_workspace__execute_command(self, command=None, arguments=None):
    """Handle ``workspace/executeCommand`` by delegating to execute_command()."""
    return self.execute_command(command, arguments)
def flatten(list_of_lists):
    """Concatenate a sequence of lists into a single flat list."""
    flattened = []
    for sublist in list_of_lists:
        flattened.extend(sublist)
    return flattened
def merge(list_of_dicts):
    """Merge a sequence of dicts into one; later dicts win on key clashes."""
    merged = {}
    for mapping in list_of_dicts:
        merged.update(mapping)
    return merged
|
process_base.py | import abc
import logging
import os
import signal
import subprocess
import threading
from react.observable import ReplayObservable
from utils import os_utils
LOGGER = logging.getLogger('script_server.process_base')
class ProcessWrapper(metaclass=abc.ABCMeta):
    """Abstract wrapper around a running OS process.

    Owns the process handle, streams its output through ``output_stream``
    (a ReplayObservable), and notifies registered listeners when the
    process exits. Subclasses implement the actual spawning and I/O.
    """

    def __init__(self, command, working_directory, env_variables):
        self.process = None
        self.working_directory = working_directory
        self.command = command
        self.env_variables = env_variables
        self.finish_listeners = []
        # output_stream is guaranteed to close not earlier than process exit
        self.output_stream = ReplayObservable()
        self.notify_finish_thread = None

    def start(self):
        """Spawn the process and start the background threads that pump its
        output and fire finish listeners."""
        self.start_execution(self.command, self.working_directory)

        read_output_thread = threading.Thread(target=self.pipe_process_output)
        read_output_thread.start()

        self.notify_finish_thread = threading.Thread(target=self.notify_finished)
        self.notify_finish_thread.start()

    def prepare_env_variables(self):
        """Return a copy of os.environ overlaid with self.env_variables.

        Forces PYTHONUNBUFFERED=1 (unless already set) so output from child
        Python scripts is streamed live instead of buffered.
        """
        env_variables = dict(os.environ, **self.env_variables)
        if 'PYTHONUNBUFFERED' not in env_variables:
            env_variables['PYTHONUNBUFFERED'] = '1'
        return env_variables

    @abc.abstractmethod
    def pipe_process_output(self):
        """Read the child's output and push it into output_stream."""

    @abc.abstractmethod
    def start_execution(self, command, working_directory):
        """Spawn the process and assign ``self.process``."""

    @abc.abstractmethod
    def write_to_input(self, value):
        """Send ``value`` to the process's stdin."""

    @abc.abstractmethod
    def wait_finish(self):
        """Block until the process has exited."""

    def get_process_id(self):
        # Returns an int (Popen.pid).
        return self.process.pid

    def is_finished(self):
        return self.process.poll() is not None

    def get_return_code(self):
        return self.process.returncode

    def _write_script_output(self, text):
        self.output_stream.push(text)

    def stop(self):
        """Request graceful termination.

        On POSIX, SIGTERMs the whole process group and registers a listener
        that SIGKILLs any leftover children once the main process exits.
        On Windows, terminates only the main process.
        """
        if not self.is_finished():
            if not os_utils.is_win():
                group_id = os.getpgid(self.get_process_id())
                os.killpg(group_id, signal.SIGTERM)

                class KillChildren(object):
                    # Closes over group_id; runs after the main process exits.
                    def finished(self):
                        try:
                            os.killpg(group_id, signal.SIGKILL)
                        except ProcessLookupError:
                            # probably there are no children left
                            pass

                self.add_finish_listener(KillChildren())
            else:
                self.process.terminate()

            self._write_script_output('\n>> STOPPED BY USER\n')

    def kill(self):
        """Forcefully kill the process (and its children, where possible)."""
        if not self.is_finished():
            if not os_utils.is_win():
                group_id = os.getpgid(self.get_process_id())
                os.killpg(group_id, signal.SIGKILL)
                self._write_script_output('\n>> KILLED\n')
            else:
                # BUGFIX: pid is an int; the original concatenated it directly
                # to a str, raising TypeError before taskkill ever ran.
                subprocess.Popen('taskkill /F /T /PID ' + str(self.get_process_id()))

    def add_finish_listener(self, listener):
        """Register ``listener`` (object with a finished() method); fires
        immediately if the process already exited."""
        if self.is_finished():
            listener.finished()
            return

        self.finish_listeners.append(listener)

    def notify_finished(self):
        """Wait for process exit, then invoke every finish listener.

        A failing listener is logged and does not prevent the others from
        being notified.
        """
        self.wait_finish()

        for listener in self.finish_listeners:
            try:
                listener.finished()
            except Exception:
                # Narrowed from bare `except:` so SystemExit/KeyboardInterrupt
                # still propagate; lazy %-args avoid str() work unless logged.
                LOGGER.exception('Failed to notify listener: %s', listener)

    def cleanup(self):
        """Release the output stream once the wrapper is no longer needed."""
        self.output_stream.dispose()
|
language.py | # coding: utf8
from __future__ import absolute_import, unicode_literals
import random
import itertools
from spacy.util import minibatch
import weakref
import functools
from collections import OrderedDict
from contextlib import contextmanager
from copy import copy, deepcopy
from thinc.neural import Model
import srsly
import multiprocessing as mp
from itertools import chain, cycle
from .tokenizer import Tokenizer
from .vocab import Vocab
from .lemmatizer import Lemmatizer
from .lookups import Lookups
from .analysis import analyze_pipes, analyze_all_pipes, validate_attrs
from .compat import izip, basestring_, is_python2, class_types
from .gold import GoldParse
from .scorer import Scorer
from ._ml import link_vectors_to_models, create_default_optimizer
from .attrs import IS_STOP, LANG
from .lang.punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .lang.punctuation import TOKENIZER_INFIXES
from .lang.tokenizer_exceptions import TOKEN_MATCH
from .lang.tag_map import TAG_MAP
from .tokens import Doc
from .lang.lex_attrs import LEX_ATTRS, is_stop
from .errors import Errors, Warnings, deprecation_warning, user_warning
from . import util
from . import about
# Feature flag: when True, add_pipe/replace_pipe/remove_pipe run the
# analyze_pipes/analyze_all_pipes checks on the pipeline.
ENABLE_PIPELINE_ANALYSIS = False
class BaseDefaults(object):
    """Language-agnostic defaults for building a pipeline's vocab, tokenizer,
    lemmatizer and lookup tables. Language subclasses override the class
    attributes at the bottom with language-specific data."""

    @classmethod
    def create_lemmatizer(cls, nlp=None, lookups=None):
        # Lookups are created on demand so a bare Lemmatizer still works.
        if lookups is None:
            lookups = cls.create_lookups(nlp=nlp)
        return Lemmatizer(lookups=lookups)

    @classmethod
    def create_lookups(cls, nlp=None):
        """Load the lookup tables listed in cls.resources; entry-point
        packages may override individual table files for this language."""
        root = util.get_module_path(cls)
        filenames = {name: root / filename for name, filename in cls.resources}
        if LANG in cls.lex_attr_getters:
            lang = cls.lex_attr_getters[LANG](None)
            user_lookups = util.get_entry_point(util.ENTRY_POINTS.lookups, lang, {})
            filenames.update(user_lookups)
        lookups = Lookups()
        for name, filename in filenames.items():
            data = util.load_language_data(filename)
            lookups.add_table(name, data)
        return lookups

    @classmethod
    def create_vocab(cls, nlp=None):
        """Build a Vocab wired with this language's lexical attribute getters,
        tag map, lemmatizer and morphology special cases."""
        lookups = cls.create_lookups(nlp)
        lemmatizer = cls.create_lemmatizer(nlp, lookups=lookups)
        lex_attr_getters = dict(cls.lex_attr_getters)
        # This is messy, but it's the minimal working fix to Issue #639.
        lex_attr_getters[IS_STOP] = functools.partial(is_stop, stops=cls.stop_words)
        vocab = Vocab(
            lex_attr_getters=lex_attr_getters,
            tag_map=cls.tag_map,
            lemmatizer=lemmatizer,
            lookups=lookups,
        )
        for tag_str, exc in cls.morph_rules.items():
            for orth_str, attrs in exc.items():
                vocab.morphology.add_special_case(tag_str, orth_str, attrs)
        return vocab

    @classmethod
    def create_tokenizer(cls, nlp=None):
        """Compile the class-level prefix/suffix/infix rules into a Tokenizer
        bound to nlp's vocab (or a freshly created vocab if nlp is None)."""
        rules = cls.tokenizer_exceptions
        token_match = cls.token_match
        prefix_search = (
            util.compile_prefix_regex(cls.prefixes).search if cls.prefixes else None
        )
        suffix_search = (
            util.compile_suffix_regex(cls.suffixes).search if cls.suffixes else None
        )
        infix_finditer = (
            util.compile_infix_regex(cls.infixes).finditer if cls.infixes else None
        )
        vocab = nlp.vocab if nlp is not None else cls.create_vocab(nlp)
        return Tokenizer(
            vocab,
            rules=rules,
            prefix_search=prefix_search,
            suffix_search=suffix_search,
            infix_finditer=infix_finditer,
            token_match=token_match,
        )

    # Language-specific subclasses override the data below.
    pipe_names = ["tagger", "parser", "ner"]
    token_match = TOKEN_MATCH
    prefixes = tuple(TOKENIZER_PREFIXES)
    suffixes = tuple(TOKENIZER_SUFFIXES)
    infixes = tuple(TOKENIZER_INFIXES)
    tag_map = dict(TAG_MAP)
    tokenizer_exceptions = {}
    stop_words = set()
    morph_rules = {}
    lex_attr_getters = LEX_ATTRS
    syntax_iterators = {}
    resources = {}
    writing_system = {"direction": "ltr", "has_case": True, "has_letters": True}
    single_orth_variants = []
    paired_orth_variants = []
class Language(object):
    """A text-processing pipeline. Usually you'll load this once per process,
    and pass the instance around your application.

    Defaults (class): Settings, data and factory methods for creating the `nlp`
    object and processing pipeline.
    lang (unicode): Two-letter language ID, i.e. ISO code.

    DOCS: https://spacy.io/api/language
    """

    # Language subclasses override Defaults with their own BaseDefaults subclass.
    Defaults = BaseDefaults
    # Two-letter ISO language code; None on this base class.
    lang = None
    # Factory name -> callable used by create_pipe(); extended with
    # entry-point factories in __init__. NOTE: class-level dict, shared
    # across all Language instances.
    factories = {"tokenizer": lambda nlp: nlp.Defaults.create_tokenizer(nlp)}
def __init__(
    self, vocab=True, make_doc=True, max_length=10 ** 6, meta={}, **kwargs
):
    """Initialise a Language object.

    vocab (Vocab): A `Vocab` object. If `True`, a vocab is created via
        `Language.Defaults.create_vocab`.
    make_doc (callable): A function that takes text and returns a `Doc`
        object. Usually a `Tokenizer`.
    meta (dict): Custom meta data for the Language class. Is written to by
        models to add model meta data.
    max_length (int) :
        Maximum number of characters in a single text. The current v2 models
        may run out memory on extremely long texts, due to large internal
        allocations. You should segment these texts into meaningful units,
        e.g. paragraphs, subsections etc, before passing them to spaCy.
        Default maximum length is 1,000,000 characters (1mb). As a rule of
        thumb, if all pipeline components are enabled, spaCy's default
        models currently requires roughly 1GB of temporary memory per
        100,000 characters in one text.
    RETURNS (Language): The newly constructed object.
    """
    # NOTE(review): `meta={}` is a shared mutable default, but it is only
    # read here (copied via dict(meta)), never mutated, so it is safe.
    user_factories = util.get_entry_points(util.ENTRY_POINTS.factories)
    # factories is a class-level dict; entry-point factories are merged in
    # for all instances.
    self.factories.update(user_factories)
    self._meta = dict(meta)
    self._path = None
    if vocab is True:
        factory = self.Defaults.create_vocab
        vocab = factory(self, **meta.get("vocab", {}))
        if vocab.vectors.name is None:
            vocab.vectors.name = meta.get("vectors", {}).get("name")
    else:
        # Caller supplied a Vocab: its language must match ours.
        if (self.lang and vocab.lang) and (self.lang != vocab.lang):
            raise ValueError(Errors.E150.format(nlp=self.lang, vocab=vocab.lang))
    self.vocab = vocab
    if make_doc is True:
        factory = self.Defaults.create_tokenizer
        make_doc = factory(self, **meta.get("tokenizer", {}))
    self.tokenizer = make_doc
    self.pipeline = []
    self.max_length = max_length
    self._optimizer = None
@property
def path(self):
    # Directory the model was loaded from via from_disk(); None if the
    # object was built in memory.
    return self._path

@property
def meta(self):
    """Model meta data. Missing defaults are filled in lazily, and the
    vectors/pipeline/factories/labels entries are recomputed on every
    access so they always reflect the current state of the object."""
    if self.vocab.lang:
        self._meta.setdefault("lang", self.vocab.lang)
    else:
        self._meta.setdefault("lang", self.lang)
    self._meta.setdefault("name", "model")
    self._meta.setdefault("version", "0.0.0")
    self._meta.setdefault("spacy_version", ">={}".format(about.__version__))
    self._meta.setdefault("description", "")
    self._meta.setdefault("author", "")
    self._meta.setdefault("email", "")
    self._meta.setdefault("url", "")
    self._meta.setdefault("license", "")
    self._meta["vectors"] = {
        "width": self.vocab.vectors_length,
        "vectors": len(self.vocab.vectors),
        "keys": self.vocab.vectors.n_keys,
        "name": self.vocab.vectors.name,
    }
    self._meta["pipeline"] = self.pipe_names
    self._meta["factories"] = self.pipe_factories
    self._meta["labels"] = self.pipe_labels
    return self._meta

@meta.setter
def meta(self, value):
    # Replaces the whole meta dict; lazy defaults re-apply on next read.
    self._meta = value
# Conveniences to access pipeline components
# Shouldn't be used anymore!
@property
def tensorizer(self):
    # Deprecated: call get_pipe("tensorizer") directly.
    return self.get_pipe("tensorizer")

@property
def tagger(self):
    # Deprecated: call get_pipe("tagger") directly.
    return self.get_pipe("tagger")

@property
def parser(self):
    # Deprecated: call get_pipe("parser") directly.
    return self.get_pipe("parser")

@property
def entity(self):
    # Deprecated: call get_pipe("ner") directly.
    return self.get_pipe("ner")

@property
def linker(self):
    # Deprecated: call get_pipe("entity_linker") directly.
    return self.get_pipe("entity_linker")

@property
def matcher(self):
    # Deprecated: call get_pipe("matcher") directly.
    return self.get_pipe("matcher")
@property
def pipe_names(self):
    """Get names of available pipeline components.

    RETURNS (list): List of component name strings, in order.
    """
    return [pipe_name for pipe_name, _ in self.pipeline]

@property
def pipe_factories(self):
    """Get the component factories for the available pipeline components.

    RETURNS (dict): Factory names, keyed by component names.
    """
    factories = {}
    for pipe_name, pipe in self.pipeline:
        # Falls back to the component name if the pipe doesn't record
        # which factory created it.
        factories[pipe_name] = getattr(pipe, "factory", pipe_name)
    return factories

@property
def pipe_labels(self):
    """Get the labels set by the pipeline components, if available (if
    the component exposes a labels property).

    RETURNS (dict): Labels keyed by component name.
    """
    labels = OrderedDict()
    for name, pipe in self.pipeline:
        if hasattr(pipe, "labels"):
            labels[name] = list(pipe.labels)
    return labels
def get_pipe(self, name):
    """Get a pipeline component for a given component name.

    name (unicode): Name of pipeline component to get.
    RETURNS (callable): The pipeline component.

    DOCS: https://spacy.io/api/language#get_pipe
    """
    for pipe_name, component in self.pipeline:
        if pipe_name == name:
            return component
    raise KeyError(Errors.E001.format(name=name, opts=self.pipe_names))

def create_pipe(self, name, config=dict()):
    """Create a pipeline component from a factory.

    name (unicode): Factory name to look up in `Language.factories`.
    config (dict): Configuration parameters to initialise component.
    RETURNS (callable): Pipeline component.

    DOCS: https://spacy.io/api/language#create_pipe
    """
    # NOTE(review): `config=dict()` is a shared mutable default; it is
    # only unpacked (**config), never mutated, so it is safe here.
    if name not in self.factories:
        # Special-cased error for the long-removed "sbd" component.
        if name == "sbd":
            raise KeyError(Errors.E108.format(name=name))
        else:
            raise KeyError(Errors.E002.format(name=name))
    factory = self.factories[name]
    return factory(self, **config)
def add_pipe(
    self, component, name=None, before=None, after=None, first=None, last=None
):
    """Add a component to the processing pipeline. Valid components are
    callables that take a `Doc` object, modify it and return it. Only one
    of before/after/first/last can be set. Default behaviour is "last".

    component (callable): The pipeline component.
    name (unicode): Name of pipeline component. Overwrites existing
        component.name attribute if available. If no name is set and
        the component exposes no name attribute, component.__name__ is
        used. An error is raised if a name already exists in the pipeline.
    before (unicode): Component name to insert component directly before.
    after (unicode): Component name to insert component directly after.
    first (bool): Insert component first / not first in the pipeline.
    last (bool): Insert component last / not last in the pipeline.

    DOCS: https://spacy.io/api/language#add_pipe
    """
    if not hasattr(component, "__call__"):
        msg = Errors.E003.format(component=repr(component), name=name)
        if isinstance(component, basestring_) and component in self.factories:
            # Caller passed a factory name string instead of a component.
            msg += Errors.E004.format(component=component)
        raise ValueError(msg)
    if name is None:
        name = util.get_component_name(component)
    if name in self.pipe_names:
        raise ValueError(Errors.E007.format(name=name, opts=self.pipe_names))
    # At most one placement argument may be set.
    if sum([bool(before), bool(after), bool(first), bool(last)]) >= 2:
        raise ValueError(Errors.E006)
    pipe_index = 0
    pipe = (name, component)
    if last or not any([first, before, after]):
        # Default placement: append at the end.
        pipe_index = len(self.pipeline)
        self.pipeline.append(pipe)
    elif first:
        self.pipeline.insert(0, pipe)
    elif before and before in self.pipe_names:
        pipe_index = self.pipe_names.index(before)
        self.pipeline.insert(self.pipe_names.index(before), pipe)
    elif after and after in self.pipe_names:
        pipe_index = self.pipe_names.index(after) + 1
        self.pipeline.insert(self.pipe_names.index(after) + 1, pipe)
    else:
        # before/after named a component that isn't in the pipeline.
        raise ValueError(
            Errors.E001.format(name=before or after, opts=self.pipe_names)
        )
    if ENABLE_PIPELINE_ANALYSIS:
        analyze_pipes(self.pipeline, name, component, pipe_index)
def has_pipe(self, name):
    """Check if a component name is present in the pipeline. Equivalent to
    `name in nlp.pipe_names`.

    name (unicode): Name of the component.
    RETURNS (bool): Whether a component of the name exists in the pipeline.

    DOCS: https://spacy.io/api/language#has_pipe
    """
    return name in self.pipe_names

def replace_pipe(self, name, component):
    """Replace a component in the pipeline.

    name (unicode): Name of the component to replace.
    component (callable): Pipeline component.

    DOCS: https://spacy.io/api/language#replace_pipe
    """
    if name not in self.pipe_names:
        raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
    if not hasattr(component, "__call__"):
        msg = Errors.E003.format(component=repr(component), name=name)
        if isinstance(component, basestring_) and component in self.factories:
            # Caller passed a factory name string instead of a component.
            msg += Errors.E135.format(name=name)
        raise ValueError(msg)
    # Replace in place to keep the component's position in the pipeline.
    self.pipeline[self.pipe_names.index(name)] = (name, component)
    if ENABLE_PIPELINE_ANALYSIS:
        analyze_all_pipes(self.pipeline)

def rename_pipe(self, old_name, new_name):
    """Rename a pipeline component.

    old_name (unicode): Name of the component to rename.
    new_name (unicode): New name of the component.

    DOCS: https://spacy.io/api/language#rename_pipe
    """
    if old_name not in self.pipe_names:
        raise ValueError(Errors.E001.format(name=old_name, opts=self.pipe_names))
    if new_name in self.pipe_names:
        raise ValueError(Errors.E007.format(name=new_name, opts=self.pipe_names))
    i = self.pipe_names.index(old_name)
    self.pipeline[i] = (new_name, self.pipeline[i][1])

def remove_pipe(self, name):
    """Remove a component from the pipeline.

    name (unicode): Name of the component to remove.
    RETURNS (tuple): A `(name, component)` tuple of the removed component.

    DOCS: https://spacy.io/api/language#remove_pipe
    """
    if name not in self.pipe_names:
        raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
    removed = self.pipeline.pop(self.pipe_names.index(name))
    if ENABLE_PIPELINE_ANALYSIS:
        analyze_all_pipes(self.pipeline)
    return removed
def __call__(self, text, disable=[], component_cfg=None):
    """Apply the pipeline to some text. The text can span multiple sentences,
    and can contain arbtrary whitespace. Alignment into the original string
    is preserved.

    text (unicode): The text to be processed.
    disable (list): Names of the pipeline components to disable.
    component_cfg (dict): An optional dictionary with extra keyword arguments
        for specific components.
    RETURNS (Doc): A container for accessing the annotations.

    DOCS: https://spacy.io/api/language#call
    """
    # NOTE(review): `disable=[]` is a shared mutable default; it is only
    # tested with `in`, never mutated, so it is safe here.
    if len(text) > self.max_length:
        raise ValueError(
            Errors.E088.format(length=len(text), max_length=self.max_length)
        )
    doc = self.make_doc(text)
    if component_cfg is None:
        component_cfg = {}
    for name, proc in self.pipeline:
        if name in disable:
            continue
        if not hasattr(proc, "__call__"):
            raise ValueError(Errors.E003.format(component=type(proc), name=name))
        doc = proc(doc, **component_cfg.get(name, {}))
        if doc is None:
            # A component must return the Doc (possibly modified in place).
            raise ValueError(Errors.E005.format(name=name))
    return doc
def disable_pipes(self, *names):
    """Disable one or more pipeline components. If used as a context
    manager, the pipeline will be restored to the initial state at the end
    of the block. Otherwise, a DisabledPipes object is returned, that has
    a `.restore()` method you can use to undo your changes.

    DOCS: https://spacy.io/api/language#disable_pipes
    """
    if len(names) == 1 and isinstance(names[0], (list, tuple)):
        names = names[0]  # support list of names instead of spread
    return DisabledPipes(self, *names)

def make_doc(self, text):
    """Tokenize `text` into a Doc without running any pipeline components."""
    return self.tokenizer(text)
def _format_docs_and_golds(self, docs, golds):
    """Format golds and docs before update models.

    Strings in `docs` are tokenized into Docs; dicts in `golds` are
    validated against the expected annotation keys and converted to
    GoldParse objects.
    """
    expected_keys = ("words", "tags", "heads", "deps", "entities", "cats", "links")
    gold_objs = []
    doc_objs = []
    for doc, gold in zip(docs, golds):
        if isinstance(doc, basestring_):
            doc = self.make_doc(doc)
        if not isinstance(gold, GoldParse):
            # Reject unknown annotation keys before constructing GoldParse.
            unexpected = [k for k in gold if k not in expected_keys]
            if unexpected:
                err = Errors.E151.format(unexp=unexpected, exp=expected_keys)
                raise ValueError(err)
            gold = GoldParse(doc, **gold)
        doc_objs.append(doc)
        gold_objs.append(gold)
    return doc_objs, gold_objs
def update(self, docs, golds, drop=0.0, sgd=None, losses=None, component_cfg=None):
    """Update the models in the pipeline.

    docs (iterable): A batch of `Doc` objects.
    golds (iterable): A batch of `GoldParse` objects.
    drop (float): The dropout rate.
    sgd (callable): An optimizer.
    losses (dict): Dictionary to update with the loss, keyed by component.
    component_cfg (dict): Config parameters for specific pipeline
        components, keyed by component name.

    DOCS: https://spacy.io/api/language#update
    """
    if len(docs) != len(golds):
        raise IndexError(Errors.E009.format(n_docs=len(docs), n_golds=len(golds)))
    if len(docs) == 0:
        return
    if sgd is None:
        # Lazily create and reuse a default optimizer.
        if self._optimizer is None:
            self._optimizer = create_default_optimizer(Model.ops)
        sgd = self._optimizer
    # Allow dict of args to GoldParse, instead of GoldParse objects.
    docs, golds = self._format_docs_and_golds(docs, golds)
    grads = {}

    # Gradient sink passed as the components' "sgd": collects gradients
    # so they can be applied in one pass per component below.
    def get_grads(W, dW, key=None):
        grads[key] = (W, dW)

    get_grads.alpha = sgd.alpha
    get_grads.b1 = sgd.b1
    get_grads.b2 = sgd.b2
    pipes = list(self.pipeline)
    # Shuffle component order so no component systematically trains first.
    random.shuffle(pipes)
    if component_cfg is None:
        component_cfg = {}
    for name, proc in pipes:
        if not hasattr(proc, "update"):
            continue
        # Fresh gradient dict per component; get_grads closes over `grads`
        # by name, so rebinding here resets what it collects.
        grads = {}
        kwargs = component_cfg.get(name, {})
        kwargs.setdefault("drop", drop)
        proc.update(docs, golds, sgd=get_grads, losses=losses, **kwargs)
        for key, (W, dW) in grads.items():
            sgd(W, dW, key=key)
def rehearse(self, docs, sgd=None, losses=None, config=None):
    """Make a "rehearsal" update to the models in the pipeline, to prevent
    forgetting. Rehearsal updates run an initial copy of the model over some
    data, and update the model so its current predictions are more like the
    initial ones. This is useful for keeping a pretrained model on-track,
    even if you're updating it with a smaller set of examples.

    docs (iterable): A batch of `Doc` objects.
    drop (float): The dropout rate.
    sgd (callable): An optimizer.
    RETURNS (dict): Results from the update.

    EXAMPLE:
        >>> raw_text_batches = minibatch(raw_texts)
        >>> for labelled_batch in minibatch(zip(train_docs, train_golds)):
        >>>     docs, golds = zip(*train_docs)
        >>>     nlp.update(docs, golds)
        >>>     raw_batch = [nlp.make_doc(text) for text in next(raw_text_batches)]
        >>>     nlp.rehearse(raw_batch)
    """
    # TODO: document
    if len(docs) == 0:
        return
    if sgd is None:
        # Lazily create and reuse a default optimizer.
        if self._optimizer is None:
            self._optimizer = create_default_optimizer(Model.ops)
        sgd = self._optimizer
    docs = list(docs)
    for i, doc in enumerate(docs):
        # Raw strings are tokenized in place.
        if isinstance(doc, basestring_):
            docs[i] = self.make_doc(doc)
    pipes = list(self.pipeline)
    random.shuffle(pipes)
    if config is None:
        config = {}
    grads = {}

    # Gradient sink; same collect-then-apply pattern as update().
    def get_grads(W, dW, key=None):
        grads[key] = (W, dW)

    get_grads.alpha = sgd.alpha
    get_grads.b1 = sgd.b1
    get_grads.b2 = sgd.b2
    for name, proc in pipes:
        if not hasattr(proc, "rehearse"):
            continue
        # Fresh gradient dict per component (get_grads closes over `grads`).
        grads = {}
        proc.rehearse(docs, sgd=get_grads, losses=losses, **config.get(name, {}))
        for key, (W, dW) in grads.items():
            sgd(W, dW, key=key)
    return losses
def preprocess_gold(self, docs_golds):
    """Can be called before training to pre-process gold data. By default,
    it handles nonprojectivity and adds missing tags to the tag map.

    docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
    YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects.
    """
    # Each component that implements preprocess_gold wraps the iterable,
    # forming a lazy chain of preprocessing generators.
    for name, proc in self.pipeline:
        if hasattr(proc, "preprocess_gold"):
            docs_golds = proc.preprocess_gold(docs_golds)
    for doc, gold in docs_golds:
        yield doc, gold
def begin_training(self, get_gold_tuples=None, sgd=None, component_cfg=None, **cfg):
    """Allocate models, pre-process training data and acquire a trainer and
    optimizer. Used as a contextmanager.

    get_gold_tuples (function): Function returning gold data
    component_cfg (dict): Config parameters for specific components.
    **cfg: Config parameters.
    RETURNS: An optimizer.

    DOCS: https://spacy.io/api/language#begin_training
    """
    if get_gold_tuples is None:
        get_gold_tuples = lambda: []
    # Populate vocab
    else:
        for _, annots_brackets in get_gold_tuples():
            # pop() discards the trailing brackets entry of each example.
            _ = annots_brackets.pop()
            for annots, _ in annots_brackets:
                for word in annots[1]:
                    # Looking a word up adds it to the vocab.
                    _ = self.vocab[word]  # noqa: F841
    if cfg.get("device", -1) >= 0:
        util.use_gpu(cfg["device"])
        if self.vocab.vectors.data.shape[1] >= 1:
            # Move the vectors table onto the selected device's ops.
            self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
    link_vectors_to_models(self.vocab)
    if self.vocab.vectors.data.shape[1]:
        cfg["pretrained_vectors"] = self.vocab.vectors.name
    if sgd is None:
        sgd = create_default_optimizer(Model.ops)
    self._optimizer = sgd
    if component_cfg is None:
        component_cfg = {}
    for name, proc in self.pipeline:
        if hasattr(proc, "begin_training"):
            kwargs = component_cfg.get(name, {})
            kwargs.update(cfg)
            proc.begin_training(
                get_gold_tuples,
                pipeline=self.pipeline,
                sgd=self._optimizer,
                **kwargs
            )
    return self._optimizer
def resume_training(self, sgd=None, **cfg):
    """Continue training a pretrained model.

    Create and return an optimizer, and initialize "rehearsal" for any pipeline
    component that has a .rehearse() method. Rehearsal is used to prevent
    models from "forgetting" their initialised "knowledge". To perform
    rehearsal, collect samples of text you want the models to retain performance
    on, and call nlp.rehearse() with a batch of Doc objects.
    """
    if cfg.get("device", -1) >= 0:
        util.use_gpu(cfg["device"])
        if self.vocab.vectors.data.shape[1] >= 1:
            # Move the vectors table onto the selected device's ops.
            self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
    link_vectors_to_models(self.vocab)
    if self.vocab.vectors.data.shape[1]:
        cfg["pretrained_vectors"] = self.vocab.vectors.name
    if sgd is None:
        sgd = create_default_optimizer(Model.ops)
    self._optimizer = sgd
    for name, proc in self.pipeline:
        if hasattr(proc, "_rehearsal_model"):
            # Snapshot the current model; rehearse() pulls predictions
            # toward this frozen copy.
            proc._rehearsal_model = deepcopy(proc.model)
    return self._optimizer
def evaluate(
    self, docs_golds, verbose=False, batch_size=256, scorer=None, component_cfg=None
):
    """Evaluate a model's pipeline components.

    docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
    verbose (bool): Print debugging information.
    batch_size (int): Batch size to use.
    scorer (Scorer): Optional `Scorer` to use. If not passed in, a new one
        will be created.
    component_cfg (dict): An optional dictionary with extra keyword
        arguments for specific components.
    RETURNS (Scorer): The scorer containing the evaluation results.

    DOCS: https://spacy.io/api/language#evaluate
    """
    if scorer is None:
        scorer = Scorer(pipeline=self.pipeline)
    if component_cfg is None:
        component_cfg = {}
    docs, golds = zip(*docs_golds)
    docs = [
        self.make_doc(doc) if isinstance(doc, basestring_) else doc for doc in docs
    ]
    golds = list(golds)
    for name, pipe in self.pipeline:
        kwargs = component_cfg.get(name, {})
        kwargs.setdefault("batch_size", batch_size)
        if not hasattr(pipe, "pipe"):
            # Fall back to calling the component doc-by-doc.
            docs = _pipe(pipe, docs, kwargs)
        else:
            docs = pipe.pipe(docs, **kwargs)
    for doc, gold in zip(docs, golds):
        if not isinstance(gold, GoldParse):
            gold = GoldParse(doc, **gold)
        if verbose:
            print(doc)
        kwargs = component_cfg.get("scorer", {})
        kwargs.setdefault("verbose", verbose)
        scorer.score(doc, gold, **kwargs)
    return scorer
@contextmanager
def use_params(self, params, **cfg):
    """Replace weights of models in the pipeline with those provided in the
    params dictionary. Can be used as a contextmanager, in which case,
    models go back to their original weights after the block.

    params (dict): A dictionary of parameters keyed by model ID.
    **cfg: Config parameters.

    EXAMPLE:
        >>> with nlp.use_params(optimizer.averages):
        >>>     nlp.to_disk('/tmp/checkpoint')
    """
    contexts = [
        pipe.use_params(params)
        for name, pipe in self.pipeline
        if hasattr(pipe, "use_params")
    ]
    # TODO: Having trouble with contextlib
    # Workaround: these aren't actually context managers atm.
    # Each "context" is a generator: first next() swaps the weights in,
    # second next() (after the yield below) swaps them back out.
    for context in contexts:
        try:
            next(context)
        except StopIteration:
            pass
    yield
    for context in contexts:
        try:
            next(context)
        except StopIteration:
            pass
def pipe(
    self,
    texts,
    as_tuples=False,
    n_threads=-1,
    batch_size=1000,
    disable=[],
    cleanup=False,
    component_cfg=None,
    n_process=1,
):
    """Process texts as a stream, and yield `Doc` objects in order.

    texts (iterator): A sequence of texts to process.
    as_tuples (bool): If set to True, inputs should be a sequence of
        (text, context) tuples. Output will then be a sequence of
        (doc, context) tuples. Defaults to False.
    batch_size (int): The number of texts to buffer.
    disable (list): Names of the pipeline components to disable.
    cleanup (bool): If True, unneeded strings are freed to control memory
        use. Experimental.
    component_cfg (dict): An optional dictionary with extra keyword
        arguments for specific components.
    n_process (int): Number of processors to process texts, only supported
        in Python3. If -1, set `multiprocessing.cpu_count()`.
    YIELDS (Doc): Documents in the order of the original text.

    DOCS: https://spacy.io/api/language#pipe
    """
    # raw_texts will be used later to stop iterator.
    texts, raw_texts = itertools.tee(texts)
    if is_python2 and n_process != 1:
        user_warning(Warnings.W023)
        n_process = 1
    if n_threads != -1:
        # n_threads is a no-op kept for backwards compatibility.
        deprecation_warning(Warnings.W016)
    if n_process == -1:
        n_process = mp.cpu_count()
    if as_tuples:
        # Split (text, context) pairs, pipe the texts, then re-pair the
        # contexts with the resulting docs.
        text_context1, text_context2 = itertools.tee(texts)
        texts = (tc[0] for tc in text_context1)
        contexts = (tc[1] for tc in text_context2)
        docs = self.pipe(
            texts,
            batch_size=batch_size,
            disable=disable,
            n_process=n_process,
            component_cfg=component_cfg,
        )
        for doc, context in izip(docs, contexts):
            yield (doc, context)
        return
    if component_cfg is None:
        component_cfg = {}

    pipes = (
        []
    )  # contains functools.partial objects so that easily create multiprocess worker.
    for name, proc in self.pipeline:
        if name in disable:
            continue
        kwargs = component_cfg.get(name, {})
        # Allow component_cfg to overwrite the top-level kwargs.
        kwargs.setdefault("batch_size", batch_size)
        if hasattr(proc, "pipe"):
            f = functools.partial(proc.pipe, **kwargs)
        else:
            # Apply the function, but yield the doc
            f = functools.partial(_pipe, proc=proc, kwargs=kwargs)
        pipes.append(f)

    if n_process != 1:
        docs = self._multiprocessing_pipe(texts, pipes, n_process, batch_size)
    else:
        # if n_process == 1, no processes are forked.
        docs = (self.make_doc(text) for text in texts)
        for pipe in pipes:
            docs = pipe(docs)

    # Track weakrefs of "recent" documents, so that we can see when they
    # expire from memory. When they do, we know we don't need old strings.
    # This way, we avoid maintaining an unbounded growth in string entries
    # in the string store.
    recent_refs = weakref.WeakSet()
    old_refs = weakref.WeakSet()
    # Keep track of the original string data, so that if we flush old strings,
    # we can recover the original ones. However, we only want to do this if we're
    # really adding strings, to save up-front costs.
    original_strings_data = None
    nr_seen = 0
    for doc in docs:
        yield doc
        if cleanup:
            recent_refs.add(doc)
            if nr_seen < 10000:
                old_refs.add(doc)
                nr_seen += 1
            elif len(old_refs) == 0:
                # All the old docs have been garbage-collected: any string
                # only they referenced is now stale and can be flushed.
                old_refs, recent_refs = recent_refs, old_refs
                if original_strings_data is None:
                    original_strings_data = list(self.vocab.strings)
                else:
                    keys, strings = self.vocab.strings._cleanup_stale_strings(
                        original_strings_data
                    )
                    self.vocab._reset_cache(keys, strings)
                    self.tokenizer._reset_cache(keys)
                nr_seen = 0
def _multiprocessing_pipe(self, texts, pipes, n_process, batch_size):
    """Run the pipeline over `texts` in `n_process` worker processes.

    Workers receive batches of raw texts over per-worker queues, apply the
    pipeline, and send back byte-serialized Docs over pipes; results are
    deserialized here and yielded in the original order.
    """
    # raw_texts is used later to stop iteration.
    texts, raw_texts = itertools.tee(texts)
    # for sending texts to worker
    texts_q = [mp.Queue() for _ in range(n_process)]
    # for receiving byte encoded docs from worker
    bytedocs_recv_ch, bytedocs_send_ch = zip(
        *[mp.Pipe(False) for _ in range(n_process)]
    )

    batch_texts = minibatch(texts, batch_size)
    # Sender sends texts to the workers.
    # This is necessary to properly handle infinite length of texts.
    # (In this case, all data cannot be sent to the workers at once)
    sender = _Sender(batch_texts, texts_q, chunk_size=n_process)
    # send twice so that make process busy
    sender.send()
    sender.send()

    procs = [
        mp.Process(target=_apply_pipes, args=(self.make_doc, pipes, rch, sch))
        for rch, sch in zip(texts_q, bytedocs_send_ch)
    ]
    for proc in procs:
        proc.start()

    # Cycle channels not to break the order of docs.
    # The received object is batch of byte encoded docs, so flatten them with chain.from_iterable.
    byte_docs = chain.from_iterable(recv.recv() for recv in cycle(bytedocs_recv_ch))
    docs = (Doc(self.vocab).from_bytes(byte_doc) for byte_doc in byte_docs)
    try:
        # Zipping against raw_texts stops iteration once all inputs are
        # consumed, even though the workers' channels are cycled forever.
        for i, (_, doc) in enumerate(zip(raw_texts, docs), 1):
            yield doc
            if i % batch_size == 0:
                # tell `sender` that one batch was consumed.
                sender.step()
    finally:
        for proc in procs:
            proc.terminate()
def to_disk(self, path, exclude=tuple(), disable=None):
    """Save the current state to a directory.  If a model is loaded, this
    will include the model.

    path (unicode or Path): Path to a directory, which will be created if
        it doesn't exist.
    exclude (list): Names of components or serialization fields to exclude.

    DOCS: https://spacy.io/api/language#to_disk
    """
    if disable is not None:
        # `disable` is the deprecated name for `exclude`.
        deprecation_warning(Warnings.W014)
        exclude = disable
    path = util.ensure_path(path)
    serializers = OrderedDict()
    serializers["tokenizer"] = lambda p: self.tokenizer.to_disk(
        p, exclude=["vocab"]
    )
    # NOTE(review): the file handle from p.open("w") is left to the GC to
    # close; consider a `with` block if this is ever touched.
    serializers["meta.json"] = lambda p: p.open("w").write(
        srsly.json_dumps(self.meta)
    )
    for name, proc in self.pipeline:
        if not hasattr(proc, "name"):
            continue
        if name in exclude:
            continue
        if not hasattr(proc, "to_disk"):
            continue
        # `proc=proc` binds the component at definition time; without it,
        # every lambda would late-bind to the last component in the loop.
        serializers[name] = lambda p, proc=proc: proc.to_disk(p, exclude=["vocab"])
    serializers["vocab"] = lambda p: self.vocab.to_disk(p)
    util.to_disk(path, serializers, exclude)
def from_disk(self, path, exclude=tuple(), disable=None):
    """Loads state from a directory. Modifies the object in place and
    returns it. If the saved `Language` object contains a model, the
    model will be loaded.

    path (unicode or Path): A path to a directory.
    exclude (list): Names of components or serialization fields to exclude.
    RETURNS (Language): The modified `Language` object.

    DOCS: https://spacy.io/api/language#from_disk
    """
    if disable is not None:
        # `disable` is the deprecated name for `exclude`.
        deprecation_warning(Warnings.W014)
        exclude = disable
    path = util.ensure_path(path)
    deserializers = OrderedDict()
    deserializers["meta.json"] = lambda p: self.meta.update(srsly.read_json(p))
    deserializers["vocab"] = lambda p: self.vocab.from_disk(
        p
    ) and _fix_pretrained_vectors_name(self)
    deserializers["tokenizer"] = lambda p: self.tokenizer.from_disk(
        p, exclude=["vocab"]
    )
    for name, proc in self.pipeline:
        if name in exclude:
            continue
        if not hasattr(proc, "from_disk"):
            continue
        # `proc=proc` avoids the late-binding closure pitfall in the loop.
        deserializers[name] = lambda p, proc=proc: proc.from_disk(
            p, exclude=["vocab"]
        )
    if not (path / "vocab").exists() and "vocab" not in exclude:
        # Convert to list here in case exclude is (default) tuple
        exclude = list(exclude) + ["vocab"]
    util.from_disk(path, deserializers, exclude)
    self._path = path
    return self
def to_bytes(self, exclude=tuple(), disable=None, **kwargs):
    """Serialize the current state to a binary string.

    exclude (list): Names of components or serialization fields to exclude.
    RETURNS (bytes): The serialized form of the `Language` object.

    DOCS: https://spacy.io/api/language#to_bytes
    """
    if disable is not None:
        deprecation_warning(Warnings.W014)
        exclude = disable
    writers = OrderedDict()
    writers["vocab"] = lambda: self.vocab.to_bytes()
    writers["tokenizer"] = lambda: self.tokenizer.to_bytes(exclude=["vocab"])
    writers["meta.json"] = lambda: srsly.json_dumps(self.meta)
    for name, proc in self.pipeline:
        if name in exclude or not hasattr(proc, "to_bytes"):
            continue
        # Bind proc as a default arg so each lambda keeps its own component.
        writers[name] = lambda proc=proc: proc.to_bytes(exclude=["vocab"])
    exclude = util.get_serialization_exclude(writers, exclude, kwargs)
    return util.to_bytes(writers, exclude)
def from_bytes(self, bytes_data, exclude=tuple(), disable=None, **kwargs):
    """Load state from a binary string.

    bytes_data (bytes): The data to load from.
    exclude (list): Names of components or serialization fields to exclude.
    RETURNS (Language): The `Language` object.

    DOCS: https://spacy.io/api/language#from_bytes
    """
    if disable is not None:
        deprecation_warning(Warnings.W014)
        exclude = disable
    readers = OrderedDict()
    # Order matters: meta first, then vocab, then everything that needs the vocab.
    readers["meta.json"] = lambda b: self.meta.update(srsly.json_loads(b))
    readers["vocab"] = lambda b: self.vocab.from_bytes(
        b
    ) and _fix_pretrained_vectors_name(self)
    readers["tokenizer"] = lambda b: self.tokenizer.from_bytes(b, exclude=["vocab"])
    for name, proc in self.pipeline:
        if name in exclude or not hasattr(proc, "from_bytes"):
            continue
        # Bind proc as a default arg so each lambda keeps its own component.
        readers[name] = lambda b, proc=proc: proc.from_bytes(b, exclude=["vocab"])
    exclude = util.get_serialization_exclude(readers, exclude, kwargs)
    util.from_bytes(bytes_data, readers, exclude)
    return self
class component(object):
    """Decorator for pipeline components. Can decorate both function components
    and class components and will automatically register components in the
    Language.factories. If the component is a class and needs access to the
    nlp object or config parameters, it can expose a from_nlp classmethod
    that takes the nlp object and **cfg arguments and returns the initialized
    component.
    """

    # NB: This decorator needs to live here, because it needs to write to
    # Language.factories. All other solutions would cause circular import.

    def __init__(self, name=None, assigns=tuple(), requires=tuple(), retokenizes=False):
        """Decorate a pipeline component.

        name (unicode): Default component and factory name.
        assigns (list): Attributes assigned by component, e.g. `["token.pos"]`.
        requires (list): Attributes required by component, e.g. `["token.dep"]`.
        retokenizes (bool): Whether the component changes the tokenization.
        """
        self.name = name
        self.assigns = validate_attrs(assigns)
        self.requires = validate_attrs(requires)
        self.retokenizes = retokenizes

    def __call__(self, *args, **kwargs):
        obj, args = args[0], args[1:]
        component_name = self.name or util.get_component_name(obj)
        obj.name = component_name
        obj.factory = component_name
        obj.assigns = self.assigns
        obj.requires = self.requires
        obj.retokenizes = self.retokenizes

        def factory(nlp, **cfg):
            # Classes may define from_nlp for custom construction; plain
            # classes are instantiated; functions are returned as-is.
            if hasattr(obj, "from_nlp"):
                return obj.from_nlp(nlp, **cfg)
            if isinstance(obj, class_types):
                return obj()
            return obj

        Language.factories[obj.factory] = factory
        return obj
def _fix_pretrained_vectors_name(nlp):
    # TODO: Replace this once we handle vectors consistently as static data
    meta = nlp.meta
    if "vectors" in meta and meta["vectors"].get("name"):
        # Explicit vectors name in the meta wins.
        nlp.vocab.vectors.name = meta["vectors"]["name"]
    elif not nlp.vocab.vectors.size:
        nlp.vocab.vectors.name = None
    elif "name" in meta and "lang" in meta:
        # Fall back to a "<lang>_<name>.vectors" convention.
        nlp.vocab.vectors.name = "%s_%s.vectors" % (meta["lang"], meta["name"])
    else:
        raise ValueError(Errors.E092)
    if nlp.vocab.vectors.size != 0:
        link_vectors_to_models(nlp.vocab)
    for name, proc in nlp.pipeline:
        if not hasattr(proc, "cfg"):
            continue
        proc.cfg.setdefault("deprecation_fixes", {})
        proc.cfg["deprecation_fixes"]["vectors_name"] = nlp.vocab.vectors.name
class DisabledPipes(list):
    """Manager for temporary pipeline disabling."""

    def __init__(self, nlp, *names):
        self.nlp = nlp
        self.names = names
        # Important! Not deep copy -- we just want the container (but we also
        # want to support people providing arbitrarily typed nlp.pipeline
        # objects.)
        self.original_pipeline = copy(nlp.pipeline)
        list.__init__(self)
        # Remove the components and keep them here so restore() can re-add.
        self.extend(nlp.remove_pipe(name) for name in names)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.restore()

    def restore(self):
        """Restore the pipeline to its state when DisabledPipes was created."""
        previous, self.nlp.pipeline = self.nlp.pipeline, self.original_pipeline
        missing = [name for name, pipe in previous if not self.nlp.has_pipe(name)]
        if missing:
            # Don't change the pipeline if we're raising an error.
            self.nlp.pipeline = previous
            raise ValueError(Errors.E008.format(names=missing))
        self[:] = []
def _pipe(docs, proc, kwargs):
# We added some args for pipe that __call__ doesn't expect.
kwargs = dict(kwargs)
for arg in ["n_threads", "batch_size"]:
if arg in kwargs:
kwargs.pop(arg)
for doc in docs:
doc = proc(doc, **kwargs)
yield doc
def _apply_pipes(make_doc, pipes, receiver, sender):
    """Worker for Language.pipe

    make_doc (callable): Turns a text into a Doc.
    pipes (list): Pipeline components, applied to the doc stream in order.
    receiver: Channel to receive batches of texts, read with .get().
        NOTE(review): previously documented as multiprocessing.Connection,
        but .get() is a queue API, not Connection.recv() -- confirm against
        the caller in Language.pipe.
    sender (multiprocessing.Connection): Pipe to send docs. Usually created by
        `multiprocessing.Pipe()`
    """
    # Fixed the misspelled parameter name ("reciever") so it matches the
    # documented name; the function is invoked positionally as a worker target.
    while True:
        texts = receiver.get()
        docs = (make_doc(text) for text in texts)
        for pipe in pipes:
            docs = pipe(docs)
        # Connection does not accept unpickable objects, so send list.
        sender.send([doc.to_bytes() for doc in docs])
class _Sender:
"""Util for sending data to multiprocessing workers in Language.pipe"""
def __init__(self, data, queues, chunk_size):
self.data = iter(data)
self.queues = iter(cycle(queues))
self.chunk_size = chunk_size
self.count = 0
def send(self):
"""Send chunk_size items from self.data to channels."""
for item, q in itertools.islice(
zip(self.data, cycle(self.queues)), self.chunk_size
):
# cycle channels so that distribute the texts evenly
q.put(item)
def step(self):
"""Tell sender that comsumed one item.
Data is sent to the workers after every chunk_size calls."""
self.count += 1
if self.count >= self.chunk_size:
self.count = 0
self.send()
|
kernel.py | from dizest import util
from dizest.core.obj import Workflow
import json
import os
import multiprocessing as mp
import psutil
import signal
import traceback
# kernel manager
def _manager(q):
    # Kernel worker entry point, run in a child process (see Base.start).
    # q: child end of a duplex multiprocessing.Pipe. The worker first reports
    # its PID, then serves JSON-encoded {mode, data} messages in a loop.
    q.send(str(os.getpid()))
    # The first message must carry the workflow package and options.
    msg = json.loads(q.recv())
    mode = msg['mode']
    data = msg['data']
    workflow = Workflow(data['package'], **data['opts'])
    kernel = workflow.kernel()
    kernel.event_start()
    is_stopped = False
    while True:
        try:
            msg = json.loads(q.recv())
            mode = msg['mode']
            if 'data' in msg: data = msg['data']
            else: data = None
            if mode == 'wpdata':
                # Refresh the workflow definition/options from the parent.
                workflow.update(data['package'], **data['opts'])
            elif mode == 'run':
                if is_stopped == False:
                    workflow.status.set(status='running')
                    workflow.status.send()
                    kernel.event_run(data)
                else:
                    # A stop is in progress: drain queued run requests by
                    # resetting the flow instead of executing it.
                    workflow.flow(data).status.set(status="ready", code=0, message="")
                    workflow.flow(data).status.send()
            elif mode == 'sync':
                try:
                    workflow.flow(data).output.save()
                except:
                    pass
                # Echo the flow id back so the blocked parent (Base.sync) resumes.
                q.send(data)
            elif mode == 'stop':
                # Re-arm after a SIGINT-driven stop (KeyboardInterrupt below)
                # so subsequent 'run' messages execute again.
                is_stopped = False
        except KeyboardInterrupt:
            # Parent sent SIGINT (Base.stop): abort current work, reset all
            # flows, and skip 'run' messages already queued until 'stop' arrives.
            is_stopped = True
            workflow.flow().output.clear()
            workflow.flow().status.set(status="ready", code=0, message="")
            workflow.flow().status.send()
            kernel.event_stop()
        except Exception as e:
            # Best-effort loop: malformed or failing messages are ignored.
            pass
    # NOTE(review): the loop above catches all exceptions and never breaks,
    # so this shutdown path looks unreachable -- confirm intended placement.
    try:
        workflow.status.set(status='stop')
        workflow.status.send()
    except:
        pass
class Base:
    """Base kernel: owns the child process that executes a Workflow and the
    command pipe used to talk to it (see _manager for the child side)."""

    def __init__(self, workflow):
        self.workflow = workflow
        self.init()

    # init kernel
    def init(self):
        # p: child process handle; q: parent end of the command pipe.
        self.p = None
        self.q = None
        self.workflow.status.clear()

    def __del__(self):
        self.kill()

    # interface functions (overridden by subclasses, e.g. Single)
    def event_start(self):
        pass

    def event_stop(self):
        pass

    def event_run(self, flow_id):
        pass

    # kernel api functions
    # start process
    def start(self):
        """Spawn the kernel child process and wait for its PID handshake."""
        if self.p is not None:
            raise Exception("Process already running")
        workflow = self.workflow
        self.init()
        ctx = mp.get_context(workflow.opts.kernel_mode)
        if workflow.opts.kernel_mode == 'spawn':
            # Run the child under a specific interpreter/venv executable.
            ctx.set_executable(workflow.opts.kernel_env)
        sender, receiver = mp.Pipe()
        self.p = ctx.Process(target=_manager, args=(receiver,))
        self.p.start()
        self.q = sender
        # _manager sends its PID first; receiving it confirms the child is up.
        cpid = sender.recv()
        self.update()

    # send action to process
    def send(self, mode, data=None):
        """Send a JSON {mode, data} message to the child. Errors (e.g. no
        process / broken pipe) are deliberately swallowed: best-effort."""
        try:
            msg = dict()
            msg['mode'] = mode
            msg['data'] = data
            msg = json.dumps(msg, default=util.string.json_default)
            self.q.send(msg)
        except Exception as e:
            pass

    # stop process
    def stop(self):
        """Interrupt the child's current work (SIGINT) without killing it."""
        childpid = self.p.pid
        child = psutil.Process(childpid)
        child.send_signal(signal.SIGINT)
        # cancel waiting works signal
        self.send("stop")
        return True

    def kill(self):
        """Hard-kill the child process and reset all flow statuses.

        Returns False if the process is somehow still alive afterwards."""
        try:
            self.p.kill()
            self.p.join()
        except:
            pass
        processstate = self.is_alive()
        if processstate:
            # Child survived the kill; report failure without resetting state.
            return False
        self.init()
        workflow = self.workflow
        for flow_id in workflow.flows():
            flow = workflow.flow(flow_id)
            flow.status.set(status="ready", code=0, message="")
            flow.status.send()
        return True

    # update kernel data to process
    def update(self):
        if self.p is not None:
            wpdata = dict(self.workflow.info())
            self.send("wpdata", wpdata)

    # run dizest code
    def run(self, flow_id):
        """Queue flow_id for execution in the child. Returns self (fluent)."""
        if self.p is None:
            self.init()
            raise Exception("no running process")
        if self.is_alive() == False:
            self.init()
            raise Exception("no running process")
        flow = self.workflow.flow(flow_id)
        flow.status.set(code=0, status='pending', message='')
        flow.status.send()
        self.send("run", flow_id)
        return self

    # sync output using pickle
    def sync(self, flow_id):
        """Ask the child to persist the flow's output, then reload it here."""
        if self.q is None:
            return
        self.send("sync", flow_id)
        # Blocks until _manager echoes the flow id back.
        fid = self.q.recv()
        flow = self.workflow.flow(fid)
        flow.output.load()

    # check process is alive
    def is_alive(self):
        try:
            if self.p is None:
                return False
            return self.p.is_alive()
        except:
            return False
class Single(Base):
    """Kernel that runs each flow synchronously inside the worker process."""

    def event_start(self):
        pass

    def event_run(self, flow_id):
        # Resolve the flow by id and execute it directly.
        self.workflow.flow(flow_id).run()

    def event_stop(self):
        pass
testing.py | """
Contains testing infrastructure for QCFractal.
"""
import os
import pkgutil
import shutil
import signal
import socket
import subprocess
import sys
import threading
import time
from collections import Mapping
from contextlib import contextmanager
import numpy as np
import pandas as pd
import pytest
import qcengine as qcng
import requests
from qcelemental.models import Molecule
from tornado.ioloop import IOLoop
from .interface import FractalClient
from .postgres_harness import PostgresHarness, TemporaryPostgres
from .queue import build_queue_adapter
from .server import FractalServer
from .snowflake import FractalSnowflake
from .storage_sockets import storage_socket_factory
### Addon testing capabilities
def pytest_addoption(parser):
    """
    Additional PyTest CLI flags to add

    See `pytest_collection_modifyitems` for handling and `pytest_configure` for adding known in-line marks.
    """
    extra_flags = (
        ("--runslow", "run slow tests"),
        ("--runexamples", "run example tests"),
    )
    for flag, message in extra_flags:
        parser.addoption(flag, action="store_true", default=False, help=message)
def pytest_collection_modifyitems(config, items):
    """
    Handle test triggers based on the CLI flags

    Use decorators:
    @pytest.mark.slow
    @pytest.mark.example
    """
    skip_markers = []
    if not config.getoption("--runslow"):
        skip_markers.append(("slow", pytest.mark.skip(reason="need --runslow option to run")))
    if not config.getoption("--runexamples"):
        skip_markers.append(("example", pytest.mark.skip(reason="need --runexamples option to run")))
    for item in items:
        for keyword, marker in skip_markers:
            if keyword in item.keywords:
                item.add_marker(marker)
def pytest_configure(config):
    """Register the custom marks and flag that we're running under pytest."""
    import sys

    # Lets library code detect test mode; removed again in pytest_unconfigure.
    sys._called_from_test = True
    config.addinivalue_line("markers", "example: Mark a given test as an example which can be run")
    config.addinivalue_line(
        "markers", "slow: Mark a given test as slower than most other tests, needing a special " "flag to run."
    )
def pytest_unconfigure(config):
    """Undo pytest_configure: remove the test-mode flag from sys."""
    import sys

    del sys._called_from_test
def _plugin_import(plug):
plug_spec = pkgutil.find_loader(plug)
if plug_spec is None:
return False
else:
return True
# Message shown when an optional dependency is missing.
_import_message = "Not detecting module {}. Install package if necessary and add to envvar PYTHONPATH"
# Queue-adapter backends exercised by the parametrized fixtures below.
_adapter_testing = ["pool", "dask", "fireworks", "parsl"]
# Figure out what is imported
_programs = {
    "fireworks": _plugin_import("fireworks"),
    "rdkit": _plugin_import("rdkit"),
    "psi4": _plugin_import("psi4"),
    "parsl": _plugin_import("parsl"),
    "dask": _plugin_import("dask"),
    "dask_jobqueue": _plugin_import("dask_jobqueue"),
    "geometric": _plugin_import("geometric"),
    "torsiondrive": _plugin_import("torsiondrive"),
    "torchani": _plugin_import("torchani"),
}
# Only probe the submodule when the parent package is importable.
if _programs["dask"]:
    _programs["dask.distributed"] = _plugin_import("dask.distributed")
else:
    _programs["dask.distributed"] = False
# dftd3 is an external program detected through qcengine, not an importable module.
_programs["dftd3"] = "dftd3" in qcng.list_available_programs()
def has_module(name):
    """Return True if optional dependency *name* was detected at import time."""
    return _programs[name]
def check_has_module(program):
    """Skip the calling test when the optional dependency *program* is missing."""
    import_message = "Not detecting module {}. Install package if necessary to enable tests."
    if has_module(program) is False:
        pytest.skip(import_message.format(program))
def _build_pytest_skip(program):
    """Build a skipif marker that fires when *program* is not installed."""
    import_message = "Not detecting module {}. Install package if necessary to enable tests."
    return pytest.mark.skipif(has_module(program) is False, reason=import_message.format(program))
# Add a number of module testing options
# Apply e.g. @using_psi4 to a test to skip it when that dependency is absent.
using_dask = _build_pytest_skip("dask.distributed")
using_dask_jobqueue = _build_pytest_skip("dask_jobqueue")
using_dftd3 = _build_pytest_skip("dftd3")
using_fireworks = _build_pytest_skip("fireworks")
using_geometric = _build_pytest_skip("geometric")
using_parsl = _build_pytest_skip("parsl")
using_psi4 = _build_pytest_skip("psi4")
using_rdkit = _build_pytest_skip("rdkit")
using_torsiondrive = _build_pytest_skip("torsiondrive")
# Skip on non-POSIX systems (tests that assume a Bash-like environment).
using_unix = pytest.mark.skipif(
    os.name.lower() != "posix", reason="Not on Unix operating system, " "assuming Bash is not present"
)
### Generic helpers
def recursive_dict_merge(base_dict, dict_to_merge_in):
    """Recursively merge dict_to_merge_in into base_dict, in place.

    Unlike a simple top-level merge {**x, **y}, nested dicts are merged
    key by key instead of being replaced wholesale.
    """
    # Import Mapping locally from collections.abc: the top-of-file
    # `from collections import Mapping` fails on Python 3.10+.
    from collections.abc import Mapping

    for key, value in dict_to_merge_in.items():
        if key in base_dict and isinstance(base_dict[key], dict) and isinstance(value, Mapping):
            recursive_dict_merge(base_dict[key], value)
        else:
            base_dict[key] = value
def find_open_port():
    """
    Use socket's built in ability to find an open port.

    Returns the port number (int). The probe socket is closed before
    returning so the file descriptor is not leaked; the port remains
    available for the caller to bind immediately afterwards.
    """
    # BUG FIX: the previous revision never closed the probe socket.
    with socket.socket() as sock:
        sock.bind(("", 0))
        _, port = sock.getsockname()
    return port
@contextmanager
def preserve_cwd():
    """Always returns to CWD on exit
    """
    original = os.getcwd()
    try:
        yield original
    finally:
        # Restore the working directory no matter what the body did.
        os.chdir(original)
def await_true(wait_time, func, *args, **kwargs):
    """Poll func(*args, **kwargs) until it is truthy or wait_time is spent.

    period (kwarg, default 4): seconds slept between attempts.
    Returns True on the first truthy result, False if every attempt failed.
    """
    period = kwargs.pop("period", 4)
    attempts = max(int(wait_time / period), 1)
    for _ in range(attempts):
        if func(*args, **kwargs):
            return True
        time.sleep(period)
    return False
### Background thread loops
@contextmanager
def pristine_loop():
    """
    Builds a clean IOLoop for using as a background request.
    Courtesy of Dask Distributed

    Yields a fresh tornado IOLoop installed as the current loop; on exit the
    loop is closed (including its fds) and the instance/current slots cleared.
    """
    IOLoop.clear_instance()
    IOLoop.clear_current()
    loop = IOLoop()
    loop.make_current()
    assert IOLoop.current() is loop

    try:
        yield loop
    finally:
        try:
            loop.close(all_fds=True)
        except (ValueError, KeyError, RuntimeError):
            # Loop may already be closed or fds already released.
            pass
        IOLoop.clear_instance()
        IOLoop.clear_current()
@contextmanager
def loop_in_thread():
    """Yield a pristine IOLoop running in a daemon thread.

    Blocks until the loop has actually started; on exit the loop is asked to
    stop and the thread joined (best-effort, 5 s timeout)."""
    with pristine_loop() as loop:
        # Add the IOloop to a thread daemon
        thread = threading.Thread(target=loop.start, name="test IOLoop")
        thread.daemon = True
        thread.start()
        # Wait for the callback below to run inside the loop, proving it's up.
        loop_started = threading.Event()
        loop.add_callback(loop_started.set)
        loop_started.wait()

        try:
            yield loop
        finally:
            try:
                loop.add_callback(loop.stop)
                thread.join(timeout=5)
            except:
                pass
def terminate_process(proc):
    """Terminate *proc*: keyboard interrupt first, then an unconditional kill.

    The process gets up to 15 seconds to exit gracefully after the interrupt
    before it is killed outright.
    """
    if proc.poll() is not None:
        # Already exited; nothing to do.
        return
    # Sigint (keyboard interrupt); Windows needs CTRL_BREAK_EVENT instead.
    if sys.platform.startswith("win"):
        interrupt = signal.CTRL_BREAK_EVENT
    else:
        interrupt = signal.SIGINT
    proc.send_signal(interrupt)
    try:
        deadline = time.time() + 15
        while proc.poll() is None and time.time() < deadline:
            time.sleep(0.02)
    # Flat kill
    finally:
        proc.kill()
@contextmanager
def popen(args, **kwargs):
    """
    Opens a background task.

    Code and idea from dask.distributed's testing suite
    https://github.com/dask/distributed

    Yields the subprocess.Popen object; on exit the process is terminated and
    its stdout/stderr optionally dumped (always dumped when the body raised).
    Extra kwargs consumed here: append_prefix, coverage, dump_stdout;
    everything else is forwarded to subprocess.Popen.
    """
    args = list(args)

    # Bin prefix
    if sys.platform.startswith("win"):
        bin_prefix = os.path.join(sys.prefix, "Scripts")
    else:
        bin_prefix = os.path.join(sys.prefix, "bin")

    # Do we prefix with Python?
    if kwargs.pop("append_prefix", True):
        args[0] = os.path.join(bin_prefix, args[0])

    # Add coverage testing
    if kwargs.pop("coverage", False):
        coverage_dir = os.path.join(bin_prefix, "coverage")
        if not os.path.exists(coverage_dir):
            print("Could not find Python coverage, skipping cov.")

        else:
            src_dir = os.path.dirname(os.path.abspath(__file__))
            coverage_flags = [coverage_dir, "run", "--parallel-mode", "--source=" + src_dir]

            # If python script, skip the python bin
            if args[0].endswith("python"):
                args.pop(0)
            args = coverage_flags + args

    # Do we optionally dumpstdout?
    dump_stdout = kwargs.pop("dump_stdout", False)

    if sys.platform.startswith("win"):
        # Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT
        kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP

    kwargs["stdout"] = subprocess.PIPE
    kwargs["stderr"] = subprocess.PIPE
    proc = subprocess.Popen(args, **kwargs)
    try:
        yield proc
    except Exception:
        # Always show the child's output when the body failed.
        dump_stdout = True
        raise

    finally:
        try:
            terminate_process(proc)
        finally:
            output, error = proc.communicate()
            if dump_stdout:
                print("\n" + "-" * 30)
                print("\n|| Process command: {}".format(" ".join(args)))
                print("\n|| Process stderr: \n{}".format(error.decode()))
                print("-" * 30)
                print("\n|| Process stdout: \n{}".format(output.decode()))
                print("-" * 30)
def run_process(args, **kwargs):
    """
    Runs a process in the background until complete.

    Returns True if exit code zero.

    Extra kwargs consumed here: timeout (default 30), interupt_after;
    everything else is forwarded to popen().
    """
    timeout = kwargs.pop("timeout", 30)
    interrupt_after = kwargs.pop("interupt_after", None)
    with popen(args, **kwargs) as proc:
        if interrupt_after is None:
            proc.wait(timeout=timeout)
        else:
            # Let the process run for a while, then interrupt/kill it.
            time.sleep(interrupt_after)
            terminate_process(proc)

        exit_code = proc.poll()
    return exit_code == 0
### Server testing mechanics
@pytest.fixture(scope="session")
def postgres_server():
    """Session fixture: a PostgresHarness for the whole test run.

    Prefers an already-running local Postgres on port 5432; otherwise boots a
    TemporaryPostgres (slower) and tears it down at session end."""
    if shutil.which("psql") is None:
        pytest.skip("Postgres is not installed on this server and no active postgres could be found.")

    storage = None
    psql = PostgresHarness({"database": {"port": 5432}})
    # psql = PostgresHarness({"database": {"port": 5432, "username": "qcarchive", "password": "mypass"}})
    if not psql.is_alive():
        print()
        print(
            f"Could not connect to a Postgres server at {psql.config.database_uri()}, this will increase time per test session by ~3 seconds."
        )
        print()
        storage = TemporaryPostgres()
        psql = storage.psql
        print("Using Database: ", psql.config.database_uri())

    yield psql

    if storage:
        storage.stop()
def reset_server_database(server):
    """Resets the server database for testing.

    Only acts when QCFRACTAL_RESET_TESTING_DB is set in the environment.
    """
    if "QCFRACTAL_RESET_TESTING_DB" in os.environ:
        server.storage._clear_db(server.storage._project_name)

    server.storage._delete_DB_data(server.storage._project_name)

    # Force a heartbeat after database clean if a manager is present.
    if server.queue_socket:
        server.await_results()
@pytest.fixture(scope="module")
def test_server(request, postgres_server):
    """
    Builds a server instance with the event loop running in a thread.

    Yields a FractalSnowflake with no compute workers attached.
    """

    # Storage name
    storage_name = "test_qcfractal_server"
    postgres_server.create_database(storage_name)

    with FractalSnowflake(
        max_workers=0,
        storage_project_name="test_qcfractal_server",
        storage_uri=postgres_server.database_uri(),
        start_server=False,
        reset_database=True,
    ) as server:

        # Clean and re-init the database
        yield server
def build_adapter_clients(mtype, storage_name="test_qcfractal_compute_server"):
    """Construct the raw client object for queue-adapter backend *mtype*.

    mtype is one of _adapter_testing ("pool", "dask", "fireworks", "parsl");
    missing third-party backends cause a pytest skip via importorskip."""

    # Basic boot and loop information
    if mtype == "pool":
        from concurrent.futures import ProcessPoolExecutor

        adapter_client = ProcessPoolExecutor(max_workers=2)

    elif mtype == "dask":
        dd = pytest.importorskip("dask.distributed")
        adapter_client = dd.Client(n_workers=2, threads_per_worker=1, resources={"process": 1})

        # Not super happy about this line, but shuts up dangling reference errors
        adapter_client._should_close_loop = False

    elif mtype == "fireworks":
        fireworks = pytest.importorskip("fireworks")

        fireworks_name = storage_name + "_fireworks_queue"
        adapter_client = fireworks.LaunchPad(name=fireworks_name, logdir="/tmp/", strm_lvl="CRITICAL")

    elif mtype == "parsl":
        parsl = pytest.importorskip("parsl")

        # Must only be a single thread as we run thread unsafe applications.
        adapter_client = parsl.config.Config(executors=[parsl.executors.threads.ThreadPoolExecutor(max_threads=1)])

    else:
        raise TypeError("fractal_compute_server: internal parametrize error")

    return adapter_client
@pytest.fixture(scope="module", params=_adapter_testing)
def adapter_client_fixture(request):
    """Module fixture parametrized over every queue-adapter backend."""
    adapter_client = build_adapter_clients(request.param)
    yield adapter_client

    # Do a final close with existing tech
    build_queue_adapter(adapter_client).close()
@pytest.fixture(scope="module", params=_adapter_testing)
def managed_compute_server(request, postgres_server):
    """
    A FractalServer with compute associated parametrize for all managers.

    Yields a (client, server, manager) triple; the manager is shut down and
    its adapter closed at module teardown.
    """

    storage_name = "test_qcfractal_compute_server"
    postgres_server.create_database(storage_name)

    adapter_client = build_adapter_clients(request.param, storage_name=storage_name)

    # Build a server with the thread in a outer context loop
    # Not all adapters play well with internal loops
    with loop_in_thread() as loop:
        server = FractalServer(
            port=find_open_port(),
            storage_project_name=storage_name,
            storage_uri=postgres_server.database_uri(),
            loop=loop,
            queue_socket=adapter_client,
            ssl_options=False,
        )

        # Clean and re-init the database
        reset_server_database(server)

        # Build Client and Manager
        from qcfractal.interface import FractalClient

        client = FractalClient(server)

        from qcfractal.queue import QueueManager

        manager = QueueManager(client, adapter_client)

        yield client, server, manager

        # Close down and clean the adapter
        manager.close_adapter()
        manager.stop()
@pytest.fixture(scope="module")
def fractal_compute_server(postgres_server):
    """
    A FractalServer with a local Pool manager.

    Yields a FractalSnowflake with two local pool workers attached.
    """

    # Storage name
    storage_name = "test_qcfractal_compute_snowflake"
    postgres_server.create_database(storage_name)

    with FractalSnowflake(
        max_workers=2,
        storage_project_name=storage_name,
        storage_uri=postgres_server.database_uri(),
        reset_database=True,
        start_server=False,
    ) as server:
        reset_server_database(server)
        yield server
def build_socket_fixture(stype, server=None):
    """Generator backing the storage-socket fixtures below.

    stype: storage backend name; only "sqlalchemy" is currently supported.
    server: the postgres harness (required for sqlalchemy)."""
    print("")

    # Check mongo
    storage_name = "test_qcfractal_storage" + stype

    # IP/port/drop table is specific to build
    if stype == "sqlalchemy":
        server.create_database(storage_name)
        storage = storage_socket_factory(server.database_uri(), storage_name, db_type=stype, sql_echo=False)

        # Clean and re-init the database
        storage._clear_db(storage_name)
    else:
        raise KeyError("Storage type {} not understood".format(stype))

    yield storage

    if stype == "sqlalchemy":
        # todo: drop db
        # storage._clear_db(storage_name)
        pass
    else:
        raise KeyError("Storage type {} not understood".format(stype))
@pytest.fixture(scope="module", params=["sqlalchemy"])
def socket_fixture(request):
    # NOTE(review): no server harness is passed here, so build_socket_fixture
    # will hit `server.create_database` with server=None -- confirm usage.
    yield from build_socket_fixture(request.param)
@pytest.fixture(scope="module")
def sqlalchemy_socket_fixture(request, postgres_server):
    # SQLAlchemy storage socket backed by the session Postgres harness.
    yield from build_socket_fixture("sqlalchemy", postgres_server)
def live_fractal_or_skip():
    """
    Ensure Fractal live connection can be made
    First looks for a local staging server, then tries QCArchive.

    Returns a connected FractalClient, or skips the calling test when neither
    endpoint is reachable.
    """
    try:
        return FractalClient("localhost:7777", verify=False)
    except (requests.exceptions.ConnectionError, ConnectionRefusedError):
        print("Failed to connect to localhost, trying MolSSI QCArchive.")
        try:
            # Quick reachability probe before constructing the client.
            requests.get("https://api.qcarchive.molssi.org:443", json={}, timeout=5)
            return FractalClient()
        except (requests.exceptions.ConnectionError, ConnectionRefusedError):
            return pytest.skip("Could not make a connection to central Fractal server")
def df_compare(df1, df2, sort=False):
    """Check equality of DataFrames/Series whose cells may hold numpy arrays,
    Molecules, or NaN -- cases that .equals and == struggle with.

    df1, df2: pandas DataFrame or Series.
    sort: if True, sort columns (DataFrame) or index (Series) before comparing.
    Returns True/False. The inputs are never mutated.
    """
    if sort:
        if isinstance(df1, pd.DataFrame):
            df1 = df1.reindex(sorted(df1.columns), axis=1)
        elif isinstance(df1, pd.Series):
            df1 = df1.sort_index()
        if isinstance(df2, pd.DataFrame):
            df2 = df2.reindex(sorted(df2.columns), axis=1)
        elif isinstance(df2, pd.Series):
            df2 = df2.sort_index()

    def element_equal(e1, e2):
        """Compare two cells, handling arrays, Molecules and NaN specially."""
        if isinstance(e1, np.ndarray):
            if not np.array_equal(e1, e2):
                return False
        elif isinstance(e1, Molecule):
            if not e1.get_hash() == e2.get_hash():
                return False
        # Because nan != nan
        elif isinstance(e1, float) and np.isnan(e1):
            if not np.isnan(e2):
                return False
        else:
            if not e1 == e2:
                return False
        return True

    if isinstance(df1, pd.Series):
        if not isinstance(df2, pd.Series):
            return False
        if len(df1) != len(df2):
            return False
        for i in range(len(df1)):
            if not element_equal(df1[i], df2[i]):
                return False
        return True

    # BUG FIX: the previous revision called df.drop(..., inplace=True) while
    # iterating df.columns, which both mutated the caller's frames and could
    # skip columns during iteration. Drop private "_" columns onto copies.
    df1 = df1.drop(columns=[c for c in df1.columns if c.startswith("_")])
    df2 = df2.drop(columns=[c for c in df2.columns if c.startswith("_")])

    if not all(df1.columns == df2.columns):
        return False
    if not all(df1.index.values == df2.index.values):
        return False
    for i in range(df1.shape[0]):
        for j in range(df1.shape[1]):
            if not element_equal(df1.iloc[i, j], df2.iloc[i, j]):
                return False
    return True
|
dl_lane_following_ncs.py | #!/usr/bin/env python
# Copyright (c) 2018 LG Electronics, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import sys
import os
import threading
import time
import argparse
import numpy as np
import cv2
import rclpy
from rclpy.node import Node
from cv_bridge import CvBridge, CvBridgeError
from duckietown.duckietown_utils.jpg import image_cv_from_jpg
from sensor_msgs.msg import CompressedImage, Image, Joy
from duckietown_msgs.msg import Twist2DStamped, BoolStamped
from mvnc import mvncapi as mvnc
IMAGE_DIM = (160, 120)
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
GRAPH_NAME = 'caffe_model_2.graph'
class DLLaneFollowingNCSNode(Node):
    """ROS 2 node: lane following with a CNN executed on an Intel Movidius
    Neural Compute Stick (NCS). Subscribes to compressed camera images,
    predicts a steering rate (omega) and publishes car commands."""

    def __init__(self, args):
        self.node_name = 'dl_lane_following_ncs_node'
        super().__init__(self.node_name)
        self.args = args
        # thread lock -- ensures only one image is processed at a time
        self.thread_lock = threading.Lock()
        self.sock = None
        # state == 1: joystick override active, inference disabled; -1: autonomous
        self.state = 1
        self.max_speed = 0.2
        self.min_speed = 0.1
        self.omega_threshold = 2.5
        self.speed = self.args.speed
        self.omega_gain = self.args.omega_gain
        # Open the first available NCS device and load the compiled Caffe graph.
        devices = mvnc.EnumerateDevices()
        device = mvnc.Device(devices[0])
        device.OpenDevice()
        with open('{}/host/model/caffe/{}'.format(BASE_PATH, GRAPH_NAME), mode='rb') as f:
            graph_in_memory = f.read()
        self.graph = device.AllocateGraph(graph_in_memory)
        # self.graph.SetGraphOption(mvnc.GlobalOption.LOG_LEVEL, 2)
        self.loginfo('[{}] Graph allocated: {}'.format(self.node_name, GRAPH_NAME))
        self.sub_image = self.create_subscription(CompressedImage, self.args.subscribe_topic, self.callback)
        self.sub_joy_btn = self.create_subscription(BoolStamped, self.args.joystick_override, self.joystick_override_callback)
        self.pub_car_cmd = self.create_publisher(Twist2DStamped, self.args.publish_topic)

    def callback(self, image_msg):
        # Ignore camera frames while the joystick override is active.
        if self.state == 1:
            return
        # start a daemon thread to process the image
        thread = threading.Thread(target=self.processImage, args=(image_msg,))
        thread.setDaemon(True)
        thread.start()

    def joystick_override_callback(self, joystick_override_msg):
        # True -> joystick mode (state 1), False -> autonomous mode (state -1).
        self.loginfo("Switching to joystick mode: " + str(joystick_override_msg.data))
        self.state = 1 if joystick_override_msg.data else -1

    def processImage(self, image_msg):
        # Drop this frame if a previous one is still being processed.
        if not self.thread_lock.acquire(False):
            # Return immediately if the thread is locked
            return
        try:
            self.processImage_(image_msg)
            #message_time = image_msg.header.stamp.sec + image_msg.header.stamp.nanosec*1e-9
            #current_time = time.time()
            #delay = current_time - message_time
            #print("message time: " + str(message_time))
            #print("current time: " + str(current_time))
            #print("delay: " + str(delay))
        finally:
            # release the thread lock
            self.thread_lock.release()

    def processImage_(self, image_msg):
        """Decode one frame, run NCS inference and publish the car command."""
        t1 = time.time()
        # decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return
        # import image for classification
        img = cv2.resize(image_cv, IMAGE_DIM, interpolation=cv2.INTER_NEAREST)
        # Crop the top rows (presumably sky/horizon -- TODO confirm) and
        # normalize the RGB image into [0, 1] floats for the network.
        img = img[50:, :, :]
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
        self.graph.LoadTensor(img.astype(np.float16), 'user_object')
        preds, _ = self.graph.GetResult()
        predicted_omega = preds[0]
        # set car cmd through ros message
        car_control_msg = Twist2DStamped()
        car_control_msg.header = image_msg.header
        car_control_msg.v = self.speed
        # car_control_msg.v = self.normalize_speed(predicted_omega, self.omega_threshold, self.min_speed, self.max_speed)
        car_control_msg.omega = predicted_omega * self.omega_gain
        t2 = time.time()
        #print('Time: %.3f Speed: %.3f Omega: %.3f' % ((t2 - t1), car_control_msg.v, car_control_msg.omega))
        # publish the control command
        self.publishCmd(car_control_msg)

    def publishCmd(self, car_cmd_msg):
        # Forward the command to the car command topic.
        self.pub_car_cmd.publish(car_cmd_msg)

    def normalize_speed(self, w, w_max, v_min, v_max):
        """Map |omega| in [0, w_max] to a speed: slower the harder we turn.

        Currently unused (see the commented-out call in processImage_)."""
        w_min = 0.0
        v_min, v_max = -v_max, -v_min
        v = abs((v_max - v_min) / (w_max - w_min) * (abs(w) - w_max) + v_max)
        if v < v_min:
            v = v_min
        return v

    def loginfo(self, s):
        # Thin wrapper over the rclpy node logger.
        self.get_logger().info(s)
def main(args=None):
    """Entry point: init rclpy, parse CLI options, and spin the node."""
    if args is None:
        args = sys.argv
    rclpy.init(args=args)

    parser = argparse.ArgumentParser()
    parser.add_argument("--speed",
                        type=float,
                        default=0.2,
                        help="wheel speed velocity gain")
    parser.add_argument("--omega_gain",
                        type=float,
                        default=3.0,
                        help="multiplier for trim vehicle turning rate")
    parser.add_argument("--subscribe_topic",
                        type=str,
                        default="/simulator/camera_node/image/compressed",
                        help="name of topic to subscribe to for camera images")
    parser.add_argument("--publish_topic",
                        type=str,
                        default="/simulator/joy_mapper_node/car_cmd",
                        help="name of topic to publish car command to")
    parser.add_argument("--joystick_override",
                        type=str,
                        default="/joystick_override",
                        help="name of topic to subscribe to for joystick override signal")
    options = parser.parse_args()

    node = DLLaneFollowingNCSNode(options)
    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        # Ctrl-C: fall through to a clean shutdown.
        pass
    node.destroy_node()
    rclpy.shutdown()
if __name__ == '__main__':
    # BUG FIX: this guard previously called rospy.init_node / rospy.spin and
    # an undefined `dl_lane_following` symbol -- rospy (ROS 1) is never
    # imported here. This file is a ROS 2 (rclpy) node; all setup and the
    # spin loop live in main().
    main()
|
crossover_parr.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 31 16:39:03 2021
@author: akshat
"""
import numpy as np
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.DataStructs.cDataStructs import TanimotoSimilarity
from selfies import encoder, decoder
import multiprocessing
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
# Shared multiprocessing primitives for the parallel crossover workers.
manager = multiprocessing.Manager()
lock = multiprocessing.Lock()
def get_selfie_chars(selfie):
    '''Obtain a list of all selfie characters in string selfie

    Parameters:
    selfie (string) : A selfie string - representing a molecule

    Example:
    >>> get_selfie_chars('[C][=C][C][=C][C][=C][Ring1][Branch1_1]')
    ['[C]', '[=C]', '[C]', '[=C]', '[C]', '[=C]', '[Ring1]', '[Branch1_1]']

    Returns
    -------
    chars_selfie (list of strings) :
        list of selfie characters present in molecule selfie
    '''
    symbols = []
    remaining = selfie
    # Peel off one bracketed symbol "[...]" per iteration.
    while remaining != '':
        close = remaining.find(']')
        symbols.append(remaining[remaining.find('['):close + 1])
        remaining = remaining[close + 1:]
    return symbols
def get_fp_scores(smiles_back, target_smi):
    '''
    Tanimoto similarity (Morgan fingerprints, radius 2) of each SMILES in
    smiles_back against target_smi.

    Parameters
    ----------
    smiles_back : (list of str)
        List of valid SMILES strings.
    target_smi : (str)
        Valid SMILES string.

    Returns
    -------
    (list of floats)
        Fingerprint similarity score of each input SMILES, in order.
    '''
    fp_target = AllChem.GetMorganFingerprint(Chem.MolFromSmiles(target_smi), 2)
    return [
        TanimotoSimilarity(AllChem.GetMorganFingerprint(Chem.MolFromSmiles(smi), 2), fp_target)
        for smi in smiles_back
    ]
def get_joint_sim(all_smiles, starting_smile, target_smile):
    '''
    Joint similarity of every SMILES in all_smiles with respect to both
    starting_smile and target_smile.

    The raw score is the mean of the two similarities penalized by their
    absolute difference, then reshaped by a fixed cubic polynomial.

    Parameters
    ----------
    all_smiles : (list of str)
        List of SMILES strings.
    starting_smile : (str)
        Input SMILES string.
    target_smile : (str)
        Input SMILES string.

    Returns
    -------
    (numpy array of floats)
        Joint similarity score for each SMILES in all_smiles.
    '''
    sim_to_start = np.array(get_fp_scores(all_smiles, starting_smile))
    sim_to_target = np.array(get_fp_scores(all_smiles, target_smile))
    # Reward molecules similar to both endpoints, penalize imbalance.
    raw = (sim_to_target + sim_to_start) / 2.0 - np.abs(sim_to_target - sim_to_start)
    return ((1/9) * raw**3) - ((7/9) * raw**2) + ((19/12) * raw)
def obtain_path(starting_smile, target_smile):
    '''
    Create a single path between molecules starting_smile and target_smile
    by mutating one differing SELFIES symbol at a time (in random order).

    Parameters
    ----------
    starting_smile : (str)
        Valid SMILES string.
    target_smile : (str)
        Valid SMILES string.

    Raises
    ------
    Exception
        If the final path member does not decode to the target structure.

    Returns
    -------
    path_smiles : (list of str)
        All SMILES strings encountered while creating the path.
    '''
    starting_selfie = encoder(starting_smile)
    target_selfie = encoder(target_smile)
    start_chars = get_selfie_chars(starting_selfie)
    target_chars = get_selfie_chars(target_selfie)
    # Pad the shorter symbol list with blanks so both have equal length.
    if len(start_chars) < len(target_chars):
        start_chars += [' '] * (len(target_chars) - len(start_chars))
    else:
        target_chars += [' '] * (len(start_chars) - len(target_chars))
    # Positions where the two padded symbol lists disagree.
    indices_diff = [i for i in range(len(start_chars)) if start_chars[i] != target_chars[i]]
    path = {0: start_chars}
    for step in range(len(indices_diff)):
        idx = np.random.choice(indices_diff, 1)[0]  # position to mutate next
        indices_diff.remove(idx)
        member = path[step].copy()
        member[idx] = target_chars[idx]  # flip it to the target symbol
        path[step + 1] = member.copy()
    # Collapse each symbol list back into a SELFIES string (blanks removed).
    paths_selfies = []
    for i in range(len(path)):
        paths_selfies.append(''.join(path[i]).replace(' ', ''))
    if paths_selfies[-1] != target_selfie:
        raise Exception("Unable to discover target structure!")
    return [decoder(x) for x in paths_selfies]
def perform_crossover(comb_smi, num_random_samples):
    '''
    Create multiple paths between the two SMILES in comb_smi to obtain
    median molecules representing the crossover structure.

    Parameters
    ----------
    comb_smi : (str)
        Two SMILES strings concatenated using xxx (example: CCCCCCxxxSSS).
    num_random_samples : (int)
        Number of randomized SMILES orderings to consider while forming paths.

    Returns
    -------
    (list of str)
        Unique candidate median molecules encountered during path formation.
    '''
    smi_a, smi_b = comb_smi.split('xxx')
    mol_a = Chem.MolFromSmiles(smi_a)
    mol_b = Chem.MolFromSmiles(smi_b)
    Chem.Kekulize(mol_a)
    Chem.Kekulize(mol_b)
    # Random, non-canonical SMILES orderings for each parent molecule.
    orderings_a = [rdkit.Chem.MolToSmiles(mol_a, canonical=False, doRandom=True, isomericSmiles=False, kekuleSmiles=True)
                   for _ in range(num_random_samples)]
    orderings_b = [rdkit.Chem.MolToSmiles(mol_b, canonical=False, doRandom=True, isomericSmiles=False, kekuleSmiles=True)
                   for _ in range(num_random_samples)]
    # Every path between every pair of orderings contributes candidates.
    collected = []
    for smi_1 in orderings_a:
        for smi_2 in orderings_b:
            collected.extend(obtain_path(smi_1, smi_2))
    canonical = []
    for item in collected:
        try:
            smi_canon = Chem.MolToSmiles(Chem.MolFromSmiles(item, sanitize=True), isomericSmiles=False, canonical=True)
            if len(smi_canon) <= 81:  # Size restriction!
                canonical.append(smi_canon)
        except:
            # Invalid intermediate structures are simply dropped.
            continue
    return list(set(canonical))
def crossover_smiles(smiles_join):
    '''
    Return crossover molecules for one joined pair, ordered (highest to
    lowest) by joint similarity to the two parents.

    Parameters
    ----------
    smiles_join : (str)
        Two SMILES strings concatenated using xxx (example: CCCCCCxxxSSS).

    Returns
    -------
    (list of str)
        Crossover molecules sorted by descending joint similarity score.
    '''
    candidates = perform_crossover(smiles_join, num_random_samples=1)
    parent_1, parent_2 = smiles_join.split('xxx')
    joint_sim = get_joint_sim(candidates, parent_1, parent_2)
    # Highest joint similarity first.
    descending = np.argsort(joint_sim)[::-1]
    return [candidates[i] for i in descending]
def get_chunks(arr, num_processors, ratio):
    '''
    Split a list of SMILES into sublists, each of which will be operated on
    by a separate cpu.

    Parameters
    ----------
    arr : (list of str)
        A list of SMILES.
    num_processors : (int)
        Number of cpus available for conducting the operation.
    ratio : (int or float)
        Number of operations that will be performed on each cpu
        (truncated to an int).

    Returns
    -------
    chunks : (list of lists)
        num_processors sublists; each is used by a different cpu.
    '''
    step = int(ratio)
    chunks = []
    for i in range(num_processors):
        start = i * step
        if i == num_processors - 1:
            # Last chunk absorbs any remainder of the list.
            # BUG FIX: the original used non-exclusive if-branches, so with
            # num_processors == 1 both the first and last branch fired and
            # every element appeared in two overlapping chunks.
            chunks.append(arr[start:])
        else:
            chunks.append(arr[start:start + step])
    return chunks
def calc_parr_prop(unseen_smile_ls, property_name, props_collect):
    '''Worker body: compute crossover structures for each joined-SMILES
    string in unseen_smile_ls and record them in the shared (manager-proxied)
    dictionary props_collect under props_collect[property_name][smile_join].
    '''
    shared_map = props_collect[property_name]
    for smile_join in unseen_smile_ls:
        shared_map[smile_join] = crossover_smiles(smile_join)
def create_parr_process(chunks, property_name):
    '''
    Spawn one process per chunk to create crossover molecules in parallel.

    Parameters
    ----------
    chunks : (list of list)
        Sublists of joined-SMILES strings, one per worker process.
    property_name : (str)
        Label under which results are stored; only 'logP' actually spawns
        workers (behavior inherited from the original design).

    Returns
    -------
    combined_dict : (dict)
        input smiles -> [list of crossover smiles], merged from all workers.
    '''
    workers = []
    result_dicts = []
    for chunk in chunks:
        # One manager-backed result dict per worker, nested under the label.
        shared = manager.dict(lock=True)
        shared[property_name] = manager.dict(lock=True)
        result_dicts.append(shared)
        if property_name == 'logP':
            workers.append(multiprocessing.Process(target=calc_parr_prop, args=(chunk, property_name, shared, )))
    for proc in workers:
        proc.start()
    for proc in workers:  # wait for all parallel processes to finish
        proc.join()
    combined_dict = {}  # collect results from the worker processes
    for shared in result_dicts:
        combined_dict.update(shared[property_name])
    return combined_dict
def crossover_smiles_parr(smiles_join):
    '''
    Generate crossover SMILES for each pair in smiles_join, in parallel
    across all available cpus.

    Parameters
    ----------
    smiles_join : (list of str)
        Each element is two SMILES strings concatenated using xxx
        (example: CCCCCCxxxSSS).

    Returns
    -------
    (dict)
        Map smiles -> [list of crossover molecules].
    '''
    num_workers = multiprocessing.cpu_count()
    per_worker = len(smiles_join) / num_workers
    work_chunks = get_chunks(smiles_join, num_workers, per_worker)
    return create_parr_process(work_chunks, property_name='logP')
|
config_pfsense.py | #!/usr/bin/env python3
# scripts/config_pfsense.py
#
# Import/Export script for vIOS.
#
# @author Alain Degreffe <eczema@ecze.com>
# @copyright 2016 Alain Degreffe
# @license http://www.gnu.org/licenses/gpl.html
# @link http://www.unetlab.com/
# @version 20160422
import getopt, multiprocessing, os, pexpect, re, sys, time
conntimeout = 3 # Maximum time for console connection
expctimeout = 3 # Maximum time for each short expect
longtimeout = 30 # Maximum time for each long expect
timeout = 60 # Maximum run time (conntimeout is included)
def node_login(handler):
    """Bring the pfSense console to a root shell prompt.

    Repeatedly sends a newline until either the console menu
    ('Enter an option:') or a root shell prompt appears. From the menu,
    option 8 drops to a shell.

    Parameters
    ----------
    handler : pexpect.spawn
        Live console session.

    Returns
    -------
    bool
        True when a root prompt is reached, False otherwise (the session is
        closed via node_quit on failure).
    """
    # Send an empty line, and wait for the login prompt.
    i = -1
    while i == -1:
        try:
            handler.sendline('\r\n')
            i = handler.expect([
                'Enter an option:',
                '.*root.*:'], timeout = 5)
        except Exception:
            # Retry on pexpect TIMEOUT/EOF. BUG FIX: the original bare
            # "except:" also swallowed KeyboardInterrupt/SystemExit, making
            # this retry loop impossible to interrupt.
            i = -1
    if i == 0:
        # Console menu: option 8 opens a shell.
        handler.sendline('8')
        try:
            handler.expect('.*root.*:', timeout = expctimeout)
            return True
        except Exception:
            print('ERROR: error waiting for "root:" prompt.')
            node_quit(handler)
            return False
    elif i == 1:
        # Already at a shell prompt; nothing to do.
        return True
    else:
        # Unexpected output.
        node_quit(handler)
        return False
def node_quit(handler):
    """Leave the console session (if still alive) and close the pexpect handle."""
    if handler.isalive():
        handler.sendline('exit\n')
    handler.close()
def config_get(handler):
    """Dump /conf/config.xml from the pfSense shell and return it as text.

    Returns the XML configuration string, or False if the closing
    </pfsense> tag never appears within longtimeout seconds.
    """
    # Widen the pty so long config lines are not wrapped by the console;
    # piping through awk forces plain line-by-line output.
    handler.setwinsize(100, 120)
    handler.sendline('cat /conf/config.xml | awk \'{print $0}\'\n')
    #handler.sendline('cat `ls -rt /conf/backup/config-* | tail -1 `\n')
    try:
        handler.expect('</pfsense>', timeout = longtimeout)
    except:
        print('ERROR: error waiting for "#" prompt.')
        node_quit(handler)
        return False
    # Everything before the matched closing tag, re-terminated and cleaned.
    xml_text = handler.before.decode().replace('\r', '')  # Unix line endings
    xml_text = xml_text + '</pfsense>\n'
    # Strip console echo/noise before the XML declaration.
    xml_text = re.sub('.*<\?xml version=\"1.0\"\?>', '<?xml version=\"1.0\"?>', xml_text, flags=re.DOTALL)
    return xml_text
def config_put(handler):
    """Install a config.xml (provided on a mounted CD image) on the device.

    Waits for the first-boot VLAN setup prompt, skips it, copies the file
    from the CD into /conf, exits the shell, and waits for the console menu
    to return.

    Parameters
    ----------
    handler : pexpect.spawn
        Live console session.

    Returns
    -------
    bool
        True on success, False if either expected prompt never appears.
    """
    # Wait for the first-boot setup wizard prompt.
    while True:
        try:
            i = handler.expect('Do you want to set up VLANs now.*', timeout)
            break
        except:
            return False
    handler.sendline('')
    handler.sendline('\n')
    # The config is shipped on a CD ISO: mount it and copy the file over.
    handler.sendline('mount -t cd9660 /dev/cd0 /mnt\n')
    handler.sendline('cp /mnt/config.xml /conf/\n')
    handler.sendline('exit\n')
    # Wait until the console menu returns, confirming the shell exited.
    while True:
        try:
            i = handler.expect('option:', timeout)
            # BUG FIX: the original loop had no break here, so on success it
            # kept calling expect() until a timeout and always returned False.
            break
        except:
            return False
    return True
def usage():
    """Print command-line help for this script to stdout."""
    help_lines = [
        'Usage: %s <standard options>' % (sys.argv[0]),
        'Standard Options:',
        '-a <s> *Action can be:',
        ' - get: get the startup-configuration and push it to a file',
        ' - put: put the file as startup-configuration',
        '-f <s> *File',
        '-p <n> *Console port',
        '-t <n> Timeout (default = %i)' % (timeout),
        '* Mandatory option',
    ]
    for line in help_lines:
        print(line)
def now():
    """Return the current UNIX time in milliseconds, as an int."""
    millis = time.time() * 1000
    return int(round(millis))
def main(action, filename, port):
    """Connect to the console on 127.0.0.1:*port* and run *action*.

    get : log in, dump the device configuration and append it to *filename*.
    put : wait for the setup wizard, install the config from the mounted CD,
          then remove the .lock marker and create .configured next to
          *filename*.

    Exits the interpreter with status 0 on success and 1 on failure; returns
    False only if an unexpected exception is caught.

    BUG FIX: the parameter was misspelled 'fiename', so the body silently
    resolved 'filename' to the module-level global of the same name. The
    call site passes arguments positionally, so the rename is
    backward-compatible.
    """
    try:
        # Connect to the device, polling for up to `conntimeout` seconds.
        tmp = conntimeout
        while (tmp > 0):
            handler = pexpect.spawn('telnet 127.0.0.1 %i' %(port))
            time.sleep(0.1)
            tmp = tmp - 0.1
            if handler.isalive() == True:
                break
        if (handler.isalive() != True):
            print('ERROR: cannot connect to port "%i".' %(port))
            node_quit(handler)
            sys.exit(1)
        if action == 'get':
            rc = node_login(handler)
            if rc != True:
                print('ERROR: failed to login.')
                node_quit(handler)
                sys.exit(1)
            config = config_get(handler)
            if config in [False, None]:
                print('ERROR: failed to retrieve config.')
                node_quit(handler)
                sys.exit(1)
            try:
                # The caller guarantees the file does not exist yet, so
                # append mode effectively creates it.
                fd = open(filename, 'a')
                fd.write(config)
                fd.close()
            except Exception:
                print('ERROR: cannot write config to file.')
                node_quit(handler)
                sys.exit(1)
        elif action == 'put':
            rc = config_put(handler)
            if rc != True:
                print('ERROR: failed to push config.')
                node_quit(handler)
                sys.exit(1)
            # Remove lock file
            lock = '%s/.lock' %(os.path.dirname(filename))
            if os.path.exists(lock):
                os.remove(lock)
            # Mark as configured
            configured = '%s/.configured' %(os.path.dirname(filename))
            if not os.path.exists(configured):
                open(configured, 'a').close()
        node_quit(handler)
        sys.exit(0)
    except Exception as e:
        # sys.exit raises SystemExit (a BaseException), so successful/failed
        # exits above are NOT swallowed here.
        print('ERROR: got an exception')
        print(type(e))   # the exception instance
        print(e.args)    # arguments stored in .args
        print(e)         # __str__ allows args to be printed directly
        node_quit(handler)
        return False
if __name__ == "__main__":
action = None
filename = None
port = None
# Getting parameters from command line
try:
opts, args = getopt.getopt(sys.argv[1:], 'a:p:t:f:', ['action=', 'port=', 'timeout=', 'file='])
except getopt.GetoptError as e:
usage()
sys.exit(3)
for o, a in opts:
if o in ('-a', '--action'):
action = a
elif o in ('-f', '--file'):
filename = a
elif o in ('-p', '--port'):
try:
port = int(a)
except:
port = -1
elif o in ('-t', '--timeout'):
try:
timeout = int(a)
except:
timeout = -1
else:
print('ERROR: invalid parameter.')
# Checking mandatory parameters
if action == None or port == None or filename == None:
usage()
print('ERROR: missing mandatory parameters.')
sys.exit(1)
if action not in ['get', 'put']:
usage()
print('ERROR: invalid action.')
sys.exit(1)
if timeout < 0:
usage()
print('ERROR: timeout must be 0 or higher.')
sys.exit(1)
if port < 0:
usage()
print('ERROR: port must be 32768 or higher.')
sys.exit(1)
if action == 'get' and os.path.exists(filename):
usage()
print('ERROR: destination file already exists.')
sys.exit(1)
if action == 'put' and not os.path.exists(filename):
usage()
print('ERROR: source file does not already exist.')
sys.exit(1)
if action == 'put':
try:
fd = open(filename, 'r')
config = fd.read()
fd.close()
except:
usage()
print('ERROR: cannot read from file.')
sys.exit(1)
# Backgrounding the script
end_before = now() + timeout * 1000
p = multiprocessing.Process(target=main, name="Main", args=(action, filename, port))
p.start()
while (p.is_alive() and now() < end_before):
# Waiting for the child process to end
time.sleep(1)
if p.is_alive():
# Timeout occurred
print('ERROR: timeout occurred.')
p.terminate()
sys.exit(127)
if p.exitcode != 0:
sys.exit(127)
sys.exit(0)
|
cloudmgr.py | #!/usr/bin/python3
from io import StringIO
import os,sys,subprocess,time,re,datetime,threading,random,shutil
from utils.model import db, Image
from master.deploy import *
import json
from utils.log import logger
from utils import env
import requests
fspath = env.getenv('FS_PREFIX')
class AliyunMgr():
def __init__(self):
self.AcsClient = __import__('aliyunsdkcore.client', fromlist=["AcsClient"])
self.Request = __import__('aliyunsdkecs.request.v20140526', fromlist=[
"CreateInstanceRequest",
"StopInstanceRequest",
"DescribeInstancesRequest",
"DeleteInstanceRequest",
"StartInstanceRequest",
"DescribeInstancesRequest",
"AllocateEipAddressRequest",
"AssociateEipAddressRequest"])
def loadClient(self):
if not os.path.exists(fspath+"/global/sys/cloudsetting.json"):
currentfilepath = os.path.dirname(os.path.abspath(__file__))
templatefilepath = currentfilepath + "/../tools/cloudsetting.aliyun.template.json"
shutil.copyfile(templatefilepath,fspath+"/global/sys/cloudsetting.json")
logger.error("please modify the setting file first")
return False
try:
settingfile = open(fspath+"/global/sys/cloudsetting.json", 'r')
self.setting = json.loads(settingfile.read())
settingfile.close()
self.clt = self.AcsClient.AcsClient(self.setting['AccessKeyId'],self.setting['AccessKeySecret'], self.setting['RegionId'])
logger.info("load CLT of Aliyun success")
return True
except Exception as e:
logger.error(e)
return False
def createInstance(self):
request = self.Request.CreateInstanceRequest.CreateInstanceRequest()
request.set_accept_format('json')
request.add_query_param('RegionId', self.setting['RegionId'])
if 'ZoneId' in self.setting and not self.setting['ZoneId'] == "":
request.add_query_param('ZoneId', self.setting['ZoneId'])
if 'VSwitchId' in self.setting and not self.setting['VSwitchId'] == "":
request.add_query_param('VSwitchId', self.setting['VSwitchId'])
request.add_query_param('ImageId', 'ubuntu_16_0402_64_20G_alibase_20170818.vhd')
request.add_query_param('InternetMaxBandwidthOut', 1)
request.add_query_param('InstanceName', 'docklet_tmp_worker')
request.add_query_param('HostName', 'worker-tmp')
request.add_query_param('SystemDisk.Size', int(self.setting['SystemDisk.Size']))
request.add_query_param('InstanceType', self.setting['InstanceType'])
request.add_query_param('Password', self.setting['Password'])
response = self.clt.do_action_with_exception(request)
logger.info(response)
instanceid=json.loads(bytes.decode(response))['InstanceId']
return instanceid
def startInstance(self, instanceid):
request = self.Request.StartInstanceRequest.StartInstanceRequest()
request.set_accept_format('json')
request.add_query_param('InstanceId', instanceid)
response = self.clt.do_action_with_exception(request)
logger.info(response)
def createEIP(self):
request = self.Request.AllocateEipAddressRequest.AllocateEipAddressRequest()
request.set_accept_format('json')
request.add_query_param('RegionId', self.setting['RegionId'])
response = self.clt.do_action_with_exception(request)
logger.info(response)
response=json.loads(bytes.decode(response))
eipid=response['AllocationId']
eipaddr=response['EipAddress']
return [eipid, eipaddr]
def associateEIP(self, instanceid, eipid):
request = self.Request.AssociateEipAddressRequest.AssociateEipAddressRequest()
request.set_accept_format('json')
request.add_query_param('AllocationId', eipid)
request.add_query_param('InstanceId', instanceid)
response = self.clt.do_action_with_exception(request)
logger.info(response)
def getInnerIP(self, instanceid):
request = self.Request.DescribeInstancesRequest.DescribeInstancesRequest()
request.set_accept_format('json')
response = self.clt.do_action_with_exception(request)
instances = json.loads(bytes.decode(response))['Instances']['Instance']
for instance in instances:
if instance['InstanceId'] == instanceid:
return instance['NetworkInterfaces']['NetworkInterface'][0]['PrimaryIpAddress']
return json.loads(bytes.decode(response))['Instances']['Instance'][0]['VpcAttributes']['PrivateIpAddress']['IpAddress'][0]
def isStarted(self, instanceids):
request = self.Request.DescribeInstancesRequest.DescribeInstancesRequest()
request.set_accept_format('json')
response = self.clt.do_action_with_exception(request)
instances = json.loads(bytes.decode(response))['Instances']['Instance']
for instance in instances:
if instance['InstanceId'] in instanceids:
if not instance['Status'] == "Running":
return False
return True
def rentServers(self,number):
instanceids=[]
eipids=[]
eipaddrs=[]
for i in range(int(number)):
instanceids.append(self.createInstance())
time.sleep(2)
time.sleep(10)
for i in range(int(number)):
[eipid,eipaddr]=self.createEIP()
eipids.append(eipid)
eipaddrs.append(eipaddr)
time.sleep(2)
masterip=env.getenv('ETCD').split(':')[0]
for i in range(int(number)):
self.associateEIP(instanceids[i],eipids[i])
time.sleep(2)
time.sleep(5)
for instanceid in instanceids:
self.startInstance(instanceid)
time.sleep(2)
time.sleep(10)
while not self.isStarted(instanceids):
time.sleep(10)
time.sleep(5)
return [masterip, eipaddrs]
def addNode(self):
if not self.loadClient():
return {'success':'false'}
[masterip, eipaddrs] = self.rentServers(1)
threads = []
for eip in eipaddrs:
thread = threading.Thread(target = deploy, args=(eip,masterip,'root',self.setting['Password'],self.setting['VolumeName']))
thread.setDaemon(True)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
return {'success':'true'}
def addNodeAsync(self):
thread = threading.Thread(target = self.addNode)
thread.setDaemon(True)
thread.start()
class EmptyMgr():
def addNodeAsync(self):
logger.error("current cluster does not support scale out")
return False
class CloudMgr():
def getSettingFile(self):
if not os.path.exists(fspath+"/global/sys/cloudsetting.json"):
currentfilepath = os.path.dirname(os.path.abspath(__file__))
templatefilepath = currentfilepath + "/../tools/cloudsetting.aliyun.template.json"
shutil.copyfile(templatefilepath,fspath+"/global/sys/cloudsetting.json")
settingfile = open(fspath+"/global/sys/cloudsetting.json", 'r')
setting = settingfile.read()
settingfile.close()
return {'success':'true', 'result':setting}
def modifySettingFile(self, setting):
if setting == None:
logger.error("setting is None")
return {'success':'false'}
settingfile = open(fspath+"/global/sys/cloudsetting.json", 'w')
settingfile.write(setting)
settingfile.close()
return {'success':'true'}
def __init__(self):
if env.getenv("ALLOW_SCALE_OUT") == "True":
self.engine = AliyunMgr()
else:
self.engine = EmptyMgr()
|
socks.py | #!/usr/bin/python
"""Minimal non-feature complete socks proxy"""
import random
import socket
from SocketServer import StreamRequestHandler, ThreadingTCPServer
from struct import pack, unpack
import threading
import sys
def debug(s):
    # Write a debug message to stderr (Python 2 print-chevron syntax).
    print >>sys.stderr, 'socks.py: ', s
def error(s):
    # Write an error message to stderr (Python 2 print-chevron syntax).
    print >>sys.stderr, 'socks.py, ERROR: ', s
class MyTCPServer(ThreadingTCPServer):
    """Threading TCP server that raises on idle timeout instead of ignoring it."""
    # Allow quick restarts on the same port (SO_REUSEADDR).
    allow_reuse_address = True
    def handle_timeout(self):
        # Propagate the timeout so the accept loop in __main__ terminates.
        raise Exception('timeout')
CLOSE = object()  # sentinel passed through recv()/send() meaning "close this direction"
VERSION = '\x05'  # SOCKS protocol version 5
NOAUTH = '\x00'  # auth method: no authentication required
USERPASS = '\x02'  # auth method: username/password (RFC 1929)
CONNECT = '\x01'  # request command: CONNECT
IPV4 = '\x01'  # address type: IPv4
IPV6 = '\x04'  # address type: IPv6
DOMAIN_NAME = '\x03'  # address type: domain name
SUCCESS = '\x00'  # reply code: succeeded
# Runtime options; rebound by the command-line parsing in __main__ and read
# (via `global`) inside SocksHandler.handle().
password = None
username = None
allow_v4 = False
def send(dest, msg):
    # Forward msg to socket dest; the CLOSE sentinel shuts down the write
    # side (best-effort) and closes the socket instead of sending data.
    if msg == CLOSE:
        try: dest.shutdown(socket.SHUT_WR)
        except: pass
        dest.close()
        return 0
    else:
        return dest.sendall(msg)
def recv(source, buffer):
    # Receive up to `buffer` bytes; an empty read means the peer closed, so
    # translate that into the CLOSE sentinel for forward().
    data = source.recv(buffer)
    if data == '':
        return CLOSE
    else:
        return data
def forward(source, dest, name):
    # Pump bytes from source to dest until source hangs up, then propagate
    # the close to dest. `name` is only used for debug logging.
    while True:
        data = recv(source, 4000)
        if data == CLOSE:
            send(dest, CLOSE)
            debug('%s hung up' % name)
            return
        # debug('Sending (%d) %r' % (len(data), data))
        send(dest, data)
def spawn_forwarder(source, dest, name):
    # Run forward() in a daemon thread so one direction of the proxy can be
    # pumped concurrently with the other.
    t = threading.Thread(target=forward, args=(source, dest, name))
    t.daemon = True
    t.start()
class SocksHandler(StreamRequestHandler):
    """Highly feature incomplete SOCKS 5 implementation.

    Handles one proxied connection: negotiates (optional) authentication,
    parses a CONNECT request, dials the destination and pumps bytes in both
    directions. SOCKS4 is supported only when the module-level allow_v4 flag
    is set. (Python 2 code: reads return str, not bytes.)
    """
    def close_request(self):
        # Delegate to the server so the request socket is torn down the
        # SocketServer way.
        self.server.close_request(self.request)
    def read(self, n):
        # Read exactly n bytes from the client, raising on EOF.
        # NOTE(review): each iteration requests n bytes rather than the
        # remaining n - len(data); if a short read ever occurred this could
        # return more than n bytes — confirm whether that is acceptable here.
        data = ''
        while len(data) < n:
            extra = self.rfile.read(n)
            if extra == '':
                raise Exception('Connection closed')
            data += extra
        return data
    def handle(self):
        # IMRPOVEMENT: Report who requests are from in logging
        # IMPROVEMENT: Timeout on client
        debug('Connection - authenticating')
        version = self.read(1)
        if allow_v4 and version == '\x04':
            # --- SOCKS4 path: CONNECT only, no authentication. ---
            cmd = self.read(1)
            if cmd != CONNECT:
                error('Only supports connect method not (%r) closing' % cmd)
                self.close_request()
                return
            raw_dest_port = self.read(2)
            dest_port, = unpack('>H', raw_dest_port)
            raw_dest_address = self.read(4)
            dest_address = '.'.join(map(str, unpack('>4B', raw_dest_address)))
            # Consume the null-terminated user-id field; its value is ignored.
            user_id = ''
            c = self.read(1)
            while c != '\0':
                user_id += c
                c = self.read(1)
            outbound_sock = socket.socket(socket.AF_INET)
            out_address = socket.getaddrinfo(dest_address,dest_port)[0][4]
            debug("Creating forwarder connection to %s:%d" % (out_address[0], out_address[1]))
            outbound_sock.connect(out_address)
            self.send_reply_v4(outbound_sock.getsockname())
            # One direction in a daemon thread, the other in this thread.
            spawn_forwarder(outbound_sock, self.request, 'destination')
            forward(self.request, outbound_sock, 'client')
            return
        # --- SOCKS5 path. ---
        if version != '\x05':
            error('Wrong version number (%r) closing...' % version)
            self.close_request()
            return
        nmethods = ord(self.read(1))
        method_list = self.read(nmethods)
        global password
        global username
        if password == None and NOAUTH in method_list:
            self.send_no_auth_method()
            debug('Authenticated (no-auth)')
        elif USERPASS in method_list:
            # RFC 1929 username/password sub-negotiation.
            self.send_user_pass_auth_method()
            auth_version = self.read(1)
            if auth_version != '\x01':
                error('Wrong sub-negotiation version number (%r) closing...' % version)
                self.close_request()
                return
            usr_len = ord(self.read(1))
            usr_name = self.read(usr_len)
            pwd_len = ord(self.read(1))
            pwd = self.read(pwd_len)
            if usr_name != username or pwd != password:
                error('Invalid username or password')
                self.close_request()
                return
            debug('Authenticated (user/password)')
            self.send_authenticated()
        else:
            error('Server only supports NOAUTH and user/pass')
            self.send_no_method()
            return
        # If we were authenticating it would go here
        version, cmd, zero, address_type = self.read(4)
        # NOTE(review): the three error branches below close the request but
        # do not return, so parsing continues against a closed connection —
        # confirm whether an early return was intended.
        if version != '\x05':
            error('Wrong version number (%r) closing...' % version)
            self.close_request()
        elif cmd != CONNECT:
            error('Only supports connect method not (%r) closing' % cmd)
            self.close_request()
        elif zero != '\x00':
            error('Mangled request. Reserved field (%r) is not null' % zero)
            self.close_request()
        # Destination address in one of the three SOCKS5 encodings.
        if address_type == IPV4:
            raw_dest_address = self.read(4)
            dest_address = '.'.join(map(str, unpack('>4B', raw_dest_address)))
        elif address_type == IPV6:
            raw_dest_address = self.read(16)
            dest_address = ":".join(map(lambda x: hex(x)[2:],unpack('>8H',raw_dest_address)))
        elif address_type == DOMAIN_NAME:
            dns_length = ord(self.read(1))
            dns_name = self.read(dns_length)
            dest_address = dns_name
        else:
            error('Unknown addressing (%r)' % address_type)
            self.close_request()
        raw_dest_port = self.read(2)
        dest_port, = unpack('>H', raw_dest_port)
        if address_type == IPV6:
            outbound_sock = socket.socket(socket.AF_INET6)
        else:
            outbound_sock = socket.socket(socket.AF_INET)
        try:
            # getaddrinfo also resolves DOMAIN_NAME destinations.
            out_address = socket.getaddrinfo(dest_address,dest_port)[0][4]
        except Exception, e:
            print e
            return
        debug("Creating forwarder connection to %s:%d" % (out_address[0], out_address[1]))
        try:
            outbound_sock.connect(out_address)
        except Exception, e:
            print e
            return
        if address_type == IPV6:
            self.send_reply6(outbound_sock.getsockname())
        else:
            self.send_reply(outbound_sock.getsockname())
        # One direction in a daemon thread, the other in this thread.
        spawn_forwarder(outbound_sock, self.request, 'destination')
        try:
            forward(self.request, outbound_sock, 'client')
        except Exception,e:
            print e
    def send_reply_v4(self, (bind_addr, bind_port)):
        # SOCKS4 reply: VN=0, CD=0x5a (granted), port/address fields zeroed.
        self.wfile.write('\0\x5a\0\0\0\0\0\0')
        self.wfile.flush()
    def send_reply(self, (bind_addr, bind_port)):
        # SOCKS5 success reply carrying the IPv4 bind address of our
        # outbound socket.
        bind_tuple = tuple(map(int, bind_addr.split('.')))
        full_address = bind_tuple + (bind_port,)
        debug('Setting up forwarding port %r' % (full_address,))
        msg = pack('>cccc4BH', VERSION, SUCCESS, '\x00', IPV4, *full_address)
        self.wfile.write(msg)
    def send_reply6(self, (bind_addr, bind_port, unused1, unused2)):
        # SOCKS5 success reply carrying the IPv6 bind address (getsockname
        # on AF_INET6 returns a 4-tuple; flowinfo/scopeid are unused).
        bind_tuple = tuple(map(lambda x: int(x,16), bind_addr.split(':')))
        full_address = bind_tuple + (bind_port,)
        debug('Setting up forwarding port %r' % (full_address,))
        msg = pack('>cccc8HH', VERSION, SUCCESS, '\x00', IPV6, *full_address)
        self.wfile.write(msg)
    def send_no_method(self):
        # Method-selection reply 0xFF: no acceptable auth method; hang up.
        self.wfile.write('\x05\xff')
        self.close_request()
    def send_no_auth_method(self):
        # Method-selection reply: NOAUTH accepted.
        self.wfile.write('\x05\x00')
        self.wfile.flush()
    def send_user_pass_auth_method(self):
        # Method-selection reply: username/password auth selected.
        self.wfile.write('\x05\x02')
        self.wfile.flush()
    def send_authenticated(self):
        # RFC 1929 sub-negotiation success reply.
        self.wfile.write('\x01\x00')
        self.wfile.flush()
if __name__ == '__main__':
    listen_port = 8002
    # Hand-rolled argument parsing; rebinds the module-level auth globals
    # (username, password, allow_v4) that SocksHandler.handle() reads.
    i = 1
    while i < len(sys.argv):
        if sys.argv[i] == '--username':
            username = sys.argv[i+1]
            i += 1
        elif sys.argv[i] == '--password':
            password = sys.argv[i+1]
            i += 1
        elif sys.argv[i] == '--port':
            listen_port = int(sys.argv[i+1])
            i += 1
        elif sys.argv[i] == '--allow-v4':
            allow_v4 = True
        else:
            if sys.argv[i] != '--help': debug('unknown option "%s"' % sys.argv[i])
            print('usage: socks.py [--username <user> --password <password>] [--port <listen-port>]')
            sys.exit(1)
        i += 1
    debug('Listening on port %d...' % listen_port)
    server = MyTCPServer(('localhost', listen_port), SocksHandler)
    # handle_timeout() raises, so 190 s with no incoming connection tears
    # the accept loop down with an exception.
    server.timeout = 190
    while True:
        server.handle_request()
|
viewing.py | #A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* Copyright (c) Schrodinger, LLC.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-* Filipe Maia (slicing code)
#-*
#-*
#Z* -------------------------------------------------------------------
from . import colorprinting
if True:
import sys
import threading
import pymol
from . import selector
import copy
from . import parsing
import re
cmd = sys.modules["pymol.cmd"]
from .cmd import _cmd, Shortcut, \
_feedback,fb_module,fb_mask, \
repres,repres_sc, is_string, is_list, \
repmasks,repmasks_sc, \
toggle_dict,toggle_sc,stereo_dict,stereo_sc, \
palette_dict, palette_sc, window_dict, window_sc, \
safe_list_eval, safe_alpha_list_eval, \
location_code, location_sc, boolean_dict, boolean_sc, \
DEFAULT_ERROR, DEFAULT_SUCCESS
palette_colors_dict = {
'rainbow_cycle' : 'magenta blue cyan green yellow orange red magenta',
'rainbow_cycle_rev' : 'magenta red orange yellow green cyan blue magenta',
'rainbow' : 'blue cyan green yellow orange red',
'rainbow_rev' : 'red orange yellow green cyan blue',
'rainbow2' : 'blue cyan green yellow orange red',
'rainbow2_rev' : 'red orange yellow green cyan blue',
'gcbmry' : 'green cyan blue magenta red yellow',
'yrmbcg' : 'yellow red magenta blue cyan green',
'cbmr' : 'cyan blue magenta red',
'rmbc' : 'red magenta blue cyan',
}
rep_list = [ "lines", "sticks", "spheres", "dots", "surface",
"mesh", "nonbonded", "nb_spheres", "cartoon",
"ribbon", "labels", "slice", "ellipsoids", "volume" ]
scene_action_sc = Shortcut(['store','recall','clear','insert_before',
'insert_after','next','previous',
'start', 'update','rename','delete',
'order', 'sort', 'first',
'append'])
scene_action_dict = {}
scene_action_dict_sc = Shortcut([])
view_sc = Shortcut(['store','recall','clear'])
def zoom(selection="all", buffer=0.0, state=0, complete=0, animate=0, *, _self=cmd):
'''
DESCRIPTION
"zoom" scales and translates the window and the origin to cover the
atom selection.
USAGE
zoom [ selection [, buffer [, state [, complete [, animate ]]]]]
EXAMPLES
zoom
zoom complete=1
zoom 142/, animate=3
zoom (chain A)
ARGUMENTS
selection = string: selection-expression or name pattern {default: all}
buffer = float: distance {default: 0}
state = 0: uses all coordinate states {default}
state = -1: uses only coordinates for the current state
state > 0: uses coordinates for a specific state
complete = 0 or 1: will insure no atoms centers are clipped
animate < 0: uses the default animation duration
animate = 0: no animation
animate > 0: animates using the provided duration in seconds
PYMOL API
cmd.zoom(string selection, float buffer, int state, int complete,
int animate)
NOTES
The zoom command normally tries to guess an optimal zoom level for
visualization, balancing closeness against occasional clipping of
atoms out of the field of view. You can change this behavior by
setting the complete option to 1, which will guarantee that the
atom positions for the entire selection will fit in the field of
an orthoscopic view.
To absolutely prevent clipping, you may also need to add an
additional buffer (typically 2 A) to account for graphical
representations which extend beyond the atom coordinates.
SEE ALSO
origin, orient, center
'''
# preprocess selection
selection = selector.process(selection)
#
with _self.lockcm:
r = _cmd.zoom(_self._COb,str(selection),float(buffer),
int(state)-1,int(complete),float(animate))
return r
def center(selection="all", state=0, origin=1, animate=0, *, _self=cmd):
'''
DESCRIPTION
"center" translates the window, the clipping slab, and the
origin to a point centered within the atom selection.
USAGE
center [ selection [, state [, origin [, animate ]]]]
EXAMPLES
center chain B
center 145/
ARGUMENTS
selection = string: selection-expression or name pattern (default: "all").
state = 0 (default) use all coordinate states
state = -1 use only coordinates for the current state
state > 0 use coordinates for a specific state
origin = 1 (default) move the origin
origin = 0 leave the origin unchanged
PYMOL API
cmd.center(string selection, int state, int origin)
SEE ALSO
origin, orient, zoom
'''
# preprocess selection
selection = selector.process(selection)
#
with _self.lockcm:
r = _cmd.center(_self._COb,str(selection),int(state)-1,int(origin),float(animate))
return r
clip_action_sc = Shortcut([ 'near','far','move','slab','atoms' ])
def clip(mode, distance, selection=None, state=0, *, _self=cmd):
    '''
DESCRIPTION

    "clip" alters the positions of the clipping planes.

USAGE

    clip mode, distance [, selection [, state ]]

ARGUMENTS

    mode = near, far, move, slab, or atoms

    distance is a floating point value

    selection = atom selection (for mode=atoms only)

EXAMPLES

    clip near, -5           # moves near plane away from you by 5 A
    clip far, 10            # moves far plane towards you by 10 A
    clip move, -5           # moves the slab away from you by 5 A
    clip slab, 20           # sets slab thickness to 20 A
    clip slab, 10, resi 11  # clip 10 A slab about residue 11
    clip atoms, 5, pept     # clip atoms in "pept" with a 5 A buffer
                            # about their current camera positions

PYMOL API

    cmd.clip(string mode, float distance, string selection, int state)

SEE ALSO

    zoom, orient, reset
    '''
    mode = clip_action_sc.auto_err(str(mode), 'mode')
    # a selection only applies to mode=atoms; otherwise pass an empty string
    sele = '' if selection is None else selector.process(selection)
    with _self.lockcm:
        return _cmd.clip(_self._COb, str(mode), float(distance),
                         str(sele), int(state) - 1)
def origin(selection="(all)", object=None, position=None, state=0, *, _self=cmd):
    '''
DESCRIPTION

    "origin" sets the center of rotation about a selection. If an
    object name is specified, it can be used to set the center of
    rotation for the object (for use in animation and editing).

USAGE

    origin [ selection [, object [,position, [, state ]]]]

ARGUMENTS

    selection = string: selection-expression or name-list {default: (all)}

    state = 0 (default) use all coordinate states
    state = -1 use only coordinates for the current state
    state > 0 use coordinates for a specific state

EXAMPLES

    origin chain A
    origin position=[1.0,2.0,3.0]

PYMOL API

    cmd.origin(string object-or-selection)

SEE ALSO

    zoom, orient, reset
    '''
    sele = selector.process(selection)
    with _self.lockcm:
        obj = '' if object is None else object
        if position is None:
            pos = (0.0, 0.0, 0.0)
        else:
            if _self.is_string(position):
                position = safe_list_eval(position)
            # an explicit position overrides the selection entirely
            sele = ''
            pos = position
        return _cmd.origin(_self._COb, sele, str(obj),
                           (float(pos[0]),
                            float(pos[1]),
                            float(pos[2])),
                           int(state) - 1)
def orient(selection="(all)", state=0, animate=0, *, _self=cmd):
    '''
DESCRIPTION

    "orient" aligns the principal components of the atoms in the
    selection with the XYZ axes.

USAGE

    orient [ selection [, state [, animate ]]]

ARGUMENTS

    selection = a selection-expression or name-pattern {default: (all)}

    state = 0: use all coordinate states {default}
    state = -1: uses only coordinates for the current state
    state > 0: uses coordinates for a specific state

EXAMPLES

    orient organic

NOTES

    The function is similar to the orient command in X-PLOR.

PYMOL API

    cmd.orient(string object-or-selection, int state, float animate)

SEE ALSO

    zoom, origin, reset
    '''
    sele = selector.process(selection)
    with _self.lockcm:
        return _cmd.orient(_self._COb, "(" + sele + ")",
                           int(state) - 1, float(animate))
def move(axis, distance, *, _self=cmd):
    '''
DESCRIPTION

    "move" translates the camera about one of the three primary axes.

USAGE

    move axis, distance

EXAMPLES

    move x, 3
    move y, -1

PYMOL API

    cmd.move(string axis, float distance)

SEE ALSO

    turn, rotate, translate, zoom, center, clip
    '''
    with _self.lockcm:
        return _cmd.move(_self._COb, str(axis), float(distance))
def enable(name='all', parents=0, *, _self=cmd):
    '''
DESCRIPTION

    "enable" turns on display of one or more objects and/or selections.

USAGE

    enable name

ARGUMENTS

    name = name-pattern or selection.

NOTES

    If name matches a selection name, then selection indicator dots
    are shown for atoms in that selection. If name is a
    selection-expression, then all objects with atoms in that
    selection are enabled.

    For an object's content to be displayed in the 3D viewer, the
    object must be enabled AND at least one of the available
    representations must be shown.

PYMOL API

    cmd.enable(string object-name)

EXAMPLES

    enable target_protein  # enables the target_protein object
    enable 1dn2.*   # enables all entities starting with 1dn2.
    enable *lig     # enables all entities ending with lig

SEE ALSO

    show, hide, disable
    '''
    # startswith() instead of name[0] avoids IndexError on an empty name
    if str(name).startswith('('):
        # parenthesized argument is a selection-expression
        selection = selector.process(name)
        with _self.lockcm:
            r = _cmd.onoff_by_sele(_self._COb, selection, 1)
    else:
        with _self.lockcm:
            r = _cmd.onoff(_self._COb, str(name), 1, int(parents))
    return r
def disable(name='all', *, _self=cmd):
    '''
DESCRIPTION

    "disable" turns off display of one or more objects and/or selections.

USAGE

    disable name

ARGUMENTS

    name = name-pattern or selection.

PYMOL API

    cmd.disable(string name)

SEE ALSO

    show, hide, enable
    '''
    # startswith() instead of name[0] avoids IndexError on an empty name
    if str(name).startswith('('):
        # parenthesized argument is a selection-expression
        selection = selector.process(name)
        with _self.lockcm:
            r = _cmd.onoff_by_sele(_self._COb, selection, 0)
    else:
        with _self.lockcm:
            r = _cmd.onoff(_self._COb, str(name), 0, 0)
    return r
def _rep_to_repmask(rep):
    """Convert a space-separated string of representation names into the
    combined representation bit mask (OR of the ``repmasks`` values).

    Unknown or ambiguous names raise via ``repmasks_sc.auto_err``.
    """
    repn = 0
    # use a distinct loop variable; the original shadowed the "rep" argument
    for word in rep.split():
        word = repmasks_sc.auto_err(word, 'representation')
        repn |= repmasks[word]
    return repn
def toggle(representation="lines", selection="all", *, _self=cmd):
    '''
DESCRIPTION

    "toggle" toggles the visibility of a representation within a
    selection.

USAGE

    toggle [ representation [, selection ]]

ARGUMENTS

    representation = string: named representation {default: lines}

    selection = string: atom selection {default: all}

NOTES

    If the representation is enabled for any atom in the selection, it will
    be turned off.

PYMOL API

    cmd.toggle(string representation, string selection)

SEE ALSO

    show, hide
    '''
    with _self.lockcm:
        # "object" toggles whole-object visibility (-2) rather than a rep bit
        mask = -2 if representation == 'object' else _rep_to_repmask(representation)
        sele = selector.process(selection)
        return _cmd.toggle(_self._COb, str(sele), int(mask))
def _showhide(rep, selection, value, _self):
    # Shared implementation behind show/show_as/hide.
    # If only one argument was given and it reads like a selection rather
    # than a representation name, treat it as the selection.
    if not selection and (rep in ("", "all") or '(' in rep or '/' in rep):
        selection = rep
        rep = "wire" if value else "everything"
    sele = selector.process(selection) or "all"
    mask = _rep_to_repmask(rep)
    with _self.lockcm:
        return _cmd.showhide(_self._COb, str(sele), int(mask), value)
def show(representation="wire", selection="", *, _self=cmd):
    '''
DESCRIPTION

    "show" turns on representations for objects and selections.

USAGE

    show [ representation [, selection ]]

ARGUMENTS

    representation = lines, spheres, mesh, ribbon, cartoon, sticks,
    dots, surface, labels, extent, nonbonded, nb_spheres, slice,
    extent, slice, dashes, angles, dihedrals, cgo, cell, callback,
    or everything

    selection = string: a selection-expression or name-pattern

NOTES

    With no arguments, "show" alone turns on lines for all bonds and
    nonbonded for all atoms in all molecular objects.

EXAMPLES

    show
    show ribbon
    show lines, (name CA+C+N)

SEE ALSO

    hide, enable, disable
    '''
    # value=1 -> turn the representation on
    return _showhide(representation, selection, 1, _self)
def show_as(representation="wire", selection="", *, _self=cmd):
    '''
DESCRIPTION

    "as" turns on and off atom and bond representations.

USAGE

    as representation [, selection ]

ARGUMENTS

    representation = lines, spheres, mesh, ribbon, cartoon, sticks,
    dots, surface, labels, extent, nonbonded, nb_spheres, slice,
    extent, slice, dashes, angles, dihedrals, cgo, cell, callback,
    volume or everything

    selection = string {default: all}

EXAMPLES

    as lines, name CA+C+N
    as ribbon

PYMOL API

    cmd.show_as(string representation, string selection)

NOTES

    "selection" can be an object name
    "as" alone will turn on lines and nonbonded and hide everything else.

SEE ALSO

    show, hide, enable, disable
    '''
    # value=2 -> show the representation exclusively (hide all others)
    return _showhide(representation, selection, 2, _self)
def hide(representation="everything", selection="", *, _self=cmd):
    '''
DESCRIPTION

    "hide" turns off atom and bond representations.

USAGE

    hide [ representation [, selection ]]

ARGUMENTS

    representation = lines, spheres, mesh, ribbon, cartoon,
    sticks, dots, surface, labels, extent, nonbonded, nb_spheres,
    slice, extent, slice, dashes, angles, dihedrals, cgo, cell, callback,
    or everything

    selection = string: a selection-expression or name-pattern

EXAMPLES

    hide lines, all
    hide ribbon

PYMOL API

    cmd.hide(string representation, string selection)

SEE ALSO

    show, enable, disable
    '''
    # value=0 -> turn the representation off
    return _showhide(representation, selection, 0, _self)
def get_view(output=1, quiet=1, *, _self=cmd):
    '''
DESCRIPTION

    "get_view" returns and optionally prints out the current view
    information in a format which can be embedded into a command
    script and can be used in subsequent calls to "set_view".

    If a log file is currently open, get_view will not write the view
    matrix to the screen unless the "output" parameter is 2.

USAGE

    get_view [output]

ARGUMENTS

    output = 0: do not output matrix to screen
    output = 1: output matrix to screen (unless quiet or logging)
    output = 2: force output to screen even if log file is open
    output = 3: return formatted string instead of a list

NOTES

    Contents of the view matrix:

    * 0  -  8: column-major 3x3 matrix which rotates model space to camera space
    * 9  - 11: origin of rotation relative to camera (in camera space)
    * 12 - 14: origin of rotation (in model space)
    * 15: front plane distance from the camera
    * 16: rear plane distance from the camera
    * 17: orthoscopic flag (+/-) and field of view (if abs(value) > 1)

    The camera always looks down -Z with its +X left and its +Y down.
    Therefore, in the default view, model +X is to the observer's
    right, +Y is upward, and +Z points toward the observer.

PYMOL API

    cmd.get_view(output=1, quiet=1)

SEE ALSO

    set_view
    '''
    # NOTE: the two no-op "if True:" scaffolding levels from the original
    # were removed; the docstring now matches the actual output behavior.
    with _self.lockcm:
        r = _cmd.get_view(_self._COb)
        output = int(output)
        if (_self.get_setting_int("logging") != 0) and (output < 3):
            if not quiet:
                print(" get_view: matrix written to log file.")
            _self.log("_ set_view (\\\n","cmd.set_view((\\\n")
            _self.log("_  %14.9f, %14.9f, %14.9f,\\\n"%r[0:3] ,
                      "  %14.9f, %14.9f, %14.9f,\\\n"%r[0:3])
            _self.log("_  %14.9f, %14.9f, %14.9f,\\\n"%r[4:7] ,
                      "  %14.9f, %14.9f, %14.9f,\\\n"%r[4:7])
            _self.log("_  %14.9f, %14.9f, %14.9f,\\\n"%r[8:11] ,
                      "  %14.9f, %14.9f, %14.9f,\\\n"%r[8:11])
            _self.log("_  %14.9f, %14.9f, %14.9f,\\\n"%r[16:19],
                      "  %14.9f, %14.9f, %14.9f,\\\n"%r[16:19])
            _self.log("_  %14.9f, %14.9f, %14.9f,\\\n"%r[19:22],
                      "  %14.9f, %14.9f, %14.9f,\\\n"%r[19:22])
            _self.log("_  %14.9f, %14.9f, %14.9f )\n"%r[22:25] ,
                      "  %14.9f, %14.9f, %14.9f ))\n"%r[22:25])
            if output < 2:  # suppress screen echo if we have a log file open
                output = 0
        if output and (not quiet) and (output < 3):
            print("### cut below here and paste into script ###")
            print("set_view (\\")
            print("  %14.9f, %14.9f, %14.9f,\\"%r[0:3])
            print("  %14.9f, %14.9f, %14.9f,\\"%r[4:7])
            print("  %14.9f, %14.9f, %14.9f,\\"%r[8:11])
            print("  %14.9f, %14.9f, %14.9f,\\"%r[16:19])
            print("  %14.9f, %14.9f, %14.9f,\\"%r[19:22])
            print("  %14.9f, %14.9f, %14.9f )"%r[22:25])
            print("### cut above here and paste into script ###")
        if output == 3:
            return ("set_view (\\\n"+
                    "  %14.9f, %14.9f, %14.9f,\\\n"%r[0:3] +
                    "  %14.9f, %14.9f, %14.9f,\\\n"%r[4:7] +
                    "  %14.9f, %14.9f, %14.9f,\\\n"%r[8:11] +
                    "  %14.9f, %14.9f, %14.9f,\\\n"%r[16:19] +
                    "  %14.9f, %14.9f, %14.9f,\\\n"%r[19:22] +
                    "  %14.9f, %14.9f, %14.9f )\n"%r[22:25])
        # default: return the 18-float view tuple (see NOTES)
        r = r[0:3] + r[4:7] + r[8:11] + r[16:25]
        return r
def set_view(view, animate=0, quiet=1, hand=1, *, _self=cmd):
    r'''
DESCRIPTION

    "set_view" sets viewing information for the current scene,
    including the rotation matrix, position, origin of rotation,
    clipping planes, and the orthoscopic flag.

USAGE

    set_view [ view ]

EXAMPLE

    set_view (\
        0.999876618, -0.000452542, -0.015699286,\
        0.000446742, 0.999999821, -0.000372844,\
        0.015699454, 0.000365782, 0.999876678,\
        0.000000000, 0.000000000, -150.258514404,\
        11.842411041, 20.648729324, 8.775371552,\
        118.464958191, 182.052062988, 0.000000000 )

PYMOL API

    cmd.set_view(string-or-sequence view)

SEE ALSO

    get_view
    '''
    if isinstance(view, (str, bytes)):
        view = safe_list_eval(view)
    if len(view) != 18:
        raise pymol.CmdException(
            "bad view argument; should be a sequence of 18 floats")
    v = [float(x) for x in view]
    # expand the 18-float view into the homogeneous 4x4 rotation matrix
    # plus camera/model origins and clipping info expected by the C layer
    packed = (v[0], v[1], v[2], 0.0,
              v[3], v[4], v[5], 0.0,
              v[6], v[7], v[8], 0.0,
              0.0, 0.0, 0.0, 1.0,
              v[9], v[10], v[11],
              v[12], v[13], v[14],
              v[15], v[16], v[17])
    with _self.lockcm:
        return _cmd.set_view(_self._COb, packed,
                             int(quiet), float(animate), int(hand))
def view(key, action='recall', animate=-1, *, _self=cmd):
    '''
DESCRIPTION

    "view" saves and restore camera views.

USAGE

    view key [, action [, animate]]

ARGUMENTS

    key = string or *

    action = store, recall, clear: {default: recall}

NOTES

    Views F1 through F12 are automatically bound to function keys
    provided that "set_key" has not been used to redefine the
    behaviour of the respective key, and that a "scene" has not been
    defined for that key.

EXAMPLES

    view 0, store
    view 0

PYMOL API

    cmd.view(string key, string action)

SEE ALSO

    scene, set_view, get_view
    '''
    pymol=_self._pymol
    if key=='*':
        action = view_sc.auto_err(action,'action')
        if action=='clear':
            # wipe all stored views and reset the name-completion shortcut
            pymol._view_dict = {}
            pymol._view_dict_sc = Shortcut(pymol._view_dict.keys())
        else:
            # any other action combined with key='*' just lists stored views
            print(" view: stored views:")
            lst = list(pymol._view_dict.keys())
            lst.sort()
            parsing.dump_str_list(lst)
    else:
        action = view_sc.auto_err(action,'action')
        if action=='recall':
            key = pymol._view_dict_sc.auto_err(key,'view')
            _self.set_view(pymol._view_dict[key],animate=animate)
            if _feedback(fb_module.scene,fb_mask.actions,_self): # redundant
                print(" view: \"%s\" recalled."%key)
        elif (action=='store') or (action=='update'):
            # store and update are equivalent: overwrite the keyed view
            pymol._view_dict_sc.append(key)
            pymol._view_dict[key]=_self.get_view(0)
            if _feedback(fb_module.scene,fb_mask.actions,_self):
                print(" view: view "+action+"d as \"%s\"."%key)
        elif action=='clear':
            key = pymol._view_dict_sc.auto_err(key,'view')
            if key in pymol._view_dict:
                del pymol._view_dict[key]
                # rebuild the shortcut so completion no longer offers the key
                pymol._view_dict_sc = Shortcut(pymol._view_dict.keys())
                if _feedback(fb_module.scene,fb_mask.actions,_self): # redundant
                    print(" view: '%s' deleted."%key)
def get_viewport(output=1, quiet=1, *, _self=cmd):
    '''
DESCRIPTION

    "get_viewport" returns and optionally prints out the screen viewport size

USAGE

    get_viewport [output]

ARGUMENTS

    output = 0: do not print to screen
    output = 1 {default}: print to screen if not logging and not quiet
    output = 2: force output to screen even if log file is open

PYMOL API

    cmd.get_viewport(output=1, quiet=1)
    '''
    output = int(output)
    with _self.lockcm:
        dims = _cmd.get_viewport(_self._COb)
    if _self.get_setting_int("logging") and output < 3:
        _self.log(f"_ viewport {dims[0]}, {dims[1]}\n", f"cmd.viewport{dims}\n")
        if not quiet:
            print(" get_viewport: data written to log file.")
        if output < 2:
            # a log file is open: suppress the screen echo
            output = 0
    if not quiet and 0 < output < 3:
        print("### cut below here and paste into script ###")
        print("viewport %4d, %4d" % dims)
        print("### cut above here and paste into script ###")
    if output == 3:
        colorprinting.warning(" Warning: get_viewport(3) is deprecated")
        return "viewport ( %4d, %4d )\n" % dims
    return dims
def get_vis(_self=cmd):
    """Return the current object/selection visibility information
    (as produced by the C layer; pass to set_vis to restore)."""
    with _self.lockcm:
        result = _cmd.get_vis(_self._COb)
    return result
def set_vis(dict, *, _self=cmd):
    """Restore visibility information previously captured with get_vis."""
    with _self.lockcm:
        result = _cmd.set_vis(_self._COb, dict)
    return result
def get_colorection(key, *, _self=cmd):
    """Capture atom-wise color information under the given key."""
    with _self.lockcm:
        result = _cmd.get_colorection(_self._COb, key)
    return result
def set_colorection(dict, key, *, _self=cmd):
    """Restore atom-wise color information captured with get_colorection."""
    with _self.lockcm:
        result = _cmd.set_colorection(_self._COb, dict, key)
    return result
def del_colorection(dict, key, *, _self=cmd):
    """Release the color storage held under the given key."""
    with _self.lockcm:
        result = _cmd.del_colorection(_self._COb, dict, key)
    return result
def get_scene_list(_self=cmd):
    """Return the ordered list of scene names."""
    with _self.lockcm:
        result = _cmd.get_scene_order(_self._COb)
    return result
def chain_session(_self=cmd):
    """Load the next session file in a numbered series.

    Looks at the "session_file" setting, increments the first embedded
    number (trying up to 10 successors, in both .pse and .psw variants)
    and loads the first existing file. Returns 1 on success, 0 if no
    successor file was found. Assumes a locked interpreter.
    """
    import os
    re_pat = re.compile(r"[0-9]+\.")
    session_file = str(_self.get("session_file"))
    if session_file:  # find next session file, if it exists
        mo = re_pat.search(session_file)
        if mo is not None:
            pat = mo.group(0)
            if pat:
                file_no = int(float(pat)) + 1
                new_form = r"%0" + str(len(pat) - 1) + "d."
                for new_num in range(file_no, file_no + 11):
                    new_pat = new_form % new_num
                    new_file = re_pat.sub(new_pat, session_file)
                    # try both PSE and PSW extensions.
                    # BUGFIX: re.I was previously passed as re.sub's "count"
                    # positional argument; it must be passed as flags= for
                    # the extension match to be case-insensitive.
                    if not os.path.exists(new_file):
                        new_file = re.sub(r"\.pse$", ".psw", new_file, flags=re.I)
                    if not os.path.exists(new_file):
                        new_file = re.sub(r"\.psw$", ".pse", new_file, flags=re.I)
                    if os.path.exists(new_file):
                        _self.do("_ cmd.load(r'''" + new_file + "''',format='psw')")
                        return 1
    return 0
def scene_order(names, sort=0, location='current', quiet=1, *, _self=cmd):
    '''
DESCRIPTION

    "scene_order" changes the ordering of scenes.

USAGE

    scene_order names, sort, location

ARGUMENTS

    names = string: a space-separated list of names

    sort = yes or no {default: no}

    location = top, current, or bottom {default: current}

EXAMPLES

    scene_order *,yes
    scene_order F6 F4 F3
    scene_order 003 006 004, location=top

    # if names have spaces
    cmd.scene_order(["name 1", "name 2"])

PYMOL API

    cmd.scene_order(names: Union[list, str], sort: str, location: str)

SEE ALSO

    scene
    '''
    loc = location_sc.auto_err(location, 'location')
    if is_string(sort):
        # accept yes/no/on/off style strings for the sort flag
        sort = boolean_dict[boolean_sc.auto_err(sort, 'sort option')]
    if isinstance(names, str):
        # a plain string is a space-separated list of scene names
        names = names.split()
    with _self.lockcm:
        return _cmd.scene_order(_self._COb, names, sort, loc)
def _scene_get_current_message(_self=cmd):
    # Return the text of the currently displayed scene message, or None
    # if the active wizard is not a scene-originated Message wizard.
    wiz = _self.get_wizard()
    if wiz is None:
        return None
    if wiz.__class__.__name__ != 'Message' or not hasattr(wiz, 'from_scene'):
        return None
    return '\n'.join(wiz.message)
def scene_recall_message(message, *, _self=cmd):
    '''
INTERNAL, DO NOT USE.

Display a scene message.
    '''
    wiz = _self.get_wizard()
    # is a scene-originated Message wizard currently showing?
    showing_scene_message = (wiz is not None
                             and wiz.__class__.__name__ == 'Message'
                             and hasattr(wiz, 'from_scene'))
    if not message:
        # no new message: just dismiss a previous scene message, if any
        if showing_scene_message:
            _self.wizard()
        return
    if is_string(message):
        message = message.splitlines()
    elif not is_list(message):
        raise TypeError("message %s" % (type(message)))
    launcher = _self.replace_wizard if showing_scene_message else _self.wizard
    launcher("message", *message)
    # mark the new wizard so a later scene change can replace/dismiss it
    _self.get_wizard().from_scene = 1
def scene(key='auto', action='recall', message=None, view=1,
          color=1, active=1, rep=1, frame=1, animate=-1,
          new_key=None, hand=1, quiet=1, sele="all", *, _self=cmd):
    '''
DESCRIPTION

    "scene" saves and restores scenes. A scene consists of the camera
    view, all object activity information, all atom-wise visibilities,
    all atom-wise colors, all representations, the global frame index,
    and may contain a text message to display on playback.

USAGE

    scene [key [,action [, message, [ new_key=new-key-value ]]]]

ARGUMENTS

    key = string, new, auto, or *: use new for an automatically
    numbered new scene, use auto for the current scene (if one
    exists), and use * for all scenes (clear and recall actions only).

    action = store, recall, insert_after, insert_before, next,
    previous, update, rename, or clear: (default = recall). If
    rename, then a new_key argument must be explicitly defined.

    message = string: a text message to display with the scene.

    new_key = string: the new name for the scene

EXAMPLES

    scene *

    scene F1, store
    scene F2, store, Please note the critical hydrogen bond shown in yellow.

    scene F1
    scene F2

    scene F1, rename, new_key=F5

NOTES

    Scenes F1 through F12 are automatically bound to function keys
    provided that "set_key" has not been used to redefine the behaviour
    of the respective key.

SEE ALSO

    view, set_view, get_view
    '''
    action = scene_action_sc.auto_err(action, 'action')
    if is_list(message):
        message = '\n'.join(message)
    # default when called with no arguments
    if key == 'auto':
        if action == 'recall':
            action = 'next'
    # preserve message on update
    if action == 'update':
        if message is None:
            message = _scene_get_current_message(_self)
    # aliases (DEPRECATED)
    if action == 'clear':
        action = 'delete'
    elif action == 'append' or action == 'update':
        action = 'store'
    # presentation auto quit: repeating next/previous past the last scene
    # in presentation mode either chains to the next session file or quits
    if (pymol._scene_quit_on_action == action and
            action in ('next', 'previous') and
            _self.get_setting_boolean("presentation") and
            _self.get_setting_boolean("presentation_auto_quit") and
            _self.get("scene_current_name") == ""):
        if not chain_session(_self):
            _self.quit()
    # call C function
    with _self.lockcm:
        r = _cmd.scene(_self._COb, key, action, message, int(view),
                       int(color), int(active), int(rep), int(frame),
                       float(animate), new_key, int(hand), sele)
    # for presentation auto quit
    pymol._scene_quit_on_action = action
    return r
def _legacy_scene(key='auto', action='recall', message=None, view=1,
                  color=1, active=1, rep=1, frame=1, animate=-1,
                  new_key=None, hand=1, quiet=1, *, _self=cmd):
    ''' FOR INTERNAL USE ONLY. Stores and deletes <=1.7.4 compatible scenes. '''
    pymol=_self._pymol
    view = int(view)
    rep = int(rep)
    color = int(color)
    active = int(active)
    frame = int(frame)
    quiet = int(quiet)
    animate = 0
    with _self.lockcm:
        if key=='*':
            if action=='clear':
                # clear every stored legacy scene
                for key in pymol._scene_dict:
                    # free selections
                    scene_list = pymol._scene_dict[key]
                    if len(scene_list)>3:
                        colorection = scene_list[3]
                        if colorection is not None:
                            # release the color storage held by this scene
                            _self.del_colorection(colorection,key)
                    name = "_scene_"+key+"_*"
                    _self.delete(name)
            else:
                raise ValueError('action=' + action)
        else:
            if action == 'store':
                if key in ('new', 'auto'):
                    raise ValueError('key=' + key)
                if key in pymol._scene_dict:
                    raise RuntimeError('update not supported')
                if rep:
                    # remember per-representation membership via named selections
                    for rep_name in rep_list:
                        name = "_scene_"+key+"_"+rep_name
                        _self.select(name,"rep "+rep_name)
                if is_string(message):
                    if message:
                        if (message[0:1] in [ '"',"'"] and
                            message[-1:] in [ '"',"'"]):
                            # strip one level of surrounding quotes
                            message=message[1:-1]
                        else:
                            message = message.splitlines()
                # scene record layout: [view, vis, frame, colorection, rep, message]
                pymol._scene_dict[key] = [
                    _self.get_view(0) if view else None,
                    _self.get_vis() if active else None,
                    _self.get_frame() if frame else None,
                    _self.get_colorection(key) if color else None,
                    1 if rep else None,
                    message,
                ]
            else:
                raise ValueError('action=' + action)
def session_save_views(session, *, _self=cmd):
    # Stash a deep copy of the stored camera views into the session dict.
    session['view_dict'] = copy.deepcopy(_self._pymol._view_dict)
    return 1
def session_restore_views(session, *, _self=cmd):
    # Pull stored camera views back out of a session dict, if present,
    # and rebuild the name-completion shortcut accordingly.
    pymol = _self._pymol
    if 'view_dict' in session:
        pymol._view_dict = copy.deepcopy(session['view_dict'])
        pymol._view_dict_sc.rebuild(list(pymol._view_dict.keys()))
    return 1
def session_restore_scenes(session, *, _self=cmd):
    # Restore scenes from old session files (<= 1.7.4)
    if 'scene_dict' in session:
        _self.scene('*', 'clear')
        # save initial scene under a temp name guaranteed not to clash
        tempname = '_initial_scene'
        while tempname in session['scene_dict']:
            tempname += '_'
        _self.scene(tempname, 'store')
        frame = 0
        if _self.get_movie_playing():
            # pause playback during conversion; remember where we were
            _self.mstop()
            frame = _self.get_frame()
        for key, data in list(session['scene_dict'].items()):
            _convert_legacy_scene(key, data, _self)
        if frame:
            # resume movie playback at the saved frame
            _self.frame(frame)
            _self.mplay()
        # restore initial scene
        _self.scene(tempname, 'recall', animate=0)
        _self.scene(tempname, 'clear')
    if 'scene_order' in session:
        _self.scene_order(' '.join(session['scene_order']))
    return 1
def _convert_legacy_scene(key, scene_list, _self=cmd):
    # Create a scene from the given legacy scene list and finally delete
    # the colorection and rep selections.
    # NOTE: pads (and thereby mutates) the caller's list to >= 5 entries
    scene_list += [None] * 5
    # per-slot flags: 1 if that aspect was stored in the legacy record
    view, active, frame, color, rep = [(0 if x is None else 1)
                                       for x in scene_list[:5]]
    if frame:
        _self.frame(scene_list[2])
    if view:
        _self.set_view(scene_list[0], 0.0)
    if active:
        _self.disable()
        _self.deselect()
        _self.set_vis(scene_list[1])
    if color:
        # apply, then release the legacy color storage
        _self.set_colorection(scene_list[3], key)
        _self.del_colorection(scene_list[3], key)
    if rep:
        # only atomic representations
        _self.hide('everything', '(*)')
        sele_prefix = _self.get_legal_name('_scene_' + key + '_')
        for rep_name in rep_list:
            _self.show(rep_name, "?" + sele_prefix + rep_name)
        _self.delete(sele_prefix + "*")
    # re-store the reconstructed state under the current scene system
    _self.scene(key, 'store', scene_list[5], view, color, active, rep, frame)
def stereo(toggle='on', quiet=1, *, _self=cmd):
    '''
DESCRIPTION

    "stereo" activates or deactives stereo mode.

USAGE

    stereo [toggle]

ARGUMENTS

    toggle = on, off, crosseye, walleye, quadbuffer, sidebyside, geowall, or openvr

EXAMPLES

    stereo on
    stereo off
    stereo crosseye

NOTES

    "quadbuffer" is the default stereo mode if hardware stereo is available.
    otherwise, "crosseye" is the default.

PYMOL API

    cmd.stereo(string toggle)
    '''
    # translate the user keyword into the internal stereo-mode code
    mode = stereo_dict[stereo_sc.auto_err(str(toggle), 'toggle')]
    with _self.lockcm:
        return _cmd.stereo(_self._COb, mode)
def turn(axis, angle, *, _self=cmd):
    '''
DESCRIPTION

    "turn" rotates the camera about one of the three primary axes,
    centered at the origin.

USAGE

    turn axis, angle

EXAMPLES

    turn x, 90
    turn y, 45

PYMOL API

    cmd.turn(string axis, float angle)

SEE ALSO

    move, rotate, translate, zoom, center, clip
    '''
    with _self.lockcm:
        return _cmd.turn(_self._COb, str(axis), float(angle))
def full_screen(toggle=-1, *, _self=cmd):
    '''
DESCRIPTION

    "full_screen" enables or disables full screen mode.

USAGE

    full_screen [toggle]

EXAMPLES

    full_screen
    full_screen on
    full_screen off

NOTES

    This does not work correctly on all platforms.  If you encounter
    trouble, try using the maximize button on the viewer window
    instead.
    '''
    state = toggle_dict[toggle_sc.auto_err(str(toggle), 'toggle')]
    with _self.lockcm:
        if _self.is_gui_thread():
            return _cmd.full_screen(_self._COb, int(state))
        # not on the GUI thread: dispatch the command to it instead
        return _self._do("full_screen %s" % (state), echo=0)
def rock(mode=-1, *, _self=cmd):
    '''
DESCRIPTION

    "rock" toggles Y axis rocking.

USAGE

    rock

PYMOL API

    cmd.rock()
    '''
    with _self.lockcm:
        return _cmd.rock(_self._COb, int(mode))
def label(selection="(all)", expression="", quiet=1, *, _self=cmd):
    '''
DESCRIPTION

    "label" labels one or more atoms in a selection by evaluating a
    Python expression referencing properties for each atom.

USAGE

    label [ selection [, expression ]]

ARGUMENTS

    selection = string: a selection-expression

    expression = string: a Python expression that can be converted to a string

EXAMPLES

    label chain A, chain
    label name CA,"%s-%s" % (resn,resi)
    label resi 200,"%1.3f" % partial_charge

NOTES

    The symbols defined in the label name space for each atom are:

        name, resi, resn, resv, chain, segi, model, alt, q, b, type,
        index, rank, ID, ss, vdw, elec_radius, label, elem, geom,
        flags, color, cartoon, valence, formal_charge, partial_charge,
        numeric_type, text_type, stereo

    All strings in the expression must be explicitly quoted.

    This operation typically takes several seconds per thousand atoms
    labelled.

    To clear labels, simply omit the expression or set it to ''.
    '''
    sele = selector.process(selection)
    with _self.lockcm:
        return _cmd.label(_self._COb, sele, expression, quiet)
def label2(selection="(all)", expression="", quiet=1, *, _self=cmd):
    # Same interface as "label", but delegates to the alternate
    # _cmd.label2 C-level entry point.
    sele = selector.process(selection)
    with _self.lockcm:
        return _cmd.label2(_self._COb, sele, expression, quiet)
def window(action='show', x=0, y=0, width=0, height=0, *, _self=cmd):
    '''
DESCRIPTION

    "window" controls the visibility of PyMOL's output window

USAGE

    window [ action [, x [, y [, width [, height ]]]]]

PYMOL API

    cmd.window(string action, int x, int y, int width, int height)
    '''
    action = window_dict[str(window_sc.auto_err(action, 'action'))]
    with _self.lockcm:
        from pymol.gui import get_qtwindow as getPyMOLWindow
        qt_window = getPyMOLWindow()
        if qt_window:
            # the Qt GUI manages its own window geometry
            qt_window.window_cmd(action, int(x), int(y), int(width), int(height))
            return DEFAULT_SUCCESS
        return _cmd.window(_self._COb, action, int(x), int(y),
                           int(width), int(height))
def viewport(width=-1, height=-1, *, _self=cmd):
    '''
DESCRIPTION

    "viewport" changes the size of the graphics display area.

USAGE

    viewport width, height

PYMOL API

    cmd.viewport(int width, int height)
    '''
    # use _self (not the global cmd module) so instance-bound sessions
    # behave consistently with the rest of this module
    if _self.is_string(width) and height == -1:
        width = _self.safe_eval(width)
    if _self.is_sequence(width):
        colorprinting.warning(" Warning: Tuple-syntax (parentheses) "
                              "for viewport is deprecated")
        width, height = width
    if not _self.is_gui_thread():
        # resizing must happen on the GUI thread; dispatch and return
        _self.do("viewport %d,%d" % (int(width), int(height)), 0)
        return None
    with _self.lockcm:
        return _cmd.viewport(_self._COb, int(width), int(height))
def bg_color(color="black", *, _self=cmd):
    '''
DESCRIPTION

    "bg_color" sets the background color.

USAGE

    bg_color [ color ]

ARGUMENTS

    color = string: color name or number {default: black}

EXAMPLES

    bg_color grey30
    bg_color

NOTES

    To obtain a transparent background, "unset opaque_background", and
    then use "ray".

SEE ALSO

    set_color, ray

PYMOL API

    cmd.bg_color(string color)
    '''
    resolved = _self._interpret_color(_self, color)
    with _self.lockcm:
        return _cmd.bg_color(_self._COb, str(resolved))
# Map of cartoon type names (as accepted by the "cartoon" command) to the
# integer codes understood by the C layer (_cmd.cartoon).
cartoon_dict = {
    'skip' : -1,
    'automatic' : 0,
    'loop' : 1,
    'rectangle' : 2,
    'oval' : 3,
    'tube' : 4,
    'arrow' : 5,
    'dumbbell' : 6,
    'putty' : 7,
    'dash' : 8,
    'cylinder' : 9,
}
# Abbreviation/tab-completion helper for the cartoon type names above.
cartoon_sc = Shortcut(cartoon_dict.keys())
def cartoon(type, selection="(all)", *, _self=cmd):
    '''
DESCRIPTION

    "cartoon" changes the default cartoon representation for a set of atoms.

USAGE

    cartoon type, selection

ARGUMENTS

    type = automatic, skip, loop, rectangle, oval, tube, arrow, dumbbell

PYMOL API

    cmd.cartoon(string type, string selection)

EXAMPLES

    cartoon rectangle, chain A
    cartoon skip, resi 145-156

NOTES

    This command is rarely required since the default "automatic" mode
    chooses cartoons according to the information in the PDB HELIX and
    SHEET records.
    '''
    sele = selector.process(selection)
    # translate the cartoon name into its integer code
    code = cartoon_dict[cartoon_sc.auto_err(str(type), 'type')]
    with _self.lockcm:
        return _cmd.cartoon(_self._COb, sele, int(code))
def _ray(width,height,antialias,angle,shift,renderer,quiet,_self=cmd):
    # Internal worker for ray(): performs the actual render while holding
    # the API lock and with the busy flag set (so the GUI can show progress
    # and reject conflicting commands). May run on a background thread.
    r = DEFAULT_ERROR
    try:
        _self.lock_without_glut()
        try:
            _cmd.set_busy(_self._COb,1)
            r = _cmd.render(_self._COb,int(width),int(height),
                            int(antialias),
                            float(angle),
                            float(shift),int(renderer),
                            int(quiet))
        finally:
            # always clear the busy flag, even if rendering raised
            _cmd.set_busy(_self._COb,0)
    finally:
        # always release the API lock
        _self.unlock(r)
    return r
def capture(quiet=1, *, _self=cmd):
    """Capture the current OpenGL frame (equivalent to draw(antialias=-2))."""
    _self.draw(antialias=-2, quiet=quiet)
def draw(width=0, height=0, antialias=-1, quiet=1, *, _self=cmd):
    '''
DESCRIPTION

    "draw" creates an OpenGL-based image of the current frame.

USAGE

    draw [width [,height [,antialias ]]]

ARGUMENTS

    width = integer {default: 0 (current)}
    height = integer {default: 0 (current)}
    antialias = integer {default: -1 (use antialias setting)}

EXAMPLES

    draw
    draw 1600

NOTES

    Default width and height are taken from the current viewpoint. If
    one is specified but not the other, then the missing value is
    scaled so as to preserve the current aspect ratio.

    Because this feature uses the OpenGL rendering context to piece
    together the image, it does not work when running in the
    command-line only mode.

    On certain graphics hardware, "unset opaque_background" followed
    by "draw" will produce an image with a transparent background.
    However, better results can usually be obtained using "ray".

PYMOL API

    cmd.draw(int width, int height, int antialias, int quiet)

SEE ALSO

    ray, png, save
    '''
    # stop movie playback and sculpting before grabbing the frame
    if _self.get_movie_playing():
        _self.mstop()
    if _self.get_setting_boolean("sculpting"):
        _self.set("sculpting", "off", quiet=1)
    # make sure that there aren't any pending display events
    # TODO breaks QOpenGLWidget
    # _self.refresh()
    with _self.lockcm:
        return _cmd.draw(_self._COb, int(width), int(height),
                         int(antialias), int(quiet))
def ray(width=0, height=0, antialias=-1, angle=0.0, shift=0.0,
        renderer=-1, quiet=1, async_=0, _self=cmd, **kwargs):
    '''
DESCRIPTION

    "ray" creates a ray-traced image of the current frame. This
    can take some time (up to several minutes, depending on image
    complexity).

USAGE

    ray [width [,height [,antialias [,angle [,shift [,renderer [,quiet
        [,async ]]]]]]]]]

ARGUMENTS

    width = integer {default: 0 (current)}
    height = integer {default: 0 (current)}
    antialias = integer {default: -1 (use antialias setting)}
    angle = float: y-axis rotation for stereo image generation
    {default: 0.0}
    shift = float: x-axis translation for stereo image generation
    {default: 0.0}
    renderer = -1, 0, 1, or 2: respectively, default, built-in,
    pov-ray, or dry-run {default: 0}
    async = 0 or 1: should rendering be done in a background thread?

EXAMPLES

    ray
    ray 1024,768
    ray renderer=2

NOTES

    Default width and height are taken from the current viewpoint. If
    one is specified but not the other, then the missing value is
    scaled so as to preserve the current aspect ratio.

    angle and shift can be used to generate matched stereo pairs

    renderer = 1 uses PovRay.  This is Unix-only and you must have
    "povray" in your path.  It utilizes two two temporary files:
    "tmp_pymol.pov" and "tmp_pymol.png".

    See "help faster" for optimization tips with the builtin renderer.
    See "help povray" for how to use PovRay instead of PyMOL's
    built-in ray-tracing engine.

PYMOL API

    cmd.ray(int width, int height, int antialias, float angle,
            float shift, int renderer, int quiet, int async)

SEE ALSO

    draw, png, save
    '''
    # "async" is a reserved word; accept it via **kwargs for compatibility
    async_ = int(kwargs.pop('async', async_))
    if kwargs:
        raise pymol.CmdException('unknown argument: ' + ', '.join(kwargs))
    arg_tup = (int(width), int(height),
               int(antialias), float(angle),
               float(shift), int(renderer), int(quiet), _self)
    # stop movies, rocking, and sculpting if they're on...
    if _self.get_movie_playing():
        _self.mstop()
    if _self.get_setting_boolean("sculpting"):
        _self.set("sculpting", "off", quiet=1)
    if _self.rock(-2) > 0:
        _self.rock(0)
    if not async_:
        r = _ray(*arg_tup)
    else:
        # daemon=True replaces the deprecated Thread.setDaemon(1) call
        # (deprecated since Python 3.10)
        render_thread = threading.Thread(target=_ray, args=arg_tup, daemon=True)
        render_thread.start()
        r = DEFAULT_SUCCESS
    return r
def refresh(_self=cmd):
    '''
DESCRIPTION

    "refresh" causes the scene to be redrawn as soon as the operating
    system allows it to be done.

USAGE

    refresh

PYMOL API

    cmd.refresh()

SEE ALSO

    rebuild
    '''
    with _self.lockcm:
        if not _self.is_gui_thread():
            # not on the GUI thread: defer the redraw via the command queue
            return _self._do("_ cmd._refresh()")
        return _cmd.refresh_now(_self._COb)
def reset(object='', *, _self=cmd):
    '''
DESCRIPTION

    "reset" restores the rotation matrix to identity, sets the origin
    to the center of mass (approx.) and zooms the window and clipping
    planes to cover all objects. Alternatively, it can reset object
    matrices.

USAGE

    reset [ object ]

PYMOL API

    cmd.reset()
    '''
    name = str(object)
    with _self.lockcm:
        return _cmd.reset(_self._COb, name)
def dirty(_self=cmd):  # OBSOLETE?
    '''Mark the current scene as needing a redraw.'''
    with _self.lockcm:
        return _cmd.dirty(_self._COb)
def meter_reset(_self=cmd):
    '''
DESCRIPTION

    "meter_reset" resets the frames-per-second counter.

USAGE

    meter_reset
    '''
    with _self.lockcm:
        return _cmd.reset_rate(_self._COb)
def load_png(filename, movie=1, stereo=-1, quiet=0, *, _self=cmd):
    '''
DESCRIPTION

    "load_png" loads and displays a PNG file from disk.

USAGE

    load_png filename

NOTES

    If the displayed image is too big for the window, it will be
    reduced 2-fold repeatedly until it fits.
    '''
    # expand ~ and environment variables in the path
    path = _self.exp_path(str(filename))
    with _self.lockcm:
        return _cmd.load_png(_self._COb, path, int(movie),
                             int(stereo), int(quiet))
def rebuild(selection='all', representation='everything', *, _self=cmd):
    '''
DESCRIPTION

    "rebuild" forces PyMOL to recreate geometric objects in
    case any of them have gone out of sync.

USAGE

    rebuild [selection [, representation ]]

ARGUMENTS

    selection = string {default: all}

    representation = string: {default: everything}

PYMOL API

    cmd.rebuild(string selection, string representation)

SEE ALSO

    refresh
    '''
    sele = selector.process(selection)
    # resolve abbreviations like "sti" -> "sticks", erroring on ambiguity
    rep_name = repres_sc.auto_err(representation, 'representation')
    rep_code = repres[rep_name]
    with _self.lockcm:
        return _cmd.rebuild(_self._COb, sele, rep_code)
def recolor(selection='all', representation='everything', *, _self=cmd):
    '''
DESCRIPTION

    "recolor" forces reapplication of colors to existing objects.

USAGE

    recolor [selection [, representation ]]

ARGUMENTS

    selection = string {default: all}

    representation = string {default: everything}

NOTES

    This command often needs to be executed after "set_color" has been
    used to redefine one or more existing colors.

PYMOL API

    cmd.recolor(string selection = 'all', string representation = 'everything')

SEE ALSO

    color, set_color
    '''
    sele = selector.process(selection)
    # resolve abbreviated representation names, erroring on ambiguity
    rep_name = repres_sc.auto_err(representation, 'representation')
    rep_code = repres[rep_name]
    with _self.lockcm:
        return _cmd.recolor(_self._COb, sele, rep_code)
def color(color, selection="(all)", quiet=1, flags=0, *, _self=cmd):
    '''
DESCRIPTION

    "color" changes the color of objects or atoms.

USAGE

    color color [, selection ]

ARGUMENTS

    color = string: color name or number

    selection = string: selection-expression or name-pattern
    corresponding to the atoms or objects to be colored
    {default: (all)}.

NOTES

    When using color ramps, the ramp can be used as a color.

PYMOL API

    cmd.color(string color, string selection, int quiet)

SEE ALSO

    color_deep, set_color, recolor

EXAMPLE

    color cyan
    color yellow, chain A
    '''
    sele = selector.process(selection)
    # resolve named/numeric/ramp colors to a canonical color spec
    color_spec = _self._interpret_color(_self, str(color))
    with _self.lockcm:
        return _cmd.color(_self._COb, str(color_spec), str(sele),
                          int(flags), int(quiet))
def color_deep(color, name='all', quiet=1, *, _self=cmd):
    '''
DESCRIPTION

    Unset all object and atom level (not global) color settings and
    apply given color.

ARGUMENTS

    color = str: color name or number

    name = str: object name or pattern {default: all}

SEE ALSO

    color, unset_deep
    '''
    from pymol.menu import rep_setting_lists
    # collect every per-representation color setting name from the menus
    color_settings = [s for L in rep_setting_lists for (r, s) in L if s]
    _self.unset_deep(color_settings, name, updates=0, quiet=quiet)
    _self.color(color, name, quiet=quiet)
import colorsys

# Color-space converter pairs used by spectrumany(): each entry maps an
# interpolation name to (rgb -> space, space -> rgb) functions.
# 'rgb' interpolates directly in RGB, so both converters are the identity.
_spectrumany_interpolations = {
    'hls': (colorsys.rgb_to_hls, colorsys.hls_to_rgb),
    'hsv': (colorsys.rgb_to_hsv, colorsys.hsv_to_rgb),
    'rgb': ((lambda *rgb: rgb), (lambda *rgb: rgb)),
}
def spectrumany(expression, colors, selection='(all)', minimum=None,
        maximum=None, quiet=1, interpolation='rgb', *, _self=cmd):
    '''
DESCRIPTION

    Pure python implementation of the spectrum command. Supports arbitrary
    color lists instead of palettes and any numerical atom property which
    works in iterate as expression.

    Non-numeric values (like resn) will be enumerated.

    This is not a separate PyMOL command but is used as a fallback in "spectrum".
    '''
    from . import CmdException

    # pick the color-space converters used to interpolate between anchors
    try:
        from_rgb, to_rgb = _spectrumany_interpolations[interpolation]
    except KeyError:
        raise CmdException('interpolation must be one of {}'.format(
            list(_spectrumany_interpolations)))

    # "colors" may be a known palette name or an underscore-separated list
    if ' ' not in colors:
        colors = palette_colors_dict.get(colors) or colors.replace('_', ' ')

    quiet, colors = int(quiet), colors.split()
    n_colors = len(colors)
    if n_colors < 2:
        raise CmdException('please provide at least 2 colors')

    col_tuples = [_self.get_color_tuple(i) for i in colors]
    if None in col_tuples:
        raise CmdException('unknown color')
    # convert anchor colors into the chosen interpolation space
    col_tuples = [from_rgb(*c) for c in col_tuples]

    # expand common property abbreviations to iterate-compatible names
    expression = {'pc': 'partial_charge', 'fc': 'formal_charge',
            'resi': 'resv'}.get(expression, expression)

    if expression == 'count':
        e_list = list(range(_self.count_atoms(selection)))
    else:
        e_list = []
        # iterate() appends into this function's local e_list via space=locals()
        _self.iterate(selection, 'e_list.append(%s)' % (expression), space=locals())

    try:
        v_list = [float(v) for v in e_list if v is not None]
    except (TypeError, ValueError):
        # non-numeric property: replace each value by its rank among the
        # sorted distinct values, then spread colors over those ranks
        if not quiet:
            print(' Spectrum: Expression is non-numeric, enumerating values')
        v_list = e_list = list(map(sorted(set(e_list)).index, e_list))

    if not v_list:
        return (0., 0.)

    if minimum is None: minimum = min(v_list)
    if maximum is None: maximum = max(v_list)
    r = minimum, maximum = float(minimum), float(maximum)
    if not quiet:
        print(' Spectrum: range (%.5f to %.5f)' % r)

    val_range = maximum - minimum
    if not val_range:
        # degenerate range: paint everything with the first color
        _self.color(colors[0], selection)
        return r

    e_it = iter(e_list)
    def next_color():
        # Consumes the next expression value (alter() visits atoms in the
        # same order iterate() did). Returns a packed 0x40RRGGBB color int,
        # or False so 'color = next_color() or color' keeps the old color.
        v = next(e_it)
        if v is None:
            return False
        # clamp to [0,1], then scale into anchor-index space
        v = min(1.0, max(0.0, (float(v) - minimum) / val_range)) * (n_colors - 1)
        i = min(int(v), n_colors - 2)
        p = v - i
        # linear interpolation between neighboring anchors
        col = [(col_tuples[i+1][j] * p + col_tuples[i][j] * (1.0 - p))
                for j in range(3)]
        rgb = [int(0xFF * v) for v in to_rgb(*col)]
        return 0x40000000 + rgb[0] * 0x10000 + rgb[1] * 0x100 + rgb[2]

    _self.alter(selection, 'color = next_color() or color', space=locals())
    _self.recolor(selection)
    return r
def spectrum(expression="count", palette="rainbow",
             selection="(all)", minimum=None, maximum=None,
             byres=0, quiet=1, interpolation='rgb', *, _self=cmd):
    '''
DESCRIPTION

    "spectrum" colors atoms with a spectrum of colors based on an atomic
    property.

USAGE

    spectrum [expression [, palette [, selection [, minimum [, maximum [, byres ]]]]]]

ARGUMENTS

    expression = count, b, q, or pc: respectively, atom count, temperature factor,
    occupancy, or partial charge {default: count}

    palette = string: palette name or space separated list of colors
    {default: rainbow}

    selection = string: atoms to color {default: (all)}

    minimum = float: {default: None (automatic)}

    maximum = float: {default: None (automatic)}

    byres = integer: controls whether coloring is applied per-residue {default: 0}

EXAMPLES

    spectrum b, blue_red, minimum=10, maximum=50
    spectrum count, rainbow_rev, chain A, byres=1

NOTES

    Available palettes include:

    blue_green blue_magenta blue_red blue_white_green
    blue_white_magenta blue_white_red blue_white_yellow blue_yellow
    cbmr cyan_magenta cyan_red cyan_white_magenta cyan_white_red
    cyan_white_yellow cyan_yellow gcbmry green_blue green_magenta
    green_red green_white_blue green_white_magenta green_white_red
    green_white_yellow green_yellow green_yellow_red magenta_blue
    magenta_cyan magenta_green magenta_white_blue
    magenta_white_cyan magenta_white_green magenta_white_yellow
    magenta_yellow rainbow rainbow2 rainbow2_rev rainbow_cycle
    rainbow_cycle_rev rainbow_rev red_blue red_cyan red_green
    red_white_blue red_white_cyan red_white_green red_white_yellow
    red_yellow red_yellow_green rmbc yellow_blue yellow_cyan
    yellow_cyan_white yellow_green yellow_magenta yellow_red
    yellow_white_blue yellow_white_green yellow_white_magenta
    yellow_white_red yrmbcg

PYMOL API

    def spectrum(string expression, string palette,
                 string selection, float minimum, float maximum,
                 int byres, int quiet)
    '''
    palette_hit = palette_sc.shortcut.get(palette)
    if palette_hit:
        palette = palette_hit

    # fall back to the pure-python implementation for custom color lists
    # or expressions the C implementation does not handle
    if not expression.replace('_', '').isalpha() or not palette_hit:
        return spectrumany(expression, palette, selection,
                           minimum, maximum, quiet, interpolation, _self=_self)

    (prefix, digits, first, last) = palette_dict[str(palette)]

    if (maximum is None) or (minimum is None):
        minimum = 0  # signal to auto-adjust levels
        maximum = -1

    # preprocess selection
    selection = selector.process(selection)

    with _self.lockcm:
        r = _cmd.spectrum(_self._COb, str(selection), str(expression),
                          float(minimum), float(maximum),
                          int(first), int(last), str(prefix),
                          int(digits), int(byres), int(quiet))
    return r
def set_color(name, rgb, mode=0, quiet=1, *, _self=cmd):
    '''
DESCRIPTION

    "set_color" defines a new color using the red, green, and blue
    (RGB) color components.

USAGE

    set_color name, rgb

ARGUMENTS

    name = string: name for the new or existing color

    rgb = list of numbers: [red, green, blue] each and all in the range
    (0.0, 1.0) or (0, 255)

EXAMPLES

    set_color red, [ 1.0, 0.0, 0.0 ]
    set_color yellow, [ 255, 255, 0 ]

NOTES

    PyMOL automatically infers the range based on the input arguments.

    It may be necessary to issue "recolor" command in order to force
    recoloring of existing objects.

SEE ALSO

    recolor

PYMOL API

    cmd.set_color(string name, list-of-numbers rgb, int mode )
    '''
    if isinstance(rgb, (str, bytes)):
        rgb = safe_list_eval(rgb)

    if not isinstance(rgb, (list, tuple)) or len(rgb) != 3:
        raise pymol.CmdException(
            "color specification must be a list such as [ 1.0, 0.0, 0.0 ]")

    components = [float(c) for c in rgb]
    # any component above 1.0 means the triple is on the 0-255 scale
    if max(components) > 1.0:
        components = [c / 0xFF for c in components]

    with _self.lockcm:
        r = _cmd.colordef(_self._COb, str(name),
                          components[0], components[1], components[2],
                          int(mode), int(quiet))
        _self._invalidate_color_sc()
    return r
# Aliases for Mother England.
# British-spelling aliases for the colo(u)r commands above.
colour = color
set_colour = set_color
bg_colour = bg_color
recolour = recolor
def ipython_image(*args, _self=cmd, **kwargs):
    """Render the scene and return the image as an IPython.display.Image.

    All arguments are forwarded to cmd.png().

    @rtype IPython.display.Image
    """
    import os, tempfile
    from IPython.display import Image
    # mkstemp instead of the deprecated, race-prone tempfile.mktemp():
    # the file is created atomically; we only need its name for cmd.png().
    fd, filename = tempfile.mkstemp(".png")
    os.close(fd)
    try:
        _self.png(filename, *args, **kwargs)
        return Image(filename)
    finally:
        # always remove the temp file, even if rendering fails
        os.unlink(filename)
|
athenad.py | #!/usr/bin/env python3
import base64
import hashlib
import io
import json
import os
import sys
import queue
import random
import select
import socket
import threading
import time
from collections import namedtuple
from functools import partial
from typing import Any
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import ABNF, WebSocketTimeoutException, WebSocketException, create_connection
import cereal.messaging as messaging
from cereal.services import service_list
from common.api import Api
from common.basedir import PERSIST
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE, PC
from selfdrive.loggerd.config import ROOT
from selfdrive.loggerd.xattr_cache import getxattr, setxattr
from selfdrive.swaglog import cloudlog, SWAGLOG_DIR
import selfdrive.crash as crash
from selfdrive.version import dirty, origin, branch, commit
# Athena backend endpoint and RPC worker-pool size (overridable via env)
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://athena.comma.ai')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', "4"))
# local TCP ports startLocalProxy() is allowed to bridge to (8022 = ssh)
LOCAL_PORT_WHITELIST = set([8022])

# xattr used to remember when a log file was acknowledged by the server;
# the MAX_UNIX_TIME sentinel marks a log as permanently sent
LOG_ATTR_NAME = 'user.upload'
LOG_ATTR_VALUE_MAX_UNIX_TIME = int.to_bytes(2147483647, 4, sys.byteorder)
# ws_recv() gives up on the connection if no ping arrives for this long
RECONNECT_TIMEOUT_S = 70

dispatcher["echo"] = lambda s: s  # trivial RPC for connectivity checks
# queues shared between the worker threads spawned by handle_long_poll()
recv_queue: Any = queue.Queue()
send_queue: Any = queue.Queue()
upload_queue: Any = queue.Queue()
log_send_queue: Any = queue.Queue()
log_recv_queue: Any = queue.Queue()
cancelled_uploads: Any = set()

UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id'])
def handle_long_poll(ws):
    """Run the websocket recv/send, upload, log and RPC workers until one
    of them signals shutdown via end_event, then join them all."""
    end_event = threading.Event()

    threads = [
        threading.Thread(target=ws_recv, args=(ws, end_event)),
        threading.Thread(target=ws_send, args=(ws, end_event)),
        threading.Thread(target=upload_handler, args=(end_event,)),
        threading.Thread(target=log_handler, args=(end_event,)),
    ]
    threads.extend(
        threading.Thread(target=jsonrpc_handler, args=(end_event,))
        for _ in range(HANDLER_THREADS))

    for t in threads:
        t.start()
    try:
        # idle here until a worker sets the event
        while not end_event.is_set():
            time.sleep(0.1)
    except (KeyboardInterrupt, SystemExit):
        end_event.set()
        raise
    finally:
        for t in threads:
            t.join()
def jsonrpc_handler(end_event):
    """Consume raw websocket messages: dispatch JSON-RPC requests to the
    registered methods, route responses to the log-forwarding queue."""
    dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
    while not end_event.is_set():
        try:
            raw = recv_queue.get(timeout=1)
            if "method" in raw:
                # request from the server -> dispatch and answer
                rpc_response = JSONRPCResponseManager.handle(raw, dispatcher)
                send_queue.put_nowait(rpc_response.json)
            elif "result" in raw and "id" in raw:
                # response to a request we sent (log forwarding)
                log_recv_queue.put_nowait(raw)
            else:
                raise Exception("not a valid request or response")
        except queue.Empty:
            pass
        except Exception as e:
            cloudlog.exception("athena jsonrpc handler failed")
            send_queue.put_nowait(json.dumps({"error": str(e)}))
def upload_handler(end_event):
    """Drain upload_queue, skipping any upload cancelled via cancelUpload()."""
    while not end_event.is_set():
        try:
            item = upload_queue.get(timeout=1)
        except queue.Empty:
            continue
        try:
            if item.id in cancelled_uploads:
                # consume the cancellation marker and drop the item
                cancelled_uploads.discard(item.id)
                continue
            _do_upload(item)
        except Exception:
            cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item):
    """PUT the file at upload_item.path to upload_item.url; returns the HTTP response."""
    with open(upload_item.path, "rb") as f:
        size = os.fstat(f.fileno()).st_size
        # advertise the exact payload size alongside any caller-supplied headers
        headers = dict(upload_item.headers)
        headers['Content-Length'] = str(size)
        return requests.put(upload_item.url, data=f, headers=headers, timeout=10)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
    """Return one message from the given cereal service as a dict."""
    if service is None or service not in service_list:
        raise Exception("invalid service")
    # local name chosen so we don't shadow the stdlib socket module
    sub = messaging.sub_sock(service, timeout=timeout)
    msg = messaging.recv_one(sub)
    if msg is None:
        raise TimeoutError
    return msg.to_dict()
@dispatcher.add_method
def setNavDestination(latitude=0, longitude=0):
    """Persist the navigation destination as JSON in Params."""
    Params().put("NavDestination", json.dumps({
        "latitude": latitude,
        "longitude": longitude,
    }))
    return {"success": 1}
@dispatcher.add_method
def listDataDirectory():
    """List every file under ROOT as a path relative to ROOT."""
    files = []
    for dirpath, _, filenames in os.walk(ROOT):
        for name in filenames:
            files.append(os.path.relpath(os.path.join(dirpath, name), ROOT))
    return files
@dispatcher.add_method
def reboot():
    """Reboot the device, refusing while openpilot is started."""
    state_sock = messaging.sub_sock("deviceState", timeout=1000)
    state = messaging.recv_one(state_sock)
    if state is None or state.deviceState.started:
        raise Exception("Reboot unavailable")

    def do_reboot():
        # small delay so the RPC response can still be delivered
        time.sleep(2)
        HARDWARE.reboot()

    threading.Thread(target=do_reboot).start()
    return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
    """Queue a file under ROOT for upload; rejects absolute or traversal paths."""
    if not fn or fn.startswith('/') or '..' in fn:
        return 500
    path = os.path.join(ROOT, fn)
    if not os.path.exists(path):
        return 404

    item = UploadItem(path=path, url=url, headers=headers,
                      created_at=int(time.time() * 1000), id=None)
    # the id is a digest of the item itself, so it is stable per request
    item = item._replace(id=hashlib.sha1(str(item).encode()).hexdigest())
    upload_queue.put_nowait(item)
    return {"enqueued": 1, "item": item._asdict()}
@dispatcher.add_method
def listUploadQueue():
    """Snapshot of the pending uploads, as plain dicts."""
    return [pending._asdict() for pending in list(upload_queue.queue)]
@dispatcher.add_method
def cancelUpload(upload_id):
    """Mark a queued upload as cancelled; upload_handler() will skip it."""
    pending_ids = {item.id for item in list(upload_queue.queue)}
    if upload_id not in pending_ids:
        return 404
    cancelled_uploads.add(upload_id)
    return {"success": 1}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
    """Bridge a remote websocket to a whitelisted local TCP port (e.g. ssh)."""
    try:
        if local_port not in LOCAL_PORT_WHITELIST:
            raise Exception("Requested local port not whitelisted")

        dongle_id = Params().get("DongleId").decode('utf8')
        identity_token = Api(dongle_id).get_token()
        ws = create_connection(remote_ws_uri,
                               cookie="jwt=" + identity_token,
                               enable_multithread=True)

        # socketpair lets ws_proxy_recv signal ws_proxy_send to shut down
        ssock, csock = socket.socketpair()
        local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        local_sock.connect(('127.0.0.1', local_port))
        local_sock.setblocking(0)

        proxy_end_event = threading.Event()
        workers = (
            threading.Thread(target=ws_proxy_recv,
                             args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
            threading.Thread(target=ws_proxy_send,
                             args=(ws, local_sock, csock, proxy_end_event)),
        )
        for worker in workers:
            worker.start()

        return {"success": 1}
    except Exception as e:
        cloudlog.exception("athenad.startLocalProxy.exception")
        raise e
@dispatcher.add_method
def getPublicKey():
    """Return the device's RSA public key, or None if it doesn't exist."""
    key_path = PERSIST + '/comma/id_rsa.pub'
    if not os.path.isfile(key_path):
        return None
    with open(key_path, 'r') as f:
        return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
    """Return the user's configured GitHub SSH keys ('' when unset)."""
    keys = Params().get("GithubSshKeys", encoding='utf8')
    return keys or ''
@dispatcher.add_method
def getSimInfo():
    # Delegate to the hardware abstraction layer (SIM / modem details).
    return HARDWARE.get_sim_info()
@dispatcher.add_method
def getNetworkType():
    # Delegate to the hardware abstraction layer (wifi / cell / ethernet).
    return HARDWARE.get_network_type()
@dispatcher.add_method
def takeSnapshot():
    """Capture back/front camera snapshots as base64-encoded JPEGs."""
    from selfdrive.camerad.snapshot.snapshot import snapshot, jpeg_write

    frames = snapshot()
    if frames is None:
        raise Exception("not available while camerad is started")

    def b64jpeg(frame):
        if frame is None:
            return None
        buf = io.BytesIO()
        jpeg_write(buf, frame)
        return base64.b64encode(buf.getvalue()).decode("utf-8")

    return {'jpegBack': b64jpeg(frames[0]),
            'jpegFront': b64jpeg(frames[1])}
def get_logs_to_send_sorted():
    """Return log filenames that still need (re)sending, in send order,
    excluding the most recent (active) log file."""
    # TODO: scan once then use inotify to detect file creation/deletion
    curr_time = int(time.time())
    logs = []
    for log_entry in os.listdir(SWAGLOG_DIR):
        log_path = os.path.join(SWAGLOG_DIR, log_entry)
        try:
            time_sent = int.from_bytes(getxattr(log_path, LOG_ATTR_NAME), sys.byteorder)
        except (ValueError, TypeError):
            time_sent = 0
        # assume send failed and we lost the response if sent more than one hour ago
        if not time_sent or curr_time - time_sent > 3600:
            logs.append(log_entry)
    # Sort BEFORE slicing: os.listdir() order is arbitrary, so slicing first
    # would drop a random file instead of the most recent (active) one.
    return sorted(logs)[:-1]
def log_handler(end_event):
    """Forward swaglog files to the server one at a time and mark files the
    server acknowledged via an xattr so they aren't sent again."""
    if PC:
        return

    log_files = []
    last_scan = 0
    log_retries = 0
    while not end_event.is_set():
        try:
            # 1) process any acknowledgement from the server
            try:
                result = json.loads(log_recv_queue.get(timeout=1))
                log_success = result.get("success")
                log_entry = result.get("id")
                # only build the path once we know we have an id; joining with
                # a missing (None) id would raise TypeError and trigger a
                # spurious exception/backoff cycle
                if log_entry and log_success:
                    log_path = os.path.join(SWAGLOG_DIR, log_entry)
                    try:
                        # sentinel value marks the file as permanently sent
                        setxattr(log_path, LOG_ATTR_NAME, LOG_ATTR_VALUE_MAX_UNIX_TIME)
                    except OSError:
                        pass  # file could be deleted by log rotation
            except queue.Empty:
                pass

            # 2) rescan the log directory at most every 10s
            curr_scan = sec_since_boot()
            if curr_scan - last_scan > 10:
                log_files = get_logs_to_send_sorted()
                last_scan = curr_scan

            # never send last log file because it is the active log
            # and only send one log file at a time (most recent first)
            if not len(log_files) or not log_send_queue.empty():
                continue

            # 3) enqueue the next log file for sending
            log_entry = log_files.pop()
            try:
                curr_time = int(time.time())
                log_path = os.path.join(SWAGLOG_DIR, log_entry)
                # stamp the attempt time so a lost response is retried in 1h
                setxattr(log_path, LOG_ATTR_NAME, int.to_bytes(curr_time, 4, sys.byteorder))
                with open(log_path, "r") as f:
                    jsonrpc = {
                        "method": "forwardLogs",
                        "params": {
                            "logs": f.read()
                        },
                        "jsonrpc": "2.0",
                        "id": log_entry
                    }
                log_send_queue.put_nowait(json.dumps(jsonrpc))
            except OSError:
                pass  # file could be deleted by log rotation
            log_retries = 0
        except Exception:
            cloudlog.exception("athena.log_handler.exception")
            log_retries += 1

        if log_retries != 0:
            time.sleep(backoff(log_retries))
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
    """Pump websocket -> local socket; on exit, signal ws_proxy_send by
    closing ssock and set end_event."""
    while not end_event.is_set() and not global_end_event.is_set():
        try:
            payload = ws.recv()
            local_sock.sendall(payload)
        except WebSocketTimeoutException:
            pass
        except Exception:
            cloudlog.exception("athenad.ws_proxy_recv.exception")
            break

    # closing ssock wakes the select() inside ws_proxy_send
    ssock.close()
    local_sock.close()
    end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
    """Pump local socket -> websocket until EOF or a shutdown signal from
    ws_proxy_recv (delivered via signal_sock)."""
    while not end_event.is_set():
        try:
            readable, _, _ = select.select((local_sock, signal_sock), (), ())
            if not readable:
                continue
            if readable[0].fileno() == signal_sock.fileno():
                # got end signal from ws_proxy_recv
                end_event.set()
                break
            data = local_sock.recv(4096)
            if not data:
                # local_sock is dead
                end_event.set()
                break
            ws.send(data, ABNF.OPCODE_BINARY)
        except Exception:
            cloudlog.exception("athenad.ws_proxy_send.exception")
            end_event.set()
    signal_sock.close()
def ws_recv(ws, end_event):
    """Receive frames from the websocket: queue data frames for the RPC
    workers, track server pings to detect a dead connection."""
    last_ping = int(sec_since_boot() * 1e9)
    while not end_event.is_set():
        try:
            opcode, data = ws.recv_data(control_frame=True)
            if opcode == ABNF.OPCODE_TEXT:
                recv_queue.put_nowait(data.decode("utf-8"))
            elif opcode == ABNF.OPCODE_BINARY:
                recv_queue.put_nowait(data)
            elif opcode == ABNF.OPCODE_PING:
                last_ping = int(sec_since_boot() * 1e9)
                Params().put("LastAthenaPingTime", str(last_ping))
        except WebSocketTimeoutException:
            ns_since_last_ping = int(sec_since_boot() * 1e9) - last_ping
            if ns_since_last_ping > RECONNECT_TIMEOUT_S * 1e9:
                cloudlog.exception("athenad.wc_recv.timeout")
                end_event.set()
        except Exception:
            cloudlog.exception("athenad.ws_recv.exception")
            end_event.set()
def ws_send(ws, end_event):
    """Send queued outbound messages; RPC responses have priority over
    log payloads."""
    while not end_event.is_set():
        try:
            try:
                data = send_queue.get_nowait()
            except queue.Empty:
                data = log_send_queue.get(timeout=1)
            ws.send(data)
        except queue.Empty:
            pass
        except Exception:
            cloudlog.exception("athenad.ws_send.exception")
            end_event.set()
def backoff(retries):
    """Randomized exponential backoff in seconds, capped below 128."""
    upper = min(128, int(2 ** retries))
    return random.randrange(upper)
def main():
    """Connect to the Athena backend and service RPCs forever, reconnecting
    with randomized exponential backoff."""
    params = Params()
    dongle_id = params.get("DongleId", encoding='utf-8')

    # tag crash reports with this device's identity and build info
    crash.init()
    crash.bind_user(id=dongle_id)
    crash.bind_extra(dirty=dirty, origin=origin, branch=branch, commit=commit,
                     device=HARDWARE.get_device_type())

    ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id

    api = Api(dongle_id)

    conn_retries = 0
    while 1:
        try:
            cloudlog.event("athenad.main.connecting_ws", ws_uri=ws_uri)
            ws = create_connection(ws_uri,
                                   cookie="jwt=" + api.get_token(),
                                   enable_multithread=True,
                                   timeout=1.0)
            cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
            ws.settimeout(1)
            conn_retries = 0
            # blocks until a worker thread signals shutdown
            handle_long_poll(ws)
        except (KeyboardInterrupt, SystemExit):
            break
        except (ConnectionError, TimeoutError, WebSocketException):
            # expected network errors: retry without a crash report
            conn_retries += 1
            params.delete("LastAthenaPingTime")
        except Exception:
            crash.capture_exception()
            cloudlog.exception("athenad.main.exception")

            conn_retries += 1
            params.delete("LastAthenaPingTime")

        time.sleep(backoff(conn_retries))
|
FactAnalyzer.py | """
Home-Monitor:
AI system for the detection of anomalous and possibly harmful events for people.
Written by Gabriel Rojas - 2019
Copyright (c) 2019 G0 S.A.S.
See LICENSE file for details
Class information:
Generic class that represents all the analyzers that can be loaded.
"""
# This module is a library component; refuse to run standalone.
if __name__ == "__main__":
    print('\n\tAlert!! This class can not start itself. Please start using main.py file.')
    exit(0)
import sys
from os.path import dirname, normpath
from multiprocessing import Process, Queue, Value
from time import time, sleep
import abc
import Misc
from Component import Component
from DataPool import Data, Messages, LogTypes, SourceTypes
from LoaderOfChannel import LoaderOfChannel
class FactAnalyzer(Component):
    """ Generic class that represents all the analyzers that can be loaded. """

    # Loaded ML model handle (set by loadModel() implementations)
    MODEL = None
    # Class labels the model predicts (from config key 'CLASSES')
    CLASSES = []
    # Template Data record used to filter which pool records to fetch
    DATA_FILTER = Data()
    # Max number of records fetched per poll (-1 = no limit)
    Limit:int = -1
    # queryTime of the newest record seen; next poll fetches only newer ones
    LastTime:float = -1
    # Child process running the notification channels
    LoaderOfChannelsThread:Process = None
    # Queue used to hand detected events to the channel loader process
    queueMessages:Queue = None

    def start(self):
        """ Start module isolated """
        # build the pool filter from this analyzer's configuration
        self.DATA_FILTER.id = None
        self.DATA_FILTER.package = Misc.hasKey(self.ME_CONFIG, 'FILTER_PACKAGE', '')
        self.DATA_FILTER.source_type = SourceTypes.RECOGNIZER
        self.DATA_FILTER.source_name = Misc.hasKey(self.ME_CONFIG, 'FILTER_NAME', '')
        self.DATA_FILTER.source_item = Misc.hasKey(self.ME_CONFIG, 'FILTER_ITEM', '')
        self.Limit = Misc.hasKey(self.ME_CONFIG, 'FILTER_LIMIT', -1)
        self.CLASSES = Misc.hasKey(self.ME_CONFIG, 'CLASSES', [])
        self.setLoggingSettings(self.loggingLevel)

        # subclass hooks + channel loader process
        self.preLoad()
        self.loadModel()
        self.loadChannels()
        self.loaded()

        self.running = True
        failedSend = 0
        lastAnalizedTime = time() - 60
        while self.running:
            gdList = []
            try:
                if self.Simulating:
                    gdList = self.simulateData(self.DATA_FILTER)
                else:
                    # fetch only records newer than the last poll
                    gdList = self.receive(self.DATA_FILTER, limit=self.Limit, lastTime=self.LastTime)
                    # first element carries the server's query time
                    self.LastTime = float(gdList[0]['queryTime'])
            except:
                self.log(self.CP.errorDetail(Messages.analyzer_error_get), LogTypes.ERROR)

            # template for the aux JSON describing the record that triggered
            # the detection (filled per analyzed record below)
            auxData = '"t":"json", \
                "source_id":"{}", "source_type":"{}", "source_name":"{}", "source_item":"{}", \
                "source_package":"{}", "source_aux":"{}"'

            # emit a synthetic "no event" record if nothing was analyzed
            # for a minute, so downstream consumers see a heartbeat
            if time() - lastAnalizedTime > 60 * 1: # 1 minute
                dataNoEvent = Data()
                dataNoEvent.data = ''
                dataNoEvent.aux = '{"no_event":"no event", "source_aux":{"no_event":"no event"} }'
                gdList.append(dataNoEvent)

            # skip the first element: it is the query-time record, not data
            for objData in gdList[1:]:
                try:
                    lastAnalizedTime = time()
                    t0 = time()
                    dataAnalizedList = self.analyze(objData)
                    #self.log('Time elapsed to get prediction: ' + str(round(time() - t0, 4)), logType=LogTypes.DEBUG, item=self.ME_NAME)
                    #print('Time elapsed to get prediction: ' + str(round(time() - t0, 4)), end='\r')
                    for dataAnalized in dataAnalizedList:
                        # stamp the result with this analyzer's identity
                        dataAnalized.source_type = self.ME_TYPE
                        dataAnalized.source_name = self.ME_NAME
                        if dataAnalized.package == '' or dataAnalized.package == None:
                            dataAnalized.package = objData.package
                        if dataAnalized.aux == '' or dataAnalized.aux == None:
                            dataAnalized.aux = auxData.format(objData.id,
                                objData.source_type, objData.source_name, objData.source_item,
                                dataAnalized.package, dataAnalized.aux)
                        dataAnalized.aux = '{' + dataAnalized.aux + '}'
                        if self.ME_STANDALONE:
                            # standalone mode: display instead of publishing
                            self.showData(dataAnalized, objData)
                        else:
                            if dataAnalized.data != '':
                                # real detection: also notify the channels
                                print(time(),': Notifing a', dataAnalized.data)
                                self.notify(dataAnalized)
                            self.send(dataAnalized)
                            failedSend = 0
                except:
                    self.log(Messages.analyzer_error_send, LogTypes.ERROR)
                    failedSend += 1
                    # stop the analyzer after 3 consecutive send failures
                    if failedSend > 2:
                        self.stop()
                        break

    @abc.abstractmethod
    def preLoad(self):
        """ Implement me! :: Do anything necessary for processing """
        pass

    @abc.abstractmethod
    def loadModel(self):
        """ Loads model """
        raise ValueError('Implement me! :: Load the model')

    def loadChannels(self):
        """ Loads available channels """
        self.log(Messages.system_channels_start, LogTypes.INFO)
        loc = LoaderOfChannel(self.ME_CONFIG, self.CP)
        loc.ANALYZER_PATH = self.ME_PATH
        self.queueMessages = Queue()
        # channels run in their own process, fed via queueMessages
        self.LoaderOfChannelsThread = Process(target=loc.start, args=(self.queueMessages,))
        self.LoaderOfChannelsThread.start()
        #del loc
        self.log(Messages.system_channels_started, LogTypes.INFO)

    @abc.abstractmethod
    def loaded(self):
        """ Implement me! :: Just after load the model """
        pass

    @abc.abstractmethod
    def analyze(self, data:Data):
        """ Implement me! :: Exec prediction to recognize an activity """
        raise ValueError('Implement me! :: Exec analyze of activity')

    def notify(self, data:Data):
        """ Send data to pool of messages to notify """
        self.queueMessages.put(data.toString(dataPlain=True,auxPlain=True))
|
remotefiles.py | #!/usr/bin/env python
# encoding: utf-8
from os import remove
import os.path
from multiprocessing import Process, Lock
from Naked.toolshed.system import stderr, stdout, file_exists
from doxx.commands.pull import pull_binary_file, pull_text_file
from doxx.commands.unpack import unpack_run
from doxx.utilities.filesystem import _create_dirs, _make_os_dependent_path
########################################
#
# [pull_textfile_runner]
# public function
# - pull remote text files
#
########################################
def pull_textfile_runner(text_url_dict):
    """pulls remote text files to local filesystem (public function)"""
    local_paths = list(text_url_dict)  # the local outfile names
    if not local_paths:
        stderr("[!] doxx: Unable to find text files to pull in the key file", exit=0)
    elif len(local_paths) == 1:
        # single text file pull, in-process
        target = local_paths[0]
        _pull_textfile(target, text_url_dict[target])  # dictionary value is the URL
    else:
        # multiple text file pull, each in a separate process
        outputlock = Lock()  # serializes stdout / stderr writes
        iolock = Lock()      # serializes directory creation
        workers = []
        for target in local_paths:
            worker = Process(target=_pull_textfile_multiprocess,
                             args=(target, text_url_dict[target], outputlock, iolock))
            worker.start()
            workers.append(worker)
        for worker in workers:
            worker.join(timeout=60)
########################################
#
# [pull_binaryfile_runner]
# public function
# - pull remote binary files
#
########################################
def pull_binaryfile_runner(binary_url_dict):
    """pulls remote binary files to local filesystem (public function)"""
    local_paths = list(binary_url_dict)  # the local outfile names
    if not local_paths:
        stderr("[!] doxx: Unable to find binary files to pull in the key file", exit=0)
    elif len(local_paths) == 1:
        # single binary file pull, in-process
        target = local_paths[0]
        _pull_binaryfile(target, binary_url_dict[target])  # dictionary value is the URL
    else:
        # multiple binary file pull, each in a separate process
        outputlock = Lock()  # serializes stdout / stderr writes
        iolock = Lock()      # serializes directory creation
        workers = []
        for target in local_paths:
            worker = Process(target=_pull_binaryfile_multiprocess,
                             args=(target, binary_url_dict[target], outputlock, iolock))
            worker.start()
            workers.append(worker)
        for worker in workers:
            worker.join(timeout=60)
###########################################
#
# [pull_github_repo_runner]
# public function
# - pull remote Github repo archives
#
###########################################
def pull_github_repo_runner(repo_url_dict):
    """pulls remote Github repository archives to the local filesystem and unpacks (public function)"""
    local_paths = list(repo_url_dict)  # the local outfile names
    if not local_paths:
        stderr("[!] doxx: Unable to find binary files to pull in the key file", exit=0)
    elif len(local_paths) == 1:
        # single repository pull, in-process
        stdout("[*] doxx: Hang in there. Pulling an entire repository. This may take a bit of time...")
        target = local_paths[0]
        _pull_github_repo(target, repo_url_dict[target])  # dictionary value is the URL
    else:
        # multiple repository pulls, each in a separate process
        stdout("[*] doxx: Hang in there. Pulling " + str(len(local_paths)) + " entire repositories. This may take a bit of time...")
        outputlock = Lock()  # serializes stdout / stderr writes
        iolock = Lock()      # serializes directory creation
        workers = []
        for target in local_paths:
            worker = Process(target=_pull_github_repo_multiprocess,
                             args=(target, repo_url_dict[target], outputlock, iolock))
            worker.start()
            workers.append(worker)
        for worker in workers:
            worker.join(timeout=120)
###############################################
#
# [_pull_textfile]
# private function
# - execute single process text file pulls
#
###############################################
def _pull_textfile(file_path, url):
    """executes single process text file pulls (private function)

    Writes the remote file at *url* to *file_path*, creating any parent
    directories first.  Exits the process with status 1 on failure."""
    # create OS dependent file path (if necessary)
    file_path = _make_os_dependent_path(file_path)
    # make directory structure if necessary for the file path
    # ('!=' instead of 'is not': identity comparison against a str literal is
    # implementation-dependent and a SyntaxWarning on Python 3.8+)
    if os.path.dirname(file_path) != "":
        _create_dirs(file_path)
    # pull the file and write to local filesystem
    try:
        pull_text_file(url, file_path)
    except Exception as e:
        stderr("[!] doxx: Unable to pull '" + file_path + "' from '" + url + "'. Error: " + str(e), exit=1)
    if file_exists(file_path):
        stdout("[+] doxx: '" + file_path + "' ...check!")
    else:
        stderr("[!] doxx: There was an error pulling '" + file_path + "'. Error: Unable to locate local file.", exit=1)
########################################
#
# [_pull_textfile_multiprocess]
# private function
# - execute multi-file, multiprocess
# text file pulls
#
########################################
def _pull_textfile_multiprocess(file_path, url, outputlock, iolock):
    """executes multiprocess, multi-file text file pulls (private function)

    *outputlock* serializes stdout/stderr writes across worker processes;
    *iolock* serializes directory creation.  Failures report with exit=0 so
    sibling pulls can continue."""
    # create OS dependent file path (if necessary)
    file_path = _make_os_dependent_path(file_path)
    # make directory structure if necessary for the file path
    # ('!=' instead of 'is not': identity comparison against a str literal is
    # implementation-dependent and a SyntaxWarning on Python 3.8+)
    if os.path.dirname(file_path) != "":
        # 'with' guarantees the lock is released even if _create_dirs raises
        with iolock:
            _create_dirs(file_path)
    # pull the file and write to local filesystem
    try:
        pull_text_file(url, file_path)
    except Exception as e:
        with outputlock:
            stderr("[!] doxx: Unable to pull '" + file_path + "' from '" + url + "'. Error: " + str(e), exit=0)
    if file_exists(file_path):
        with outputlock:
            stdout("[+] doxx: '" + file_path + "' ...check!")
    else:
        with outputlock:
            stderr("[!] doxx: There was an error pulling '" + file_path + "'. Error: Unable to locate local file", exit=0)
########################################
#
# [_pull_binaryfile]
# private function
# - execute single process binary
# file pulls
#
########################################
def _pull_binaryfile(file_path, url):
    """executes single process binary file pulls (private function)

    Writes the remote binary at *url* to *file_path*, creating any parent
    directories first.  Exits with status 1 if the local file is missing
    afterwards."""
    # create OS dependent file path (if necessary)
    file_path = _make_os_dependent_path(file_path)
    # make directory structure if necessary for the file path
    # ('!=' instead of 'is not': identity comparison against a str literal is
    # implementation-dependent and a SyntaxWarning on Python 3.8+)
    if os.path.dirname(file_path) != "":
        _create_dirs(file_path)
    # pull the file and write to local filesystem
    try:
        pull_binary_file(url, file_path)
    except Exception as e:
        stderr("[!] doxx: Unable to pull '" + file_path + "' from '" + url + "'. Error: " + str(e), exit=0)
    if file_exists(file_path):
        stdout("[+] doxx: '" + file_path + "' ...check!")
    else:
        stderr("[!] doxx: There was an error pulling '" + file_path + "'. Error: Unable to locate local file.", exit=1)
########################################
#
# [_pull_binaryfile_multiprocess]
# private function
# - execute multiprocess multi-file
# binary file pulls
#
########################################
def _pull_binaryfile_multiprocess(file_path, url, outputlock, iolock):
    """executes multiprocess, multi-file binary file pulls (private function)

    *outputlock* serializes stdout/stderr writes across worker processes;
    *iolock* serializes directory creation.  Failures report with exit=0 so
    sibling pulls can continue."""
    # create OS dependent file path (if necessary)
    file_path = _make_os_dependent_path(file_path)
    # make directory structure if necessary for the file path
    # ('!=' instead of 'is not': identity comparison against a str literal is
    # implementation-dependent and a SyntaxWarning on Python 3.8+)
    if os.path.dirname(file_path) != "":
        # 'with' guarantees the lock is released even if _create_dirs raises
        with iolock:
            _create_dirs(file_path)
    # pull the file and write to local filesystem
    try:
        pull_binary_file(url, file_path)
    except Exception as e:
        with outputlock:
            stderr("[!] doxx: Unable to pull '" + file_path + "' from '" + url + "'. Error: " + str(e), exit=0)
    if file_exists(file_path):
        with outputlock:
            stdout("[+] doxx: '" + file_path + "' ...check!")
    else:
        with outputlock:
            stderr("[!] doxx: There was an error pulling '" + file_path + "'. Error: Unable to locate local file", exit=0)
########################################
#
# [_pull_github_repo]
# private function
# - execute single process Github
# repository archive pulls
#
########################################
def _pull_github_repo(file_path, url):
    """executes single process Github repository archive pulls (private function)

    Downloads the archive at *url* to *file_path*, unpacks it, then removes
    the archive.  Exits with status 1 if the archive never arrived."""
    # create OS dependent file path (if necessary)
    file_path = _make_os_dependent_path(file_path)
    # make directory structure if necessary for the file path
    # ('!=' instead of 'is not': identity comparison against a str literal is
    # implementation-dependent and a SyntaxWarning on Python 3.8+)
    if os.path.dirname(file_path) != "":
        _create_dirs(file_path)
    # pull the file and write to local filesystem
    try:
        pull_binary_file(url, file_path)
    except Exception as e:
        stderr("[!] doxx: Unable to pull the archive file from the URL '" + url + "'. Error: " + str(e), exit=0)
    if file_exists(file_path):
        root_dir = unpack_run(file_path)  # unpack the archive, then discard it
        remove(file_path)
        stdout("[+] doxx: '" + root_dir + "' ...check!")
    else:
        stderr("[!] doxx: There was an error pulling the repository file. Error: Unable to locate local archive file.", exit=1)
########################################
#
# [_pull_github_repo_multiprocess]
# private function
# - execute multiprocess multi-file
# Github repo archive pulls
#
########################################
def _pull_github_repo_multiprocess(file_path, url, outputlock, iolock):
    """executes multiprocess, multi-file Github repository archive pulls (private function)

    Downloads the archive at *url* to *file_path*, unpacks it, then removes
    the archive.  *outputlock* serializes stdout/stderr writes; *iolock*
    serializes directory creation."""
    # create OS dependent file path (if necessary)
    file_path = _make_os_dependent_path(file_path)
    # make directory structure if necessary for the file path
    # ('!=' instead of 'is not': identity comparison against a str literal is
    # implementation-dependent and a SyntaxWarning on Python 3.8+)
    if os.path.dirname(file_path) != "":
        # 'with' guarantees the lock is released even if _create_dirs raises
        with iolock:
            _create_dirs(file_path)
    # pull the file and write to local filesystem
    try:
        pull_binary_file(url, file_path)
    except Exception as e:
        with outputlock:
            stderr("[!] doxx: Unable to pull the archive file from the URL '" + url + "'. Error: " + str(e), exit=0)
    if file_exists(file_path):
        root_dir = unpack_run(file_path)  # unpack the archive, then discard it
        remove(file_path)
        with outputlock:
            stdout("[+] doxx: '" + root_dir + "' ...check!")
    else:
        with outputlock:
            # exit=0 (was exit=1) to match the other multiprocess workers and
            # so the output lock is released for sibling processes
            stderr("[!] doxx: There was an error pulling the repository file. Error: Unable to locate local archive file.", exit=0)
|
server.py | from collections import OrderedDict
from io import BytesIO
import json
import ssl
from threading import Thread
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
# Python 2
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from . import *
from .. import appstore
from ..util import http
from .. import spk
class BufferedWriter(BytesIO):
    """BytesIO that accumulates writes and forwards them to *file* on flush().

    Used to buffer a whole HTTP response so it is sent to the socket in one
    go (see HttpHandler.setup)."""
    def __init__(self, file):
        BytesIO.__init__(self)
        self.wrappedFile = file

    def flush(self):
        """Forward the buffered bytes to the wrapped file and empty the buffer."""
        self.wrappedFile.write(self.getvalue())
        self.wrappedFile.flush()
        # Reset the buffer for the next response.  truncate() alone does NOT
        # move the stream position; without seek(0) the next write would land
        # past EOF and the following flush would emit NUL padding (corrupting
        # the second keep-alive response).
        self.seek(0)
        self.truncate()

    def close(self):
        BytesIO.close(self)
        self.wrappedFile.close()
class HttpHandler(BaseHTTPRequestHandler):
    """Request handler that buffers each response and delegates the actual
    GET/POST logic to the owning server (handleGet / handlePost)."""

    def setup(self):
        BaseHTTPRequestHandler.setup(self)
        self.wfile = BufferedWriter(self.wfile)# Responses have to be buffered and sent in one go

    def log_request(self, code='-', size='-'):
        # silence the default per-request stderr logging
        pass

    def output(self, mimeType, data, filename=None):
        """Send a complete 200 response with *data* as the body.

        If *filename* is given the body is marked as an attachment download.
        Headers must be emitted before end_headers(); the body follows it.
        """
        self.send_response(200)
        self.send_header('Connection', 'Keep-Alive')
        self.send_header('Content-Type', mimeType)
        self.send_header('Content-Length', len(data))
        if filename:
            self.send_header('Content-Disposition', 'attachment;filename="%s"' % filename)
        self.end_headers()
        self.wfile.write(data)

    def do_POST(self):
        # read exactly the declared body length, then hand off to the server
        self.server.handlePost(self, self.rfile.read(int(self.headers['Content-Length'])))

    def do_GET(self):
        self.server.handleGet(self)
class LocalMarketServer(HTTPServer):
    """A local https server to communicate with the camera.

    Serves the spk-wrapped apk to the camera and records the JSON result the
    camera posts back.  *fakeHost* is the hostname the camera believes it is
    talking to; *certFile* must contain both certificate and key."""
    def __init__(self, certFile, fakeHost, host='127.0.0.1', port=4443):
        HTTPServer.__init__(self, (host, port), HttpHandler)
        self.host = host
        self.port = port
        # NOTE(review): both URLs deliberately omit the port — presumably the
        # camera's traffic is redirected here externally; confirm before changing.
        self.url = 'https://' + host + '/'
        self.fakeUrl = 'https://' + fakeHost + '/'
        self.apk = None
        self.result = None
        # ssl.wrap_socket() was deprecated in 3.7 and removed in Python 3.12;
        # build an explicit SSLContext instead.  The original pinned
        # PROTOCOL_TLSv1, so pin both ends of the version range to TLSv1
        # (older cameras; modern OpenSSL may additionally require lowering
        # its security level to accept TLSv1).
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        context.minimum_version = ssl.TLSVersion.TLSv1
        context.maximum_version = ssl.TLSVersion.TLSv1
        context.load_cert_chain(certfile=certFile)
        self.socket = context.wrap_socket(self.socket, server_side=True)

    def startup(self):
        """Start the local server"""
        thread = Thread(target=self.serve_forever)
        thread.daemon = True  # don't keep the interpreter alive on exit
        thread.start()

    def setApk(self, apkData):
        """Set the apk bytes that will be served to the camera."""
        self.apk = apkData

    def getXpd(self):
        """Return the xpd contents"""
        return getXpdResponse('0', self.fakeUrl)

    def getResult(self):
        """Return the result sent from the camera"""
        if not self.result:
            raise Exception('Task was not completed')
        return self.result

    def handlePost(self, handler, body):
        """Handle POST requests to the server"""
        if not self.result and self.apk:
            # Tell the camera to download and install an app
            response = getJsonInstallResponse('app', self.url)
        else:
            response = getJsonResponse()
        self.result = parsePostData(body)# Save the result sent by the camera
        handler.output(constants.jsonMimeType, response)

    def handleGet(self, handler):
        """Handle GET requests to the server"""
        # Send the spk file to the camera
        handler.output(spk.constants.mimeType, spk.dump(self.apk), 'app%s' % spk.constants.extension)
class RemoteAppStore(object):
    """A wrapper for a remote api"""
    def __init__(self, host):
        self.base = 'https://' + host

    def listApps(self):
        """Fetch the remote app list, keyed by package name."""
        # 'info' instead of shadowing the builtin 'dict'
        apps = (appstore.App(None, info) for info in json.loads(http.get(self.base + '/api/apps').data))
        return OrderedDict((app.package, app) for app in apps)

    def sendStats(self, result):
        """Upload usage stats as latin1-encoded JSON."""
        http.post(self.base + '/api/stats', json.dumps(result).encode('latin1'))
class ServerContext(object):
    """Context manager that starts a server on entry and shuts it down on exit.

    Use this in a with statement; the running server is bound to the
    ``as`` target.
    """

    def __init__(self, server):
        self._server = server

    def __enter__(self):
        server = self._server
        server.startup()
        return server

    def __exit__(self, type, value, traceback):
        self._server.shutdown()
|
test_thread.py | """
Copyright (c) 2008-2018, Jesus Cea Avion <jcea@jcea.es>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Jesus Cea Avion nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
"""TestCases for multi-threaded access to a DB.
"""
import os
import sys
import time
import errno
from random import random
# separator used by BaseThreadedTestCase.makeData() when building payloads
DASH = '-'

# WindowsError only exists on Windows; define a stand-in so that code
# referencing it stays valid on other platforms
try:
    WindowsError
except NameError:
    class WindowsError(Exception):
        pass
import unittest
from test_all import db, dbutils, test_support, verbose, have_threads, \
get_new_environment_path, get_new_database_path
if have_threads :
from threading import Thread
if sys.version_info[0] < 3 :
from threading import currentThread
else :
from threading import current_thread as currentThread
#----------------------------------------------------------------------
class BaseThreadedTestCase(unittest.TestCase):
    """Common setup/teardown for the threaded DB tests: a fresh environment
    and database per test, torn down (and the home dir removed) afterwards."""
    dbtype = db.DB_UNKNOWN  # must be set in derived class
    dbopenflags = 0
    dbsetflags = 0
    envflags = 0

    def setUp(self):
        if verbose:
            dbutils._deadlock_VerboseFile = sys.stdout

        self.homeDir = get_new_environment_path()
        self.env = db.DBEnv()
        self.setEnvOpts()  # derived-class hook, must run before env.open()
        self.env.open(self.homeDir, self.envflags | db.DB_CREATE)

        self.filename = self.__class__.__name__ + '.db'
        self.d = db.DB(self.env)
        if self.dbsetflags:
            self.d.set_flags(self.dbsetflags)
        self.d.open(self.filename, self.dbtype, self.dbopenflags|db.DB_CREATE)

    def tearDown(self):
        self.d.close()
        self.env.close()
        test_support.rmtree(self.homeDir)

    def setEnvOpts(self):
        # hook for derived classes to tweak the DBEnv before open()
        pass

    def makeData(self, key):
        # deterministic payload derived from the key; readers verify it
        return DASH.join([key] * 5)
#----------------------------------------------------------------------
class ConcurrentDataStoreBase(BaseThreadedTestCase):
dbopenflags = db.DB_THREAD
envflags = db.DB_THREAD | db.DB_INIT_CDB | db.DB_INIT_MPOOL
readers = 0 # derived class should set
writers = 0
records = 1000
def test01_1WriterMultiReaders(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test01_1WriterMultiReaders..." % \
self.__class__.__name__
keys=range(self.records)
import random
random.shuffle(keys)
records_per_writer=self.records//self.writers
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
self.assertTrue((records_per_writer%readers_per_writer)==0)
readers = []
for x in xrange(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
if sys.version_info[0] < 3 :
rt.setDaemon(True)
else :
rt.daemon = True
readers.append(rt)
writers=[]
for x in xrange(self.writers):
a=keys[records_per_writer*x:records_per_writer*(x+1)]
a.sort() # Generate conflicts
b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
wt = Thread(target = self.writerThread,
args = (self.d, a, b),
name = 'writer %d' % x,
)#verbose = verbose)
writers.append(wt)
for t in writers:
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in writers:
t.join()
for t in readers:
t.join()
def writerThread(self, d, keys, readers):
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
if verbose:
print "%s: creating records %d - %d" % (name, start, stop)
count=len(keys)//len(readers)
count2=count
for x in keys :
key = '%04d' % x
dbutils.DeadlockWrap(d.put, key, self.makeData(key),
max_retries=12)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
count2-=1
if not count2 :
readers.pop().start()
count2=count
if verbose:
print "%s: finished creating records" % name
if verbose:
print "%s: thread finished" % name
def readerThread(self, d, readerNum):
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
for i in xrange(5) :
c = d.cursor()
count = 0
rec = c.first()
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = c.next()
if verbose:
print "%s: found %d records" % (name, count)
c.close()
if verbose:
print "%s: thread finished" % name
class BTreeConcurrentDataStore(ConcurrentDataStoreBase):
    # CDB test against a BTree database: 2 writers, 10 readers, 1000 records
    dbtype = db.DB_BTREE
    writers = 2
    readers = 10
    records = 1000
class HashConcurrentDataStore(ConcurrentDataStoreBase):
    # CDB test against a Hash database: 2 writers, 10 readers, 1000 records
    dbtype = db.DB_HASH
    writers = 2
    readers = 10
    records = 1000
#----------------------------------------------------------------------
class SimpleThreadedBase(BaseThreadedTestCase):
dbopenflags = db.DB_THREAD
envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
readers = 10
writers = 2
records = 1000
def setEnvOpts(self):
self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
def test02_SimpleLocks(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test02_SimpleLocks..." % self.__class__.__name__
keys=range(self.records)
import random
random.shuffle(keys)
records_per_writer=self.records//self.writers
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
self.assertTrue((records_per_writer%readers_per_writer)==0)
readers = []
for x in xrange(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
if sys.version_info[0] < 3 :
rt.setDaemon(True)
else :
rt.daemon = True
readers.append(rt)
writers = []
for x in xrange(self.writers):
a=keys[records_per_writer*x:records_per_writer*(x+1)]
a.sort() # Generate conflicts
b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
wt = Thread(target = self.writerThread,
args = (self.d, a, b),
name = 'writer %d' % x,
)#verbose = verbose)
writers.append(wt)
for t in writers:
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in writers:
t.join()
for t in readers:
t.join()
def writerThread(self, d, keys, readers):
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
if verbose:
print "%s: creating records %d - %d" % (name, start, stop)
count=len(keys)//len(readers)
count2=count
for x in keys :
key = '%04d' % x
dbutils.DeadlockWrap(d.put, key, self.makeData(key),
max_retries=12)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
count2-=1
if not count2 :
readers.pop().start()
count2=count
if verbose:
print "%s: thread finished" % name
def readerThread(self, d, readerNum):
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
c = d.cursor()
count = 0
rec = dbutils.DeadlockWrap(c.first, max_retries=10)
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = dbutils.DeadlockWrap(c.next, max_retries=10)
if verbose:
print "%s: found %d records" % (name, count)
c.close()
if verbose:
print "%s: thread finished" % name
class BTreeSimpleThreaded(SimpleThreadedBase):
    # lock-mode test against a BTree database
    dbtype = db.DB_BTREE
class HashSimpleThreaded(SimpleThreadedBase):
    # lock-mode test against a Hash database
    dbtype = db.DB_HASH
#----------------------------------------------------------------------
class ThreadedTransactionsBase(BaseThreadedTestCase):
dbopenflags = db.DB_THREAD | db.DB_AUTO_COMMIT
envflags = (db.DB_THREAD |
db.DB_INIT_MPOOL |
db.DB_INIT_LOCK |
db.DB_INIT_LOG |
db.DB_INIT_TXN
)
readers = 0
writers = 0
records = 2000
txnFlag = 0
def setEnvOpts(self):
#self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
pass
def test03_ThreadedTransactions(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test03_ThreadedTransactions..." % \
self.__class__.__name__
keys=range(self.records)
import random
random.shuffle(keys)
records_per_writer=self.records//self.writers
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
self.assertTrue((records_per_writer%readers_per_writer)==0)
readers=[]
for x in xrange(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
if sys.version_info[0] < 3 :
rt.setDaemon(True)
else :
rt.daemon = True
readers.append(rt)
writers = []
for x in xrange(self.writers):
a=keys[records_per_writer*x:records_per_writer*(x+1)]
b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
wt = Thread(target = self.writerThread,
args = (self.d, a, b),
name = 'writer %d' % x,
)#verbose = verbose)
writers.append(wt)
dt = Thread(target = self.deadlockThread)
if sys.version_info[0] < 3 :
dt.setDaemon(True)
else :
dt.daemon = True
dt.start()
for t in writers:
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in writers:
t.join()
for t in readers:
t.join()
self.doLockDetect = False
dt.join()
def writerThread(self, d, keys, readers):
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
count=len(keys)//len(readers)
while len(keys):
try:
txn = self.env.txn_begin(None, self.txnFlag)
keys2=keys[:count]
for x in keys2 :
key = '%04d' % x
d.put(key, self.makeData(key), txn)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
txn.commit()
keys=keys[count:]
readers.pop().start()
except (db.DBLockDeadlockError, db.DBLockNotGrantedError), val:
if verbose:
print "%s: Aborting transaction (%s)" % (name,
val.args[1])
txn.abort()
if verbose:
print "%s: thread finished" % name
def readerThread(self, d, readerNum):
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
finished = False
while not finished:
try:
txn = self.env.txn_begin(None, self.txnFlag)
c = d.cursor(txn)
count = 0
rec = c.first()
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = c.next()
if verbose: print "%s: found %d records" % (name, count)
c.close()
txn.commit()
finished = True
except (db.DBLockDeadlockError, db.DBLockNotGrantedError), val:
if verbose:
print "%s: Aborting transaction (%s)" % (name,
val.args[1])
c.close()
txn.abort()
if verbose:
print "%s: thread finished" % name
def deadlockThread(self):
self.doLockDetect = True
while self.doLockDetect:
time.sleep(0.05)
try:
aborted = self.env.lock_detect(
db.DB_LOCK_RANDOM, db.DB_LOCK_CONFLICT)
if verbose and aborted:
print "deadlock: Aborted %d deadlocked transaction(s)" \
% aborted
except db.DBError:
pass
class BTreeThreadedTransactions(ThreadedTransactionsBase):
    # transactional test against a BTree database
    dbtype = db.DB_BTREE
    writers = 2
    readers = 10
    records = 1000
class HashThreadedTransactions(ThreadedTransactionsBase):
    # transactional test against a Hash database
    dbtype = db.DB_HASH
    writers = 2
    readers = 10
    records = 1000
class BTreeThreadedNoWaitTransactions(ThreadedTransactionsBase):
    # transactional BTree test with DB_TXN_NOWAIT (fail fast on lock waits)
    dbtype = db.DB_BTREE
    writers = 2
    readers = 10
    records = 1000
    txnFlag = db.DB_TXN_NOWAIT
class HashThreadedNoWaitTransactions(ThreadedTransactionsBase):
    # transactional Hash test with DB_TXN_NOWAIT (fail fast on lock waits)
    dbtype = db.DB_HASH
    writers = 2
    readers = 10
    records = 1000
    txnFlag = db.DB_TXN_NOWAIT
#----------------------------------------------------------------------
def test_suite():
    """Build the suite; the thread tests are skipped entirely when the
    interpreter was built without thread support."""
    suite = unittest.TestSuite()
    if have_threads:
        suite.addTest(unittest.makeSuite(BTreeConcurrentDataStore))
        suite.addTest(unittest.makeSuite(HashConcurrentDataStore))
        suite.addTest(unittest.makeSuite(BTreeSimpleThreaded))
        suite.addTest(unittest.makeSuite(HashSimpleThreaded))
        suite.addTest(unittest.makeSuite(BTreeThreadedTransactions))
        suite.addTest(unittest.makeSuite(HashThreadedTransactions))
        suite.addTest(unittest.makeSuite(BTreeThreadedNoWaitTransactions))
        suite.addTest(unittest.makeSuite(HashThreadedNoWaitTransactions))
    else:
        print "Threads not available, skipping thread tests."

    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
|
socket.py | # Author: Max Greenwald, Pulkit Jain
# 11/30/2018
#
# Module to create a simple interface for persistent socket connections
# package imports
import asyncio
import websockets
import threading
from .extra import Queue
async def start(skt):
    """Module-level entry point: drive *skt* until its run loop completes.

    Intended usage: ``asyncio.run(start(socket))``.
    """
    await skt.run()
class Socket(object):
    """Our interface to manage a persistent socket connection
    It is defined by the following attributes:
    - host: string representing the host
    - port: integer representing what port number to connect to
    - connection: the actual websocket
    - swap_time: time to swap execution between receiver and sender
    - inbox: Incoming messages Queue
    - outbox: Outgoing messages Queue
    - running: Boolean representing whether Socket is running or not
    It is defined by the following behaviors:
    - raise_error_uninit(): Raises error if the socket is not initialized
    - run(): Handles the message consumption and production
    - _receive_handler(): handles message consumption
    - _outgoing_message(): handle message production
    """

    def __init__(self, host='localhost', port=8765):
        self.host = host
        self.port = port
        self.connection = None  # websocket; set once run() connects
        self.swap_time = 0.01   # cooperative yield between received messages
        self.inbox = Queue()    # messages received from the server
        self.outbox = Queue()   # messages queued for sending
        self.running = True     # send loop continues while True

    def raise_error_uninit(self):
        # guard: the handlers must not run before run() has connected
        if not self.connection:
            raise Exception("Socket not started. \n"
                            "USAGE: asyncio.run(socket.start(...) to use this.")

    async def run(self):
        self.connection = await websockets.connect(f'ws://{self.host}:'
                                                   f'{self.port}')
        # NOTE(review): the sender runs in a plain thread while the receiver
        # stays on this event loop — see _send_handler below for a likely
        # problem with that split.
        outbox_thread = threading.Thread(target=self._send_handler)
        outbox_thread.start()
        consumer_task = asyncio.ensure_future(self._receive_handler())
        done, pending = await asyncio.wait(
            [consumer_task],
            return_when=asyncio.FIRST_COMPLETED
        )
        for task in pending:
            task.cancel()
        # NOTE(review): self.running is never set to False and outbox.get()
        # blocks, so this join may never return — confirm the intended
        # shutdown path.
        outbox_thread.join()

    async def _receive_handler(self):
        # consume messages from the websocket into the inbox queue
        self.raise_error_uninit()
        async for message in self.connection:
            self.inbox.put(message)
            await asyncio.sleep(self.swap_time)

    def _send_handler(self):
        # runs in a separate thread: drain the outbox onto the websocket
        self.raise_error_uninit()
        while self.running:
            message = self.outbox.get()
            # NOTE(review): asyncio.run() creates a brand-new event loop per
            # message, but self.connection is bound to run()'s loop —
            # presumably this should use run_coroutine_threadsafe() against
            # that loop instead; confirm before relying on sends.
            asyncio.run(self.connection.send(message))
|
parameter_server.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import torch.multiprocessing as _mp
mp = _mp.get_context('spawn')
# XXX hack fix path
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'mini-rts', 'elf'))
import utils_elf
import random
from datetime import datetime
'''
Usage:
In process main function, run the following and then you get a shared model.
if rank == 0:
model = build_model(with_cuda)
else:
model = None
model = param_server.sync_model(rank, model)
'''
class Cond:
    """Thin wrapper around a multiprocessing Condition.

    NOTE(review): the condition carries no state/predicate, so a notify()
    issued before the matching wait() is lost; the surrounding protocol
    appears to rely on strict alternation — confirm.
    """
    def __init__(self):
        self.cond = mp.Condition()

    def wait(self):
        # block until the next notify()
        self.cond.acquire()
        self.cond.wait()
        self.cond.release()

    def wait_noblock(self):
        # NOTE(review): Condition.wait(0) returns False on timeout rather
        # than raising, so the try/except blocks around callers of this
        # method never observe a failure — the "not ready" paths look
        # unreachable.  Presumably a stateful Event/Semaphore was intended;
        # confirm before relying on the noblock behavior.
        self.cond.acquire()
        self.cond.wait(0)
        self.cond.release()

    def notify(self):
        # wake one current waiter (no effect if nobody is waiting)
        self.cond.acquire()
        self.cond.notify()
        self.cond.release()
class ParameterServer(object):
    """Distributes a model from the rank-0 ("server") process to the other
    ("client") processes and coordinates later parameter refreshes through
    a pair of condition variables."""
    def __init__(self, n_processes):
        self.queue = mp.Queue()                 # carries the shared model to clients
        self.n_processes = n_processes
        self.barrier = mp.Barrier(n_processes)  # syncs initial model handoff
        # For update signal.
        self.send_done = Cond()  # server finished writing an update
        self.recv_done = Cond()  # clients finished reading the update

    def __getstate__(self):
        # only the shared IPC primitives (and the count) cross process
        # boundaries; the cached *_shared_mi attributes stay process-local
        return (self.queue, self.barrier, self.n_processes, self.send_done, self.recv_done)

    def __setstate__(self, state):
        self.queue, self.barrier, self.n_processes, self.send_done, self.recv_done = state

    def server_send_model(self, mi):
        """Publish model-interface *mi* to every client process (server side)."""
        assert mi is not None
        for i in range(self.n_processes-1):
            self.queue.put(mi)
        self._server_shared_mi = mi
        self.barrier.wait()

    def client_receive_model(self):
        """Receive and cache the shared model in a client process."""
        mi = self.queue.get()
        # clone the gradients to break the sharing
        for _, model in mi.models.items():
            for param in model.parameters():
                if param.grad is not None:
                    param._grad = param.grad.clone()
        self.barrier.wait()
        self._client_shared_mi = mi
        return self._client_shared_mi

    def server_update_model(self, key, new_mi, noblock=False):
        """Write *new_mi* into the shared model, then signal clients.

        Returns False when noblock=True and the previous update has not yet
        been consumed.  NOTE(review): that path depends on wait_noblock()
        raising, but Condition.wait(0) returns False instead of raising —
        see Cond.wait_noblock; confirm the intended semantics.
        """
        # if recv is not done, skip it.
        if noblock:
            try:
                self.recv_done.wait_noblock()
            except:
                # The recv is not done yet. Cannot send.
                return False
        else:
            self.recv_done.wait()
        self._server_shared_mi.update_model(key, new_mi)
        # Then wait until other people have received.
        self.send_done.notify()
        return True

    def client_refresh_model(self, gpu=None, skip=False):
        """Wait for a server update, then clone the shared model (client side).

        With skip=True the update is acknowledged but no clone is made.
        """
        # First wait until we are synced up.
        self.send_done.wait()
        if not skip:
            mi = self._client_shared_mi.clone(gpu=gpu)
        else:
            mi = None
        self.recv_done.notify()
        return mi
class SharedData:
    """Spawns optimizer worker processes that exchange pinned batches with
    the main process.

    Each of the ``total_process - 1`` workers receives one pinned clone of
    ``batch_template`` through a one-slot queue, plus a send/recv condition
    pair used to alternate batch hand-off and batch processing.
    """
    def __init__(self, total_process, mi, batch_template,
                 cb_remote_initialize=None,
                 cb_remote_batch_process=None,
                 args=None):
        self.server = ParameterServer(total_process)
        self.cb_remote_initialize = cb_remote_initialize
        self.cb_remote_batch_process = cb_remote_batch_process
        self.args = args

        #def get_gpu_id(i): return i + 1
        def get_gpu_id(i): return 0  # all workers currently share GPU 0

        # Share only training batches.
        shared_batches = []
        cvs_send = []
        cvs_recv = []
        qs = []
        for i in range(total_process - 1):
            # gpu_id = get_gpu_id(i)
            # shared_batches.append(cpu2gpu(all_batches[train_idx][0], gpu=gpu_id))
            shared_batches.append(utils_elf.pin_clone(batch_template))
            qs.append(mp.Queue(1))
            qs[-1].put(shared_batches[i])
            cvs_send.append(Cond())
            cvs_recv.append(Cond())

        self.cvs_send = cvs_send
        self.cvs_recv = cvs_recv
        self.shared_batches = shared_batches
        self.qs = qs
        self.b = mp.Barrier(total_process)

        self.optimizers = [mp.Process(target=self.process_main, args=(i, get_gpu_id(i))) for i in range(total_process - 1)]
        for optimizer in self.optimizers: optimizer.start()

        # Wait until all models have received the shared memory.
        self.b.wait()
        self.server.server_send_model(mi)

    def process_main(self, i, gpu_id):
        """Worker-process loop: receive the model, then process batches forever."""
        batch = self.qs[i].get()
        self.b.wait()

        batch_gpu = utils_elf.cpu2gpu(batch, gpu=gpu_id)
        mi = self.server.client_receive_model()
        context = self.cb_remote_initialize(mi, gpu_id, self.args)
        print("[%d] Context initialization completed, gpu_id = %d.. " % (i, gpu_id))

        # Ready.
        self.cvs_send[i].notify()
        while True:
            self.cvs_recv[i].wait()
            # 'async' became a reserved word in Python 3.7, so the original
            # `async=True` keyword is a SyntaxError there; pass the same
            # keyword through ** expansion to stay parseable.
            utils_elf.transfer_cpu2gpu(batch, batch_gpu, **{'async': True})
            self.cvs_send[i].notify()
            self.cb_remote_batch_process(context, batch_gpu)

    def send_batch(self, batch):
        """Hand *batch* to a randomly chosen worker; best effort.

        Returns False if the transfer could not be completed."""
        process_idx = random.randint(0, len(self.shared_batches) - 1)
        try:
            self.cvs_send[process_idx].wait_noblock()
            utils_elf.transfer_cpu2cpu(batch, self.shared_batches[process_idx])
            self.cvs_recv[process_idx].notify()
            return True
        except Exception as e:
            #print("Failed to send batch to %d" % process_idx)
            #print(type(e))
            #print(e.args)
            #print(e)
            return False
|
launcher.py |
import os
import sys
import shlex
import subprocess
import threading
import time
import json
import signal
import atexit
import logging
from runtimemngr.settings import Settings
class FileType():
    # constants identifying the kind of module file to launch (see
    # ModuleLaucher.run); WASM is declared but its launcher is disabled
    WA = 'WASM'
    PY = 'PY'
    EXE = 'EXE'
class ModuleLaucher():
    """Launches runtime modules as subprocesses and tracks them by uuid.

    A background thread per module polls the process until it exits and then
    removes it from ``proclist``.  All access to ``proclist``/``allDone`` is
    guarded by ``self.lock``.  (NOTE(review): the class name keeps the
    original "Laucher" spelling because external callers reference it.)
    """

    def __init__(self):
        self.lock = threading.Lock()
        self.proclist = {}  # module uuid -> subprocess.Popen
        self.allDone = False
        # make sure no launched module outlives this process
        atexit.register(self.killAll)

    def _run_thread(self, cmd, env, muuid):
        """Start the module process and poll until it terminates."""
        # os.setsid puts the child in its own process group so kill()/killAll()
        # can signal the whole group, including grandchildren
        proc = subprocess.Popen(cmd, env=env, shell=False, preexec_fn=os.setsid)
        with self.lock:
            self.proclist[muuid] = proc
        while True:
            ret = proc.poll()
            time.sleep(5)  # TODO: asyncio module for an asynchronous wait
            # poll() returns None while the process is running; any other
            # value (including 0, a clean exit) means it terminated.  The
            # original `if (ret):` missed exit code 0 and looped forever.
            if ret is not None:
                # process terminated
                with self.lock:
                    try:
                        self.proclist.pop(muuid)
                    except KeyError:
                        logging.error("Could not remove process %s from process list.", muuid)
                return

    def kill(self, muuid):
        """Terminate the process group of module *muuid* (best effort)."""
        try:
            # 'with' guarantees the lock is released even when the lookup or
            # killpg raises (the original left the lock held in that case)
            with self.lock:
                proc = self.proclist[muuid]
                os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
        except Exception:
            logging.error("Could not find module %s", muuid)

    def killAll(self):
        """Terminate every tracked module and mark the launcher as done."""
        with self.lock:
            for muuid, proc in self.proclist.items():
                logging.debug("Killing %s", muuid)
                try:
                    os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
                except Exception:
                    logging.error("Could not kill module %s", muuid)
            self.allDone = True

    def isAllDone(self):
        with self.lock:
            return self.allDone

    def run(self, module, rt_dbg_topic, rt_ctl_topic, done_msg):
        """Build the launch command and environment for *module* and start it
        in a background thread.

        Raises if the module uuid is already running or its filetype is
        unsupported."""
        cmd = []
        if module.uuid in self.proclist.keys():
            raise Exception("Module already running (duplicate uuid)")
        if module.filetype == FileType.PY:
            cmd.append(Settings.s_dict['runtime']['py_launcher_path'])
        elif module.filetype == FileType.EXE:
            cmd.append(Settings.s_dict['runtime']['exe_launcher_path'])
        else:
            raise Exception("We only support Python and executable files")
        #elif (module.filetype == FileType.WA):
        #    cmd.append(settings.s_dict['runtime']['wasm_launcher_path'])

        stdin_topic = rt_dbg_topic+'/stdin/'+module.uuid
        stdout_topic = rt_dbg_topic+'/stdout/'+module.uuid

        # convert args into a string (%-style lazy args: the original passed
        # extra positional arguments to logging, which breaks formatting)
        logging.debug("%s %s", module.args, type(module.args))
        if type(module.args) is list:
            str_args = ' '.join(module.args)
        else:
            str_args = str(module.args)
        logging.debug(str_args)

        # start our variables with __ so they do not collide with module's variables
        env = {
            '__mqtt_srv' : shlex.quote(Settings.s_dict['mqtt_server']['host']),
            '__mqtt_prt' : shlex.quote(str(Settings.s_dict['mqtt_server']['port'])),
            '__mqtt_un' : shlex.quote(Settings.s_dict['mqtt_server']['username']),
            '__mqtt_pw' : shlex.quote(Settings.s_dict['mqtt_server']['password']),
            '__name': shlex.quote(module.name),
            '__store_url': shlex.quote(Settings.s_dict['store_url']),
            '__filename': shlex.quote(module.filename),
            '__fid': shlex.quote(module.fileid),
            '__pipe_stdin_stdout': shlex.quote(str(Settings.s_dict['runtime']['pipe_stdin_stdout'])),
            '__sub_topic': shlex.quote(stdin_topic),
            '__pub_topic': shlex.quote(stdout_topic),
            '__args': shlex.quote(str_args),
            '__done_topic': shlex.quote(rt_ctl_topic),
            '__done_msg': done_msg}

        if type(module.env) is list:
            for env_str in module.env:
                v = env_str.split("=")
                env[v[0]] = v[1]
        elif type(module.env) is str:
            # still handle env as strings (though arts should not be sending strings anymore)
            logging.warning('WARNING: Received env as string.')
            if module.env.find(' ') != -1:
                for vstr in module.env.split(" "):
                    v = vstr.split("=")
                    env[v[0]] = v[1]
            elif module.env.find('=') != -1:
                # single KEY=VALUE pair: parse module.env itself (the original
                # referenced the undefined names 'vstr'/'v' here — NameError)
                v = module.env.split("=")
                env[v[0]] = v[1]

        logging.info('env=' + str(env))
        logging.info('cmd=' + str(cmd))
        t = threading.Thread(target=self._run_thread, args=(cmd, env, module.uuid))
        t.start()
|
rotation.py | # -*- coding: utf-8 -*-
import threading
import traceback
from deval.device.std.error import DevalError
from deval.utils.snippet import reg_cleanup, is_exiting, on_method_ready
from deval.utils.logger import get_logger
from deval.component.android.utils.constant import ROTATIONWATCHER_APK, ROTATIONWATCHER_PACKAGE
LOGGING = get_logger(__name__)
class RotationWatcher(object):
    """
    RotationWatcher class

    Runs the RotationWatcher helper app on the device via adb and invokes
    registered callbacks every time the screen orientation changes.
    """
    def __init__(self, adb):
        # adb: adb wrapper used to install and launch the helper package.
        self.adb = adb
        # Subprocess running the helper; set by _install_and_setup().
        self.ow_proc = None
        # Callbacks invoked with the new orientation on every change.
        self.ow_callback = []
        # Background reader thread created by start().
        self._t = None
        # Last orientation seen (int, degrees / 90), or None before start().
        self.current_orientation = None
        # Make sure the helper process is killed at interpreter exit.
        reg_cleanup(self.teardown)

    @on_method_ready('start')
    def get_ready(self):
        # No-op: the decorator guarantees start() has run before first use.
        pass

    def _install_and_setup(self):
        """
        Install and setup the RotationWatcher package

        Raises:
            RuntimeError: if any error occurs while installing the package

        Returns:
            None
        """
        try:
            apk_path = self.adb.path_app(ROTATIONWATCHER_PACKAGE)
        except DevalError:
            # Package not installed yet: install it, then resolve its path.
            self.adb.install_app(ROTATIONWATCHER_APK, ROTATIONWATCHER_PACKAGE)
            apk_path = self.adb.path_app(ROTATIONWATCHER_PACKAGE)
        p = self.adb.start_shell('export CLASSPATH=%s;exec app_process /system/bin jp.co.cyberagent.stf.rotationwatcher.RotationWatcher' % apk_path)
        if p.poll() is not None:
            # The process already exited, so the watcher could not start.
            raise RuntimeError("orientationWatcher setup error")
        self.ow_proc = p

    def teardown(self):
        # Kill the helper subprocess if it was started.
        if self.ow_proc:
            self.ow_proc.kill()

    def start(self):
        """
        Start the RotationWatcher daemon thread

        Returns:
            initial orientation
        """
        self._install_and_setup()

        def _refresh_by_ow():
            # Blocking read of one line (rotation in degrees) from the helper.
            line = self.ow_proc.stdout.readline()
            if line == b"":
                if LOGGING is not None:  # may be None atexit
                    LOGGING.debug("orientationWatcher has ended")
                else:
                    print("orientationWatcher has ended")
                return None
            ori = int(int(line) / 90)
            return ori

        def _run():
            # Reader loop: runs until the helper ends or the process exits.
            while True:
                ori = _refresh_by_ow()
                if ori is None:
                    break
                LOGGING.info('update orientation %s->%s' % (self.current_orientation, ori))
                self.current_orientation = ori
                if is_exiting():
                    break
                for cb in self.ow_callback:
                    try:
                        cb(ori)
                    except:
                        LOGGING.error("cb: %s error" % cb)
                        traceback.print_exc()

        # Read the initial orientation synchronously before the thread starts.
        self.current_orientation = _refresh_by_ow()
        self._t = threading.Thread(target=_run, name="rotationwatcher")
        # self._t.daemon = True
        self._t.start()
        return self.current_orientation

    def reg_callback(self, ow_callback):
        """
        Register a callback fired on each orientation change.

        Args:
            ow_callback: callable receiving the new orientation (ori);
                receives None if the watcher process disconnects.

        Returns:
            None
        """
        """方向变化的时候的回调函数,参数一定是ori,如果断掉了,ori传None"""
        self.ow_callback.append(ow_callback)
class XYTransformer(object):
    """
    Converts touch coordinates between the upright frame and the device's
    native (original) frame. Orientation is 0-3 quarter turns.
    """

    @staticmethod
    def up_2_ori(tuple_xy, tuple_wh, orientation):
        """
        Transform the coordinates upright --> original

        Args:
            tuple_xy: coordinates (x, y)
            tuple_wh: screen width and height
            orientation: orientation (0-3)

        Returns:
            transformed coordinates (x, y)
        """
        x, y = tuple_xy
        w, h = tuple_wh
        if orientation == 1:
            return w - y, x
        if orientation == 2:
            return w - x, h - y
        if orientation == 3:
            return y, h - x
        # orientation 0 (or anything else): identity
        return x, y

    @staticmethod
    def ori_2_up(tuple_xy, tuple_wh, orientation):
        """
        Transform the coordinates original --> upright

        Args:
            tuple_xy: coordinates (x, y)
            tuple_wh: screen width and height
            orientation: orientation (0-3)

        Returns:
            transformed coordinates (x, y)
        """
        x, y = tuple_xy
        w, h = tuple_wh
        if orientation == 1:
            return y, w - x
        if orientation == 2:
            return w - x, h - y
        if orientation == 3:
            return h - y, x
        # orientation 0 (or anything else): identity
        return x, y
|
Custom_Button.py | ## ¬! v 1.00 SyberProjects
## ¬! 26/09/2019 @ 20:08 GMT
import tkinter as tk
from tkinter import ttk
from PIL import Image, ImageDraw, ImageFont, ImageTk, features
import textwrap
from time import sleep
from queue import Queue
from threading import Thread
class Round_Button(tk.Label):
    """
    A rounded, animated button rendered with PIL images onto a tk.Label.
    Hovering fades between the static and 'transformed' colour sets on a
    worker thread; pressing shows a slightly smaller image and invokes the
    bound callback.
    """
    def __init__(self, top, text, size, static_colour, static_t_colour, transformation_colour, transformation_t_colour, background:str='#FFFFFF', static_outline=None, trans_outline=None):
        '''
        :param top: Top level / root. The window in which the button is going to be placed. [Tkinter Object]
        :param text: Text that is placed on the button. [String]
        :param size: Multiplier for the size. [Integer]
        :param static_colour: Colour for the button when static. [Tuple,(R,G,B)]
        :param static_t_colour: Colour for the text when the button is static. [Tuple,(R,G,B)]
        :param transformation_colour: Colour for the button when cursor is over it. [Tuple,(R,G,B)]
        :param transformation_t_colour: Colour for the text when the cursor is over the button. [Tuple,(R,G,B)]
        :param background: Sets the background colour of the Button so it can blend with the window's background [Tuple, (RGB)] Defaults to WHITE (#FFFFFF)
        :param static_outline: outline colour of static image. [Tuple, (RGB)] Defaults to static_colour value.
        :param trans_outline: outline colour of transformed image. [Tuple, (RGB)] Defaults to transformation_colour value.
        '''
        ## Initialisation
        ## ==============
        tk.Label.__init__(self, top) # Inherits the features of a label
        self.sc = static_colour
        self.tc = transformation_colour
        self.tsc = static_t_colour
        self.ttc = transformation_t_colour
        self.multi = size
        # NOTE: attribute name keeps the original 'resoltuion' spelling.
        self.resoltuion = (int(35*size), int(10*size)) # 3.5 : 1 (W : H)
        self.text = text
        # Flags used by the fade animation to cancel/reverse mid-fade.
        self.change_to_trans = False
        self.change_to_static = False
        self.static_outline = static_outline
        self.trans_outline = trans_outline
        if static_outline == None:
            self.static_outline = static_colour
        if trans_outline == None:
            self.trans_outline = transformation_colour
        self.create_custom_image() #Create static and transformed buttons
        self.create_lower_button() #Creates Lower Button
        self.connect_function()
        self.configure(image=self.Images[9]) #Inserts static button images
        self.configure(background=background)
        self.bind("<Enter>", self.on_enter) #Hover on capabilities
        self.bind("<Leave>", self.on_leave) #Hover off capabilities
        # A worker thread consumes hover events and performs the fades.
        self.queue = Queue()
        self.Animator = Thread(target=self.Manage_Animation)
        self.Animator.start()

    def create_custom_image(self):
        """Render the 10 fade frames between transformed and static looks."""
        decrement = -1
        while True:
            # < decrement > : Used for lowering the font size so that the text doesn't go off the screen.
            decrement += 1
            font = ImageFont.truetype("Assets/GentiumBasic-Bold.ttf", int(5.5 * self.multi) - decrement, encoding="unic")
            coords, Lines, line_height = self.draw_multiple_line_text(self.text, font, int(36 * self.multi), int(2 * self.multi), 12)
            if coords[-1][1] + line_height + 5 > self.resoltuion[1]:
                continue
            break
        self.images = [Image.new('RGBA', (self.resoltuion)) for i in range (10)]
        # Initialising the draw the ImageDraw.Draw object
        self.image_drawer = [ImageDraw.Draw(self.images[i]) for i in range (10)]
        # Linear interpolation between the two colour sets over 10 steps;
        # index 0 is fully 'transformed', index 9 fully static.
        self.image_colours = [[self.tc[i] + ((self.sc[i]-self.tc[i])//10)*x for i in range (3)] for x in range (10)]
        self.text_colours = [[self.ttc[i] + ((self.tsc[i] - self.ttc[i]) // 10) * x for i in range(3)] for x in range(10)]
        self.outline_colours = [[self.trans_outline[i] + ((self.static_outline[i] - self.trans_outline[i]) // 10) * x for i in range(3)] for x in range(10)]
        for i in range(10):
            # Puts the colours in a tuple for use.
            colour = (self.image_colours[i][0],self.image_colours[i][1],self.image_colours[i][2])
            textcolour = (self.text_colours[i][0], self.text_colours[i][1], self.text_colours[i][2])
            outline = (self.outline_colours[i][0], self.outline_colours[i][1], self.outline_colours[i][2])
            # Creates the base for both images (Rectangles)
            self.image_drawer[i].rectangle((int(5.5 * self.multi),0, self.resoltuion[0] - int(5.5 * self.multi), self.resoltuion[1]-1), outline=outline, width =2, fill=colour)
            # Create a rectangle to remove the unwanted areas of colour, and adds an elipses to give a round effect.
            # 2 on both sides for 2 images.
            self.image_drawer[i].rectangle((self.resoltuion[0] - int(5.5 * self.multi), 0, self.resoltuion[0], self.resoltuion[1]-2),fill=(0, 0, 0, 0))
            self.image_drawer[i].ellipse((self.resoltuion[0] - int(10 * self.multi), 0, self.resoltuion[0]-1, self.resoltuion[1]-2),outline=outline, width=2, fill=colour)
            self.image_drawer[i].rectangle((0, 0, int(5.5 * self.multi), int(10 * self.multi)-2), fill=(0, 0, 0, 0))
            self.image_drawer[i].ellipse((0, 0, int(10 * self.multi), int(10 * self.multi)-2), outline=outline, width=2 ,fill=(colour))
            self.image_drawer[i].rectangle((int(5.5 * self.multi), 2, self.resoltuion[0] - int(5.5 * self.multi), self.resoltuion[1]-3), fill=colour)
            for x in range (len(coords)):
                self.image_drawer[i].text(coords[x], Lines[x], fill=textcolour, font=font, align='center')
        self.Images = [ImageTk.PhotoImage(self.images[i]) for i in range (10)]

    def create_lower_button(self):
        """Render the smaller 'pressed' image shown while the button is held."""
        multi_d = 0.25
        multi = self.multi - multi_d
        resoltuion = (int(35 * multi), int(10*multi))
        decrement = -1
        while True:
            # < decrement > : Used for lowering the font size so that the text doesn't go off the screen.
            decrement += 1
            font = ImageFont.truetype("Assets/GentiumBasic-Bold.ttf", int(5.5 * multi) - decrement,encoding="unic")
            coords, Lines, line_height = self.draw_multiple_line_text(self.text, font, int(36 * multi),int(2 * multi), 12)
            if coords[-1][1] + line_height + 5 > self.resoltuion[1]-(10*multi_d):
                continue
            break
        self.lower_button = Image.new('RGBA', (resoltuion))
        # Initialising the draw the ImageDraw.Draw object
        self.lower_drawer = ImageDraw.Draw(self.lower_button)
        colour = (self.image_colours[0][0], self.image_colours[0][1], self.image_colours[0][2])
        textcolour = (self.text_colours[0][0], self.text_colours[0][1], self.text_colours[0][2])
        outline = (self.outline_colours[0][0], self.outline_colours[0][1], self.outline_colours[0][2])
        # Creates the base for both images (Rectangles)
        # Create a rectangle to remove the unwanted areas of colour, and adds an elipses to give a round effect.
        # 2 on both sides for 2 images.
        self.lower_drawer.rectangle((0, 0, resoltuion[0], resoltuion[1]-1), outline=outline, width=2, fill=colour)
        # Create a rectangle to remove the unwanted areas of colour, and adds an elipses to give a round effect.
        # 2 on both sides for 2 images.
        # Right side
        self.lower_drawer.rectangle((resoltuion[0] - int(5.5*multi), 0, resoltuion[0], resoltuion[1]),fill=(0, 0, 0, 0))
        self.lower_drawer.ellipse((resoltuion[0] - int(10*multi), 0, resoltuion[0], resoltuion[1]), outline=outline, width=2, fill=colour)
        # Left side
        self.lower_drawer.rectangle((0, 0, int(5.5 * multi), int(10 * multi)), fill=(0, 0, 0, 0))
        self.lower_drawer.ellipse((0, 0, int(10 * multi), int(10 * multi)), outline=outline, width=2, fill=(colour))
        self.lower_drawer.rectangle((int(5.5 * multi), 2, resoltuion[0] - int(5.5*multi), resoltuion[1]-3), fill=colour)
        for x in range(len(coords)):
            self.lower_drawer.text(coords[x], Lines[x], fill=textcolour, font=font, align='center')
        delta_x = (self.resoltuion[0] - resoltuion[0])//2
        delta_y = (self.resoltuion[1] - resoltuion[1])//2
        #Perfects the size for pasting.
        self.lower_button = self.lower_button.resize(size=(self.resoltuion[0] - delta_x*2, self.resoltuion[1] - delta_y*2))
        #Pasting Image ontop of transparent image with original resolution.
        self.Button = Image.new('RGBA', (self.resoltuion))
        self.Button.paste(self.lower_button, (delta_x, delta_y, self.resoltuion[0] - delta_x, self.resoltuion[1] - delta_y), self.lower_button)
        self.lower_button = ImageTk.PhotoImage(self.Button)

    def draw_multiple_line_text(self, text, font, text_start_width, text_start_height, Line_Width):
        ## Used for creating multi-line text. Splits the text across multiple lines if the text crosses the line width.
        # Returns ([(x, y) per line], [wrapped line strings], last line height).
        # NOTE(review): font.getsize() was removed in Pillow 10 — this code
        # assumes an older Pillow release; confirm the pinned version.
        y_text = text_start_height
        x_text = text_start_width
        lines = textwrap.wrap(text, width=int(Line_Width))
        Coords = []
        Lines = []
        line_height = 0
        for line in lines:
            line_width, line_height = font.getsize(line)
            coords = [(x_text - line_width) / 2, y_text]
            y_text += line_height
            Coords.append(coords)
            Lines.append(line)
        return Coords, Lines, line_height

    ## Animation Effect.
    ## Hovering.
    def on_enter(self,*args):
        #switches images to the transformed button.
        self.Q_Dump()
        self.queue.put('E')

    def Q_Dump(self):
        # Drop any queued-but-unprocessed hover events.
        for i in range (self.queue.qsize()):
            self.queue.get_nowait()

    def on_leave(self,*args):
        #switches back to static image.
        self.Q_Dump()
        self.queue.put('L')

    def Manage_Animation(self):
        # Worker-thread loop: 'E' fades towards the transformed frames,
        # 'L' fades back towards the static frame.
        while True:
            Factor = self.queue.get()
            if Factor == 'E':
                self.change_sc()
            elif Factor == "L":
                self.change_tsc()

    def change_sc(self, si:int=9):
        # Fade from frame index si down to 1 (towards the transformed look).
        self.change_to_static = True
        for i in range (si,0,-1):
            if self.change_to_trans == True:
                # A leave event arrived mid-fade: reverse from this frame.
                self.change_to_static = False
                self.change_tsc(i)
                break
            sleep(0.01)
            self.configure(image=self.Images[i])
        if self.change_to_static:
            self.change_to_static = False

    def change_tsc(self, si:int=0):
        # Fade from frame index si up to 9 (back to the static look).
        self.change_to_trans = True
        for i in range (si, 10):
            if self.change_to_static == True:
                # An enter event arrived mid-fade: reverse from this frame.
                self.change_to_trans = False
                self.change_sc(i)
                break
            sleep(0.01)
            self.configure(image=self.Images[i])
        if self.change_to_trans:
            self.change_to_trans = False

    def connect_function(self, function=lambda:None):
        #Binds the button to a function.
        def connector(*args):
            # Show the pressed image, then invoke the bound callback.
            self.configure(image=self.lower_button)
            function()
        def disconnector(*args):
            # Restore the hover image on release.
            self.configure(image=self.Images[0])
        self.bind("<ButtonPress-1>", connector)
        self.bind("<ButtonRelease-1>", disconnector)
if __name__ == '__main__':
    # Demo window: a minimal login form exercising Round_Button.
    def New_Function():
        print ('Functioning')
    app = tk.Tk()
    Background = ('#000000')
    app.configure(background = Background)
    # Colours are (R, G, B) tuples; the hover animation fades between the
    # static and transformation sets.
    Static_Colour = (0,0,0)
    Text_Transformation_Colour = (255,255,255)
    Transformation_Colour = (0,0,0)
    Text_Static_Colour = (125,125,125)
    Static_Outline = (125,125,125)
    Transformation_Outline = (255,255,255)
    Button = Round_Button(app, 'Login', 3, Static_Colour, Text_Static_Colour, Transformation_Colour, Text_Transformation_Colour, Background, Static_Outline, Transformation_Outline)
    Button.connect_function(New_Function)
    Button.grid(row=2, column=1, pady= 5, padx=10)
    Label1 = ttk.Label(app, text='Username')
    Label1.configure(background = Background)
    Label1.configure(foreground=('#FFFFFF'))
    Label1.grid(row=0,column=0, pady= 5, padx=10)
    Entry1 = ttk.Entry(app)
    Entry1.grid(row =0, column=1, pady= 5, padx=10)
    Label2 = ttk.Label(app, text='Password')
    Label2.configure(background = Background)
    Label2.configure(foreground = ('#FFFFFF'))
    Label2.grid(row=1, column=0, pady= 5, padx=10)
    Entry2 = ttk.Entry(app)
    Entry2.grid(row=1, column=1, pady= 5, padx=10)
    app.mainloop()
|
dhcp.py | #!/usr/bin/python3
import time
import threading
import queue
import collections
import traceback
import socket
from random import randrange
import uuid
from .listener import *
from piman import logger
import csv
"""
This class contains specified attributes which will be populated, these attributes are associated with
the required options for our DHCP+PXE server.
"""
class WriteBootProtocolPacket(object):
    """
    Serialises a BOOTP/DHCP server reply from configured attributes.
    DHCP options are resolved by name through the global `options` table,
    or via generic `option_<n>` attributes.
    """
    message_type = 2 # 1 for client -> server 2 for server -> client
    hardware_type = 1
    hardware_address_length = 6
    hops = 0
    transaction_id = None
    seconds_elapsed = 0
    bootp_flags = 0 # unicast
    # The following are set, but altered within the "send_offer" function inside the transaction class
    client_ip_address = '0.0.0.0'
    your_ip_address = '0.0.0.0'
    next_server_ip_address = '0.0.0.0'
    relay_agent_ip_address = '0.0.0.0'
    vendor_class_identifier = "PXEClient"
    boot_file_name = "bootcode.bin"
    client_mac_address = None
    magic_cookie = '99.130.83.99'
    parameter_order = []

    # Adds new attributes to the WriteBootProtocolPacket object for
    # each option that is present in the DHCP server configuration.
    # These attributes are used when constructing the BOOTP packet.
    def __init__(self, configuration):
        for i in range(256):
            option_name = 'option_{}'.format(i)
            if i < len(options) and hasattr(configuration, options[i][0]):
                option_name = options[i][0]
            if hasattr(configuration, option_name):
                setattr(self, option_name, getattr(configuration, option_name))

    def to_bytes(self):
        """Render the fixed 236-byte BOOTP header plus options as bytes."""
        result = bytearray(236)
        result[0] = self.message_type
        result[1] = self.hardware_type
        result[2] = self.hardware_address_length
        result[3] = self.hops
        result[4:8] = struct.pack('>I', self.transaction_id)
        result[8:10] = shortpack(self.seconds_elapsed)
        result[10:12] = shortpack(self.bootp_flags)
        result[12:16] = inet_aton(self.client_ip_address)
        result[16:20] = inet_aton(self.your_ip_address)
        result[20:24] = inet_aton(self.next_server_ip_address)
        result[24:28] = inet_aton(self.relay_agent_ip_address)
        result[28:28 + self.hardware_address_length] = macpack(self.client_mac_address)
        result += inet_aton(self.magic_cookie)
        for option in self.options:
            value = self.get_option(option)
            # print(option, value)
            if value is None:
                continue
            # Each option is encoded as: code byte, length byte, payload.
            result += bytes([option, len(value)]) + value
        result += bytes([255])  # option 255 marks the end of the options
        return bytes(result)

    def get_option(self, option):
        """Return the encoded value for a DHCP option number, or None."""
        if option < len(options) and hasattr(self, options[option][0]):
            value = getattr(self, options[option][0])
        elif hasattr(self, 'option_{}'.format(option)):
            value = getattr(self, 'option_{}'.format(option))
        else:
            return None
        # Third element of the options table entry is the encoder function.
        function = options[option][2]
        if function and value is not None:
            value = function(value)
        return value

    @property
    def options(self):
        """Option numbers to emit: client-requested order first, then ours."""
        done = list()
        # fulfill wishes
        if self.parameter_order:
            for option in self.parameter_order:
                if option < len(options) and hasattr(self, options[option][0]) or hasattr(self, 'option_{}'.format(option)):
                    # this may break with the specification because we must try to fulfill the wishes
                    if option not in done:
                        done.append(option)
        # add my stuff
        for option, o in enumerate(options):
            if o[0] and hasattr(self, o[0]):
                if option not in done:
                    done.append(option)
        for option in range(256):
            if hasattr(self, 'option_{}'.format(option)):
                if option not in done:
                    done.append(option)
        return done

    def __str__(self):
        # Round-trip through the parser for a readable representation.
        return str(ReadBootProtocolPacket(self.to_bytes()))
class DelayWorker(object):
    """
    Runs scheduled callbacks on a single background thread.

    Entries live in a PriorityQueue keyed on their due time; entries that are
    not yet due are re-queued after a short back-off sleep.
    """

    def __init__(self):
        self.closed = False
        self.queue = queue.PriorityQueue()
        # BUG FIX: entries used to be (time, func, args, kw). When two entries
        # shared the same due time, the PriorityQueue fell through to comparing
        # the function objects, which raises TypeError. A monotonically
        # increasing sequence number breaks ties before that can happen.
        import itertools
        self._sequence = itertools.count()
        self.thread = threading.Thread(target=self._delay_response_thread)
        self.thread.start()

    def _delay_response_thread(self):
        # Worker loop: pop the earliest entry; run it if due, otherwise back
        # off briefly and re-queue it.
        while not self.closed:
            entry = self.queue.get()
            if self.closed:
                break
            due, _, func, args, kw = entry
            now = time.time()
            if now < due:
                time.sleep(0.01)
                self.queue.put(entry)
            else:
                func(*args, **kw)

    def do_after(self, seconds, func, args=(), kw=None):
        """Schedule func(*args, **kw) to run roughly `seconds` from now."""
        # Avoid the shared-mutable-default pitfall for kw.
        if kw is None:
            kw = {}
        self.queue.put((time.time() + seconds, next(self._sequence), func, args, kw))

    def close(self):
        """Stop the worker thread; pending callbacks are discarded."""
        self.closed = True
        # BUG FIX: the worker used to block forever in queue.get() when the
        # queue was empty at close time. This sentinel (which sorts before
        # everything) wakes it so it can observe `closed` and exit.
        self.queue.put((float('-inf'), -1, None, (), {}))
"""
The transaction class handles data transfers. One of the key functions here is the "send_offer" function which is
responsible for sending out the initial offer packet. It is also responsible for receiving the initial DHCP Discover
packet which is broadcast by the client.
"""
class Transaction(object):
    """
    Tracks one DHCP conversation (DISCOVER/OFFER/REQUEST/ACK) with a client.

    Replies are scheduled through the server's DelayWorker so offers and
    acknowledgements go out after the configured delays.
    """

    def __init__(self, server):
        self.server = server
        self.configuration = server.configuration
        self.packets = []
        # Transactions expire after a fixed window even if never completed.
        self.done_time = time.time() + self.configuration.length_of_transaction
        self.done = False
        self.do_after = self.server.delay_worker.do_after

    def is_done(self):
        """True once the transaction completed or its time window elapsed."""
        return self.done or self.done_time < time.time()

    def close(self):
        """Mark the transaction as completed."""
        self.done = True

    def receive(self, packet):
        """
        Dispatch an incoming client packet (message_type == 1).

        Returns:
            True if the packet was handled, False otherwise.
        """
        if packet.message_type == 1 and packet.dhcp_message_type == 'DHCPDISCOVER':
            self.do_after(self.configuration.dhcp_offer_after_seconds,
                          self.received_dhcp_discover, (packet,), )
        elif packet.message_type == 1 and packet.dhcp_message_type == 'DHCPREQUEST':
            self.do_after(self.configuration.dhcp_acknowledge_after_seconds,
                          self.received_dhcp_request, (packet,), )
        elif packet.message_type == 1 and packet.dhcp_message_type == 'DHCPINFORM':
            self.received_dhcp_inform(packet)
        else:
            return False
        return True

    def received_dhcp_discover(self, discovery):
        """Answer a DISCOVER with an offer, but only for known MAC addresses."""
        if self.is_done():
            return
        self.configuration.debug('discover:\n {}'.format(str(discovery).replace('\n', '\n\t')))
        should_send_offer = False
        for known_host in self.server.hosts.get():
            if discovery.client_mac_address == known_host.to_tuple()[0]:
                should_send_offer = True
        if should_send_offer:
            self.send_offer(discovery)
        else:
            unknown_mac_addr = discovery.client_mac_address
            logger.error("unknown mac_address {}. will not assign ip.".format(unknown_mac_addr))
            self.mac_mapper(unknown_mac_addr)

    def mac_mapper(self, unknown_mac):
        """Best effort: log which vendor an unknown MAC's OUI prefix belongs to."""
        import io
        # The deployment runs from a zipapp: two directory levels up from this
        # module is the archive containing the vendor database.
        zipfile = os.path.dirname(os.path.dirname(__file__))
        with ZipFile(zipfile) as z:
            # BUG FIX: the reader was assigned with '-' instead of '=' and
            # wrapped an undefined variable 'f'. ZipFile.open also yields a
            # binary stream, so wrap it for csv.reader (which needs text).
            with z.open('../install/addr_database.csv') as raw:
                reader = csv.reader(io.TextIOWrapper(raw), delimiter=',')
                for row in reader:
                    if unknown_mac.startswith(row[0]):
                        logger.debug('{} is from the Company: {}'.format(unknown_mac, row[1]))

    def send_offer(self, discovery):
        """Build a DHCPOFFER for the discovery packet and unicast it."""
        # https://tools.ietf.org/html/rfc2131
        offer = WriteBootProtocolPacket(self.configuration)
        offer.parameter_order = discovery.parameter_request_list
        mac = discovery.client_mac_address
        ip = offer.your_ip_address = self.server.get_ip_address(discovery)
        # If no IP address can be assigned, do not send an offer.
        if ip is None:
            return
        offer.transaction_id = discovery.transaction_id
        offer.relay_agent_ip_address = discovery.relay_agent_ip_address
        offer.client_mac_address = mac
        offer.client_ip_address = discovery.client_ip_address or '0.0.0.0'
        offer.bootp_flags = discovery.bootp_flags
        offer.dhcp_message_type = 'DHCPOFFER'
        offer.server_identifier = self.server.configuration.ip
        offer.client_identifier = mac
        offer.ip_address_lease_time = self.configuration.ip_address_lease_time
        pkt = construct_packet(self.configuration.net_inter_name, mac, self.configuration.ip, ip, offer)
        self.server.unicast(pkt)

    def received_dhcp_request(self, request):
        """Acknowledge a REQUEST and finish the transaction."""
        if self.is_done():
            return
        self.server.client_has_chosen(request)
        self.acknowledge(request)
        self.close()

    def acknowledge(self, request):
        """Build a DHCPACK answering the request and unicast it."""
        ack = WriteBootProtocolPacket(self.configuration)
        ack.parameter_order = request.parameter_request_list
        ack.transaction_id = request.transaction_id
        ack.bootp_flags = request.bootp_flags
        ack.relay_agent_ip_address = request.relay_agent_ip_address
        mac = request.client_mac_address
        ack.client_mac_address = mac
        ack.client_ip_address = request.client_ip_address or '0.0.0.0'
        ack.your_ip_address = self.server.get_ip_address(request)
        # If no valid IP address can be assigned, do not send an ack.
        if ack.your_ip_address is None:
            return
        ack.dhcp_message_type = 'DHCPACK'
        ack.server_identifier = self.server.configuration.ip
        ack.ip_address_lease_time = self.configuration.ip_address_lease_time
        pkt = construct_packet(self.configuration.net_inter_name, mac, self.server.configuration.ip,
                               request.requested_ip_address or request.client_ip_address, ack)
        self.server.unicast(pkt)

    def received_dhcp_inform(self, inform):
        """Record the client's self-assigned address from an INFORM."""
        self.close()
        self.server.client_has_chosen(inform)
class DHCPServerConfiguration(object):
    """Tunables and network parameters for the DHCP+PXE server."""
    dhcp_offer_after_seconds = 1 # must be >0!!!
    dhcp_acknowledge_after_seconds = 10
    length_of_transaction = 40
    #network = '192.168.173.0'
    #broadcast_address = '255.255.255.255'
    #subnet_mask = '255.255.255.0'
    #router = '172.30.3.1'
    domain_name_server = None # list of ips
    # Debug hook: a no-op by default; callers may rebind it (e.g. to print).
    debug = lambda *args, **kw: None

    def __init__(self, ip, subnet_mask, hosts_file, lease_time, net_inter):
        # ip: the server's own address; also used as router and NTP server.
        self.ip = ip
        self.subnet_mask = subnet_mask
        self.host_file = hosts_file
        self.ip_address_lease_time = lease_time
        self.net_inter_name = net_inter
        self.network = network_from_ip_subnet(ip, subnet_mask)
        self.router = ip
        self.domain_name_server = ['8.8.8.8'] # list of IPs
        self.network_time_protocol_servers = [ip]

    def load(self, file):
        # NOTE(review): executes the file's contents with exec() — only ever
        # load trusted configuration files.
        with open(file) as f:
            exec(f.read(), self.__dict__)

    def all_ip_addresses(self):
        """Iterate assignable addresses, skipping the first five in the subnet."""
        ips = ip_addresses(self.network, self.subnet_mask)
        for i in range(5):
            next(ips)
        return ips

    def network_filter(self):
        """Return a NETWORK matcher for this subnet (usable with ==)."""
        return NETWORK(self.network, self.subnet_mask)
class ALL(object):
    """Wildcard sentinel: compares equal to every value."""

    def __eq__(self, other):
        # Matches anything, so query patterns can leave fields unconstrained.
        return True

    def __repr__(self):
        return type(self).__name__

# Rebind the name to a singleton instance; only one wildcard is ever needed.
ALL = ALL()
class NETWORK(object):
    """
    Equality-matches any usable host address inside a subnet, excluding the
    network address itself and the broadcast address.
    """

    def __init__(self, network, subnet_mask):
        # Store both as 32-bit integers for fast masking in __eq__.
        self.subnet_mask = struct.unpack('>I', inet_aton(subnet_mask))[0]
        self.network = struct.unpack('>I', inet_aton(network))[0]

    def __eq__(self, other):
        # `other` is a dotted-quad string.
        addr = struct.unpack('>I', inet_aton(other))[0]
        in_subnet = (addr & self.subnet_mask) == self.network
        offset = addr - self.network
        broadcast_offset = ~self.subnet_mask & 0xffffffff
        # offset 0 is the network address; broadcast_offset is the broadcast.
        return in_subnet and offset and offset != broadcast_offset
class CASEINSENSITIVE(object):
    """String wrapper whose equality comparison ignores case."""

    def __init__(self, s):
        # Canonicalise once so every comparison is a plain string equality.
        self.s = s.lower()

    def __eq__(self, other):
        # `other` is expected to be a plain string.
        return other.lower() == self.s
class CSVDatabase(object):
    """Minimal record store: one record per line, fields joined by `delimiter`."""
    delimiter = ';'

    def __init__(self, file_name):
        self.file_name = file_name
        self.file('a').close() # create file

    def file(self, mode='r'):
        """Open the backing file in the given mode."""
        return open(self.file_name, mode)

    def get(self, pattern):
        """Return all records equal to `pattern` (which may contain wildcards)."""
        pattern = list(pattern)
        matches = []
        for record in self.all():
            if pattern == record:
                matches.append(record)
        return matches

    def add(self, line):
        """Append one record (a list of string fields)."""
        with self.file('a') as fh:
            fh.write(self.delimiter.join(line) + '\n')

    def delete(self, pattern):
        """Remove every record matching `pattern`, keeping all others."""
        doomed = self.get(pattern)
        survivors = [record for record in self.all() if record not in doomed]
        self.file('w').close() # empty file
        for record in survivors:
            self.add(record)

    def all(self):
        """Return every record as a list of field lists."""
        with self.file() as fh:
            return [row.strip().split(self.delimiter) for row in fh]
class Host(object):
    """
    One lease record: MAC address, IP, hostname and last-used timestamp.
    MACs are normalised to upper case; timestamps are stored as ints.
    """

    def __init__(self, mac, ip, hostname, last_used):
        self.mac = mac.upper()
        self.ip = ip
        self.hostname = hostname
        self.last_used = int(last_used)

    @classmethod
    def from_tuple(cls, line):
        """Build a Host from a [mac, ip, hostname, last_used] record."""
        mac, ip, hostname, last_used = line
        last_used = int(last_used)
        return cls(mac, ip, hostname, last_used)

    @classmethod
    def from_packet(cls, packet):
        """Build a Host from a DHCP packet's client fields."""
        return cls(packet.client_mac_address,
                   packet.requested_ip_address or packet.client_ip_address,
                   packet.host_name or '',
                   int(time.time()))

    @staticmethod
    def get_pattern(mac=ALL, ip=ALL, hostname=ALL, last_used=ALL):
        """Match pattern for database queries; unspecified fields match all."""
        return [mac, ip, hostname, last_used]

    def to_tuple(self):
        """Serialise to the [mac, ip, hostname, last_used] record format."""
        return [self.mac, self.ip, self.hostname, str(int(self.last_used))]

    def to_pattern(self):
        """Pattern matching this host's identity (mac + ip)."""
        return self.get_pattern(ip=self.ip, mac=self.mac)

    def __hash__(self):
        # BUG FIX: previously hashed the nonexistent attribute `self.key`,
        # raising AttributeError on any hash / set / dict use. Hash the
        # identifying fields instead, consistent with __eq__ on to_tuple().
        return hash((self.mac, self.ip, self.hostname, self.last_used))

    def __eq__(self, other):
        return self.to_tuple() == other.to_tuple()

    def has_valid_ip(self):
        """True if an actual IP (not empty / 0.0.0.0) is recorded."""
        return self.ip and self.ip != '0.0.0.0'
class HostDatabase(object):
    """Host-record view over a CSVDatabase file (mac;ip;hostname;last_used)."""

    def __init__(self, file_name):
        self.db = CSVDatabase(file_name)

    def get(self, **kw):
        # Filter by any of mac/ip/hostname/last_used; omitted fields match all.
        pattern = Host.get_pattern(**kw)
        return list(map(Host.from_tuple, self.db.get(pattern)))

    def add(self, host):
        # Only store hosts that pass the consistency checks in eval().
        if(self.eval(host)):
            self.db.add(host.to_tuple())
        else:
            print("invalid host {}, cannot add".format(host.to_tuple()))

    def delete(self, host=None, **kw):
        # Delete by Host object, or by field filters when host is None.
        if host is None:
            pattern = Host.get_pattern(**kw)
        else:
            pattern = host.to_pattern()
        self.db.delete(pattern)

    def all(self):
        # Every stored record, parsed into Host objects.
        return list(map(Host.from_tuple, self.db.all()))

    def replace(self, host):
        # Upsert: drop any existing record for this host, then add it.
        self.delete(host)
        self.add(host)

    def eval(self,host):
        # Validation: reject a MAC already bound to a different IP, and any
        # MAC whose first character is "5".
        # NOTE(review): the "5" check sits inside the loop, so it only takes
        # effect when the database is non-empty — confirm that is intended.
        result = True
        for element in self.all():
            if(host.mac == element.mac):
                if(host.ip != element.ip):
                    result = False
            if(host.mac[0]=="5"):
                result = False
        return result
class DHCPServer(object):
    """
    Listens on UDP port 67 (bound to one interface) and answers DHCP traffic
    via per-transaction state machines. Replies are sent as hand-built raw
    Ethernet frames so clients without an IP address can receive them.
    """

    def __init__(self, configuration=None):
        # NOTE(review): DHCPServerConfiguration.__init__ takes five required
        # arguments, so this no-argument fallback would raise TypeError.
        if configuration == None:
            configuration = DHCPServerConfiguration()
        self.configuration = configuration
        self.socket = socket(type = SOCK_DGRAM)
        self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        interface = self.configuration.net_inter_name.encode()
        # Restrict the listening socket to the configured interface.
        self.socket.setsockopt(SOL_SOCKET, SO_BINDTODEVICE, interface)
        self.socket.bind(('', 67)) # Using '' instead to broadcast to all
        self.delay_worker = DelayWorker()
        self.closed = False
        # One Transaction per DHCP transaction id, created on first access.
        self.transactions = collections.defaultdict(lambda: Transaction(self)) # id: transaction
        self.hosts = HostDatabase(self.configuration.host_file)
        # Raw socket used to unicast pre-built Ethernet/IP/UDP frames.
        self.raw_sock = socket(AF_PACKET, SOCK_RAW)
        self.raw_sock.bind((self.configuration.net_inter_name, 0)) # ETH_P_ALL = 0
        self.time_started = time.time()

    def close(self):
        """Shut down sockets, the delay worker and all open transactions."""
        self.socket.close()
        self.closed = True
        self.delay_worker.close()
        for transaction in list(self.transactions.values()):
            transaction.close()

    def update(self, timeout=0):
        """Poll the listening socket once and dispatch any received packet."""
        try:
            reads = select.select([self.socket], [], [], timeout)[0]
        except ValueError:
            # ValueError: file descriptor cannot be a negative integer (-1)
            logger.error("Value error: file descriptor cannot be a negative integer")
            return
        # NOTE(review): the loop variable shadows the imported `socket` name
        # for the rest of this method.
        for socket in reads:
            try:
                packet = ReadBootProtocolPacket(*socket.recvfrom(4096))
                # print(packet)
            except OSError:
                # OSError: [WinError 10038] An operation was attempted on something that is not a socket
                logger.error("OSError - operation was attempted on something that is not a socket")
                pass
            else:
                self.received(packet)
        # Reap transactions that finished or timed out.
        for transaction_id, transaction in list(self.transactions.items()):
            if transaction.is_done():
                transaction.close()
                self.transactions.pop(transaction_id)

    def received(self, packet):
        """Route a packet to its transaction; log it if nothing handled it."""
        if not self.transactions[packet.transaction_id].receive(packet):
            self.configuration.debug('received:\n {}'.format(str(packet).replace('\n', '\n\t')))

    def client_has_chosen(self, packet):
        """Persist the lease implied by the client's REQUEST/INFORM."""
        self.configuration.debug('client_has_chosen:\n {}'.format(str(packet).replace('\n', '\n\t')))
        host = Host.from_packet(packet)
        if not host.has_valid_ip():
            return
        self.hosts.replace(host)

    def is_valid_client_address(self, address):
        """True if `address` lies inside the configured subnet."""
        if address is None:
            return False
        # print(address)
        # print(self.configuration.subnet_mask)
        # print(self.configuration.network)
        a = address.split('.')
        s = self.configuration.subnet_mask.split('.')
        n = self.configuration.network.split('.')
        # Every masked octet must match the network's corresponding octet.
        return all(s[i] == '0' or a[i] == n[i] for i in range(4))

    def get_ip_address(self, packet):
        """Return the known IP for the packet's MAC, or None if unknown."""
        mac_address = packet.client_mac_address
        requested_ip_address = packet.requested_ip_address
        known_hosts = self.hosts.get(mac = CASEINSENSITIVE(mac_address))
        ip = None
        if known_hosts:
            # 1. choose known ip address
            for host in known_hosts:
                if self.is_valid_client_address(host.ip):
                    ip = host.ip
        str_known_ip = "known ip: " + str(ip)
        logger.info(str_known_ip)
        return ip

    @property
    def server_identifiers(self):
        # This server identifies itself only by its own IP.
        return [self.configuration.ip]

    def broadcast(self, packet):
        """Broadcast a reply to 255.255.255.255:68."""
        self.configuration.debug('broadcasting:\n {}'.format(str(packet).replace('\n', '\n\t')))
        try:
            data = packet.to_bytes()
            # NOTE(review): self.broadcast_socket is never created in
            # __init__, so this path raises AttributeError (swallowed below).
            self.broadcast_socket.sendto(data, ('255.255.255.255', 68))
        except:
            logger.error('error broadcasting')
            traceback.print_exc()

    def unicast(self, packet):
        """Send a pre-built raw Ethernet frame out of the bound interface."""
        try:
            self.raw_sock.send(packet)
        except:
            logger.error('DCHP - error unicasting')
            traceback.print_exc()

    def run(self):
        """Main loop: poll once a second until closed."""
        while not self.closed:
            try:
                self.update(1)
            except KeyboardInterrupt:
                logger.exception("keyboard interrupt")
                break
            except:
                logger.error(traceback.print_exc())
                traceback.print_exc()
def get_host_ip_addresses():
    """Return every inet address the local hostname resolves to."""
    _, _, addresses = gethostbyname_ex(gethostname())
    return addresses
def network_from_ip_subnet(ip, subnet_mask):
    """Produce the network (subnet) address for `ip` under `subnet_mask`."""
    import socket
    mask_bits = struct.unpack('>I', socket.inet_aton(subnet_mask))[0]
    addr_bits = struct.unpack('>I', socket.inet_aton(ip))[0]
    # Masking keeps only the network portion of the address.
    return socket.inet_ntoa(struct.pack('>I', addr_bits & mask_bits))
def ip_addresses(network, subnet_mask):
    """Produce an iterator over every host address in the subnet
    (dotted-quad strings), excluding the network and broadcast addresses."""
    import socket, struct
    mask = struct.unpack('>I', socket.inet_aton(subnet_mask))[0]
    net = struct.unpack('>I', socket.inet_aton(network))[0] & mask
    first = net + 1
    broadcast = net | (~mask & 0xffffffff)  # excluded by range()
    return (socket.inet_ntoa(struct.pack('>I', a)) for a in range(first, broadcast))
def sorted_hosts(hosts):
    """Return hosts as a new list ordered case-insensitively by
    (hostname, mac, ip)."""
    def order_key(host):
        return (host.hostname.lower(), host.mac.lower(), host.ip.lower())
    ordered = list(hosts)
    ordered.sort(key=order_key)
    return ordered
# 'Empty' packet data. Filled in when DHCP sends unicast responses.
# UDP header template: src port 67, dst port 68; length and checksum zeroed.
UDP = b'\x00\x43\x00\x44\x00\x00\x00\x00'
# IPv4 header template: version 4 / IHL 5, don't-fragment flag, TTL 64,
# protocol 17 (UDP); lengths, ID, checksum and addresses left zeroed.
IP = b'\x45\x00\x00\x00\x00\x00\x40\x00\x40\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
# Ethernet II frame template: both MACs zeroed, EtherType 0x0800 (IPv4).
ETHER = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00'
def udp_checksum(sip, dip, bootp):
    """
    Compute the UDP checksum (as 2 big-endian bytes) for a BOOTP payload
    sent from `sip` to `dip` on ports 67 -> 68.
    """
    length = len(bootp) + 8
    length_bytes = length.to_bytes(2, 'big')
    # Assemble the RFC 768 pseudo-header, UDP header and payload.
    pseudo = b''.join([
        inet_aton(sip),              # source IP
        inet_aton(dip),              # destination IP
        b'\x00\x11', length_bytes,   # zero byte, protocol 17 (UDP), UDP length
        b'\x00\x43\x00\x44',         # source and destination ports
        length_bytes, b'\x00\x00',   # UDP length again, checksum placeholder
        bootp,
    ])
    if len(bootp) % 2:
        pseudo += b'\x00'            # pad to a 16-bit boundary
    return IP_checksum(pseudo)
def construct_packet(inter, dmac, sip, dip, bootp):
    """
    Build a complete Ethernet/IP/UDP frame carrying the given BOOTP payload.

    Args:
        inter: name of the network interface whose MAC is the source address.
        dmac: destination MAC address string.
        sip, dip: source/destination dotted-quad IP addresses.
        bootp: a WriteBootProtocolPacket (anything with to_bytes()).

    Returns:
        The raw frame as bytes, ready for a raw-socket send().
    """
    # BOOTP Payload
    bootp = bootp.to_bytes()
    udp = bytearray(UDP)
    ip = bytearray(IP)
    ether = bytearray(ETHER)
    # UDP Packet: fill in length and checksum.
    udp_length = len(bootp) + 8
    udp[4:6] = (udp_length).to_bytes(2, 'big')
    udp[6:8] = udp_checksum(sip, dip, bootp)
    # IP Packet: total length, random ID, addresses, header checksum.
    ip[ 2: 4] = (udp_length + 20).to_bytes(2, 'big')
    ip[ 4: 6] = (randrange(0, 65535)).to_bytes(2, 'big')
    ip[12:16] = inet_aton(sip)
    ip[16:20] = inet_aton(dip)
    ip[10:12] = IP_checksum(ip)
    # Ethernet Frame: destination MAC, then our interface's MAC.
    ether[0: 6] = macpack(dmac)
    try:
        # Use a context manager so the sysfs file handle is always closed.
        with open('/sys/class/net/'+inter+'/address') as mac_file:
            mac = mac_file.readline()
    except OSError:
        # BUG FIX: `mac` was left undefined after a failed read, which made
        # the macpack() call below raise NameError. Fall back to a zero MAC.
        print("Failed to get mac adress for ", inter)
        mac = '00:00:00:00:00:00'
    ether[6:12] = macpack(mac[0:17])
    packet = b''.join([bytes(ether), bytes(ip), bytes(udp), bootp])
    return packet
# https://github.com/mdelatorre/checksum/blob/master/ichecksum.py
def IP_checksum(data):
    """
    Compute the 16-bit ones'-complement Internet checksum (RFC 1071).

    Args:
        data: bytes-like input; odd lengths are implicitly zero-padded.

    Returns:
        The checksum as 2 big-endian bytes, ready to splice into a header.
    """
    total = 0  # renamed from `sum`, which shadowed the builtin
    for i in range(0, len(data), 2):
        if i + 1 >= len(data):
            # BUG FIX (RFC 1071): an odd trailing byte is padded with a zero
            # octet on the RIGHT, so it occupies the high-order half of the
            # final 16-bit word. It was previously added as the low-order
            # byte, which produced a wrong checksum for odd-length input.
            # (Callers in this file only pass even-length data, so their
            # results are unchanged.)
            total += (data[i] & 0xFF) << 8
        else:
            w = ((data[i] << 8) & 0xFF00) + (data[i+1] & 0xFF)
            total += w
    # Fold any carries above 16 bits back into the low word.
    while (total >> 16) > 0:
        total = (total & 0xFFFF) + (total >> 16)
    # Ones' complement, masked to 16 bits.
    return ((~total) & 0xFFFF).to_bytes(2, 'big')
def do_dhcp(hosts_file, subnet_mask, ip, lease_time, net_inter):
    """Configure and run the DHCP server; blocks in server.run().

    @param hosts_file: hosts database handed to the server configuration
    @param subnet_mask: dotted-quad subnet mask of the served network
    @param ip: this server's IP address; also advertised as the TFTP server
    @param lease_time: DHCP lease duration for the configuration
    @param net_inter: network interface the server uses
    """
    configuration = DHCPServerConfiguration(ip, subnet_mask, hosts_file,
                                            lease_time, net_inter)
    configuration.tftp_server_name = ip
    #configuration.debug = print
    #configuration.adjust_if_this_computer_is_a_router()
    #configuration.router #+= ['192.168.0.1']
    server = DHCPServer(configuration)
    # NOTE(review): startup sanity check — asserts every address in the pool
    # compares equal to configuration.network_filter(); presumably the filter
    # object implements __eq__ against IPs.  Confirm this is intended; note it
    # also rebinds the local name `ip`.
    for ip in server.configuration.all_ip_addresses():
        assert ip == server.configuration.network_filter()
    logger.info("DHCP server is running...")
    server.run()
# NOTE(review): do_dhcp takes 5 required positional arguments
# (hosts_file, subnet_mask, ip, lease_time, net_inter); calling it with
# none will raise TypeError at startup — arguments need to be supplied here.
if __name__ == '__main__':
    do_dhcp()
|
fuzzer.py | # Copyright (c) 2020 - present Vitor Oriel <https://github.com/VitorOriel>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from threading import Thread, Event
import time
from typing import Callable, Union, Any
from requests.models import Response
from .dictionary import Dictionary
from ..conn.requesters import Requester
from ..objects import Error, Payload
from ..exceptions import RequestException, InvalidHostname
class Fuzzer:
    """Fuzzer class, the core of the software

    Attributes:
        requester: The requester object to deal with the requests
        dict: The dictionary object to handle with the payloads
        delay: The delay between each test
        running: A flag to say if the application is running or not
    """
    def __init__(self,
                 requester: 'Requester',
                 dictionary: 'Dictionary',
                 delay: float,
                 number_of_threads: int,
                 response_callback: Callable,
                 exception_callbacks: list):
        """Class constructor

        @type requester: Requester
        @param requester: The requester object to deal with the requests
        @type dictionary: Dictionary
        @param dictionary: The dictionary object to deal with the payload dictionary
        @type delay: float
        @param delay: The delay between each request
        @type number_of_threads: int
        @param number_of_threads: The number of threads
                                  used in the fuzzing tests
        @type response_callback: Callable
        @param response_callback: The callback function for the results,
                                  called as (response, rtt, payload, *ip)
        @type exception_callbacks: list
        @param exception_callbacks: The exception callbacks; index 0 handles
                                    InvalidHostname, index 1 RequestException
        """
        self.__requester = requester
        self.__dict = dictionary
        self.__delay = delay
        self.__running = True
        self.response_callback = response_callback
        self.exception_callbacks = exception_callbacks
        self.setup_threads(number_of_threads)

    def setup_threads(self, number_of_threads: int) -> None:
        """Handle with threads setup

        @type number_of_threads: int
        @param number_of_threads: The number of threads
                                  used in the fuzzing tests

        Attributes:
            threads: The list with the threads used in the application
            running_threads: The running threads count
            paused_threads: The paused threads count
            join_timeout: The join timeout for the threads
            player: The player event handler (set = playing, clear = paused)
        """
        self.__threads = [Thread(target=self.run, daemon=True)
                          for _ in range(number_of_threads)]
        self.__running_threads = number_of_threads
        self.__paused_threads = 0
        # Scale the per-thread join timeout with the thread count so join()
        # polls quickly regardless of how many threads exist.
        self.__join_timeout = 0.001 * float(number_of_threads)
        self.__player = Event()

    def is_running(self) -> bool:
        """The running flag getter

        @returns bool: The running flag
        """
        return self.__running

    def is_paused(self) -> bool:
        """The paused flag getter

        @returns bool: The paused flag
        """
        # FIX: Event.isSet() was deprecated in Python 3.10 and removed in
        # 3.12; is_set() is the supported spelling.
        return not self.__player.is_set()

    def run(self) -> None:
        """Consume payloads until the dictionary is exhausted, reporting
        each result (or exception) through the callbacks."""
        while not self.__dict.is_empty():
            for payload in next(self.__dict):
                try:
                    # request() returns (response, rtt) and may append an
                    # extra ip element; forward whatever came back.
                    response, rtt, *ip = self.__requester.request(str(payload))
                    self.response_callback(response, rtt, payload, *ip)
                except InvalidHostname as e:
                    self.exception_callbacks[0](Error(e, payload))
                except RequestException as e:
                    self.exception_callbacks[1](Error(e, payload))
                finally:
                    time.sleep(self.__delay)
            if self.is_paused():
                # Report ourselves paused, then block until resumed.
                self.__paused_threads += 1
                self.__player.wait()
        self.__running_threads -= 1

    def join(self) -> bool:
        """Join the threads

        @returns bool: A flag to say if the threads are running or not
        """
        for thread in self.__threads:
            thread.join(self.__join_timeout)
            if thread.is_alive():
                return True
        return False

    def start(self) -> None:
        """Starts the fuzzer application"""
        self.__player.set()
        for thread in self.__threads:
            thread.start()

    def pause(self) -> None:
        """Pause the fuzzer application"""
        self.__player.clear()

    def stop(self) -> None:
        """Stop the fuzzer application"""
        self.__running = False
        self.pause()
        self.wait_until_pause()

    def resume(self) -> None:
        """Resume the fuzzer application"""
        self.__paused_threads = 0
        self.__player.set()

    def wait_until_pause(self) -> None:
        """Blocks until all threads are paused"""
        while self.__paused_threads < self.__running_threads:
            # FIX: the sleep belongs INSIDE the loop — the original busy-spun
            # a CPU core here and only slept 0.1s once, after the wait ended.
            time.sleep(0.1)
|
updating_server.py | #!/usr/bin/env python
"""
Pymodbus Server With Updating Thread
--------------------------------------------------------------------------
This is an example of having a background thread updating the
context while the server is operating. This can also be done with
a python thread::
from threading import Thread
thread = Thread(target=updating_writer, args=(context,))
thread.start()
"""
# --------------------------------------------------------------------------- #
# import the modbus libraries we need
# --------------------------------------------------------------------------- #
from pymodbus.version import version
from pymodbus.server.asynchronous import StartTcpServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSequentialDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.transaction import ModbusRtuFramer, ModbusAsciiFramer
# --------------------------------------------------------------------------- #
# import the twisted libraries we need
# --------------------------------------------------------------------------- #
from twisted.internet.task import LoopingCall
# --------------------------------------------------------------------------- #
# configure the service logging
# --------------------------------------------------------------------------- #
import logging
logging.basicConfig()
# Root logger at DEBUG so pymodbus/twisted internals are visible in this demo.
log = logging.getLogger()
log.setLevel(logging.DEBUG)
# --------------------------------------------------------------------------- #
# define your callback process
# --------------------------------------------------------------------------- #
def updating_writer(a):
    """ A worker process that runs every so often and
    updates live values of the context. It should be noted
    that there is a race condition for the update.

    :param a: tuple whose first element is the Modbus server context
    """
    log.debug("updating the context")
    ctx = a[0]
    slave = 0x00
    # Read the first five holding registers (function code 3), bump each
    # by one, and write them back.
    current = ctx[slave].getValues(3, 0, count=5)
    bumped = [register + 1 for register in current]
    log.debug("new values: " + str(bumped))
    ctx[slave].setValues(3, 0, bumped)
def run_updating_server():
    """Build the data store and device identity, schedule the periodic
    updater, and start the (blocking) Modbus TCP server on port 5020."""
    # One slave context shared by every unit id, seeded with distinct
    # fill values per register type so the tables are easy to tell apart.
    store = ModbusSlaveContext(
        di=ModbusSequentialDataBlock(0, [10] * 1000),
        co=ModbusSequentialDataBlock(0, [11] * 1000),
        hr=ModbusSequentialDataBlock(0, [13] * 1000),
        ir=ModbusSequentialDataBlock(0, [14] * 1000))
    context = ModbusServerContext(slaves=store, single=True)

    # Device identification reported to clients.
    identity = ModbusDeviceIdentification()
    identity.VendorName = 'pymodbus'
    identity.ProductCode = 'PM'
    identity.VendorUrl = 'http://github.com/riptideio/pymodbus/'
    identity.ProductName = 'pymodbus Server'
    identity.ModelName = 'pymodbus Server'
    identity.MajorMinorRevision = version.short()

    # Fire updating_writer every 5 seconds; now=False delays the first run
    # by one interval instead of firing immediately.
    interval = 5
    updater = LoopingCall(f=updating_writer, a=(context,))
    updater.start(interval, now=False)
    StartTcpServer(context, identity=identity, address=("localhost", 5020))
if __name__ == "__main__":
run_updating_server()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.