repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
theedhum-nandrum | theedhum-nandrum-master/src/tn/document/languagehelper.py | '''
@author mojosaurus
Language helper class
'''
from src.tn.document.collectiontuple import CollectionTuple
from src.tn.document.document import Document
import cld2
# This is the helper class for language classification in the context of theedhum nandrum.
# All languahe related functions go here.
class LanguageHelper:
def __init__(self):
pass
# Given a document, this function extracts all the emojis
def extractLanguageTags(self, document:Document):
# Iterate over each of the tagged portions of the document to identify the languages
absolutePos = 0
taggedIndex = 0
collection = []
for tagged in document.get("tagged"):
if tagged["lang"] == "emoji":
collection.append(tagged)
continue
text = tagged["text"]
isReliable, textBytesFound, details, vectors = cld2.detect(text, returnVectors=True)
#print(' reliable: %s' % (isReliable != 0))
#print(' textBytes: %s' % textBytesFound)
#print(' details: %s' % str(details))
i=0
for vector in vectors:
#print ("*************")
#print (vector)
#print (details[i])
start = vector[0]
end = vector[1]
#print ("Start : {}, end : {}".format(start, start+end))
#print (vector)
#print (text[start:start+end])
jiji = CollectionTuple(text=text[start:start+end])
jiji.set("relativePos", start)
jiji.set("absolutePos", start)
jiji.set("lang", vector[3])
jiji.set("len", end-start) # Length of this block
collection.append(jiji.getJiji())
i += 1
#print ("*************")
return collection | 1,899 | 34.849057 | 96 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/document/emojihelper.py | '''
@author mojosaurus
Theedhum Nandrum version of emoji class
'''
import emoji
from src.tn.document.collectiontuple import CollectionTuple
from src.tn.document.document import Document
# Emoji is a special case of CollectionTuple
class Emoji(CollectionTuple):
def __init__(self, lang : str="emoji", text:str=""):
# jiji is a random name to capture our version of emoji
self.jiji = {
"lang" : lang,
"text" : text,
"unicodeName" : 0,
"unicodeBlock" : 0
}
# This is the helper class for emojis in the context of theedhum nandrum.
# All emoji related functions go here.
class EmojiHelper:
def __init__(self):
pass
# Given a document, this function extracts all the emojis
def extractEmojiTags(self, document:Document):
# Iterate over each of the tagged portions of the document to identify the emojis
absolutePos = 0
taggedIndex = 0
collection = []
for tagged in document.get("tagged"):
currCollection = []
collection = []
relativePos = 0
for c in tagged:
# Iterate over each character to find if this charcter is an emoji
if c in emoji.UNICODE_EMOJI:
print ("{} is an emoji".format(c))
# Now that we have found an emoji, close currCollection
text="".join(currCollection)
if text != "":
jiji = CollectionTuple(text=text)
jiji.set("relativePos", relativePos)
jiji.set("absolutePos", absolutePos)
jiji.set("len", len(text))
collection.append(jiji.getJiji())
currCollection = []
relativePos += len(text)
absolutePos += len(text)
# Now add the emoji to collection
jiji = Emoji(lang="emoji", text=c)
jiji.set("relativePos", relativePos)
jiji.set("absolutePos", absolutePos)
jiji.set("len", 1)
collection.append(jiji.getJiji())
relativePos += 1
absolutePos += 1
# TODO: Add the rest of the attributes
else:
# Keep adding to the current row in collection
currCollection.append(c)
#relativePos += 1
#absolutePos += 1
text="".join(currCollection)
if text != "":
jiji = CollectionTuple(text=text)
jiji.set("relativePos", relativePos)
jiji.set("absolutePos", absolutePos)
jiji.set("len", len(text))
collection.append(jiji.getJiji())
# TODO: Replace the current tagged position with jijiCollection and increment the taggedIndex.
#print ("Collection is {}".format(collection))
# TODO: Replace the current tagged Index with this collection
taggedIndex += 1
return collection | 3,221 | 39.78481 | 106 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/document/spellcheckhelper.py | '''
@author mojosaurus
Spellcheck helper class
'''
from src.tn.document.collectiontuple import CollectionTuple
import src.tn.lib.spell as spell
from src.tn.document.document import Document
# This is the helper class for spell check and correction in the context of theedhum nandrum.
# Currently it takes handles only english spell check and correction.
# All spell-check and spell correct related functions go here.
class SpellCheckHelper:
def __init__(self):
pass
# Given a document, this function tries to correct incorrect spellings
def correct(self, document:Document):
# Iterate over each of the tagged portions of the document
# Note that language tagging should have been done before this step.
collection = []
for tagged in document.get("tagged"):
if tagged["lang"] == "emoji":
collection.append(tagged)
continue
if tagged["lang"] == "un":
text = tagged["text"]
corrected = []
for word in text.split(" "):
# TODO: More thoughts need to get into this.
cword = spell.correction(word)
print ("Original : {}, corrected : {}".format(word, cword))
corrected.append(cword)
jiji = CollectionTuple(text=" ".join(corrected))
collection.append(jiji.getJiji())
else:
# Other languages go here. It's empty for now.
collection.append(tagged)
continue
return collection | 1,623 | 37.666667 | 93 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/document/__init__.py | """ Package Initialization file. """ | 36 | 36 | 36 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/document/collectiontuple.py | '''
@author mojosaurus
This is a logical single block of the document
'''
class CollectionTuple:
def __init__(self, lang : str = "", text : str =""):
# jiji is a random name to capture the context of the tuple. Too sleepy to name it anything.
self.jiji = {
"lang" : lang,
"text" : text,
"negative" : 0,
"neutral" : 0,
"positive" : 0,
"sentimentScore" : 0,
}
def getJiji(self):
return self.jiji
def set(self, key : str, value : str):
self.jiji[key] = value | 603 | 26.454545 | 101 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/docproc/spellchecktagger.py | '''
@author mojosaurus
Technically, spellcheck is not a tagger. It works on the tags generated by language tagger.
Don't use this before languagetagger is used
'''
import os,sys
# Appeding our src directory to sys path so that we can import modules.
sys.path.append(os.path.join(os.path.dirname(__file__),'../../..'))
from src.tn.document.document import Document
from src.tn.docproc.pipeline import Tagger
from src.tn.document.spellcheckhelper import SpellCheckHelper
class SpellCheckTagger(Tagger):
def __init__(self, document=Document()):
self.document = document
self.helper = SpellCheckHelper()
print ("Inside object of type : {}".format(self.__class__.__name__))
# This is where the magix is done.
def execute(self):
print ("Before processing : {} : {}".format(self.__class__.__name__, self.document))
tagged = self.helper.correct(self.document)
self.document.set("tagged", tagged)
print ("After processing : {} : {}".format(self.__class__.__name__, self.document)) | 1,045 | 40.84 | 93 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/docproc/emojitagger.py | '''
@author mojosaurus
Tags emojis
'''
import os,sys
# Appeding our src directory to sys path so that we can import modules.
sys.path.append(os.path.join(os.path.dirname(__file__),'../../..'))
from src.tn.document.document import Document
from src.tn.docproc.pipeline import Tagger
from src.tn.document.emojihelper import Emoji, EmojiHelper
class EmojiTagger(Tagger):
def __init__(self, document=Document()):
self.document = document
self.helper = EmojiHelper()
print ("Inside object of type : {}".format(self.__class__.__name__))
# Replaces everything to lowercase, if it's latin alphabet.
def execute(self):
print ("Before processing : {} : {}".format(self.__class__.__name__, self.document))
tagged = self.helper.extractEmojiTags(self.document)
self.document.set("tagged", tagged)
#self.document.set("text", self.document.get("text").lower())
print ("After processing : {} : {}".format(self.__class__.__name__, self.document)) | 1,015 | 39.64 | 93 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/docproc/languagetagger.py | '''
@author mojosaurus
Tags languages
'''
import os,sys
# Appeding our src directory to sys path so that we can import modules.
sys.path.append(os.path.join(os.path.dirname(__file__),'../../..'))
from src.tn.document.document import Document
from src.tn.docproc.pipeline import Tagger
from src.tn.document.languagehelper import LanguageHelper
import cld2
class LanguageTagger(Tagger):
def __init__(self, document=Document()):
self.document = document
self.helper = LanguageHelper()
print ("Inside object of type : {}".format(self.__class__.__name__))
# Uses CLD2 library to identify and tags part of a multi-script sentence.
# TODO: Replace CLD2 with CLD3.
def execute(self):
print ("Before processing : {} : {}".format(self.__class__.__name__, self.document))
tagged = self.helper.extractLanguageTags(self.document)
self.document.set("tagged", tagged)
print ("After processing : {} : {}".format(self.__class__.__name__, self.document)) | 1,018 | 36.740741 | 93 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/docproc/lowercase.py | '''
@author mojosaurus
Replaces multiple whitespaces with one whitespace
'''
import os,sys
# Appeding our src directory to sys path so that we can import modules.
sys.path.append(os.path.join(os.path.dirname(__file__),'../../..'))
from src.tn.docproc.pipeline import Step
from src.tn.document.document import Document
class Lowercase(Step):
def __init__(self, document=Document()):
self.document = document
print ("Inside object of type : {}".format(self.__class__.__name__))
# Replaces everything to lowercase, if it's latin alphabet.
def execute(self):
print ("Before processing : {} : {}".format(self.__class__.__name__, self.document))
self.document.set("text", self.document.get("text").lower())
print ("After processing : {} : {}".format(self.__class__.__name__, self.document)) | 844 | 41.25 | 93 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/docproc/regexes.py | '''
@author mojosaurus
This step does a bunch of things as part of document cleanup. More details in comments below
'''
import os,sys
# Appeding our src directory to sys path so that we can import modules.
sys.path.append(os.path.join(os.path.dirname(__file__),'../../..'))
from src.tn.docproc.pipeline import Step
from src.tn.document.document import Document
import re
class Regexes(Step):
def __init__(self, document=Document()):
self.document = document
print ("Inside object of type : {}".format(self.__class__.__name__))
# Replaces everything to lowercase, if it's latin alphabet.
def execute(self):
'''
Note: Could have used a single regex like [r"(\.|\?|\!)+", "\\1"], but for some reason, the middle replacement is ignored
'''
regexes = [
[r"(\.)+", "\\1"], # 1. Multiple fullstops with one fullstop
[r"(\?)+", "\\1"], # 2. Multiple exclamation marks with one exclamation mark
[r"(\!)+", "\\1"], # 3. Multiple question marks with one question mark
[r"(#)+", "\\1"], # 4. Multiple hash with one question mark
[r"(\w)\1{2,}", "\\1\\1"], # 5. Replaces multiple(2+) occurance of the same letter with 2 occurances.
]
print ("Before processing : {} : {}".format(self.__class__.__name__, self.document))
for reg in regexes:
self.document.set("text", re.sub(reg[0], reg[1], self.document.get("text")))
print ("After processing : {} : {}".format(self.__class__.__name__, self.document)) | 1,560 | 47.78125 | 129 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/docproc/whitespace.py | '''
@author mojosaurus
Replaces multiple whitespaces with one whitespace
'''
import os,sys
# Appeding our src directory to sys path so that we can import modules.
sys.path.append(os.path.join(os.path.dirname(__file__),'../../..'))
from src.tn.docproc.pipeline import Step
from src.tn.document.document import Document
class Whitespace(Step):
def __init__(self, document=Document()):
self.document = document
print ("Inside object of type : {}".format(self.__class__.__name__))
# Replaces multiple whitespaces with one whitespace.
def execute(self):
print ("Before processing : {} : {}".format(self.__class__.__name__, self.document))
self.document.set("text", ' '.join(self.document.get("text").split()))
print ("After processing : {} : {}".format(self.__class__.__name__, self.document)) | 848 | 41.45 | 93 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/docproc/__init__.py | """ Package Initialization file. """ | 36 | 36 | 36 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/docproc/process.py | '''
@author mojosaurus
This is the file that executes the pipeline
'''
import os,sys
# Appeding our src directory to sys path so that we can import modules.
sys.path.append(os.path.join(os.path.dirname(__file__),'../../..'))
from src.tn.docproc.pipeline import Step
from src.tn.docproc.whitespace import Whitespace
from src.tn.docproc.lowercase import Lowercase
from src.tn.document.document import Document
from src.tn.docproc.regexes import Regexes
from src.tn.docproc.pipeline import Pipeline
from src.tn.docproc.emojitagger import EmojiTagger
from src.tn.docproc.languagetagger import LanguageTagger
from src.tn.docproc.spellchecktagger import SpellCheckTagger
if __name__ == "__main__":
text = "woooood issssss your oyster.... 🥰 ###!!! ప్రపంచం అంతా వెతికిన ధనుష్ 🤩 లాంటి మరో నటుడు దొరకడు, 🤩 சுயமாக சிந்திக்க தெரிஞ்சவன் தான் சூப்பர் ஹீரோ 🥰 ಬಠಪಢಝ ಜಂಅಂಇ ಋಋ ಡಘಫಫಝ ಡಝಫಷ"
text = "woooood issssss your oyester .... 🥰 ###!!! சுயமாக சிந்திக்க 🤩 beer"
doc = Document(text)
pipeline = Pipeline()
pipeline.addStep(Whitespace())
pipeline.addStep(Lowercase())
pipeline.addStep(Regexes())
pipeline.addStep(EmojiTagger())
pipeline.addStep(LanguageTagger())
pipeline.addStep(SpellCheckTagger())
pipeline.process(doc) | 1,261 | 39.709677 | 182 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/docproc/pipeline.py | '''
@author mojosaurus
This file defines docproc pipeline to clean up input data.
This might be broken down into multiple intermediary steps and the final output comes out at last.
'''
'''
This baseclass defines the list of functions that need to be implemented in each of the steps in the
pipeline.
Each step in the pipeline expects the input and output in a certain tuple format defined as JSON.
TODO: Add a reference to the tuple
'''
from src.tn.document.document import Document
# This is the base class that needs to be inherited for all Steps, except Taggers
class Step:
def __init__(self, document:Document):
print ("In baseclass")
self.document = document
# Inherited classes need to implement this method
def execute(self):
pass
# Do not implement this method in inherited class
def getDocument(self):
return self.document
# Do not implement this method in inherited class
def setDocument(self, document:Document):
self.document = document
# This is the base class that needs to be inherited for all Taggers in docproc pipeline
class Tagger(Step):
def __init__(self):
"Inside baseclass Tagger"
class Pipeline:
pipelineSteps = []
document = {}
def __init__(self):
pass
# Expects an object of type Step to be passed
def addStep(self, step:Step):
self.pipelineSteps.append(step)
# This function iterates over the steps and executes
def process(self, document):
for step in self.pipelineSteps:
#print (step.getDocument())
step.setDocument(document)
step.execute()
document = step.getDocument() | 1,691 | 29.214286 | 100 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/lib/sentimoji.py | """
Module to convert Unicode Emojis to corresponding Sentiment Rankings.
Based on the research by Kralj Novak P, Smailović J, Sluban B, Mozetič I
(2015) on Sentiment of Emojis.
Journal Link:
https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0144296
CSV Data acquired from CLARIN repository,
Repository Link: http://hdl.handle.net/11356/1048
"""
import csv
import logging
from os import path
logging.basicConfig(
level=logging.INFO,
format='%(process)d | %(levelname)s | %(message)s'
)
# pylint: disable=too-many-locals
def _build_dict_from_csv(csv_path):
""" Builds the Emoji to Sentiment dictionary from the CSV file. """
emoji_sentiment_rankings = {}
with open(csv_path, newline='', encoding='utf-8') as csv_file:
csv_reader = csv.reader(csv_file)
_header_row = next(csv_reader)
for row in csv_reader:
emoji = row[0]
unicode_codepoint = row[1]
occurrences = int(row[2])
position = float(row[3])
negative = float(row[4])
neutral = float(row[5])
positive = float(row[6])
unicode_name = row[7]
unicode_block = row[8]
sentiment_score = float(
'{:.3f}'.format((positive - negative) / occurrences)
)
emoji_sentiment_rankings[emoji] = {
'unicode_codepoint': unicode_codepoint,
'occurrences': occurrences,
'position': position,
'negative': negative,
'neutral': neutral,
'positive': positive,
'unicode_name': unicode_name,
'unicode_block': unicode_block,
'sentiment_score': sentiment_score
}
return emoji_sentiment_rankings
def get_emoji_sentiment_rank(emoji):
""" Returns the Sentiment Data mapped to the specified Emoji. """
return EMOJI_SENTIMENT_DICT[emoji] if emoji in EMOJI_SENTIMENT_DICT.keys() else False
EMOJI_SENTIMENT_DICT = _build_dict_from_csv(
path.join(
path.abspath(path.dirname(__file__)),
'../../../resources/data/Emoji_Sentiment_Data_v1.0.csv'
)
) | 2,211 | 29.722222 | 89 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/lib/feature_utils.py | from emoji import UNICODE_EMOJI
import re
import sys, os
from googletrans import Translator
from langdetect import detect
import math
from bisect import bisect_left
from .sentimoji import get_emoji_sentiment_rank
def load_docs(source, mode='train'):
documents = {'data': [], 'target_names': [], 'ids': []}
with open(source, 'r', encoding='utf-8') as inf:
# skipping header row
next(inf)
for line in inf:
if mode == 'predict':
(recid, review) = re.split('\t', line.strip())
documents['data'].append(review)
documents['ids'].append(recid)
else:
# both train and test have this format
(review, cat) = re.split('\t', line.strip())
documents['data'].append(review)
documents['target_names'].append(cat)
return documents
def get_all_emojis():
if not hasattr(get_all_emojis, "all_emojis"):
get_all_emojis.all_emojis = {}
for c in UNICODE_EMOJI:
get_all_emojis.all_emojis['has-emoji({})'.format(c)] = (False)
return get_all_emojis.all_emojis
# The emoji feature classifier
def document_emoji_feature(document_words, features):
all_emojis = get_all_emojis()
features.update(all_emojis)
allchars = set(''.join(document_words))
score = 0.0
emojis = []
for c in allchars:
if c in UNICODE_EMOJI:
emojis.append(c)
features['has-emoji({})'.format(c)] = (True)
sentiment = get_emoji_sentiment_rank(c)
if sentiment is not False:
score += sentiment['sentiment_score']
features['emoji-positive'] = (False)
features['emoji-negative'] = (False)
features['emoji-neutral'] = (False)
if len(emojis) > 0:
score /= len(emojis)
if score > 0.2:
features['emoji-positive'] = (True)
elif score < -0.2:
features['emoji-negative'] = (True)
else:
features['emoji-neutral'] = (True)
def get_emojis_from_text(text):
score = 0.0
# Putting in a random emoji to avoid empty data
emojis = ["🦻"]
for c in text:
if c in UNICODE_EMOJI:
emojis.append(c)
sentiment = get_emoji_sentiment_rank(c)
if sentiment is not False:
score += sentiment['sentiment_score']
if len(emojis) > 0:
score /= len(emojis)
if score > 0.2:
label = 'Positive'
elif score < -0.2:
label = 'Negative'
else:
label = 'Neutral'
return ((emojis, label))
def get_doc_len_range(document_words):
return (get_range(len(document_words)))
def get_range(doclen):
ranges = ["1-10", "11-20", "21-30", "31-40", "41-50", "51-60", "61-70", "71-80", "81-90", "91-100", "101-110", "111-120", "121-130", "131-140",
"141-150", "151-160", "161-170", "171-180", "181-190", "191-200", ">200"]
breakpoints = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100,
110, 120, 130, 140, 150, 160, 170, 180, 190, math.inf]
index = bisect_left(breakpoints, doclen)
return ranges[index]
def get_language(text):
#translator = Translator()
try:
return(detect(text))
except:
return("unknown")
# if (language.confidence > 0.7): return language.lang
# return "unknown"
def detect_lang_and_store(inputfile, outputfile):
with open(inputfile) as inf, open(outputfile, "w") as f:
for text in inf:
# Intentional re-init of object - https://stackoverflow.com/questions/49497391/googletrans-api-error-expecting-value-line-1-column-1-char-0
translator = Translator()
try:
text = text.strip()
language = translator.detect(text)
f.write(text + "\t" + language.lang + "\t" + str(language.confidence) + "\n")
except Exception as e:
print(str(e))
continue
f.close()
if __name__ == "__main__":
# features = {}
# document_words = 'ugh 🤢'
# document_emoji_feature(document_words, features)
# print(features)
# document_words = 'கலக்கல் 🤩'
# document_emoji_feature(document_words, features)
# print(features)
# detect_lang_and_store(["idhu enna maayam", "sundari kannaal oru sedhi", "malalayali aano", "கலக்கல்", "nandri hai"], "/tmp/languages_tmp.tsv")
detect_lang_and_store(os.path.join(os.path.dirname(sys.path[0]),'../../resources/data/alltexts.txt'), os.path.join(os.path.dirname(sys.path[0]),'../../resources/data/alltextslang.txt')) | 4,517 | 34.857143 | 189 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/lib/__init__.py | """ Package Initialization file. """ | 36 | 36 | 36 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/lib/Singleton.py | class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls] | 237 | 38.666667 | 81 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/lib/spell.py | """Spelling Corrector in Python 3; see http://norvig.com/spell-correct.html
Copyright (c) 2007-2016 Peter Norvig
MIT license: www.opensource.org/licenses/mit-license.php
"""
################ Spelling Corrector
import re
import os
from collections import Counter
def words(text): return re.findall(r'\w+', text.lower())
WORDS = Counter(words(open(os.path.join(os.path.dirname(__file__),'../../../resources/data/big.txt')).read()))
def P(word, N=sum(WORDS.values())):
"Probability of `word`."
return WORDS[word] / N
def correction(word):
"Most probable spelling correction for word."
return max(candidates(word), key=P)
def candidates(word):
"Generate possible spelling corrections for word."
return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])
def known(words):
"The subset of `words` that appear in the dictionary of WORDS."
return set(w for w in words if w in WORDS)
def edits1(word):
"All edits that are one edit away from `word`."
letters = 'abcdefghijklmnopqrstuvwxyz'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edits2(word):
"All edits that are two edits away from `word`."
return (e2 for e1 in edits1(word) for e2 in edits1(e1))
'''
NOTE: Commenting the test suite. Ishwar.
################ Test Code
def unit_tests():
assert correction('speling') == 'spelling' # insert
assert correction('korrectud') == 'corrected' # replace 2
assert correction('bycycle') == 'bicycle' # replace
assert correction('inconvient') == 'inconvenient' # insert 2
assert correction('arrainged') == 'arranged' # delete
assert correction('peotry') =='poetry' # transpose
assert correction('peotryy') =='poetry' # transpose + delete
assert correction('word') == 'word' # known
assert correction('quintessential') == 'quintessential' # unknown
assert words('This is a TEST.') == ['this', 'is', 'a', 'test']
assert Counter(words('This is a test. 123; A TEST this is.')) == (
Counter({'123': 1, 'a': 2, 'is': 2, 'test': 2, 'this': 2}))
assert len(WORDS) == 32198
print (sum(WORDS.values()))
assert sum(WORDS.values()) == 1115585
assert WORDS.most_common(10) == [
('the', 79808),
('of', 40024),
('and', 38311),
('to', 28765),
('in', 22020),
('a', 21124),
('that', 12512),
('he', 12401),
('was', 11410),
('it', 10681)]
assert WORDS['the'] == 79808
assert P('quintessential') == 0
assert 0.07 < P('the') < 0.08
return 'unit_tests pass'
def spelltest(tests, verbose=False):
"Run correction(wrong) on all (right, wrong) pairs; report results."
import time
start = time.clock()
good, unknown = 0, 0
n = len(tests)
for right, wrong in tests:
w = correction(wrong)
good += (w == right)
if w != right:
unknown += (right not in WORDS)
if verbose:
print('correction({}) => {} ({}); expected {} ({})'
.format(wrong, w, WORDS[w], right, WORDS[right]))
dt = time.clock() - start
print('{:.0%} of {} correct ({:.0%} unknown) at {:.0f} words per second '
.format(good / n, n, unknown / n, n / dt))
def Testset(lines):
"Parse 'right: wrong1 wrong2' lines into [('right', 'wrong1'), ('right', 'wrong2')] pairs."
return [(right, wrong)
for (right, wrongs) in (line.split(':') for line in lines)
for wrong in wrongs.split()]
if __name__ == '__main__':
print(unit_tests())
spelltest(Testset(open('../../../resources/data/spell-testset1.txt')))
spelltest(Testset(open('../../../resources/data/spell-testset2.txt')))
#spelltest(Testset(open('spell-testset2.txt')))
''' | 4,253 | 36.646018 | 110 | py |
pycpd | pycpd-master/setup.py | from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name='pycpd',
version='2.0.2',
description='Pure Numpy Implementation of the Coherent Point Drift Algorithm',
long_description=readme(),
url='https://github.com/siavashk/pycpd',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
],
keywords='image processing, point cloud, registration, mesh, surface',
author='Siavash Khallaghi',
author_email='siavashk@ece.ubc.ca',
license='MIT',
packages=['pycpd'],
install_requires=['numpy', 'future'],
zip_safe=False)
| 954 | 30.833333 | 84 | py |
pycpd | pycpd-master/testing/constrained_deformable_test.py | import pytest
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from pycpd import gaussian_kernel, ConstrainedDeformableRegistration
def test_2D():
X = np.loadtxt('data/fish_target.txt')
Y = np.loadtxt('data/fish_source.txt')
# simulate a pointcloud missing certain parts
X = X[:61]
# select fixed correspondences
src_id = np.int32([1,10,20,30])
tgt_id = np.int32([1,10,20,30])
reg = ConstrainedDeformableRegistration(**{'X': X, 'Y': Y}, e_alpha = 1e-8, source_id = src_id, target_id = tgt_id)
TY, _ = reg.register()
assert_array_almost_equal(X[tgt_id], TY[src_id], decimal=1)
def test_3D():
fish_target = np.loadtxt('data/fish_target.txt')
fish_target = fish_target[:61]
X1 = np.zeros((fish_target.shape[0], fish_target.shape[1] + 1))
X1[:, :-1] = fish_target
X2 = np.ones((fish_target.shape[0], fish_target.shape[1] + 1))
X2[:, :-1] = fish_target
X = np.vstack((X1, X2))
fish_source = np.loadtxt('data/fish_source.txt')
Y1 = np.zeros((fish_source.shape[0], fish_source.shape[1] + 1))
Y1[:, :-1] = fish_source
Y2 = np.ones((fish_source.shape[0], fish_source.shape[1] + 1))
Y2[:, :-1] = fish_source
Y = np.vstack((Y1, Y2))
# select fixed correspondences
src_id = np.int32([1,10,20,30, len(Y1)+1, len(Y1)+10, len(Y1)+20, len(Y1)+30])
tgt_id = np.int32([1,10,20,30, len(X1)+1, len(X1)+10, len(X1)+20, len(X1)+30])
reg = ConstrainedDeformableRegistration(**{'X': X, 'Y': Y}, e_alpha = 1e-8, source_id = src_id, target_id = tgt_id)
TY, _ = reg.register()
assert_array_almost_equal(TY[src_id], X[tgt_id], decimal=0)
def test_3D_low_rank():
fish_target = np.loadtxt('data/fish_target.txt')
fish_target = fish_target[:61]
X1 = np.zeros((fish_target.shape[0], fish_target.shape[1] + 1))
X1[:, :-1] = fish_target
X2 = np.ones((fish_target.shape[0], fish_target.shape[1] + 1))
X2[:, :-1] = fish_target
X = np.vstack((X1, X2))
fish_source = np.loadtxt('data/fish_source.txt')
Y1 = np.zeros((fish_source.shape[0], fish_source.shape[1] + 1))
Y1[:, :-1] = fish_source
Y2 = np.ones((fish_source.shape[0], fish_source.shape[1] + 1))
Y2[:, :-1] = fish_source
Y = np.vstack((Y1, Y2))
# select fixed correspondences
src_id = np.int32([1,10,20,30, len(Y1)+1, len(Y1)+10, len(Y1)+20, len(Y1)+30])
tgt_id = np.int32([1,10,20,30, len(X1)+1, len(X1)+10, len(X1)+20, len(X1)+30])
reg = ConstrainedDeformableRegistration(**{'X': X, 'Y': Y, 'low_rank': True}, e_alpha = 1e-8, source_id = src_id, target_id = tgt_id)
TY, _ = reg.register()
assert_array_almost_equal(TY[src_id], X[tgt_id], decimal=0)
| 2,734 | 35.959459 | 137 | py |
pycpd | pycpd-master/testing/rigid_test.py | import pytest
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from pycpd import RigidRegistration
def test_2D():
theta = np.pi / 6.0
R = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
t = np.array([0.5, 1.0])
Y = np.loadtxt('data/fish_target.txt')
X = np.dot(Y, R) + np.tile(t, (np.shape(Y)[0], 1))
reg = RigidRegistration(**{'X': X, 'Y': Y})
TY, (s_reg, R_reg, t_reg) = reg.register()
assert_almost_equal(1.0, s_reg)
assert_array_almost_equal(R, R_reg)
assert_array_almost_equal(t, t_reg)
assert_array_almost_equal(X, TY)
def test_3D():
theta = np.pi / 6.0
R = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
t = np.array([0.5, 1.0, -2.0])
fish_target = np.loadtxt('data/fish_target.txt')
Y = np.zeros((fish_target.shape[0], fish_target.shape[1] + 1))
Y[:, :-1] = fish_target
X = np.dot(Y, R) + np.tile(t, (np.shape(Y)[0], 1))
reg = RigidRegistration(**{'X': X, 'Y': Y})
TY, (s_reg, R_reg, t_reg) = reg.register()
assert_almost_equal(1.0, s_reg)
assert_array_almost_equal(R, R_reg)
assert_array_almost_equal(t, t_reg)
assert_array_almost_equal(X, TY)
| 1,310 | 30.97561 | 72 | py |
pycpd | pycpd-master/testing/affine_test.py | import pytest
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from pycpd import AffineRegistration
def test_2D():
B = np.array([[1.0, 0.5], [0, 1.0]])
t = np.array([0.5, 1.0])
Y = np.loadtxt('data/fish_target.txt')
X = np.dot(Y, B) + np.tile(t, (np.shape(Y)[0], 1))
reg = AffineRegistration(**{'X': X, 'Y': Y})
TY, (B_reg, t_reg) = reg.register()
assert_array_almost_equal(B, B_reg)
assert_array_almost_equal(t, t_reg)
assert_array_almost_equal(X, TY)
def test_3D():
B = np.array([[1.0, 0.5, 0.0], [0, 1.0, 0.0], [0.0, 0.0, 1.0]])
t = np.array([0.5, 1.0, -2.0])
fish_target = np.loadtxt('data/fish_target.txt')
Y1 = np.zeros((fish_target.shape[0], fish_target.shape[1] + 1))
Y1[:, :-1] = fish_target
Y2 = np.ones((fish_target.shape[0], fish_target.shape[1] + 1))
Y2[:, :-1] = fish_target
Y = np.vstack((Y1, Y2))
X = np.dot(Y, B) + np.tile(t, (np.shape(Y)[0], 1))
reg = AffineRegistration(**{'X': X, 'Y': Y})
TY, (B_reg, t_reg) = reg.register()
assert_array_almost_equal(B, B_reg)
assert_array_almost_equal(t, t_reg)
assert_array_almost_equal(X, TY)
| 1,194 | 29.641026 | 72 | py |
pycpd | pycpd-master/testing/deformable_test.py | import pytest
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from pycpd import gaussian_kernel, DeformableRegistration
def test_2D():
X = np.loadtxt('data/fish_target.txt')
Y = np.loadtxt('data/fish_source.txt')
reg = DeformableRegistration(**{'X': X, 'Y': Y})
TY, _ = reg.register()
assert_array_almost_equal(X, TY, decimal=1)
def test_3D():
    """Deformable registration should align stacked 3D fish clouds."""
    def lift(points):
        # Stack two copies of the 2D cloud, at z = 0 and z = 1.
        z0 = np.hstack((points, np.zeros((points.shape[0], 1))))
        z1 = np.hstack((points, np.ones((points.shape[0], 1))))
        return np.vstack((z0, z1))
    X = lift(np.loadtxt('data/fish_target.txt'))
    Y = lift(np.loadtxt('data/fish_source.txt'))
    reg = DeformableRegistration(X=X, Y=Y)
    TY, _ = reg.register()
    assert_array_almost_equal(TY, X, decimal=0)
def test_3D_low_rank():
    """Low-rank deformable registration on stacked 3D fish clouds.

    Also checks that ``transform_point_cloud`` can warp an arbitrary
    subset of the registered source points.
    """
    fish_target = np.loadtxt('data/fish_target.txt')
    X1 = np.zeros((fish_target.shape[0], fish_target.shape[1] + 1))
    X1[:, :-1] = fish_target
    X2 = np.ones((fish_target.shape[0], fish_target.shape[1] + 1))
    X2[:, :-1] = fish_target
    X = np.vstack((X1, X2))
    fish_source = np.loadtxt('data/fish_source.txt')
    Y1 = np.zeros((fish_source.shape[0], fish_source.shape[1] + 1))
    Y1[:, :-1] = fish_source
    Y2 = np.ones((fish_source.shape[0], fish_source.shape[1] + 1))
    Y2[:, :-1] = fish_source
    Y = np.vstack((Y1, Y2))
    reg = DeformableRegistration(**{'X': X, 'Y': Y, 'low_rank': True})
    TY, _ = reg.register()
    assert_array_almost_equal(TY, X, decimal=0)
    # Use a seeded generator so the sampled subset -- and therefore the
    # test outcome -- is reproducible across runs (the previous unseeded
    # np.random.randint made this test non-deterministic).
    rng = np.random.default_rng(0)
    rand_pts = rng.integers(Y.shape[0], size=int(Y.shape[0] / 2))
    TY2 = reg.transform_point_cloud(Y=Y[rand_pts, :])
    assert_array_almost_equal(TY2, X[rand_pts, :], decimal=0)
| 1,999 | 33.482759 | 72 | py |
pycpd | pycpd-master/examples/fish_constrained_deformable_3D_lowrank.py | from functools import partial
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pycpd import ConstrainedDeformableRegistration
import numpy as np
# Source scan loaded once at module level so the index lists below can be
# sized from it.
fish_source = np.loadtxt('data/fish_source.txt')
# Marker size for the highlighted (constrained) points in the plots.
marker_size = 100
# Number of target points kept when simulating a partially-missing target.
N_pts_include = 61
# Indices of the constrained correspondences in the original 2D clouds.
IDs = [1,10,20,30]
# The 3D clouds are two stacked copies of the 2D cloud, so each constrained
# index appears twice: once per copy, offset by the copy's point count.
IDs_Y = IDs + [fish_source.shape[0] + i for i in IDs]
IDs_X = IDs + [N_pts_include + i for i in IDs]
def visualize(iteration, error, X, Y, ax):
    """Scatter-plot the 3D clouds, highlighting the constrained points."""
    plt.cla()
    # Split indices into free points and the constrained correspondences.
    free_X = np.delete(np.arange(X.shape[0]), IDs_X)
    free_Y = np.delete(np.arange(Y.shape[0]), IDs_Y)
    ax.scatter(X[free_X, 0], X[free_X, 1], X[free_X, 2], color='red', label='Target')
    ax.scatter(Y[free_Y, 0], Y[free_Y, 1], Y[free_Y, 2], color='blue', label='Source')
    ax.scatter(X[IDs_X, 0], X[IDs_X, 1], X[IDs_X, 2], color='red', label='Target Constrained', s=marker_size, facecolors='none')
    ax.scatter(Y[IDs_Y, 0], Y[IDs_Y, 1], Y[IDs_Y, 2], color='green', label='Source Constrained', s=marker_size, marker=(5, 1))
    status = 'Iteration: {:d}'.format(iteration)
    ax.text2D(0.87, 0.92, status, horizontalalignment='center',
              verticalalignment='center', transform=ax.transAxes, fontsize='x-large')
    ax.legend(loc='upper left', fontsize='x-large')
    plt.draw()
    plt.pause(0.001)
def main():
    """Constrained low-rank deformable registration on partial 3D fish data."""
    fish_target = np.loadtxt('data/fish_target.txt')
    # Simulate a point cloud missing certain parts. Use the module-level
    # constant (previously a hard-coded 61) so the constrained index list
    # IDs_X stays consistent with the truncation.
    fish_target = fish_target[:N_pts_include]
    X1 = np.zeros((fish_target.shape[0], fish_target.shape[1] + 1))
    X1[:, :-1] = fish_target
    X2 = np.ones((fish_target.shape[0], fish_target.shape[1] + 1))
    X2[:, :-1] = fish_target
    X = np.vstack((X1, X2))
    fish_source = np.loadtxt('data/fish_source.txt')
    Y1 = np.zeros((fish_source.shape[0], fish_source.shape[1] + 1))
    Y1[:, :-1] = fish_source
    Y2 = np.ones((fish_source.shape[0], fish_source.shape[1] + 1))
    Y2[:, :-1] = fish_source
    Y = np.vstack((Y1, Y2))
    # Select fixed correspondences between source and target points.
    src_id = np.int32(IDs_Y)
    tgt_id = np.int32(IDs_X)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    callback = partial(visualize, ax=ax)
    reg = ConstrainedDeformableRegistration(**{'X': X, 'Y': Y, 'low_rank': True}, e_alpha = 1e-8, source_id = src_id, target_id = tgt_id)
    reg.register(callback)
    plt.show()
if __name__ == '__main__':
main()
| 2,401 | 32.830986 | 137 | py |
pycpd | pycpd-master/examples/fish_affine_2D.py | from functools import partial
import matplotlib.pyplot as plt
from pycpd import AffineRegistration
import numpy as np
def visualize(iteration, error, X, Y, ax):
    """Plot target vs. source with the current iteration count and error."""
    plt.cla()
    ax.scatter(X[:, 0], X[:, 1], color='red', label='Target')
    ax.scatter(Y[:, 0], Y[:, 1], color='blue', label='Source')
    status = 'Iteration: {:d}\nQ: {:06.4f}'.format(iteration, error)
    plt.text(0.87, 0.92, status, horizontalalignment='center',
             verticalalignment='center', transform=ax.transAxes, fontsize='x-large')
    ax.legend(loc='upper left', fontsize='x-large')
    plt.draw()
    plt.pause(0.001)
def main(true_affine=True):
    """Register the fish clouds under an affine model.

    When true_affine is True the target is a synthetic affine warp of the
    source, so the transform can be recovered exactly; otherwise the real
    source scan is used.
    """
    X = np.loadtxt('data/fish_target.txt')
    if true_affine is True:
        theta = np.pi / 6.0
        rotation = np.array([[np.cos(theta), -np.sin(theta)],
                             [np.sin(theta), np.cos(theta)]])
        shear = np.array([[1, 0.5], [0, 1]])
        # Compose rotation and shear into one affine matrix.
        R = np.dot(rotation, shear)
        t = np.array([0.5, 1.0])
        Y = np.dot(X, R) + t
    else:
        Y = np.loadtxt('data/fish_source.txt')
    fig = plt.figure()
    fig.add_axes([0, 0, 1, 1])
    callback = partial(visualize, ax=fig.axes[0])
    reg = AffineRegistration(X=X, Y=Y)
    reg.register(callback)
    plt.show()
if __name__ == '__main__':
main(true_affine=True)
| 1,248 | 29.463415 | 128 | py |
pycpd | pycpd-master/examples/fish_deformable_3D_register_with_subset_of_points.py | from functools import partial
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pycpd import DeformableRegistration
import numpy as np
def visualize(iteration, error, X, Y, ax):
    """Draw the current state of the 3D registration."""
    plt.cla()
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], color='red', label='Target')
    ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2], color='blue', label='Source')
    status = 'Iteration: {:d}'.format(iteration)
    ax.text2D(0.87, 0.92, status, horizontalalignment='center',
              verticalalignment='center', transform=ax.transAxes, fontsize='x-large')
    ax.legend(loc='upper left', fontsize='x-large')
    plt.draw()
    plt.pause(0.001)
def main():
    """Register with only half the source points, then warp the full cloud."""
    def lift(points):
        # Stack two copies of the 2D cloud, at z = 0 and z = 1.
        z0 = np.hstack((points, np.zeros((points.shape[0], 1))))
        z1 = np.hstack((points, np.ones((points.shape[0], 1))))
        return np.vstack((z0, z1))

    X = lift(np.loadtxt('data/fish_target.txt'))
    Y = lift(np.loadtxt('data/fish_source.txt'))
    # Register using only every other source point.
    Ysubset = Y[1::2, :]
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    callback = partial(visualize, ax=ax)
    reg = DeformableRegistration(X=X, Y=Ysubset)
    reg.register(callback)
    # Apply the fitted deformation to the complete source cloud.
    YT = reg.transform_point_cloud(Y=Y)
    ax.scatter(YT[:, 0], YT[:, 1], YT[:, 2],
               alpha=0.5,
               color='green',
               label='Source - Full Data')
    ax.legend(loc='upper left', fontsize='x-large')
    plt.show()
if __name__ == '__main__':
main()
| 1,710 | 29.553571 | 121 | py |
pycpd | pycpd-master/examples/fish_rigid_3D.py | from functools import partial
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pycpd import RigidRegistration
import numpy as np
def visualize(iteration, error, X, Y, ax):
    """Draw the current 3D alignment with iteration count and error."""
    plt.cla()
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], color='red', label='Target')
    ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2], color='blue', label='Source')
    status = 'Iteration: {:d}\nQ: {:06.4f}'.format(iteration, error)
    ax.text2D(0.87, 0.92, status, horizontalalignment='center',
              verticalalignment='center', transform=ax.transAxes, fontsize='x-large')
    ax.legend(loc='upper left', fontsize='x-large')
    plt.draw()
    plt.pause(0.001)
def main(true_rigid=True):
    """Register stacked 3D fish clouds under a rigid model."""
    fish_target = np.loadtxt('data/fish_target.txt')
    z0 = np.hstack((fish_target, np.zeros((fish_target.shape[0], 1))))
    z1 = np.hstack((fish_target, np.ones((fish_target.shape[0], 1))))
    X = np.vstack((z0, z1))
    if true_rigid is True:
        # Rotate about the z-axis and translate in the x-y plane.
        theta = np.pi / 6.0
        R = np.array([[np.cos(theta), -np.sin(theta), 0],
                      [np.sin(theta), np.cos(theta), 0],
                      [0, 0, 1]])
        t = np.array([0.5, 1.0, 0.0])
        Y = np.dot(X, R) + t
    else:
        fish_source = np.loadtxt('data/fish_source.txt')
        w0 = np.hstack((fish_source, np.zeros((fish_source.shape[0], 1))))
        w1 = np.hstack((fish_source, np.ones((fish_source.shape[0], 1))))
        Y = np.vstack((w0, w1))
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    callback = partial(visualize, ax=ax)
    reg = RigidRegistration(X=X, Y=Y)
    reg.register(callback)
    plt.show()
if __name__ == '__main__':
main(true_rigid=True)
| 1,740 | 33.137255 | 128 | py |
pycpd | pycpd-master/examples/fish_deformable_2D.py | from functools import partial
import matplotlib.pyplot as plt
from pycpd import DeformableRegistration
import numpy as np
import time
def visualize(iteration, error, X, Y, ax):
    """Draw the current 2D alignment with the iteration count."""
    plt.cla()
    ax.scatter(X[:, 0], X[:, 1], color='red', label='Target')
    ax.scatter(Y[:, 0], Y[:, 1], color='blue', label='Source')
    status = 'Iteration: {:d}'.format(iteration)
    plt.text(0.87, 0.92, status, horizontalalignment='center',
             verticalalignment='center', transform=ax.transAxes, fontsize='x-large')
    ax.legend(loc='upper left', fontsize='x-large')
    plt.draw()
    plt.pause(0.001)
def main():
    """Run 2D deformable registration between the fish scans."""
    target = np.loadtxt('data/fish_target.txt')
    source = np.loadtxt('data/fish_source.txt')
    fig = plt.figure()
    fig.add_axes([0, 0, 1, 1])
    callback = partial(visualize, ax=fig.axes[0])
    reg = DeformableRegistration(X=target, Y=source)
    reg.register(callback)
    plt.show()
if __name__ == '__main__':
main()
| 922 | 26.147059 | 121 | py |
pycpd | pycpd-master/examples/bunny_rigid_3D.py | import argparse
from functools import partial
import matplotlib.pyplot as plt
from pycpd import RigidRegistration
import numpy as np
import os
def visualize(iteration, error, X, Y, ax, fig, save_fig=False):
    """Draw the current alignment; optionally save each frame to disk."""
    plt.cla()
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], color='red', label='Target')
    ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2], color='blue', label='Source')
    status = 'Iteration: {:d}\nQ: {:06.4f}'.format(iteration, error)
    ax.text2D(0.87, 0.92, status, horizontalalignment='center',
              verticalalignment='center', transform=ax.transAxes, fontsize='x-large')
    ax.legend(loc='upper left', fontsize='x-large')
    ax.view_init(90, -90)
    if save_fig is True:
        ax.set_axis_off()
    plt.draw()
    if save_fig is True:
        # Used for making gif.
        os.makedirs("./images/rigid_bunny/", exist_ok=True)
        fig.savefig("./images/rigid_bunny/rigid_bunny_3D_{:04}.tiff".format(iteration), dpi=600)
    plt.pause(0.001)
def main(save=False):
    """Run rigid registration on the bunny clouds.

    Parameters
    ----------
    save: bool or list
        Whether to save each frame. Via argparse with nargs='+' this
        arrives as a list whose first element carries the flag.
    """
    X = np.loadtxt('data/bunny_target.txt')
    # synthetic data, equaivalent to X + 1
    Y = np.loadtxt('data/bunny_source.txt')
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # isinstance instead of `type(...) is list`; the stray debug print of
    # `save` is removed.
    save_fig = save[0] if isinstance(save, list) else save
    callback = partial(visualize, ax=ax, fig=fig, save_fig=save_fig)
    reg = RigidRegistration(**{'X': X, 'Y': Y})
    reg.register(callback)
    plt.show()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Rigid registration example")
    # NOTE(review): argparse's type=bool does not parse "False" as False --
    # any non-empty string is truthy, so `-s False` yields True. An
    # action="store_true" flag would behave as users expect, but would
    # change the CLI -- confirm before switching.
    parser.add_argument(
        "-s",
        "--save",
        type=bool,
        nargs="+",
        default=False,
        help="True or False - to save figures of the example for a GIF etc.",
    )
    args = parser.parse_args()
    print(args)
    main(**vars(args))
| 1,749 | 29.701754 | 128 | py |
pycpd | pycpd-master/examples/fish_constrained_deformable_2D.py | from functools import partial
import matplotlib.pyplot as plt
from pycpd import ConstrainedDeformableRegistration
import numpy as np
import time
IDs = [1,10,20,30]
marker_size = 100
def visualize(iteration, error, X, Y, ax):
    """Plot the 2D clouds, highlighting the constrained correspondences."""
    plt.cla()
    # Split indices into free points and the constrained correspondences.
    free_X = np.delete(np.arange(X.shape[0]), IDs)
    free_Y = np.delete(np.arange(Y.shape[0]), IDs)
    ax.scatter(X[free_X, 0], X[free_X, 1], color='red', label='Target')
    ax.scatter(Y[free_Y, 0], Y[free_Y, 1], color='blue', label='Source')
    ax.scatter(X[IDs, 0], X[IDs, 1], color='blue', label='Target Constrained', s=marker_size, facecolors='none')
    ax.scatter(Y[IDs, 0], Y[IDs, 1], color='green', label='Source Constrained', s=marker_size, marker=(5, 1))
    status = 'Iteration: {:d}'.format(iteration)
    plt.text(0.87, 0.92, status, horizontalalignment='center',
             verticalalignment='center', transform=ax.transAxes, fontsize='x-large')
    ax.legend(loc='upper left', fontsize='x-large')
    plt.draw()
    plt.pause(0.001)
def main():
    """Constrained deformable registration on a partially-missing target."""
    X = np.loadtxt('data/fish_target.txt')
    Y = np.loadtxt('data/fish_source.txt')
    # Simulate a point cloud missing certain parts.
    X = X[:61]
    # Select fixed correspondences.
    src_id = np.int32(IDs)
    tgt_id = np.int32(IDs)
    fig = plt.figure()
    fig.add_axes([0, 0, 1, 1])
    callback = partial(visualize, ax=fig.axes[0])
    # e_alpha can be tuned (default: 1e-8); the smaller the value, the more
    # confidence it will have in the correspondence.
    reg = ConstrainedDeformableRegistration(X=X, Y=Y, e_alpha=1e-8, source_id=src_id, target_id=tgt_id)
    reg.register(callback)
    plt.show()
if __name__ == '__main__':
main()
| 1,703 | 30.555556 | 121 | py |
pycpd | pycpd-master/examples/fish_deformable_3D_lowrank.py | from functools import partial
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pycpd import DeformableRegistration
import numpy as np
def visualize(iteration, error, X, Y, ax):
    """Draw the current state of the 3D registration."""
    plt.cla()
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], color='red', label='Target')
    ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2], color='blue', label='Source')
    status = 'Iteration: {:d}'.format(iteration)
    ax.text2D(0.87, 0.92, status, horizontalalignment='center',
              verticalalignment='center', transform=ax.transAxes, fontsize='x-large')
    ax.legend(loc='upper left', fontsize='x-large')
    plt.draw()
    plt.pause(0.001)
def main():
    """Low-rank deformable registration of stacked 3D fish clouds."""
    def lift(points):
        # Stack two copies of the 2D cloud, at z = 0 and z = 1.
        z0 = np.hstack((points, np.zeros((points.shape[0], 1))))
        z1 = np.hstack((points, np.ones((points.shape[0], 1))))
        return np.vstack((z0, z1))

    X = lift(np.loadtxt('data/fish_target.txt'))
    Y = lift(np.loadtxt('data/fish_source.txt'))
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    callback = partial(visualize, ax=ax)
    reg = DeformableRegistration(**{'X': X, 'Y': Y, 'low_rank': True})
    reg.register(callback)
    plt.show()
if __name__ == '__main__':
main()
| 1,457 | 31.4 | 121 | py |
pycpd | pycpd-master/examples/fish_constrained_deformable_3D.py | from functools import partial
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pycpd import ConstrainedDeformableRegistration
import numpy as np
# Source scan loaded once at module level so the index lists below can be
# sized from it.
fish_source = np.loadtxt('data/fish_source.txt')
# Marker size for the highlighted (constrained) points in the plots.
marker_size = 100
# Number of target points kept when simulating a partially-missing target.
N_pts_include = 61
# Indices of the constrained correspondences in the original 2D clouds.
IDs = [1,10,20,30]
# The 3D clouds are two stacked copies of the 2D cloud, so each constrained
# index appears twice: once per copy, offset by the copy's point count.
IDs_Y = IDs + [fish_source.shape[0] + i for i in IDs]
IDs_X = IDs + [N_pts_include + i for i in IDs]
def visualize(iteration, error, X, Y, ax):
    """Scatter-plot the 3D clouds, highlighting the constrained points."""
    plt.cla()
    # Split indices into free points and the constrained correspondences.
    free_X = np.delete(np.arange(X.shape[0]), IDs_X)
    free_Y = np.delete(np.arange(Y.shape[0]), IDs_Y)
    ax.scatter(X[free_X, 0], X[free_X, 1], X[free_X, 2], color='red', label='Target')
    ax.scatter(Y[free_Y, 0], Y[free_Y, 1], Y[free_Y, 2], color='blue', label='Source')
    ax.scatter(X[IDs_X, 0], X[IDs_X, 1], X[IDs_X, 2], color='red', label='Target Constrained', s=marker_size, facecolors='none')
    ax.scatter(Y[IDs_Y, 0], Y[IDs_Y, 1], Y[IDs_Y, 2], color='green', label='Source Constrained', s=marker_size, marker=(5, 1))
    status = 'Iteration: {:d}'.format(iteration)
    ax.text2D(0.87, 0.92, status, horizontalalignment='center',
              verticalalignment='center', transform=ax.transAxes, fontsize='x-large')
    ax.legend(loc='upper left', fontsize='x-large')
    plt.draw()
    plt.pause(0.001)
def main():
    """Constrained deformable registration on partial 3D fish data."""
    fish_target = np.loadtxt('data/fish_target.txt')
    # Simulate a point cloud missing certain parts.
    fish_target = fish_target[:N_pts_include]
    X1 = np.zeros((fish_target.shape[0], fish_target.shape[1] + 1))
    X1[:, :-1] = fish_target
    X2 = np.ones((fish_target.shape[0], fish_target.shape[1] + 1))
    X2[:, :-1] = fish_target
    X = np.vstack((X1, X2))
    fish_source = np.loadtxt('data/fish_source.txt')
    Y1 = np.zeros((fish_source.shape[0], fish_source.shape[1] + 1))
    Y1[:, :-1] = fish_source
    Y2 = np.ones((fish_source.shape[0], fish_source.shape[1] + 1))
    Y2[:, :-1] = fish_source
    Y = np.vstack((Y1, Y2))
    # (Leftover debug prints of Y1.shape / X1.shape removed.)
    # Select fixed correspondences between source and target points.
    src_id = np.int32(IDs_Y)
    tgt_id = np.int32(IDs_X)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    callback = partial(visualize, ax=ax)
    reg = ConstrainedDeformableRegistration(**{'X': X, 'Y': Y}, e_alpha = 1e-8, source_id = src_id, target_id = tgt_id)
    reg.register(callback)
    plt.show()
if __name__ == '__main__':
main()
| 2,395 | 30.526316 | 129 | py |
pycpd | pycpd-master/examples/fish_affine_3D.py | from functools import partial
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pycpd import AffineRegistration
import numpy as np
def visualize(iteration, error, X, Y, ax):
    """Draw the current 3D alignment with iteration count and error."""
    plt.cla()
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], color='red', label='Target')
    ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2], color='blue', label='Source')
    status = 'Iteration: {:d}\nQ: {:06.4f}'.format(iteration, error)
    ax.text2D(0.87, 0.92, status, horizontalalignment='center',
              verticalalignment='center', transform=ax.transAxes, fontsize='x-large')
    ax.legend(loc='upper left', fontsize='x-large')
    plt.draw()
    plt.pause(0.001)
def main(true_affine=True):
    """Register stacked 3D fish clouds under an affine model."""
    fish_target = np.loadtxt('data/fish_target.txt')
    z0 = np.hstack((fish_target, np.zeros((fish_target.shape[0], 1))))
    z1 = np.hstack((fish_target, np.ones((fish_target.shape[0], 1))))
    X = np.vstack((z0, z1))
    if true_affine is True:
        theta = np.pi / 6.0
        R = np.array([[np.cos(theta), -np.sin(theta), 0],
                      [np.sin(theta), np.cos(theta), 0],
                      [0, 0, 1]])
        t = np.array([0.5, 1.0, 0.0])
        # Compose the rotation with a shear to get a full affine map.
        shear_matrix = [[1, 0, 0.5], [0, 1, 4], [0, 1, 1]]
        R = np.dot(R, shear_matrix)
        Y = np.dot(X, R) + t
    else:
        fish_source = np.loadtxt('data/fish_source.txt')
        w0 = np.hstack((fish_source, np.zeros((fish_source.shape[0], 1))))
        w1 = np.hstack((fish_source, np.ones((fish_source.shape[0], 1))))
        Y = np.vstack((w0, w1))
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    callback = partial(visualize, ax=ax)
    reg = AffineRegistration(X=X, Y=Y)
    reg.register(callback)
    plt.show()
if __name__ == '__main__':
main(true_affine=True)
| 1,873 | 31.877193 | 128 | py |
pycpd | pycpd-master/examples/fish_rigid_2D.py | from functools import partial
import matplotlib.pyplot as plt
from pycpd import RigidRegistration
import numpy as np
def visualize(iteration, error, X, Y, ax):
    """Draw the current 2D alignment with iteration count and error."""
    plt.cla()
    ax.scatter(X[:, 0], X[:, 1], color='red', label='Target')
    ax.scatter(Y[:, 0], Y[:, 1], color='blue', label='Source')
    status = 'Iteration: {:d}\nQ: {:06.4f}'.format(iteration, error)
    plt.text(0.87, 0.92, status, horizontalalignment='center',
             verticalalignment='center', transform=ax.transAxes, fontsize='x-large')
    ax.legend(loc='upper left', fontsize='x-large')
    plt.draw()
    plt.pause(0.001)
def main(true_rigid=True):
    """Register the fish clouds under a rigid (rotation + translation) model."""
    X = np.loadtxt('data/fish_target.txt')
    if true_rigid is True:
        theta = np.pi / 6.0
        R = np.array([[np.cos(theta), -np.sin(theta)],
                      [np.sin(theta), np.cos(theta)]])
        t = np.array([0.5, 1.0])
        Y = np.dot(X, R) + t
    else:
        Y = np.loadtxt('data/fish_source.txt')
    fig = plt.figure()
    fig.add_axes([0, 0, 1, 1])
    callback = partial(visualize, ax=fig.axes[0])
    reg = RigidRegistration(X=X, Y=Y)
    reg.register(callback)
    plt.show()
if __name__ == '__main__':
main(true_rigid=True)
| 1,169 | 29 | 128 | py |
pycpd | pycpd-master/examples/fish_deformable_3D.py | from functools import partial
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pycpd import DeformableRegistration
import numpy as np
def visualize(iteration, error, X, Y, ax):
    """Draw the current state of the 3D registration."""
    plt.cla()
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], color='red', label='Target')
    ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2], color='blue', label='Source')
    status = 'Iteration: {:d}'.format(iteration)
    ax.text2D(0.87, 0.92, status, horizontalalignment='center',
              verticalalignment='center', transform=ax.transAxes, fontsize='x-large')
    ax.legend(loc='upper left', fontsize='x-large')
    plt.draw()
    plt.pause(0.001)
def main():
    """Deformable registration of stacked 3D fish clouds."""
    def lift(points):
        # Stack two copies of the 2D cloud, at z = 0 and z = 1.
        z0 = np.hstack((points, np.zeros((points.shape[0], 1))))
        z1 = np.hstack((points, np.ones((points.shape[0], 1))))
        return np.vstack((z0, z1))

    X = lift(np.loadtxt('data/fish_target.txt'))
    Y = lift(np.loadtxt('data/fish_source.txt'))
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    callback = partial(visualize, ax=ax)
    reg = DeformableRegistration(X=X, Y=Y)
    reg.register(callback)
    plt.show()
if __name__ == '__main__':
main()
| 1,439 | 31 | 121 | py |
pycpd | pycpd-master/pycpd/constrained_deformable_registration.py | from builtins import super
import numpy as np
import numbers
from .deformable_registration import DeformableRegistration
class ConstrainedDeformableRegistration(DeformableRegistration):
    """
    Constrained deformable registration.
    Attributes
    ----------
    alpha: float (positive)
        Represents the trade-off between the goodness of maximum likelihood fit and regularization.
    beta: float(positive)
        Width of the Gaussian kernel.
    e_alpha: float (positive)
        Reliability of correspondence priors. Between 1e-8 (very reliable) and 1 (very unreliable)
    source_id: numpy.ndarray (int)
        Indices for the points to be used as correspondences in the source array
    target_id: numpy.ndarray (int)
        Indices for the points to be used as correspondences in the target array
    """
    def __init__(self, e_alpha = None, source_id = None, target_id= None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if e_alpha is not None and (not isinstance(e_alpha, numbers.Number) or e_alpha <= 0):
            raise ValueError(
                "Expected a positive value for regularization parameter e_alpha. Instead got: {}".format(e_alpha))
        if type(source_id) is not np.ndarray or source_id.ndim != 1:
            raise ValueError(
                "The source ids (source_id) must be a 1D numpy array of ints.")
        if type(target_id) is not np.ndarray or target_id.ndim != 1:
            raise ValueError(
                "The target ids (target_id) must be a 1D numpy array of ints.")
        self.e_alpha = 1e-8 if e_alpha is None else e_alpha
        self.source_id = source_id
        self.target_id = target_id
        # P_tilde is the fixed correspondence-prior matrix: entry (m, n) is 1
        # when source point m is constrained to match target point n.
        self.P_tilde = np.zeros((self.M, self.N))
        self.P_tilde[self.source_id, self.target_id] = 1
        # Row sums and prior-weighted target coordinates; both are constant
        # across iterations, so they are precomputed here.
        self.P1_tilde = np.sum(self.P_tilde, axis=1)
        self.PX_tilde = np.dot(self.P_tilde, self.X)
    def update_transform(self):
        """
        Calculate a new estimate of the deformable transformation.
        See Eq. 22 of https://arxiv.org/pdf/0905.2635.pdf.
        """
        if self.low_rank is False:
            # Same linear system as the unconstrained deformable case, plus
            # extra terms (weighted by sigma2 / e_alpha) that pull the
            # constrained source points toward their fixed targets.
            A = np.dot(np.diag(self.P1), self.G) + \
                self.sigma2*(1/self.e_alpha)*np.dot(np.diag(self.P1_tilde), self.G) + \
                self.alpha * self.sigma2 * np.eye(self.M)
            B = self.PX - np.dot(np.diag(self.P1), self.Y) + self.sigma2*(1/self.e_alpha)*(self.PX_tilde - np.dot(np.diag(self.P1_tilde), self.Y))
            self.W = np.linalg.solve(A, B)
        elif self.low_rank is True:
            # Matlab code equivalent can be found here:
            # https://github.com/markeroon/matlab-computer-vision-routines/tree/master/third_party/CoherentPointDrift
            dP = np.diag(self.P1) + self.sigma2*(1/self.e_alpha)*np.diag(self.P1_tilde)
            dPQ = np.matmul(dP, self.Q)
            F = self.PX - np.dot(np.diag(self.P1), self.Y) + self.sigma2*(1/self.e_alpha)*(self.PX_tilde - np.dot(np.diag(self.P1_tilde), self.Y))
            # Solve in the low-rank eigenbasis (Q, S) of the kernel G
            # instead of forming the full M x M system.
            self.W = 1 / (self.alpha * self.sigma2) * (F - np.matmul(dPQ, (
                np.linalg.solve((self.alpha * self.sigma2 * self.inv_S + np.matmul(self.Q.T, dPQ)),
                                (np.matmul(self.Q.T, F))))))
            QtW = np.matmul(self.Q.T, self.W)
            self.E = self.E + self.alpha / 2 * np.trace(np.matmul(QtW.T, np.matmul(self.S, QtW)))
pycpd | pycpd-master/pycpd/affine_registration.py | from builtins import super
import numpy as np
from .emregistration import EMRegistration
from .utility import is_positive_semi_definite
class AffineRegistration(EMRegistration):
    """
    Affine registration.
    Attributes
    ----------
    B: numpy array (semi-positive definite)
        DxD affine transformation matrix.
    t: numpy array
        1xD initial translation vector.
    """
    # Additional parameters used in this class, but not inputs.
    # YPY: float
    #     Denominator value used to update the scale factor.
    #     Defined in Fig. 2 and Eq. 8 of https://arxiv.org/pdf/0905.2635.pdf.
    # X_hat: numpy array
    #     Centered target point cloud.
    #     Defined in Fig. 2 of https://arxiv.org/pdf/0905.2635.pdf
    def __init__(self, B=None, t=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if B is not None and ((B.ndim != 2) or (B.shape[0] != self.D) or (B.shape[1] != self.D) or not is_positive_semi_definite(B)):
            raise ValueError(
                'The rotation matrix can only be initialized to {}x{} positive semi definite matrices. Instead got: {}.'.format(self.D, self.D, B))
        if t is not None and ((t.ndim != 2) or (t.shape[0] != 1) or (t.shape[1] != self.D)):
            raise ValueError(
                'The translation vector can only be initialized to 1x{} positive semi definite matrices. Instead got: {}.'.format(self.D, t))
        # Default to the identity transform (no rotation/shear, no shift).
        self.B = np.eye(self.D) if B is None else B
        self.t = np.atleast_2d(np.zeros((1, self.D))) if t is None else t
        # Intermediates filled in by update_transform on each EM iteration.
        self.YPY = None
        self.X_hat = None
        self.A = None
    def update_transform(self):
        """
        Calculate a new estimate of the affine transformation.
        """
        # source and target point cloud means
        muX = np.divide(np.sum(self.PX, axis=0), self.Np)
        muY = np.divide(
            np.sum(np.dot(np.transpose(self.P), self.Y), axis=0), self.Np)
        # Center both clouds about their (responsibility-weighted) means.
        self.X_hat = self.X - np.tile(muX, (self.N, 1))
        Y_hat = self.Y - np.tile(muY, (self.M, 1))
        self.A = np.dot(np.transpose(self.X_hat), np.transpose(self.P))
        self.A = np.dot(self.A, Y_hat)
        self.YPY = np.dot(np.transpose(Y_hat), np.diag(self.P1))
        self.YPY = np.dot(self.YPY, Y_hat)
        # Calculate the new estimate of affine parameters using update rules for (B, t)
        # as defined in Fig. 3 of https://arxiv.org/pdf/0905.2635.pdf.
        self.B = np.linalg.solve(np.transpose(self.YPY), np.transpose(self.A))
        self.t = np.transpose(
            muX) - np.dot(np.transpose(self.B), np.transpose(muY))
    def transform_point_cloud(self, Y=None):
        """
        Update a point cloud using the new estimate of the affine transformation.
        Attributes
        ----------
        Y: numpy array, optional
            Array of points to transform - use to predict on new set of points.
            Best for predicting on new points not used to run initial registration.
            If None, self.Y used.
        Returns
        -------
        If Y is None, returns None.
        Otherwise, returns the transformed Y.
        """
        if Y is None:
            self.TY = np.dot(self.Y, self.B) + np.tile(self.t, (self.M, 1))
            return
        else:
            return np.dot(Y, self.B) + np.tile(self.t, (Y.shape[0], 1))
    def update_variance(self):
        """
        Update the variance of the mixture model using the new estimate of the affine transformation.
        See the update rule for sigma2 in Fig. 3 of of https://arxiv.org/pdf/0905.2635.pdf.
        """
        qprev = self.q
        trAB = np.trace(np.dot(self.A, self.B))
        xPx = np.dot(np.transpose(self.Pt1), np.sum(
            np.multiply(self.X_hat, self.X_hat), axis=1))
        trBYPYP = np.trace(np.dot(np.dot(self.B, self.YPY), self.B))
        # Objective value; its change between iterations drives convergence.
        self.q = (xPx - 2 * trAB + trBYPYP) / (2 * self.sigma2) + \
            self.D * self.Np/2 * np.log(self.sigma2)
        self.diff = np.abs(self.q - qprev)
        self.sigma2 = (xPx - trAB) / (self.Np * self.D)
        # Clamp to a small positive value to keep the Gaussians well-defined.
        if self.sigma2 <= 0:
            self.sigma2 = self.tolerance / 10
    def get_registration_parameters(self):
        """
        Return the current estimate of the affine transformation parameters.
        Returns
        -------
        B: numpy array
            DxD affine transformation matrix.
        t: numpy array
            1xD translation vector.
        """
        return self.B, self.t
| 4,535 | 33.892308 | 147 | py |
pycpd | pycpd-master/pycpd/utility.py | import numpy as np
def is_positive_semi_definite(R):
    """Return True if the square matrix R is positive semi-definite.

    Raises
    ------
    ValueError
        If R is not a numpy array.
    """
    if not isinstance(R, (np.ndarray, np.generic)):
        raise ValueError('Encountered an error while checking if the matrix is positive semi definite. \
            Expected a numpy array, instead got : {}'.format(R))
    # Positive SEMI-definite admits zero eigenvalues; the previous strict
    # '> 0' comparison actually tested positive definiteness, contradicting
    # the function name and its callers' documented contract.
    return np.all(np.linalg.eigvals(R) >= 0)
def gaussian_kernel(X, beta, Y=None):
    """Gaussian (RBF) kernel matrix between point sets X and Y.

    Returns K with K[i, j] = exp(-||X[i] - Y[j]||^2 / (2 * beta**2)).
    When Y is omitted, the kernel of X with itself is returned.
    """
    if Y is None:
        Y = X
    # Pairwise squared Euclidean distances via broadcasting.
    pairwise = X[:, None, :] - Y[None, :, :]
    sq_dist = np.sum(pairwise ** 2, axis=2)
    return np.exp(-sq_dist / (2 * beta ** 2))
def low_rank_eigen(G, num_eig):
    """
    Calculate num_eig eigenvectors and eigenvalues of gaussian matrix G.
    Enables lower dimensional solving.
    """
    values, vectors = np.linalg.eigh(G)
    # Keep the num_eig eigenpairs of largest magnitude, descending.
    order = np.argsort(np.abs(values))[::-1]
    keep = order[:num_eig]
    return vectors[:, keep], values[keep]
| 878 | 30.392857 | 104 | py |
pycpd | pycpd-master/pycpd/deformable_registration.py | from builtins import super
import numpy as np
import numbers
from .emregistration import EMRegistration
from .utility import gaussian_kernel, low_rank_eigen
class DeformableRegistration(EMRegistration):
    """
    Deformable registration.
    Attributes
    ----------
    alpha: float (positive)
        Represents the trade-off between the goodness of maximum likelihood fit and regularization.
    beta: float(positive)
        Width of the Gaussian kernel.
    low_rank: bool
        Whether to use low rank approximation.
    num_eig: int
        Number of eigenvectors to use in lowrank calculation.
    """
    def __init__(self, alpha=None, beta=None, low_rank=False, num_eig=100, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if alpha is not None and (not isinstance(alpha, numbers.Number) or alpha <= 0):
            raise ValueError(
                "Expected a positive value for regularization parameter alpha. Instead got: {}".format(alpha))
        if beta is not None and (not isinstance(beta, numbers.Number) or beta <= 0):
            raise ValueError(
                "Expected a positive value for the width of the coherent Gaussian kerenl. Instead got: {}".format(beta))
        self.alpha = 2 if alpha is None else alpha
        self.beta = 2 if beta is None else beta
        # W holds the deformation coefficients; the displacement field is G W.
        self.W = np.zeros((self.M, self.D))
        self.G = gaussian_kernel(self.Y, self.beta)
        self.low_rank = low_rank
        self.num_eig = num_eig
        if self.low_rank is True:
            # Precompute the truncated eigendecomposition of G so the M-step
            # can solve a num_eig-sized system instead of an M-sized one.
            self.Q, self.S = low_rank_eigen(self.G, self.num_eig)
            self.inv_S = np.diag(1./self.S)
            self.S = np.diag(self.S)
            self.E = 0.
    def update_transform(self):
        """
        Calculate a new estimate of the deformable transformation.
        See Eq. 22 of https://arxiv.org/pdf/0905.2635.pdf.
        """
        if self.low_rank is False:
            A = np.dot(np.diag(self.P1), self.G) + \
                self.alpha * self.sigma2 * np.eye(self.M)
            B = self.PX - np.dot(np.diag(self.P1), self.Y)
            self.W = np.linalg.solve(A, B)
        elif self.low_rank is True:
            # Matlab code equivalent can be found here:
            # https://github.com/markeroon/matlab-computer-vision-routines/tree/master/third_party/CoherentPointDrift
            dP = np.diag(self.P1)
            dPQ = np.matmul(dP, self.Q)
            F = self.PX - np.matmul(dP, self.Y)
            # Solve in the low-rank eigenbasis (Q, S) of the kernel G.
            self.W = 1 / (self.alpha * self.sigma2) * (F - np.matmul(dPQ, (
                np.linalg.solve((self.alpha * self.sigma2 * self.inv_S + np.matmul(self.Q.T, dPQ)),
                                (np.matmul(self.Q.T, F))))))
            QtW = np.matmul(self.Q.T, self.W)
            self.E = self.E + self.alpha / 2 * np.trace(np.matmul(QtW.T, np.matmul(self.S, QtW)))
    def transform_point_cloud(self, Y=None):
        """
        Update a point cloud using the new estimate of the deformable transformation.
        Attributes
        ----------
        Y: numpy array, optional
            Array of points to transform - use to predict on new set of points.
            Best for predicting on new points not used to run initial registration.
            If None, self.Y used.
        Returns
        -------
        If Y is None, returns None.
        Otherwise, returns the transformed Y.
        """
        if Y is not None:
            # Evaluate the kernel between the new points and the original
            # source cloud, then apply the fitted displacement field.
            G = gaussian_kernel(X=Y, beta=self.beta, Y=self.Y)
            return Y + np.dot(G, self.W)
        else:
            if self.low_rank is False:
                self.TY = self.Y + np.dot(self.G, self.W)
            elif self.low_rank is True:
                # G is approximated by Q S Q^T in the low-rank case.
                self.TY = self.Y + np.matmul(self.Q, np.matmul(self.S, np.matmul(self.Q.T, self.W)))
            return
    def update_variance(self):
        """
        Update the variance of the mixture model using the new estimate of the deformable transformation.
        See the update rule for sigma2 in Eq. 23 of of https://arxiv.org/pdf/0905.2635.pdf.
        """
        qprev = self.sigma2
        # The original CPD paper does not explicitly calculate the objective functional.
        # This functional will include terms from both the negative log-likelihood and
        # the Gaussian kernel used for regularization.
        self.q = np.inf
        xPx = np.dot(np.transpose(self.Pt1), np.sum(
            np.multiply(self.X, self.X), axis=1))
        yPy = np.dot(np.transpose(self.P1), np.sum(
            np.multiply(self.TY, self.TY), axis=1))
        trPXY = np.sum(np.multiply(self.TY, self.PX))
        self.sigma2 = (xPx - 2 * trPXY + yPy) / (self.Np * self.D)
        # Clamp to a small positive value to keep the Gaussians well-defined.
        if self.sigma2 <= 0:
            self.sigma2 = self.tolerance / 10
        # Here we use the difference between the current and previous
        # estimate of the variance as a proxy to test for convergence.
        self.diff = np.abs(self.sigma2 - qprev)
    def get_registration_parameters(self):
        """
        Return the current estimate of the deformable transformation parameters.
        Returns
        -------
        self.G: numpy array
            Gaussian kernel matrix.
        self.W: numpy array
            Deformable transformation matrix.
        """
        return self.G, self.W
| 5,308 | 35.613793 | 120 | py |
pycpd | pycpd-master/pycpd/__init__.py | """
This is a pure numpy implementation of the coherent point drift [CPD](https://arxiv.org/abs/0905.2635/)
algorithm by Myronenko and Song. It provides three registration methods for point clouds:
1. Scale and rigid registration
2. Affine registration
3. Gaussian regularized non-rigid registration
Licensed under an MIT License (c) 2010-2016 Siavash Khallaghi.
Distributed here: https://github.com/siavashk/pycpd
"""
from .rigid_registration import RigidRegistration
from .affine_registration import AffineRegistration
from .deformable_registration import gaussian_kernel, DeformableRegistration
from .constrained_deformable_registration import ConstrainedDeformableRegistration
| 685 | 39.352941 | 103 | py |
pycpd | pycpd-master/pycpd/rigid_registration.py | from builtins import super
import numpy as np
import numbers
from .emregistration import EMRegistration
from .utility import is_positive_semi_definite
class RigidRegistration(EMRegistration):
    """
    Rigid registration.

    Estimates a similarity transform (rotation R, translation t and an
    optional isotropic scale s) mapping the source cloud Y onto the target
    cloud X, following Fig. 2 of https://arxiv.org/pdf/0905.2635.pdf.

    Attributes
    ----------
    R: numpy array (semi-positive definite)
        DxD rotation matrix. Any well behaved matrix will do,
        since the next estimate is a rotation matrix.
    t: numpy array
        1xD initial translation vector.
    s: float (positive)
        scaling parameter.
    A: numpy array
        Utility array used to calculate the rotation matrix.
        Defined in Fig. 2 of https://arxiv.org/pdf/0905.2635.pdf.
    """
    # Additional parameters used in this class, but not inputs.
    # YPY: float
    #     Denominator value used to update the scale factor.
    #     Defined in Fig. 2 and Eq. 8 of https://arxiv.org/pdf/0905.2635.pdf.
    # X_hat: numpy array
    #     Centered target point cloud.
    #     Defined in Fig. 2 of https://arxiv.org/pdf/0905.2635.pdf.

    def __init__(self, R=None, t=None, s=None, scale=True, *args, **kwargs):
        # R, t, s are optional initial guesses, validated below and
        # defaulted to the identity transform. When scale is False, s is
        # kept fixed throughout the optimisation.
        super().__init__(*args, **kwargs)
        if self.D != 2 and self.D != 3:
            raise ValueError(
                'Rigid registration only supports 2D or 3D point clouds. Instead got {}.'.format(self.D))
        if R is not None and ((R.ndim != 2) or (R.shape[0] != self.D) or (R.shape[1] != self.D) or not is_positive_semi_definite(R)):
            raise ValueError(
                'The rotation matrix can only be initialized to {}x{} positive semi definite matrices. Instead got: {}.'.format(self.D, self.D, R))
        if t is not None and ((t.ndim != 2) or (t.shape[0] != 1) or (t.shape[1] != self.D)):
            raise ValueError(
                'The translation vector can only be initialized to 1x{} positive semi definite matrices. Instead got: {}.'.format(self.D, t))
        if s is not None and (not isinstance(s, numbers.Number) or s <= 0):
            raise ValueError(
                'The scale factor must be a positive number. Instead got: {}.'.format(s))
        self.R = np.eye(self.D) if R is None else R
        self.t = np.atleast_2d(np.zeros((1, self.D))) if t is None else t
        self.s = 1 if s is None else s
        self.scale = scale

    def update_transform(self):
        """
        Calculate a new estimate of the rigid transformation.
        """
        # target point cloud mean
        muX = np.divide(np.sum(self.PX, axis=0),
                        self.Np)
        # source point cloud mean
        muY = np.divide(
            np.sum(np.dot(np.transpose(self.P), self.Y), axis=0), self.Np)

        # centered target point cloud
        self.X_hat = self.X - np.tile(muX, (self.N, 1))
        # centered source point cloud
        Y_hat = self.Y - np.tile(muY, (self.M, 1))
        # weighted squared norm of centered source points; denominator of
        # the scale update (Eq. 8)
        self.YPY = np.dot(np.transpose(self.P1), np.sum(
            np.multiply(Y_hat, Y_hat), axis=1))

        self.A = np.dot(np.transpose(self.X_hat), np.transpose(self.P))
        self.A = np.dot(self.A, Y_hat)

        # Singular value decomposition as per lemma 1 of https://arxiv.org/pdf/0905.2635.pdf.
        U, _, V = np.linalg.svd(self.A, full_matrices=True)
        C = np.ones((self.D, ))
        # force a proper rotation (determinant +1, no reflection)
        C[self.D-1] = np.linalg.det(np.dot(U, V))

        # Calculate the rotation matrix using Eq. 9 of https://arxiv.org/pdf/0905.2635.pdf.
        self.R = np.transpose(np.dot(np.dot(U, np.diag(C)), V))
        # Update scale and translation using Fig. 2 of https://arxiv.org/pdf/0905.2635.pdf.
        if self.scale is True:
            self.s = np.trace(np.dot(np.transpose(self.A), np.transpose(self.R))) / self.YPY
        else:
            # scale optimisation disabled; keep the current value of s
            pass
        self.t = np.transpose(muX) - self.s * \
            np.dot(np.transpose(self.R), np.transpose(muY))

    def transform_point_cloud(self, Y=None):
        """
        Update a point cloud using the new estimate of the rigid transformation.

        Attributes
        ----------
        Y: numpy array
            Point cloud to be transformed - use to predict on new set of points.
            Best for predicting on new points not used to run initial registration.
            If None, self.Y used.

        Returns
        -------
        If Y is None, returns None.
        Otherwise, returns the transformed Y.
        """
        if Y is None:
            self.TY = self.s * np.dot(self.Y, self.R) + self.t
            return
        else:
            return self.s * np.dot(Y, self.R) + self.t

    def update_variance(self):
        """
        Update the variance of the mixture model using the new estimate of the rigid transformation.
        See the update rule for sigma2 in Fig. 2 of of https://arxiv.org/pdf/0905.2635.pdf.
        """
        qprev = self.q
        trAR = np.trace(np.dot(self.A, self.R))
        xPx = np.dot(np.transpose(self.Pt1), np.sum(
            np.multiply(self.X_hat, self.X_hat), axis=1))
        # objective functional; its change between iterations drives the
        # convergence test in EMRegistration.register
        self.q = (xPx - 2 * self.s * trAR + self.s * self.s * self.YPY) / \
            (2 * self.sigma2) + self.D * self.Np/2 * np.log(self.sigma2)
        self.diff = np.abs(self.q - qprev)
        self.sigma2 = (xPx - self.s * trAR) / (self.Np * self.D)
        if self.sigma2 <= 0:
            # guard against numerical collapse of the variance
            self.sigma2 = self.tolerance / 10

    def get_registration_parameters(self):
        """
        Return the current estimate of the rigid transformation parameters.

        Returns
        -------
        self.s: float
            Current estimate of the scale factor.
        self.R: numpy array
            Current estimate of the rotation matrix.
        self.t: numpy array
            Current estimate of the translation vector.
        """
        return self.s, self.R, self.t
| 5,743 | 35.820513 | 147 | py |
pycpd | pycpd-master/pycpd/emregistration.py | from __future__ import division
import numpy as np
import numbers
from warnings import warn
def initialize_sigma2(X, Y):
    """
    Initialize the variance (sigma2) of the Gaussian mixture model.

    The initial value is the mean squared distance over all target/source
    point pairs: sum_{m,n} ||x_n - y_m||^2 / (D * M * N).

    Attributes
    ----------
    X: numpy array
        NxD array of points for target.
    Y: numpy array
        MxD array of points for source.

    Returns
    -------
    sigma2: float
        Initial variance.
    """
    (N, D) = X.shape
    (M, _) = Y.shape
    # Expand ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y and sum analytically.
    # This avoids materialising the (M, N, D) broadcast tensor the naive
    # implementation built, reducing memory from O(M*N*D) to O((M+N)*D)
    # while producing the same value (up to float rounding).
    err = (M * np.sum(X ** 2)
           + N * np.sum(Y ** 2)
           - 2.0 * np.dot(X.sum(axis=0), Y.sum(axis=0)))
    return err / (D * M * N)
def lowrankQS(G, beta, num_eig, eig_fgt=False):
    """
    Truncated eigendecomposition of the Gaussian kernel matrix G.

    !!!
    The fast gauss transform branch is only a placeholder; it is not
    implemented.
    !!!

    Attributes
    ----------
    G: numpy array
        Gaussian kernel matrix.
    beta: float
        Width of the Gaussian kernel.
    num_eig: int
        Number of eigenvectors to use in lowrank calculation of G
    eig_fgt: bool
        If True, use fast gauss transform method to speed up.

    Returns
    -------
    (Q, S): the eigenvectors (as columns) and eigenvalues with the
    num_eig largest magnitudes.
    """
    if eig_fgt is True:
        raise Exception('Fast Gauss Transform Not Implemented!')
    if eig_fgt is False:
        # dense path: decompose the symmetric kernel matrix directly
        eigenvalues, eigenvectors = np.linalg.eigh(G)
        # indices of the num_eig largest-magnitude eigenvalues
        keep = list(np.argsort(np.abs(eigenvalues))[::-1][:num_eig])
        return eigenvectors[:, keep], eigenvalues[keep]
class EMRegistration(object):
    """
    Expectation maximization point cloud registration.

    Attributes
    ----------
    X: numpy array
        NxD array of target points.
    Y: numpy array
        MxD array of source points.
    TY: numpy array
        MxD array of transformed source points.
    sigma2: float (positive)
        Initial variance of the Gaussian mixture model.
    N: int
        Number of target points.
    M: int
        Number of source points.
    D: int
        Dimensionality of source and target points
    iteration: int
        The current iteration throughout registration.
    max_iterations: int
        Registration will terminate once the algorithm has taken this
        many iterations.
    tolerance: float (positive)
        Registration will terminate once the difference between
        consecutive objective function values falls within this tolerance.
    w: float (between 0 and 1)
        Contribution of the uniform distribution to account for outliers.
        Valid values span 0 (inclusive) and 1 (exclusive).
    q: float
        The objective function value that represents the misalignment between source
        and target point clouds.
    diff: float (positive)
        The absolute difference between the current and previous objective function values.
    P: numpy array
        MxN array of probabilities.
        P[m, n] represents the probability that the m-th source point
        corresponds to the n-th target point.
    Pt1: numpy array
        Nx1 column array.
        Multiplication result between the transpose of P and a column vector of all 1s.
    P1: numpy array
        Mx1 column array.
        Multiplication result between P and a column vector of all 1s.
    Np: float (positive)
        The sum of all elements in P.
    """

    def __init__(self, X, Y, sigma2=None, max_iterations=None, tolerance=None, w=None, *args, **kwargs):
        # --- validate all user-supplied parameters up front ---
        if type(X) is not np.ndarray or X.ndim != 2:
            raise ValueError(
                "The target point cloud (X) must be at a 2D numpy array.")

        if type(Y) is not np.ndarray or Y.ndim != 2:
            raise ValueError(
                "The source point cloud (Y) must be a 2D numpy array.")

        if X.shape[1] != Y.shape[1]:
            raise ValueError(
                "Both point clouds need to have the same number of dimensions.")

        if sigma2 is not None and (not isinstance(sigma2, numbers.Number) or sigma2 <= 0):
            raise ValueError(
                "Expected a positive value for sigma2 instead got: {}".format(sigma2))

        if max_iterations is not None and (not isinstance(max_iterations, numbers.Number) or max_iterations < 0):
            raise ValueError(
                "Expected a positive integer for max_iterations instead got: {}".format(max_iterations))
        elif isinstance(max_iterations, numbers.Number) and not isinstance(max_iterations, int):
            # tolerate float inputs such as 100.0 but warn about the cast
            warn("Received a non-integer value for max_iterations: {}. Casting to integer.".format(max_iterations))
            max_iterations = int(max_iterations)

        if tolerance is not None and (not isinstance(tolerance, numbers.Number) or tolerance < 0):
            raise ValueError(
                "Expected a positive float for tolerance instead got: {}".format(tolerance))

        if w is not None and (not isinstance(w, numbers.Number) or w < 0 or w >= 1):
            raise ValueError(
                "Expected a value between 0 (inclusive) and 1 (exclusive) for w instead got: {}".format(w))

        # --- initialise state; defaults applied where arguments were None ---
        self.X = X
        self.Y = Y
        self.TY = Y
        self.sigma2 = initialize_sigma2(X, Y) if sigma2 is None else sigma2
        (self.N, self.D) = self.X.shape
        (self.M, _) = self.Y.shape
        self.tolerance = 0.001 if tolerance is None else tolerance
        self.w = 0.0 if w is None else w
        self.max_iterations = 100 if max_iterations is None else max_iterations
        self.iteration = 0
        self.diff = np.inf
        self.q = np.inf
        # correspondence probabilities and their sufficient statistics,
        # populated by expectation()
        self.P = np.zeros((self.M, self.N))
        self.Pt1 = np.zeros((self.N, ))
        self.P1 = np.zeros((self.M, ))
        self.PX = np.zeros((self.M, self.D))
        self.Np = 0

    def register(self, callback=lambda **kwargs: None):
        """
        Perform the EM registration.

        Attributes
        ----------
        callback: function
            A function that will be called after each iteration.
            Can be used to visualize the registration process.

        Returns
        -------
        self.TY: numpy array
            MxD array of transformed source points.
        registration_parameters:
            Returned params dependent on registration method used.
        """
        self.transform_point_cloud()
        # iterate until either the iteration budget is spent or the
        # objective change drops below the tolerance
        while self.iteration < self.max_iterations and self.diff > self.tolerance:
            self.iterate()
            if callable(callback):
                kwargs = {'iteration': self.iteration,
                          'error': self.q, 'X': self.X, 'Y': self.TY}
                callback(**kwargs)

        return self.TY, self.get_registration_parameters()

    def get_registration_parameters(self):
        """
        Placeholder for child classes.
        """
        raise NotImplementedError(
            "Registration parameters should be defined in child classes.")

    def update_transform(self):
        """
        Placeholder for child classes.
        """
        raise NotImplementedError(
            "Updating transform parameters should be defined in child classes.")

    def transform_point_cloud(self):
        """
        Placeholder for child classes.
        """
        raise NotImplementedError(
            "Updating the source point cloud should be defined in child classes.")

    def update_variance(self):
        """
        Placeholder for child classes.
        """
        raise NotImplementedError(
            "Updating the Gaussian variance for the mixture model should be defined in child classes.")

    def iterate(self):
        """
        Perform one iteration of the EM algorithm.
        """
        self.expectation()
        self.maximization()
        self.iteration += 1

    def expectation(self):
        """
        Compute the expectation step of the EM algorithm.
        """
        # squared distances between every transformed source point and
        # every target point, turned into unnormalised Gaussian weights
        P = np.sum((self.X[None, :, :] - self.TY[:, None, :])**2, axis=2) # (M, N)
        P = np.exp(-P/(2*self.sigma2))
        # uniform-distribution term accounting for the outlier weight w
        c = (2*np.pi*self.sigma2)**(self.D/2)*self.w/(1. - self.w)*self.M/self.N

        den = np.sum(P, axis = 0, keepdims = True) # (1, N)
        # clip prevents division by zero for isolated target points
        den = np.clip(den, np.finfo(self.X.dtype).eps, None) + c

        self.P = np.divide(P, den)
        # sufficient statistics reused by the maximization step
        self.Pt1 = np.sum(self.P, axis=0)
        self.P1 = np.sum(self.P, axis=1)
        self.Np = np.sum(self.P1)
        self.PX = np.matmul(self.P, self.X)

    def maximization(self):
        """
        Compute the maximization step of the EM algorithm.
        """
        self.update_transform()
        self.transform_point_cloud()
        self.update_variance()
| 8,645 | 30.67033 | 115 | py |
SPONGE | SPONGE-main/main.py | # copied from the main.py file of the old McSAS:
import argparse
import logging
import multiprocessing
import sys, os
from sys import platform
# adapted from: https://stackoverflow.com/questions/8220108/how-do-i-check-the-operating-system-in-python
def isLinux():
    """Return True when running on a Linux kernel.

    sys.platform is "linux" on Python 3 and was "linux2" on older
    builds; both spellings are accepted.
    """
    return platform in ("linux", "linux2")
def isMac():
    """Return True when running on macOS, where sys.platform is "darwin"."""
    return platform == "darwin"
def isWindows():
    """Return True when running on Windows, where sys.platform is "win32"."""
    return platform == "win32"
# from log import replaceStdOutErr
# from utils import isMac, isLinux
def getScriptPath():
    """Return (directory, filename) of the current script or frozen app.

    In a regular interpreter run the module's own ``__file__`` is used;
    frozen bundles (which do not define ``__file__``) fall back to
    ``sys.executable``.
    """
    try:
        # regular run: locate this module file relative to the cwd
        candidate = os.path.join(os.getcwd(), __file__)
    except NameError:
        # __file__ is undefined in frozen apps; use the executable itself
        candidate = sys.executable
    candidate = os.path.abspath(candidate)
    directory, filename = os.path.split(candidate)
    if os.path.isfile(directory):
        # defensive: if the split left a file path, step up one level
        directory = os.path.dirname(directory)
    return directory, filename
# get script/executable location, add to module search path
SCRIPT_PATH, SCRIPT_FILENAME = getScriptPath()
# when running from source, make sibling modules next to this script
# importable regardless of the caller's working directory
if not hasattr(sys, "frozen") and SCRIPT_PATH not in sys.path:
    sys.path.append(SCRIPT_PATH)
# FIXME? sphinx assumes the upper directory as root,
# prefixes every module path with *mcsas*
# -> do it the same way? Would simplify some things ...
# frozen Linux bundles ship their shared libraries next to the executable,
# so point the dynamic linker at the bundle directory
if (isLinux() and hasattr(sys, "frozen")
        and "LD_LIBRARY_PATH" not in os.environ):
    os.environ["LD_LIBRARY_PATH"] = SCRIPT_PATH
# deliberately imported only after sys.path has been set up above
import s
def makeAbsolutePath(relpath):
    """Resolve *relpath* against the script directory (SCRIPT_PATH)."""
    combined = os.path.join(SCRIPT_PATH, relpath)
    return os.path.abspath(combined)
def main(argv = None):
    """Parse command-line options and forward them to the simulator.

    All parsed options are converted to a dict and passed as keyword
    arguments to ``s.s()``. The *argv* parameter is currently unused;
    argparse reads sys.argv directly.
    """
    parser = argparse.ArgumentParser(description = """
        Simulates small-angle scattering patterns from
        STL-descriptions of object surfaces. Can include
        polydispersity in size (uniformly scaling in all dimensions)
        Cobbled together by Brian R. Pauw.
        Released under a GPLv3+ license.
        """)
    # TODO: add info about output files to be created ...
    parser.add_argument("-f", "--efName", type = str, default = None,
            help = "Path to excel filename containing the sim settings")
    parser.add_argument("-g", "--group", type = str, default = None,
            help = "simulation group to work on")
    parser.add_argument("-n", "--numProcesses", type = int, default = None,
            help = "Maximum number of parallel processes. Default is number of cores on system.")
    parser.add_argument("-s", "--filename", type = str, default = None,
            help = "FOR DIRECT CALL: input STL filename")
    parser.add_argument("-q", "--qmin", type = float, default = 0.01,
            help = "FOR DIRECT CALL: minimum q value")
    parser.add_argument("-Q", "--qmax", type = float, default = 2,
            help = "FOR DIRECT CALL: maximum q value")
    parser.add_argument("-N", "--nq", type = int, default = 100,
            help = "FOR DIRECT CALL: number of q values")
    parser.add_argument("-R", "--nrep", type = int, default = 100,
            help = "FOR DIRECT CALL: number of repetitions")
    parser.add_argument("-P", "--npoints", type = int, default = 1000,
            help = "FOR DIRECT CALL: number of points in object")
    parser.add_argument("-M", "--mu", type = float, default = 1,
            help = "FOR DIRECT CALL: mean of Gaussian size distribution")
    parser.add_argument("-S", "--sigma", type = float, default = 0.01,
            help = "FOR DIRECT CALL: width (sigma) of Gaussian size distribution")
    parser.add_argument("-O", "--ofname", type = str, default = None,
            help = "FOR DIRECT CALL: output filename")
    parser.add_argument("-D", "--projectDirectory", type = str, default = '.',
            help = "FOR DIRECT CALL: project directory (starting point)")
    if isMac():
        # on OSX remove automatically provided PID,
        # otherwise argparse exits and the bundle start fails silently
        # NOTE(review): deleting from sys.argv while iterating over
        # range(len(sys.argv)) can raise IndexError when "-psn" is not the
        # last argument -- confirm this never occurs for OSX bundles
        for i in range(len(sys.argv)):
            if sys.argv[i].startswith("-psn"): # PID provided by osx
                del sys.argv[i]
    try:
        args = parser.parse_args()
    except SystemExit as e:
        # useful debugging code, ensure destination is writable!
        # logfn = ("/tmp/{name}_unsupported_args.log"
        #         .format(name = SCRIPT_FILENAME))
        # with open(logfn, "w") as fd:
        #     fd.write("argv: " + str(sys.argv) + "\n")
        raise
    # initiate logging (to console stderr for now)
    # replaceStdOutErr() # replace all text output with our sinks
    # hand every parsed option to the simulator as keyword arguments
    adict = vars(args)
    s.s(**adict)
if __name__ == "__main__":
    # required so multiprocessing works inside frozen Windows executables;
    # a no-op on other platforms / non-frozen runs
    multiprocessing.freeze_support()
    #manager=pyplot.get_current_fig_manager()
    #print manager
    #process input arguments
    main()
| 4,736 | 40.920354 | 105 | py |
SPONGE | SPONGE-main/nexuswriter.py | # -*- coding: utf-8 -*-
# nexuswriter.py
# Author: Brian R. Pauw
# License: GPLv3
# helper function for writing datasets:
import os
import h5py
import numpy as np
class NeXusWriter(object):
    """
    Writes the provided data and keys into a NeXus-/NXcanSAS-conform structure in an HDF5 file
    """
    # class-level defaults; the instance-relevant ones are set in __init__
    _attrDict = {}
    _filename = None
    _overwrite = None
    _inputMapping = {}
    # these keyword arguments must always be supplied to __init__
    _requiredInputKeys = ["Q", "I", "IError"]

    ######## INPUT KEYWORD-VALUE MAPPING IS DEFINED HERE #########
    """
    The following dictionary identifies possible input keywords to NeXusWriter,
    and their respective mapping paths to datasets.
    Any mapping paths containing an '@'-symbol will link to the attribute table. In
    that case, the element of the mapping path after the '@'-symbol is assumed to be the
    name of the attribute
    """
    _inputMapping = {
        "Q" : "/sasentry1/sasdata1/Q", # dataset
        "I" : "/sasentry1/sasdata1/I", # dataset
        "IError" : "/sasentry1/sasdata1/Idev", # dataset
        "Qdev" : "/sasentry1/sasdata1/Qdev", # optional dataset
        "wavelength" : "/sasentry1/instrument/incident_wavelength",
        "wavelengthUnits" : "/sasentry1/instrument/incident_wavelength@units",
        "sampleName" : "/sasentry1/sample/name", # dataset
        "title" : "/sasentry1/title", # dataset
        "instrumentTitle" : "/sasentry1/instrument/title", # dataset
        "timestamp" : "/sasentry1/sasdata1@timestamp", # attribute
        "QUnits" : "/sasentry1/sasdata1/Q@units", # attribute
        "IUnits" : "/sasentry1/sasdata1/I@units",
        "IdevUnits" : "/sasentry1/sasdata1/Idev@units",
        "QdevUnits" : "/sasentry1/sasdata1/Qdev@units",
    }

    ######## ATTRIBUTES ARE DEFINED HERE #########
    """Fills the attribute dictionary with fields we need"""
    _defaults = {
        # /
        "/@canSAS_class": "SASroot",
        "/@default": "sasentry1",
        # /sasentry1
        "/sasentry1@NX_class": "NXentry",
        "/sasentry1@canSAS_class": "SASentry",
        "/sasentry1@version": "1.0",
        # /sasentry/sasdata1
        "/sasentry1/sasdata1@NX_class": "NXdata",
        "/sasentry1/sasdata1@canSAS_class": "SASdata",
        "/sasentry1/sasdata1@I_axes": "Q",
        "/sasentry1/sasdata1@Q_indices": "0",
        "/sasentry1/sasdata1@timestamp": "2019-01-30T16:19:54+00:00",
        "/sasentry1/sasdata1@signal": "I",
        # /sasentry1/sasdata1/I
        "/sasentry1/sasdata1/I@units": "1/m/sr",
        "/sasentry1/sasdata1/I@uncertainties": "Idev",
        # /sasentry1/sasdata1/Idev
        "/sasentry1/sasdata1/Idev@units": "1/m/sr",
        # /sasentry1/sasdata1/Q
        "/sasentry1/sasdata1/Q@units": "1/nm",
        # /sasentry1/instrument
        "/sasentry1/instrument@canSAS_class": "SASinstrument",
        "/sasentry1/instrument@NX_class": "NXdata",
        # /sasentry1/instrument/incident_wavelength
        "/sasentry1/instrument/incident_wavelength@units": "nm",
        # /sasentry1/sample
        "/sasentry1/sample@canSAS_class": "SASsample",
        "/sasentry1/sample@NX_class": "NXsample",
    }

    def __init__(self, filename = None, overwrite = False, directDict = {}, **kwargs):
        """
        NeXusWriter is a flexible class for constructing, or adding to (HDF5-based)
        NeXus files. It can be used using (a combination of) two mechanisms:
        1. A mapping can be defined in the NeXusWriter.inputMapping dictionary, which
        defines what additional input arguments should end up where in the NeXus file.
        This can be convenient when using the NeXusWriter in other code which always
        writes the same standard output or should be fed with user-intelligible
        parameters,
        and/or
        2. A dictionary ("directDict") can be provided to the arguments of NeXusWriter,
        with a pathKey (e.g. "/entry1/instrument/detector/data" for a dataset, or
        "entry1/sample/transmission@units" for an attribute), and a value to associate
        with it.
        After the input arguments are handled, the defaults in the NeXusWriter._defaults
        dictionary are filled in for all still missing entries.
        NeXusWriter can be called with the following arguments:
        * filename *: (required) a valid filename or Path object
        * overwrite *: will delete and recreate if the file exists
        * directDict *: A dictionary containing 'pathKey: value'-combinations, that adhere to the specifications in
        the addAttribute and addDataset methods.
        further keyword-value pairs can be defined later on in this function for default mappings.
        See NeXusWriter._inputMapping and NeXusWriter._defaults dictionaries for the current defaults
        """
        # NOTE(review): directDict uses a mutable default argument; safe
        # only as long as it is never mutated here -- it currently is not.
        self._filename = filename
        self._overwrite = overwrite
        for key in self._requiredInputKeys:
            assert key in kwargs.keys(), "{} must be provided".format(key)
        # ensure we're good to go
        self.validate()
        # mapped keyword arguments first, then direct path/value pairs
        self._dealWithInput(kwargs)
        self._dealWithInput(directDict)
        # find the attributes which haven't been defined yet, and fill them with the defaults
        for key, value in self._defaults.items():
            if (key in kwargs.keys()) or (key in directDict.keys()):
                continue
            else:
                # place the remaining default attributes in the file:
                self.addAttribute(pathKey = key, value = value)

    def _dealWithInput(self, inputDict):
        """
        deals with the input arguments to the class that need to be written to the NeXus structure.
        inputDict must contain pathKey: value combinations, that adhere to the specifications in
        the addAttribute and addDataset methods.
        """
        # datasets must be set first, then attributes can be attached:
        for datasetsOrAttributes in ['datasets', 'attributes']:
            for key, value in inputDict.items():
                if not key in self._inputMapping:
                    # unmapped keys are interpreted as literal HDF5 paths
                    # print("input key {} not in input mapping, interpreting directly...")
                    kv = [key, value]
                else:
                    kv = [self._inputMapping[key], value]
                if ('@' in kv[0]) and (datasetsOrAttributes.lower() == 'attributes'): # we are dealing with an attribute here
                    # print('adding attribute {}'.format(kv[0]))
                    self.addAttribute(pathKey = kv[0], value = kv[1])
                if not('@' in kv[0]) and (datasetsOrAttributes.lower() == 'datasets'): # we are dealing with a dataset here
                    # print('adding dataset {}'.format(kv[0]))
                    self.addDataset(pathKey = kv[0], value = kv[1])

    def validate(self):
        """Generic validation method"""
        assert self._filename is not None, "Output filename (filename) must be provided!"
        if not self._overwrite:
            assert not os.path.isfile(self._filename), "Filename cannot exist already"
        else:
            # overwrite requested: start from a clean file
            if os.path.isfile(self._filename):
                os.remove(self._filename)
        # additional checks can be put here too...

    def addAttribute(self, pathKey = None, value = None):
        """
        Adds attributes to a dataset or group in NeXus.
        pathKey must be corresponding to:
        /path/to/datasetOrGroup@attribute
        value can be any valid dtype.
        """
        # adapted from Structurize:
        assert pathKey is not None, "HDF5 path/key combination cannot be empty"
        print("adding attribute at pathKey {}".format(pathKey))
        path, key = pathKey.rsplit("@", 1)
        with h5py.File(self._filename, 'a') as h5f:
            if not path in h5f:
                # attributes can only attach to existing groups/datasets
                print("  Location {} does not exist in output file".format(path))
                return
            # write attribute
            h5f[path].attrs[key] = value

    def addDataset(self, pathKey = None, value = None):
        """
        Adds (or replaces) a dataset to a NeXus structure.
        pathKey must be corresponding to:
        /path/to/dataset
        value can be any valid dtype.
        - Lists are converted to np.arrays.
        - Unicode string arrays are converted to h5py special_dtype '<U6'
        """
        # adapted from McSAS3:
        assert pathKey is not None, "HDF5 path/key combination cannot be empty"
        print("adding dataset at pathKey {}".format(pathKey))
        path, key = pathKey.rsplit('/', 1)
        """stores the settings in an output file (HDF5)"""
        with h5py.File(self._filename, 'a') as h5f:
            h5g = h5f.require_group(path)
            # store arrays:
            # convert all compatible data types to arrays:
            if type(value) is tuple or type(value) is list:
                value = np.array(value)
            if value is not None and type(value) is np.ndarray:
                # HDF cannot store unicode string arrays, these need to be stored as a special type:
                if value.dtype == '<U6':
                    value = value.astype(h5py.special_dtype(vlen=str))
                # store the data in the previously defined group:
                h5g.require_dataset(key, data = value, shape = value.shape, dtype = value.dtype, compression = "gzip")
            # non-array values are stored here:
            elif value is not None:
                # try and see if the destination already exists.. This can be done by require_dataset, but that requires shape and dtype to be specified. This method doesn't:
                dset = h5g.get(key, None)
                if dset is None:
                    h5g.create_dataset(key, data = value)
                else:
                    dset[()] = value
| 10,045 | 44.252252 | 174 | py |
SPONGE | SPONGE-main/stlfunctions.py | # -*- coding: utf-8 -*-
import vtk
def getSTLReader(filename):
    """Return a vtkSTLReader that has already loaded *filename*.

    Based on:
    https://pyscience.wordpress.com/2014/09/21/ray-casting-with-python-and-vtk-intersecting-linesrays-with-surface-meshes/
    """
    reader = vtk.vtkSTLReader()
    reader.SetFileName(filename)
    # 'update' the reader, i.e. actually read the .stl file from disk
    reader.Update()
    return reader
def STLToPolydata(filename):
    """Read an STL file and return its mesh as vtkPolyData.

    Parameters
    ----------
    filename: str
        Path to the STL file to read.

    Returns
    -------
    vtkPolyData with its bounds precomputed.

    Raises
    ------
    ValueError
        If no point data could be read from the file.
    """
    readerSTL = getSTLReader(filename)
    polydata = readerSTL.GetOutput()
    # If there are no points in 'vtkPolyData' something went wrong
    if polydata.GetNumberOfPoints() == 0:
        # fix: removed the unreachable `return None` that followed this
        # raise, and closed the dangling quote in the message
        raise ValueError(
            "No point data could be loaded from '" + filename + "'")
    # be nice: precompute the bounding box for downstream users
    polydata.ComputeBounds()
    return polydata
| 895 | 32.185185 | 145 | py |
SPONGE | SPONGE-main/calcfunctions.py | # -*- coding: utf-8 -*-
# use in an instance with VTK!
import vtk
import numpy as np
import scipy.spatial
import logging
def polydataToMass(polydata):
    """Return the volume enclosed by *polydata*, via vtkMassProperties."""
    props = vtk.vtkMassProperties()
    props.SetInputData(polydata)
    props.Update()
    return props.GetVolume()
def polydataToSurface(polydata):
    """Return the surface area of *polydata*, via vtkMassProperties."""
    props = vtk.vtkMassProperties()
    props.SetInputData(polydata)
    props.Update()
    return props.GetSurfaceArea()
def pickPointsInMeshV2(mesh, nPoints = 1000):
    # choose points within the boundaries. These points are then (double)checked whether
    # they lie inside or outside the object. Points outside the object are discarded.
    # this process is repeated until nPoints have been found inside.
    # Find the limits of the mesh:
    mesh.ComputeBounds() # already done upon STL read
    (xMin, xMax, yMin, yMax, zMin, zMax) = mesh.GetBounds()
    # print("Limits: x: {}, {}, y: {}, {}, z: {}, {}".format(xMin, xMax, yMin, yMax, zMin, zMax))
    nFound = 0
    pts = [] # accepted (inside) points, collected as (x, y, z) tuples
    TPCoord = np.zeros([nPoints, 3]) # test block
    # inCoord = np.zeros([nPoints, 3]) # final point set
    while (nFound < nPoints):
        # generate a block of points to test
        TPCoord[:, 0] = np.random.uniform(low = xMin, high = xMax, size = nPoints)
        TPCoord[:, 1] = np.random.uniform(low = yMin, high = yMax, size = nPoints)
        TPCoord[:, 2] = np.random.uniform(low = zMin, high = zMax, size = nPoints)
        # add to vPts object:
        # version using vtk points object:
        TPts = vtk.vtkPoints()
        TPts.SetDataType(vtk.VTK_DOUBLE)
        dummy = [TPts.InsertNextPoint([TPCoord[j, 0], TPCoord[j, 1], TPCoord[j, 2]]) for j in range(nPoints)]
        chkPts = vtk.vtkPolyData()
        chkPts.SetPoints(TPts)
        # set up location checker, parts of this may be moved outside loop later:
        sel = vtk.vtkSelectEnclosedPoints()
        sel.SetInputData(chkPts)
        sel.SetSurfaceData(mesh)
        sel.CheckSurfaceOn()
        sel.Update()
        pointi = [] # new list
        j = 0
        # accept candidates until the quota is filled or this batch runs out
        while (nFound < nPoints) and (j < nPoints):
            if sel.IsInside(j):
                pointi.append(j)
                nFound += 1
            j+=1
        # add to final set:
        [pts.append(chkPts.GetPoint(j)) for j in pointi]
        # for j in pointi:
        #     inCoord[j,:] = chkPts.GetPoint(j)
        # print("{} points found of requested {}".format(nFound, nPoints))
    return pts
def pointsToScatter(q, points, memSave = False):
    """
    Debye-equation scattering intensity of a point cloud.

    Unique pairwise distances are computed with
    scipy.spatial.distance.pdist (each pair counted once, self-distances
    excluded), after which I(q) is the mean of sinc(q * d) over all pairs.

    Parameters
    ----------
    q: numpy array of scattering vector magnitudes.
    points: (nPoints, 3) array-like of coordinates.
    memSave: bool
        If True, evaluate one q value at a time to limit peak memory.

    Returns
    -------
    numpy array of intensities, one per q value.
    """
    coords = np.array(points)
    pairDist = scipy.spatial.distance.pdist(coords, metric = "euclidean")
    if memSave:
        intensity = np.empty(q.shape)
        intensity.fill(np.nan)
        for qi, qval in enumerate(q):
            # np.sinc carries an implicit factor pi, hence the division
            intensity[qi] = np.sinc(pairDist * qval / np.pi).mean()
        return intensity
    # vectorised path: build the full (nDistances, nq) phase matrix at once
    phase = np.outer(np.abs(pairDist), q)
    return np.sinc(phase / np.pi).mean(axis=0)
def logEdges(dist, qmin, qmax, nq):
    """
    Calculates the optimal histogramming bin edges for pointsToScatterD, based on input arguments:
    * dist *: the point-to-point distance list from scipy.spatial.distance.pdists
    * qmin *: the minimum requested Q value
    * qmax *: the maximum requested Q value
    * nq *: the number of requested q points

    NOTE: deprecated. This function now only logs an error and returns
    None; the former implementation is kept below in commented-out form.
    """
    logging.error('This "fast method"-functionality is depreciated, please use Debyer for this approach')
    # # core logEdges:
    # logEdges = np.logspace(
    #     np.log10(2 * np.pi / qmax),
    #     np.log10(2 * np.pi / qmin),
    #     nq)
    # # if there are distances below:
    # if (dist.min() < (2 * np.pi / qmax)):
    #     logEdges = np.concatenate(
    #         [np.logspace(np.log10(dist.min()),np.log10(2 * np.pi / qmax),10, endpoint = False),
    #         logEdges]
    #     )
    # # if there are distances above:
    # if (dist.max() > (2 * np.pi / qmin)):
    #     logEdges = np.concatenate(
    #         [logEdges, np.logspace(np.log10(2 * np.pi / qmin), np.log10(dist.max()),11, endpoint = True)[1:]
    #         ]
    #     )
    # return logEdges
def pointsToScatterD(q, points, memSave = False):
    """ Calculate the scattering intensity from an array of points, by histogramming first.
    This should be the quicker -- but potentially riskier -- method of calculating the
    scattering intensity compared to the original pointsToScatter function.

    NOTE: deprecated. This function now only logs an error and returns
    None; the former implementation is kept below in commented-out form.
    """
    logging.error('This "fast method"-functionality is depreciated, please use Debyer for this approach')
    # points = np.array(points)
    # dist = scipy.spatial.distance.pdist(points, metric = "euclidean")
    # lEdges = logEdges(dist, q.min(), q.max(), q.size)
    # dlog, elog = np.histogram(dist, bins = lEdges, density = False)
    # de = np.diff(elog) / 2 + elog[:-1] # middle distance of a given bin
    # # dle = dlog / np.diff(elog) # normalized fraction of contributions in a given bin
    # I2 = np.empty(q.shape)
    # I2.fill(np.nan) # initialize as nan
    # for qi, qval in enumerate(q):
    #     I2[qi] = 4 * np.pi * (dlog * np.sinc(de * qval / np.pi)).sum() / points.size**2
    # return I2
| 5,850 | 38.802721 | 111 | py |
SPONGE | SPONGE-main/plotfunctions.py | import matplotlib
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
def simPlot(dataset, RayleighR = None, title = None):
    """Plot a simulated dataset on log-log axes, optionally overlaying a
    Rayleigh (homogeneous sphere) form factor for comparison.

    Parameters
    ----------
    dataset: object with Q, I and IError attributes (e.g. a DataFrame).
    RayleighR: float, optional
        Sphere radius; when given, the squared Rayleigh form factor for
        this radius is plotted alongside the data.
    title: str, optional
        Legend label for the data curve.
    """
    # fix: this module only imports matplotlib, so the np.* calls below
    # raised NameError whenever RayleighR was supplied
    import numpy as np
    plt.errorbar(dataset.Q, dataset.I * 10. / 1.1, dataset.IError * 10. / 1.1, label = title)
    if RayleighR is not None:
        q = dataset.Q
        f = 3. * (np.sin(q*RayleighR) - q*RayleighR * np.cos(q*RayleighR)) / ((q*RayleighR)**3.) # Rayleigh form factor
        plt.loglog(q, f**2, label = "Rayleigh function, R = {}".format(RayleighR))
    plt.xscale("log")
    plt.yscale("log")
    plt.grid("on")
    plt.legend(loc = 0)
    plt.xlabel("q (1/nm)")
    plt.ylabel("I (A.U.)")
| 645 | 33 | 119 | py |
SPONGE | SPONGE-main/smearfunctions.py | # -*- coding: utf-8 -*-
import vtk
import pandas, scipy
import numpy as np
import scipy.spatial
import scipy.signal
import os, h5py
import scipy.stats # for the distributions
from scipy.interpolate import interp1d # for the interpolations
from scipy.stats import norm # gaussian distribution function
from .distfunctions import interpolate
def halfTrapzPDF(x, c, d):
    """Half-trapezoidal PDF, defined for x >= 0 and assumed mirrored around 0.

    Note that the integral of this PDF over x > 0 is 0.5.
    Source: van Dorp and Kotz, Metrika 2003, eq (1), using a = -d, b = -c.

    Parameters
    ----------
    x : numpy.ndarray
        Evaluation points; the absolute value is taken internally.
    c : float
        Half-width of the flat top of the trapezoid.
    d : float
        Half-width of the full base; must be strictly positive.

    Returns
    -------
    (pdf, norm) : tuple
        The PDF evaluated at x, and the normalization factor 1/(d + c).
    """
    # fix: input validation used to be a bare `assert`, which is stripped
    # under `python -O`; also removed a leftover debug print
    if d <= 0.:
        raise ValueError("base half-width d must be positive, got {}".format(d))
    x = abs(x)
    pdf = x * 0.
    pdf[x < c] = 1.
    if d > c:
        # linearly decaying flank between c and d
        pdf[(c <= x) & (x < d)] = (1./(d - c)) * (d - x[(c <= x) & (x < d)])
    norm = 1./(d + c)
    pdf *= norm
    return pdf, norm
def slitSmearTrapz(simDat, halfUmbra = 0.01, halfPenUmbra = 0.1, ndiv = 25):
    """Slit-smear a simulated profile using a (half-)trapezoidal beam profile.

    This requires accurate beam profile parameters, AND it requires that the
    input simulated data has a decent Guinier region at low q (due to the
    mirroring performed inside `interpolate`).

    Parameters
    ----------
    simDat : pandas.DataFrame
        Simulated data with columns Q, I, IError.
    halfUmbra : float
        Half-width of the fully illuminated (flat) part of the beam.
    halfPenUmbra : float
        Half-width of the penumbra (full base of the trapezoid).
    ndiv : int
        Number of integration points across the beam profile.

    Returns
    -------
    (x, g, addDat) : tuple
        Integration abscissae, beam-profile weights, and the smeared dataset
        on the same Q grid as simDat.
    """
    lims = [-halfPenUmbra, halfPenUmbra]
    x = np.linspace(lims[0], lims[1], ndiv)
    g, dummy = halfTrapzPDF(x, halfUmbra, halfPenUmbra)
    # for the numerical integration, we need to temporarily move away from the Pandas Dataframes
    I = np.zeros([simDat.Q.values.size, g.size])
    IError = np.zeros([simDat.Q.values.size, g.size])
    for oi, offset in enumerate(x):
        dst = interpolate(simDat, QModifier = offset, modType = "slitSmear", mirror = True)
        # assign result into the to-be-integrated matrices, weighted by the beam profile
        I[:,oi] = dst.I.values * g[oi]
        IError[:,oi] = dst.IError.values * g[oi]
    IInt = np.trapz(I, x = x[np.newaxis, :], axis = 1)
    IErrorInt = np.trapz(IError, x = x[np.newaxis, :], axis = 1)
    addDat = simDat.copy()
    addDat["Q"] = simDat["Q"] # reset Q
    addDat.I = IInt
    # bug fix: the integrated uncertainties used to be discarded -- the raw
    # (nQ x ndiv) IError matrix was assigned here instead of IErrorInt
    addDat.IError = IErrorInt
    return x, g, addDat
| 2,163 | 33.903226 | 96 | py |
SPONGE | SPONGE-main/__init__.py | # -*- coding: utf-8 -*-
# __init__.py
"""
General imports of sponge
"""
__author__ = "Brian R. Pauw"
__contact__ = "brian@stack.nl"
__license__ = "GPLv3+"
__copyright__ = "Bundesanstalt für Materialforschung und -Prüfung"
__date__ = "2019-06-21"
__status__ = "beta"
version = "17"
__all__ = []
from .s import s
from .plotfunctions import simPlot
from .calcfunctions import pickPointsInMeshV2, pointsToScatter, pointsToScatterD, logEdges, polydataToMass
from .stlfunctions import getSTLReader, STLToPolydata
from .distfunctions import interpolate, distSpreadGaussian
from .smearfunctions import halfTrapzPDF, slitSmearTrapz
def argparser():
    """Build and parse the command-line arguments for the sponge simulator.

    Returns
    -------
    argparse.Namespace
        Parsed arguments with attributes `efName` (path to the Excel settings
        file) and `group` (simulation group to run), both defaulting to None.
    """
    # fix: argparse is used here but was never imported in this module
    import argparse
    parser = argparse.ArgumentParser(description = """
            Simulates small-angle scattering patterns from
            STL-descriptions of object surfaces. Can include
            polydispersity in size (uniformly scaling in all dimensions)
            Cobbled together by Brian R. Pauw.
            Released under a GPLv3+ license.
            """)
    # path to the Excel file that lists the simulation settings
    parser.add_argument("-f", "--efName", type = str, default = None,
                        help = "Path to excel filename containing the sim settings")
    # name of the simulation group (testgroup column) to run
    parser.add_argument("-g", "--group", type = str, default = None,
                        help = "simulation group to work on")
    return parser.parse_args()
if __name__ == "__main__":
    # Command-line entry point: parse the arguments and run the simulator.
    #manager=pyplot.get_current_fig_manager()
    #print manager
    #process input arguments
    adict = argparser()
    #run the program, scotty! I want a kwargs object, so convert args:
    adict = vars(adict)
    # NOTE(review): `sponge` is not defined or imported anywhere in this
    # module, so this call raises NameError at runtime; it presumably should
    # instantiate the imported `s` class instead -- confirm before relying on
    # this entry point.
    sponge.sponge(**adict) #and expand to kwargs
# vim: set ts=4 sts=4 sw=4 tw=0:
| 1,625 | 32.183673 | 106 | py |
SPONGE | SPONGE-main/s.py | # -*- coding: utf-8 -*-
import pandas
import numpy as np
import argparse
import datetime
import os
from .nexuswriter import NeXusWriter
from pathlib import Path
# from plotfunctions import simPlot
from . import calcfunctions
# from calcfunctions import pickPointsInMeshV2, pointsToScatter, pointsToScatterD, logEdges, polydataToMass
from . import stlfunctions
# from stlfunctions import getSTLReader, STLToPolydata
# from .distfunctions import interpolate, distSpreadGaussian
# from .smearfunctions import halfTrapzPDF, slitSmearTrapz
import multiprocessing
# from multiprocessing import Pool
class s(object):
    """A SPONGE simulation driver.

    Simulates small-angle scattering patterns from STL shape descriptions,
    either for a whole batch of simulations defined in an Excel sheet
    (keyword arguments `efName` + `group`), or for one direct simulation
    (`filename` plus optional settings).

    After a run, the results are available as instance attributes:
    resultDict, volumes, surfaceAreas, simdata and IAtQMin.
    """
    resultDict = None     # results in dictionary form
    volumes = None        # volumes of the simulated objects
    surfaceAreas = None   # surface areas of the simulated objects
    simdata = None        # Q, I, IError
    # I at smallest q, not volume-square compensated; should be close to 1
    # or you didn't simulate to low enough q:
    IAtQMin = None
    _SLDs = None
    _volumefraction = None

    def __init__(self, numProcesses = 1, run=True, **kwargs):
        """Set up (and optionally run) the simulation.

        Can be run either using an Excel file as input (`efName` + `group`)
        or for direct simulation of a single STL file (`filename` + settings).
        """
        # reset everything for a new instance
        self.resultDict = []
        self.volumes = []
        self.surfaceAreas = []
        self.simdata = []
        self.IAtQMin = []
        # array of SLDs for each of the phases, also for multiphase modelling:
        # in the order of [SLD(phase1), SLD(phase2), ..., SLD(dispersant)]).
        # For single-phase, just have [SLD(particle), SLD(dispersant)]
        self._SLDs = [1., 0.]
        self._volumefraction = 1.  # volume fraction of scatterers

        # robustness fix: use .get() so a missing keyword triggers the assert
        # message below instead of an opaque KeyError
        assert (kwargs.get('efName') is not None) or (kwargs.get('filename') is not None), \
            'either an excel file with a list of simulations or a single STL file should be provided'
        if kwargs.get('efName') is not None:
            # run in the traditional (Excel-driven, batch) way:
            if run:
                assert kwargs.get('group') is not None, 'simulation group must be specified'
                Tests = self.loadTests(kwargs['efName'])
                self.resultDict = self.runTests(Tests = Tests, group = kwargs['group'], numProcesses = numProcesses)
        else:  # we run this in the single-file way:
            if run:
                Tests = self.genTest(kwargs)
                # bug fix: the result used to be stored in a local variable
                # only, leaving self.resultDict empty in single-file mode
                self.resultDict = self.runTests(Tests = Tests, group = 'default', numProcesses = numProcesses)

    def loadTests(self, efName):
        """Load the batch simulation settings from an Excel file.

        Returns a pandas DataFrame with one row per simulation, lower-case
        column names, numeric columns cast to their proper dtypes, and a
        `projectdirectory` column pointing at the Excel file's directory.
        """
        efName = Path(efName)  # it's ok if this is done multiple times...
        projectdirectory = efName.parent
        Tests = pandas.read_excel(efName, skiprows = 1)
        Tests = Tests.dropna(axis = 0, how = "all")  # remove empty rows for cleaning up.
        Tests.columns = Tests.columns.str.lower()  # lower case column names only
        # cast to the right datatypes:
        Tests = Tests.astype({
            "npoints": "int",
            "nq": "int",
            "nrep": "int",
            "memsave": "bool",
            "qmin": "float",
            "qmax": "float",
            "mu": "float",
            "sigma": "float",
        })
        Tests["projectdirectory"] = projectdirectory  # add a project directory to all the entries
        return Tests

    def genTest(self, pars):
        """Generate a one-row test DataFrame for a single direct simulation.

        Similar to loadTests, but the parameters are built from defaults,
        overridden by whatever is supplied in `pars` (path-like entries are
        converted to pathlib.Path).
        """
        # fill defaults:
        Tests = pandas.DataFrame(
            data={
                "testgroup": "default",
                "filename": Path('.'),
                "projectdirectory": Path('.'),
                "ofname": Path('default.out'),
                "npoints": 1000,
                "nq": 200,
                "nrep": 100,
                "memsave": True,
                "qmin": 0.01,
                "qmax": 2,
                "mu": 1,
                "sigma": 0.01,
            },
            index = [0],
        )
        for key, val in pars.items():
            if key.lower() in ['filename', 'ofname', 'projectdirectory']:
                val = Path(val)
            print(f'Changing parameter: {key.lower()} to value: {val}')
            Tests[key.lower()] = val
        # cast to the right datatypes:
        Tests = Tests.astype({
            "npoints": "int",
            "nq": "int",
            "nrep": "int",
            "memsave": "bool",
            "qmin": "float",
            "qmax": "float",
            "mu": "float",
            "sigma": "float",
        })
        return Tests

    def singleRun(self, parameters):
        """Run one scattering calculation for a single (randomly scaled) shape.

        Returns (I, vol, surf): the scattering intensity on the configured
        q-grid, and the object volume and surface area after scaling.
        """
        q = np.logspace(
            np.log10(parameters["qmin"]),
            np.log10(parameters["qmax"]),
            parameters["nq"])
        mesh = stlfunctions.STLToPolydata(Path(parameters["projectdirectory"], parameters["filename"]).as_posix())
        pts = calcfunctions.pickPointsInMeshV2(mesh, parameters["npoints"])
        # draw the dilation/contraction factor; make sure we don't get negative.
        scaler = -1.0
        while (scaler < 0):
            scaler = np.random.normal(loc = parameters["mu"], scale = parameters["sigma"])  # scaling factor to apply to the shape
        I = calcfunctions.pointsToScatter(q * scaler, pts, parameters["memsave"])
        vol = calcfunctions.polydataToMass(mesh) * scaler**3  # volume scales with the cube of the scaler
        # bonus surface area (for surface-to-volume ratios):
        surf = calcfunctions.polydataToSurface(mesh) * scaler**2
        return I, vol, surf

    def multiRun(self, parameters, numProcesses = None):
        """Run `nrep` repetitions of singleRun (optionally in parallel) and
        average them into a scattering curve with uncertainties.

        Stores per-repetition volumes, surface areas and I(qmin) values on the
        instance, writes the averaged curve to `ofname` (if set), and returns
        {"data": DataFrame, "parameters": parameters}.
        """
        if "sld" in parameters:
            # bug fix: this used to assign to a never-read `_SLD` attribute,
            # so user-supplied SLDs were silently ignored
            self._SLDs = list(parameters["sld"])
        if "volumefraction" in parameters:
            self._volumefraction = float(parameters["volumefraction"])
        q = np.logspace(
            np.log10(parameters["qmin"]),
            np.log10(parameters["qmax"]),
            parameters["nq"])
        if numProcesses is None:
            nump = multiprocessing.cpu_count()
        else:
            assert isinstance(numProcesses, int)
            nump = np.minimum(multiprocessing.cpu_count(), numProcesses)
        if nump > 1:
            # multiprocessing
            Pool = multiprocessing.Pool(processes = nump)
            mapParam = [parameters for i in range(int(parameters["nrep"]))]
            rawData = Pool.map(self.singleRun, mapParam)
            Pool.close()
            Pool.join()
        else:
            # single threaded:
            rawData = []
            for _ in range(int(parameters["nrep"])):
                rawData.append(self.singleRun(parameters))
        # pick apart intensities and volume outputs:
        rawDataI = []
        rawIAtQMin = []  # should be close to 1, otherwise the sim wasn't done to low enough q to get a good Guinier
        rawDataV = []
        rawDataS = []  # surface area
        for item in rawData:
            # note: we are volume-weighting the intensity here!
            # if you want "standard" number-weighted intensity, then multiply not by item[1] but item[1]**2
            # =Fsq/V of SasView-style (intensity multiplied by volume-square, then divided by volume
            # once), multiplied by deltaSLD**2 towards absolute units; scale will be the volume
            # fraction if the raw intensity converges to 1 at q=0.
            rawDataI.append(item[0] * item[1] * self._volumefraction * (self._SLDs[-1] - self._SLDs[0])**2)
            rawIAtQMin.append(item[0][0])
            rawDataV.append(item[1])  # volumes that come out are actual volumes (e.g. for sphere = 4/3 pi r^3)
            rawDataS.append(item[2])  # surface areas of the individual realizations
        rawDataI = np.array(rawDataI)
        self.IAtQMin = np.array(rawIAtQMin)
        self.volumes = np.array(rawDataV)
        self.surfaceAreas = np.array(rawDataS)
        data = pandas.DataFrame({"Q": q})
        data["I"] = rawDataI.mean(axis = 0)
        # standard error of the mean across the repetitions:
        data["IError"] = rawDataI.std(axis = 0, ddof = 1) / np.sqrt(rawDataI.shape[0])
        self.simdata = data
        if parameters["ofname"] is not None:
            Path(parameters["projectdirectory"], parameters["ofname"]).parent.mkdir(parents=True, exist_ok=True)
            data.to_csv(Path(parameters["projectdirectory"], parameters["ofname"]).as_posix(), header = False, sep = ';', index = False)
        return {"data" : data,
                "parameters" : parameters}

    def runTests(self, Tests = None, start = 0, stop = None, group = None, numProcesses = None):
        """Run a series of simulations as defined in the Tests DataFrame.

        Only rows whose `testgroup` matches `group` are run (all rows when
        group is None). Returns a dict mapping test index to the multiRun
        result. `start` and `stop` are kept for interface compatibility but
        are currently unused.
        """
        resultDict = {}
        print(Tests)
        # bug fix: testindices used to be set only when `stop` was None,
        # raising NameError for any other value of `stop`
        testindices = Tests.index.values
        if group is not None:
            testindices = Tests[Tests.testgroup == group].index.tolist()
        for tn, testindex in enumerate(testindices):
            print("Test: {} of {}".format(tn + 1, len(testindices)))
            param = Tests.loc[testindex]
            res = self.multiRun(param, numProcesses = numProcesses)
            self.storeResult(res["parameters"], res["data"])
            resultDict.update({testindex: res})
        return resultDict

    def storeResult(self, parameters, data):
        """Store the averaged result and its metadata in a NeXus (.nxs) file
        next to the configured output file."""
        directDict = {}
        for rdKey, rdValue in parameters.items():
            if rdKey == 'data':
                continue  # skip this, not part of the metadata
            directDict.update(
                {'/sasentry1/simulationParameters/{}'.format(rdKey) : '{}'.format(rdValue)}
            )
        for rdKey in [
            'volumes',
            'surfaceAreas',
            'simdata',
            'IAtQMin',
        ]:
            directDict.update(
                {f'/sasentry1/simulationMetaValues/{rdKey}' : f'{getattr(self, rdKey)}'}
            )
        tp = Path(parameters['projectdirectory'], parameters['ofname'])
        NeXusWriter(
            filename = tp.with_suffix('.nxs'),
            Q = data['Q'],
            I = data['I'],
            IError = data['IError'],
            wavelength = 0.1542,
            wavelengthUnits = 'nm',
            title = 'Simulated data for testgroup: {}, file: {}'.format(
                parameters['testgroup'], parameters['filename']
            ),
            # modernized: datetime.datetime.utcnow() is deprecated; this
            # produces the identical aware UTC ISO timestamp
            timestamp = datetime.datetime.now(datetime.timezone.utc).replace(microsecond=0).isoformat(),
            overwrite = True,
            directDict = directDict,
        )
    # To move the resulting intensity to absolute units, do the following:
    # Multiply the intensity by the square of the SLD contrast between object and matrix
# multiply this with the volume fraction of the object in solution | 12,441 | 43.120567 | 360 | py |
SPONGE | SPONGE-main/distfunctions.py | # -*- coding: utf-8 -*-
import vtk
import pandas, scipy
import numpy as np
import scipy.spatial
import scipy.signal
import os, h5py
import scipy.stats # for the distributions
from scipy.interpolate import interp1d # for the interpolations
from scipy.stats import norm # gaussian distribution function
def distSpreadGaussian(simDat, sigma = 0.01, ndiv = 10):
    """Apply a Gaussian dilation/contraction-type size polydispersity.

    The scaling factors are sampled between the 5th and 95th percentile of a
    Gaussian centred on 1 with width `sigma`, and the interpolated curves are
    summed with their Gaussian weights.

    Returns (scaleFactors, weights, smearedData).
    """
    bounds = scipy.stats.distributions.norm.ppf([0.05, 0.95], loc = 1, scale = sigma)
    scaleFactors = np.linspace(bounds[0], bounds[1], ndiv)
    weights = scipy.stats.distributions.norm.pdf(scaleFactors, loc = 1, scale = sigma)
    accumulated = simDat * 0  # start from an all-zero copy of the input frame
    for idx, factor in enumerate(scaleFactors):
        rescaled = interpolate(simDat, QModifier = factor, modType = "multiply")
        accumulated += rescaled * weights[idx]
    accumulated["Q"] = simDat["Q"]  # restore the original Q grid
    return scaleFactors, weights, accumulated
def interpolate(dataset, QModifier = 1., modType = "multiply", mirror = False):
    """Interpolate a dataset back onto its own Q grid after modifying Q.

    (function modified from imp2.)
    Can do scaling (dilation/contraction) for size distributions
    (modType="multiply") or shifting for beam smearing calculations
    (modType="shift"). For the latter it is recommended to mirror the
    intensity to negative Q values using mirror=True; this only works for
    intensity profiles that are reasonably flat at q < qmin! Two more
    options exist:
      modType="slitSmear":    Q = sign(Q) * sqrt(Q**2 - QModifier**2)
      modType="invSlitSmear": Q = sign(Q) * sqrt(Q**2 + QModifier**2)

    Returns a DataFrame with columns Q, I, IError and Mask (True where the
    interpolation fell outside the available range).

    Raises ValueError for an unknown modType (previously this only printed a
    message and then crashed with NameError).
    """
    dst = pandas.DataFrame()
    dst["Q"] = dataset["Q"].copy()
    dst["I"] = np.full(dataset["Q"].shape, np.nan)  # initialize as nan
    dst["IError"] = np.full(dataset["Q"].shape, np.nan)
    dst["Mask"] = np.zeros(dataset["Q"].shape, dtype = bool)  # none masked
    if mirror:
        # prepend a reversed copy of the data and negate its Q values so the
        # profile extends to negative q. (pandas.concat replaces the
        # DataFrame.append call removed in pandas 2.x, and the explicit
        # column reassignment avoids chained-assignment pitfalls.)
        dataset = pandas.concat([dataset.iloc[::-1], dataset], ignore_index = True)
        mirroredQ = dataset["Q"].to_numpy(copy = True)
        mirroredQ[:dst["Q"].size] *= -1
        dataset = dataset.assign(Q = mirroredQ)
    if modType == "multiply":
        newQ = dataset["Q"] * QModifier
    elif modType == "shift":
        newQ = dataset["Q"] + QModifier
    elif modType == "slitSmear":
        newQ = np.sign(dataset.Q) * np.sqrt(dataset.Q**2 - QModifier**2)
    elif modType == "invSlitSmear":
        newQ = np.sign(dataset.Q) * np.sqrt(dataset.Q**2 + QModifier**2)
    else:
        raise ValueError(
            "modType not understood, should be 'multiply', 'shift', 'slitSmear' or 'invSlitSmear'.")
    # interpolator (linear) to equalize Q.
    fI = interp1d(newQ, dataset["I"],
                  kind = "linear", bounds_error = False)
    fE = interp1d(newQ, dataset["IError"],
                  kind = "linear", bounds_error = False)
    # interpolate, rely on Mask to deliver final limits
    dst["I"] = fI(dst["Q"])
    dst["IError"] = fE(dst["Q"])
    # extra mask clip based on I or IError values:
    dst["Mask"] |= dst["I"].isnull()
    dst["Mask"] |= dst["IError"].isnull()
    return dst
| 2,846 | 38 | 84 | py |
SPONGE | SPONGE-main/test_integraltest.py | import unittest
import pandas, os
from s import s
# some tests to ensure the functionality persists. This bypasses main.py, and directly addresses s
import warnings
import numpy as np
class testSponge(unittest.TestCase):
    """Integration tests: simulate reference sphere models end-to-end through
    the SPONGE pipeline and check I(qmin) and the recovered volumes."""

    def _run_sphere_case(self, filename, ofname, num_processes,
                         expected_i, i_atol, expected_volume, vol_atol):
        """Run one SPONGE simulation of a sphere STL and verify its results.

        The three previous copy-pasted test bodies differed only in the model
        file, output name, process count and expected values; this helper
        factors that repetition out.
        """
        sponge_instance = s(run=False)
        tests = pandas.DataFrame(data={
            'testgroup': 'unittest',
            'filename': filename,
            'qmin': 0.1,
            'qmax': 160,
            'nq': 400,
            'nrep': 10,  # normally 100
            'npoints': 1000,  # for testing only; up to 32k, memory permitting
            'mu': 1.,  # mean scaling factor
            'sigma': 0.,  # (gaussian distribution width), now vol-weighted!
            'memsave': True,  # speeds things up a little
            'ofname': ofname,
            'projectdirectory': os.getcwd(),
        }, index=[0])
        # numProcesses=1 runs without multiprocessing
        res = sponge_instance.multiRun(tests.loc[0], numProcesses=num_processes)
        sponge_instance.storeResult(res["parameters"], res["data"])
        np.testing.assert_allclose(sponge_instance.IAtQMin, expected_i, atol=i_atol)
        np.testing.assert_allclose(sponge_instance.volumes, expected_volume, atol=vol_atol)

    def testSponge(self):
        # sphere of radius 1 (single-process path)
        self._run_sphere_case('test/models/spheres/SphereR1F200.stl',
                              'test/simdata/sphere_unittest.dat', 1,
                              0.998, 0.001, 4.19, 0.01)
        # sphere of radius 3 (multiprocessing path)
        self._run_sphere_case('test/models/spheres/SphereR3F200.stl',
                              'test/simdata/sphereR3_unittest.dat', 2,
                              0.982, 0.002, 113.1, 0.1)
        # sphere of radius 0.5 (multiprocessing path)
        self._run_sphere_case('test/models/spheres/SphereR0p5F200.stl',
                              'test/simdata/sphereR0p5_unittest.dat', 2,
                              0.999, 0.001, 0.524, 0.001)

if __name__ == "__main__":
    unittest.main()
Automata-Learning-meets-Shielding | Automata-Learning-meets-Shielding-master/world_scaler.py | from collections import defaultdict
from gym_partially_observable_grid.envs import PartiallyObservableWorld
def parse_file(path_to_file):
    """Parse a grid-world description file into its per-section layouts.

    The file consists of up to four sections ('Layout', 'Abstraction',
    'Behaviour', 'Rewards'), each introduced by a header line containing the
    section name. Blank lines and '//' comments are ignored. Each section's
    layout is returned as a list of character lists with the outer border
    rows/columns stripped; any lines following a section's layout are joined
    (newline-terminated) into that section's mapping string.

    Returns
    -------
    (layout, abstraction_layout, abstraction_mappings, behaviour_layout,
     behaviour_mapping, rewards_layout, rewards_mappings)
    """
    content = defaultdict(list)
    current_section = None
    sections = ['Layout', 'Abstraction', 'Behaviour', 'Rewards']
    # fix: the file handle used to be opened here and never closed
    with open(path_to_file, 'r') as file:
        for line in file.readlines():
            line = line.strip()
            if not line or line.startswith('//'):
                continue
            is_header = False
            for section_name in sections:
                if section_name in line:
                    current_section = section_name
                    is_header = True
            if is_header:
                continue
            content[current_section].append(line)

    layout = [list(line) for line in content['Layout']]

    # the first len(layout) lines of each section are the layout proper;
    # anything after that is treated as a mapping definition
    abstraction_layout = [list(line) for line in content['Abstraction']][:len(layout)]
    abstraction_mappings = ''
    for am in content['Abstraction'][len(layout):]:
        abstraction_mappings += f'{am}\n'

    behaviour_layout = [list(line) for line in content['Behaviour']][:len(layout)]
    behaviour_mapping = ''
    for rm in content['Behaviour'][len(layout):]:
        behaviour_mapping += f'{rm}\n'

    rewards_layout = [list(line) for line in content['Rewards']][:len(layout)]
    rewards_mappings = ''
    for rm in content['Rewards'][len(layout):]:
        rewards_mappings += f'{rm}\n'

    # strip the border ('#') rows and columns from every non-empty layout
    for l in [layout, abstraction_layout, behaviour_layout, rewards_layout]:
        if not l:
            continue
        l.pop(0)
        l.pop()
        for line in l:
            line.pop(0)
            line.pop()

    return layout, abstraction_layout, abstraction_mappings, behaviour_layout, behaviour_mapping, rewards_layout, rewards_mappings
def create_world(file_name, parsed_values, repeat_x, repeat_y):
    """Tile a parsed grid-world `repeat_x` times horizontally and `repeat_y`
    times vertically, deduplicate 'E'/'G' tiles in the layout, re-add '#'
    borders and write the scaled world back out in the section file format."""
    layout, abstraction_layout, abstraction_mappings, rules_layout, rules_mappings, rewards_layout, rewards_mappings = parsed_values
    layout_world = []
    abstract_world = []
    behaviour_world = []
    rewards_world = []
    # build each scaled world by repeating the source rows/columns
    for world, word_list in [(layout_world, layout), (abstract_world, abstraction_layout),
                             (behaviour_world, rules_layout), (rewards_world, rewards_layout)]:
        if not word_list:
            continue
        curr_row = 0
        # NOTE(review): last_row is computed but never used
        last_row = repeat_y * len(layout) - 1
        for r_y in range(repeat_y * len(layout)):
            line = []
            for x in range(repeat_x):
                # repeat the source row repeat_x times side by side
                line.extend(word_list[curr_row % len(layout)])
            world.append(line)
            curr_row += 1
        # re-add the left/right '#' border on every row of this world
        for line in world:
            line.insert(0, '#')
            line.append('#')
    # keep only one entry tile 'E' (the first) and one goal tile 'G' (the
    # last) in the tiled layout; all other copies become empty floor
    e_set, last_g_location = False, None
    for x, line in enumerate(layout_world):
        for y, tile in enumerate(line):
            if tile == 'G':
                last_g_location = (x, y)
    for x, line in enumerate(layout_world):
        for y, tile in enumerate(line):
            if tile == 'E':
                if not e_set:
                    e_set = True
                    continue
                else:
                    layout_world[x][y] = ' '
            if tile == 'G' and (x, y) != last_g_location:
                layout_world[x][y] = ' '
    # NOTE(review): `world` here is the leaked loop variable, i.e. the LAST
    # non-empty world from the loop above -- so the top/bottom '#' border is
    # added to that world only (and the same list object is inserted twice,
    # aliased). This looks unintended for multi-section worlds; confirm.
    top_bottom_line = ['#' for _ in range(len(world[0]))]
    world.insert(0, top_bottom_line)
    world.append(top_bottom_line)
    with open(file_name, 'w') as file:
        file.write('===Layout===\n\n')
        for l in layout_world:
            file.write(''.join(l)+'\n')
        file.write('\n')
        if abstract_world:
            file.write('===Abstraction===\n\n')
            for l in abstract_world:
                file.write(''.join(l)+'\n')
            file.write('\n' + abstraction_mappings + '\n')
        if behaviour_world:
            file.write('===Behaviour===\n\n')
            for l in behaviour_world:
                file.write(''.join(l)+'\n')
            file.write('\n' + rules_mappings + '\n')
        if rewards_world:
            file.write('===Rewards===\n\n')
            for l in rewards_world:
                file.write(''.join(l)+'\n')
            file.write('\n' + rewards_mappings + '\n')

if __name__ == '__main__':
    # Example usage: scale unsafe_world1 3x horizontally and 2x vertically.
    p = parse_file('worlds/unsafe_world1.txt')
    create_world('worlds/scaled_unsafe_world1.txt', p, 3, 2)
| 4,346 | 31.440299 | 132 | py |
Automata-Learning-meets-Shielding | Automata-Learning-meets-Shielding-master/active_passive_learning.py | import random
from collections import Counter
from statistics import mean
from time import sleep
import gym
import gym_partially_observable_grid
from aalpy.base import SUL
from aalpy.learning_algs import run_active_Alergia
from aalpy.learning_algs.stochastic_passive.ActiveAleriga import Sampler
from aalpy.utils import visualize_automaton, save_automaton_to_file, load_automaton_from_file
from tempest_shields import TempestInterface
from utils import StochasticWorldSUL, test_model_with_tempest
class SafeSampler(Sampler):
    """Sampler for active ALERGIA that biases exploration with a Tempest
    shield computed on the current model hypothesis.

    With probability `eps`, the next input is taken from the shield's
    suggestion (when the current model state is known); otherwise a random
    input is chosen. Sampling falls back to fully random behaviour when the
    shield's satisfaction probability is below 0.5 or when an observed
    output is not reachable in the hypothesis.
    """
    def __init__(self, input_al, eps=0.9, num_new_samples=2000, min_seq_len=10, max_seq_len=50):
        # input_al: input alphabet; eps: probability of following the shield;
        # num_new_samples: episodes generated per sampling round;
        # min/max_seq_len: bounds on the random episode length
        self.eps = eps
        self.new_samples = num_new_samples
        self.input_al = input_al
        self.min_seq_len = min_seq_len
        self.max_seq_len = max_seq_len

    def sample(self, sul, model):
        """Generate `num_new_samples` episodes on `sul`, guided by a shield
        built from the current hypothesis `model`. Returns a list of traces
        of the form ['Init', (input, output), ...]."""
        # Here you get a current model
        # visualize_automaton(model)
        # TODO extract unsafe states from the model
        # NOTE(review): this collects state *outputs* (not state ids) and the
        # resulting set is never used afterwards -- left as the author's TODO.
        unsafe_state_ids = set()
        for s in model.states:
            if s.output in {'death', 'unsafe1', 'unsafe2'}:
                unsafe_state_ids.add(s.output)

        new_data = []
        # I guess you can keep the overall structure the same (reset, step,...)
        tempest_interface = TempestInterface("GOAL", model, None, "death")
        # if the shield cannot guarantee much, do not bother following it
        completely_random = True if tempest_interface.property_val < 0.5 else False
        for _ in range(self.new_samples):
            sample = ['Init']
            sul.pre()
            tempest_interface.reset()
            continue_random = completely_random
            for _ in range(random.randint(self.min_seq_len, self.max_seq_len)):
                if not continue_random and random.random() < self.eps:
                    # TODO get a step from tempest
                    i = tempest_interface.get_input()
                    if i is None:
                        i = random.choice(self.input_al)
                else:
                    i = random.choice(self.input_al)
                o = sul.step(i)
                sample.append((i, o))
                # if observed output is not reachable in the model,
                # fall back to random inputs for the rest of this episode
                continue_random = not tempest_interface.step_to(i, o)
            sul.post()
            new_data.append(sample)
        return new_data
def get_initial_data(sul, input_al, initial_sample_num=5000, min_seq_len=10, max_seq_len=50):
    """Collect uniformly random episodes from the system under learning.

    Each episode starts with the marker 'Init' followed by (input, output)
    pairs of a randomly chosen length in [min_seq_len, max_seq_len].

    Returns a list of such episodes, `initial_sample_num` in total.
    """
    episodes = []
    for _ in range(initial_sample_num):
        trace = ['Init']
        sul.pre()
        episode_length = random.randint(min_seq_len, max_seq_len)
        for _ in range(episode_length):
            action = random.choice(input_al)
            observation = sul.step(action)
            trace.append((action, observation))
        sul.post()
        episodes.append(trace)
    return episodes
# --- Experiment configuration and entry point -------------------------------
# Make environment deterministic even if it is stochastic
force_determinism = False
# Add slip to the observation set (action failed)
indicate_slip = True
# Use abstraction/partial observability. If set to False, (x,y) coordinates will be used as outputs
is_partially_obs = True

# bounds on the length of each sampled episode
min_seq_len, max_seq_len = 10, 50

# the partially observable grid-world environment under learning
world = gym.make(id='poge-v1',
                 world_file_path='worlds/unsafe_world1.txt',
                 force_determinism=force_determinism,
                 indicate_slip=indicate_slip,
                 is_partially_obs=is_partially_obs,
                 one_time_rewards=True)

input_al = list(world.actions_dict.keys())

# wrap the environment as an AALpy system under learning
sul = StochasticWorldSUL(world)

# bootstrap with purely random episodes, then refine with shield-guided sampling
data = get_initial_data(sul, input_al, initial_sample_num=10000, min_seq_len=min_seq_len, max_seq_len=max_seq_len)
sampler = SafeSampler(input_al, eps=0.9, num_new_samples=2000, min_seq_len=min_seq_len, max_seq_len=max_seq_len)

final_model = run_active_Alergia(data=data, sul=sul, sampler=sampler, n_iter=5)
# final_model = load_automaton_from_file('passive_active.dot', automaton_type='mdp')
print(f'Final model size: {final_model.size}')
# save_automaton_to_file(final_model, 'passive_active')
test_model_with_tempest(final_model, sul, input_al, num_episodes=100)
| 4,041 | 34.45614 | 114 | py |
Automata-Learning-meets-Shielding | Automata-Learning-meets-Shielding-master/tempest_shields.py | from pathlib import Path
import os
from aalpy.utils import mdp_2_prism_format
# Absolute path to the local Tempest/Storm model-checker binary.
# NOTE(review): machine-specific path -- should be made configurable.
#tempest_binary = "/home/msi/tempest-shields/build/bin/storm"
tempest_binary = "/home/stefan/projects/tempest-public/build/bin/storm"
# When True, pause after each Tempest invocation for manual inspection.
debug = False
# Number of actions expected in every state of the grid-world MDP.
input_cardinality = 4
# Action labels; must match the [action] labels in the generated PRISM model.
inputs = ["left", "right", "up", "down"]
class TempestInterface:
    """Bridge between a learned MDP hypothesis and the Tempest model checker.

    Exports the model to PRISM format, rewrites it into a stochastic game,
    invokes the external Tempest/Storm binary to compute a safety shield, and
    offers helpers (is_safe, get_input, step_to, ...) to consult that shield
    while tracking the current model state during interaction.
    """
    def __init__(self, dest, model, num_steps=None, safety=None, threshold=None):
        # dest: label of the reachability target (e.g. "GOAL");
        # model: AALpy MDP hypothesis; num_steps: optional bounded horizon;
        # safety: label to avoid (e.g. "death"); threshold: shield lambda.
        self.tmp_dir = Path("tmp")
        self.dest = dest
        self.safety = safety
        self.model = model
        self.model_dict = None
        self.num_steps = num_steps
        self.tmp_mdp_file = (self.tmp_dir / f"po_rl_{dest}.prism")
        self.tmp_properties_file = (self.tmp_dir / f"po_rl_{dest}.prop")
        # self.tmp_prop_file = f"{self.tmp_dir_name}/po_rl.props"
        self.current_state = None
        self.tmp_dir.mkdir(exist_ok=True)
        self.introduce_self_loops()
        mdp_2_prism_format(self.model, "porl", output_path=self.tmp_mdp_file)
        self.property_val = 0
        self.threshold = threshold
        self.safety_shield = None # = TempestShieldParser(f"safety_shield_{self.safety}.shield")
        # NOTE(review): reachability_probabilities is never assigned a parser
        # (the line is commented out below), yet get_input() dereferences it;
        # get_input() will raise AttributeError as written -- confirm.
        self.reachability_probabilities = None #TempestShieldParser(f"eventually_shield_{self.dest}.shield")
        self.call_tempest()

    def introduce_self_loops(self):
        """Complete the model: every state missing an action gets a
        probability-1 self-loop for it, so the PRISM export is total."""
        for state in self.model.states:
            if len(state.transitions) != input_cardinality:
                for inp in inputs:
                    if not inp in state.transitions:
                        state.transitions[inp] = [(state, 1.0)]

    def create_safety_shielding_property(self):
        """Return the Tempest pre-safety shield property string (globally
        avoid the `safety` label, optionally step-bounded)."""
        assert self.threshold is not None
        if not self.num_steps:
            prop = f"<safety_shield_{self.safety}, PreSafety, lambda={self.threshold}> <<robot>> Pmax=?[G !\"{self.safety}\"];"
        else:
            prop = f'<safety_shield_{self.safety}, PreSafety, lambda={self.threshold}> <<robot>> Pmax=?[G<{self.num_steps} !\"{self.safety}\"];'
        return prop
        # NOTE(review): unreachable statement below (dead code)
        return None

    def create_eventually_shielding_property(self):
        """Return the reachability property string (eventually reach `dest`,
        optionally step-bounded). Currently unused (see create_property_file)."""
        if not self.num_steps:
            prop = f"<eventually_shield_{self.dest}, PreSafety, gamma=0.0> <<robot>> Pmax=?[F \"{self.dest}\"];"
        else:
            prop = f'<safety_shield_{self.dest}, PreSafety, gamma=0.0> <<robot>> Pmax=?[F<{self.num_steps} \"{self.dest}\"];'
        return prop

    def create_property_file(self):
        """Write the property file handed to Tempest (safety property only)."""
        with open(self.tmp_properties_file, "w") as properties_file:
            if self.safety:
                properties_file.write(self.create_safety_shielding_property())
                properties_file.write("\n")
            #properties_file.write(self.create_eventually_shielding_property())

    def get_input(self):
        """Suggest the best action from the current state: the highest-valued
        reachability action that the safety shield allows; None when the
        current state is unknown."""
        if self.current_state is None:
            return None
        else:
            available_actions = sorted(self.reachability_probabilities.transitions_dict[self.current_state],
                                       key=lambda a: a.probability, reverse=True)
            safe_actions = []
            if self.safety:
                safe_actions = [pair.action for pair in self.safety_shield.transitions_dict[self.current_state]]
            i = 0
            if self.safety:
                # walk down the ranking until a shield-approved action is found
                while available_actions[i].action not in safe_actions:
                    i += 1
            return available_actions[i].action

    def print_shield(self):
        """Debug helper: print every state's allowed actions with values."""
        assert self.safety_shield is not None
        for k in self.safety_shield.transitions_dict:
            print(f"State {k}:")
            for action in self.safety_shield.transitions_dict[k]:
                print(f"{action.action} : {action.probability}", end=", ")
            print("")

    def is_safe(self, action):
        """Return True when `action` is permitted by the safety shield in the
        current state (trivially True without a shield or known state)."""
        #assert self.safety_shield is not None
        if not self.safety_shield:
            return True
        if not self.current_state:
            return True
        #for k in self.safety_shield.transitions_dict:
        #    print(f"{k}: {self.safety_shield.transitions_dict[k]}")
        #print(f"Testing safe actions for state (loc) {self.current_state} and action {action}")
        #print(f"safe actions are {self.get_safe_actions()}")
        try:
            safe_actions = [pair.action for pair in self.safety_shield.transitions_dict[self.current_state]]
            return action in safe_actions
        except:
            # debug fallback: dump the model/shield and abort on lookup failure
            for k in self.model.states:
                print(f"{k.output}")
            for k in sorted(self.safety_shield.transitions_dict):
                print(f"{k}: {self.safety_shield.transitions_dict[k]}")
            print(f"{len(self.model.states)}")
            #for k in self.model.states
            assert False

    def get_safe_action_space(self):
        """Return the list of shield-approved actions in the current state."""
        safe_action_space = []
        for action in self.safety_shield.transitions_dict[self.current_state]:
            safe_action_space.append(action.action)
        return safe_action_space

    def get_safe_actions(self):
        """Return a human-readable 'action<value>, ...' string for the
        current state's shield entries."""
        human_string = ""
        for action in self.safety_shield.transitions_dict[self.current_state]:
            human_string += f"{action.action}<{action.probability}>, "
        return human_string

    def reset(self):
        """Move the tracked model state back to the initial state 'q0'."""
        self.current_state = 'q0'

    def step_to(self, input, output):
        """Advance the tracked model state along `input` to a successor with
        the observed `output`. Returns True on success, False when the
        observation is not reachable in the model."""
        found_state = False
        #print()
        for state in self.model.states:
            if state.state_id != self.current_state:
                continue
            for ns in state.transitions[input]:
                if ns[0].output == output:
                    found_state = True
                    self.current_state = ns[0].state_id
                    return found_state
        return found_state

    def call_tempest(self):
        """Run Tempest on the exported model: rewrite the PRISM MDP into a
        stochastic game, write the property file, invoke the binary, parse
        the resulting satisfaction probability and load the shield file.

        Returns the parsed property value (0 when no shield was computed)."""
        import subprocess
        from os import path
        self.property_val = 0
        # a shield only makes sense when the safety label occurs in the model
        safety_in_model = False
        for s in self.model.states:
            if s.output == self.safety:
                safety_in_model = True
                break
        if not safety_in_model:
            print('\t\t[WARN] SHIELD NOT COMPUTED')
            return self.property_val
        model_abs_path = path.abspath(self.tmp_mdp_file)
        mdp_file = open(model_abs_path, "rt")
        # replacement header turning the exported 'mdp' into a 2-player game
        game_header = """
smg
player robot
  [up], [down], [left], [right]
endplayer
player none
  none
endplayer
module none
endmodule """
        data = mdp_file.read()
        data = data.replace('mdp', game_header)
        mdp_file.close()
        mdp_file = open(model_abs_path, "wt")
        mdp_file.write(data)
        mdp_file.close()
        # remove a stale shield so we never parse leftovers from a prior run
        os.system("rm -rf safety_shield_death.shield")
        self.create_property_file()
        proc = subprocess.Popen(
            [tempest_binary, "--prism", model_abs_path, "--prop", self.tmp_properties_file, "--buildstateval", "--buildchoicelab"],
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out = proc.communicate()[0]
        out = out.decode('utf-8').splitlines()
        for line in out:
            #print(line)
            if not line:
                continue
            if 'Syntax error' in line:
                print(line)
            else:
                if "Result" in line:
                    result_content = line.split(' ')
                    try:
                        self.property_val = float(result_content[-1])
                    except ValueError:
                        print("Result parsing error")
        print(f"Probability to satisfy: {self.property_val}")
        if debug: input("cat tmp file")
        proc.kill()
        self.safety_shield = TempestShieldParser(f"safety_shield_{self.safety}.shield")
        #self.reachability_probabilities TempestShieldParser(f"eventually_shield_{self.dest}.shield")
        return self.property_val
class ActionProbTuple:
    """Pairs an action label with the probability the shield assigns to it."""

    def __init__(self, probability, action):
        self.probability = probability
        self.action = action


class TempestShieldParser:
    """Parser for a Tempest shield file.

    Populates ``transitions_dict``, mapping a state name ``'q<id>'`` to the
    list of ActionProbTuple entries the shield allows in that state.
    The first three lines of the file are a header and the final line is
    not a state entry; both are skipped.
    """

    def __init__(self, shield_file):
        header_line_count = 3
        self.transitions_dict = dict()
        with open(shield_file, "r") as handle:
            self.shield_file_content = handle.readlines()
        for row in self.shield_file_content[header_line_count:-1]:
            # The state id is the value after '=' inside the [...] valuation.
            state_name = 'q' + row[row.find("[") + 1:row.find("]")].split("=")[-1]
            allowed = list()
            # Everything after ']' is a ';'-separated list of "p:{action}" entries.
            for entry in row[row.find("]") + 1:-1].split(";"):
                prob = float(entry[0:entry.find(":")])
                label = entry[entry.find("{") + 1:entry.find("}")]
                allowed.append(ActionProbTuple(prob, label))
            self.transitions_dict[state_name] = allowed
| 8,643 | 36.258621 | 144 | py |
Automata-Learning-meets-Shielding | Automata-Learning-meets-Shielding-master/utils.py | import random
from statistics import mean
from aalpy.base import SUL
from tempest_shields import TempestInterface
class StochasticWorldSUL(SUL):
    """System-under-learning adapter exposing a stochastic gridworld to AALpy.

    Maps env steps to abstract output strings: 'GOAL' once the goal reward is
    seen, 'MAX_STEPS_REACHED' once the episode is done, otherwise the decoded
    (possibly reward-annotated) observation.
    """

    def __init__(self, stochastic_world):
        super().__init__()
        self.world = stochastic_world
        # Sticky per-episode flags: once set, the corresponding sink output
        # is returned for the rest of the episode.
        self.goal_reached = False
        self.is_done = False

    def pre(self):
        """Reset the episode flags and the underlying world before a query."""
        self.goal_reached = False
        self.is_done = False
        self.world.reset()

    def post(self):
        pass

    def step(self, letter):
        """Execute one input `letter` (or None to observe) and return the output string."""
        if letter is None:
            output = self.world.get_abstraction()
            # BUG FIX: original called `.isdigit().isdigit()`, which raises
            # AttributeError because bool has no isdigit().
            if output[0].isdigit():
                output = f'state_{output}'
            return output
        output, reward, done, info = self.world.step(self.world.actions_dict[letter])
        if reward == self.world.goal_reward or self.goal_reached:
            self.goal_reached = True
            return "GOAL"
        if done or self.is_done:
            self.is_done = True
            return "MAX_STEPS_REACHED"
        output = self.world.decode(output)
        # (x, y) coordinate observations become 'x_y'.
        if isinstance(output, tuple):
            output = f'{output[0]}_{output[1]}'
        if reward != 0:
            # Negative rewards are encoded as 'neg_<abs>' for label safety.
            reward = reward if reward > 0 else f'neg_{reward * -1}'
        if output[0].isdigit():
            output = f'state_{output}'
        if reward != 0:
            output = f'{output}_r_{reward}'
        return output
def test_model_with_tempest(model, sul, input_al, num_episodes, max_ep_len=100):
    """Evaluate a learned model's Tempest scheduler against the real system.

    Runs `num_episodes` episodes on `sul`, following the scheduler's input
    suggestions where possible (random inputs otherwise), and prints how
    often the goal / step limit / unsafe output were reached.
    """
    num_steps_per_ep = []
    goal_reached = 0
    max_steps_reached = 0
    unsafe_reached = 0
    tempest_interface = TempestInterface("GOAL", model)
    print(f'Goal Reachability: {tempest_interface.property_val}')
    for _ in range(num_episodes):
        step_counter = 0
        scheduler_step_valid = True
        sul.pre()
        tempest_interface.reset()
        while True:
            if step_counter == max_ep_len:
                break
            i = tempest_interface.get_input()
            # Fall back to a random input when the scheduler lost track of
            # the system or has no suggestion.
            if not scheduler_step_valid or i is None:
                i = random.choice(input_al)
            o = sul.step(i)
            # print(f"{step_counter: <4} Input: {i} leads to {o}")
            step_counter += 1
            scheduler_step_valid = tempest_interface.step_to(i, o)
            if o == 'GOAL':
                goal_reached += 1
                break
            if o == 'MAX_STEPS_REACHED':
                max_steps_reached += 1
                break
            if o == 'death':
                unsafe_reached += 1
        num_steps_per_ep.append(step_counter)
        sul.post()
    print(f'Tested on {num_episodes} episodes:')
    print(f'Goal reached : {goal_reached}')
    print(f'Max Steps reached : {max_steps_reached}')
    print(f'Unsafe reached : {unsafe_reached}')
    print(f'Avg. step count : {mean(num_steps_per_ep)}')
def writeSamplesToFile(samples, path="alergiaSamples.txt"):
    """Append each sample as one comma-separated line to `path`.

    Each sample is expected to be ``[initial_output, (input, output), ...]``
    and is written as ``initial_output,i1,o1,i2,o2,...``.

    NOTE: this consumes `samples` — the initial output is popped from every
    sample and the list itself is cleared afterwards (callers rely on this).
    """
    with open(path, 'a') as f:
        for sample in samples:
            s = str(sample.pop(0))
            for i, o in sample:
                s += f',{i},{o}'
            f.write(s + '\n')
        # BUG FIX (idiom): removed the redundant f.close() — the `with`
        # block already closes the file.
    samples.clear()
def deleteSampleFile(path="alergiaSamples.txt"):
    """Remove the Alergia sample file; silently do nothing if it is absent."""
    import os
    try:
        os.remove(path)
    except FileNotFoundError:
        pass
| 3,247 | 27.491228 | 85 | py |
Automata-Learning-meets-Shielding | Automata-Learning-meets-Shielding-master/q_learning.py | import sys
import random
from statistics import mean
import gym
from aalpy.learning_algs import run_Alergia, run_JAlergia
import gym_partially_observable_grid
import numpy as np
debug = False
from termcolor import colored
import copy
# Make environment deterministic even if it is stochastic
from tempest_shields import TempestInterface
from utils import writeSamplesToFile, deleteSampleFile
# --- Command-line configuration -------------------------------------------
# Usage: q_learning.py <world_file> <setting> <threshold>
world_file = sys.argv[1]
setting = sys.argv[2]    # one of 'no_penalty', 'penalty', 'shielded'
threshold = sys.argv[3]  # safety threshold forwarded to TempestInterface
force_determinism = False
# Add slip to the observation set (action failed). Only necessary if is_partially_obs is set to True AND you want
# the underlying system to behave like deterministic MDP.
indicate_slip = False
# Use abstraction/partial observability. If set to False, (x,y) coordinates will be used as outputs
is_partially_obs = False
indicate_wall = False
# If one_time_rewards is set to True, reward in single location will be obtained only once per episode.
# Otherwise, reward will be given every time
one_time_rewards = True
step_penalty = 0.5
env = gym.make(id='poge-v1',
               world_file_path=world_file,
               force_determinism=force_determinism,
               indicate_slip=indicate_slip,
               is_partially_obs=is_partially_obs,
               indicate_wall=indicate_wall,
               one_time_rewards=one_time_rewards,
               step_penalty=step_penalty)
# Hyper parameters
alpha = 0.1   # Q-learning step size
gamma = 0.9   # discount factor
num_training_episodes = 30000
interval_size = 1000  # episodes between progress reports / shield re-fits
forbidden_state_reward = -100  # penalty for entering a 'd' (death) tile
# For plotting metrics
all_epochs = []
def safe_argmax(q_table, state, shield):
    """Return the highest-valued action for `state` that the shield allows.

    Candidates are tried in descending Q-value order; when no candidate is
    considered safe, falls back to the plain (unshielded) argmax.
    """
    # argsort is ascending, so pop() yields the best remaining action index.
    action_value_indices = list(q_table[state].argsort())
    while action_value_indices:
        action = action_value_indices.pop()
        if shield.is_safe(env.action_space_to_act_map[action]):
            return action
    # BUG FIX: the original loop condition tested `action_value_indices is
    # not None` (always true), so exhausting the candidates raised
    # IndexError from pop() instead of ever reaching this fallback.
    return np.argmax(q_table[state])
# def get_abstract_output(state, reward):
# x, y = env.decode(state)
# if env.abstract_world[x][y] == 'd':
# output = 'death'
# elif reward == env.goal_reward:
# output = 'GOAL'
# else:
# output = f's{x}_{y}'
# return output
def get_abstract_output(state, reward):
    """Map an env state (and step reward) to its abstract output label.

    'death' for forbidden tiles, 'GOAL' when the goal reward was obtained,
    otherwise the abstract tile symbol with a 'death_<dir>' suffix appended
    for every adjacent forbidden tile.
    """
    x, y = env.decode(state)
    grid = env.abstract_world
    if grid[x][y] == 'd':
        return 'death'
    if reward == env.goal_reward:
        return 'GOAL'
    output = grid[x][y]
    # (in-bounds?, neighbour coords, suffix) — same order as the original checks.
    neighbour_checks = (
        (x + 1 < len(grid), x + 1, y, 'death_r'),
        (x - 1 >= 0, x - 1, y, 'death_l'),
        (y + 1 < len(grid[x]), x, y + 1, 'death_u'),
        (y - 1 >= 0, x, y - 1, 'death_d'),
    )
    for in_bounds, nx, ny, suffix in neighbour_checks:
        if in_bounds and grid[nx][ny] == 'd':
            output += suffix
    return output
def train_agent(training_type='no_penalty', verbose=False):
    """Train a tabular Q-learning agent on the global `env`.

    training_type:
        'no_penalty' - plain Q-learning; forbidden tiles only counted.
        'penalty'    - entering a forbidden tile ends the episode with
                       `forbidden_state_reward`.
        'shielded'   - additionally learns an MDP model of the traces with
                       JAlergia every `interval_size` episodes and uses a
                       Tempest shield to restrict action selection.
    Returns (q_table, tempest_interface); the interface is None unless a
    shield was fitted.
    """
    epsilon = 0.4
    epsilon_threshold = 0.1
    assert training_type in {'no_penalty', 'penalty', 'shielded'}
    deleteSampleFile();
    ep_reward = []
    training_forbidden = 0
    print(f'Training for {training_type} started.')
    training_data = []
    model = None
    tempest_interface = None
    q_table = np.zeros([env.observation_space.n, env.action_space.n])
    for i in range(1, num_training_episodes + 1):
        # print(f"Episode {i}")
        # Exponential epsilon decay, clamped at epsilon_threshold.
        epsilon = epsilon * 0.9999
        if epsilon < epsilon_threshold: epsilon = epsilon_threshold
        # Periodic reporting and (for 'shielded') model/shield re-fitting.
        if i % interval_size == 0:
            poscount = 0
            if debug:
                for r in ep_reward:
                    if r <= 0:
                        print(colored(r, "red"), end=",")
                    elif r <= 40:
                        print(colored(r, "blue"), end=",")
                    else:
                        print(colored(r, "green"), end=",")
                        poscount = poscount + 1
            if debug: print(f"\nPoscount: {poscount} / {interval_size}")
            print("")
            if verbose:
                print(f'Episodes [{i - interval_size}, {i}]')
                print(f'Avg. reward: {mean(ep_reward)}')
                print(f'Avg. forbidden frequency: {training_forbidden // interval_size}')
                print('--------------------------------------------------------')
            ep_reward.clear()
            training_forbidden = 0
            if debug: input("")
            if training_type == 'shielded':
                # Dump the accumulated traces and learn an MDP from them,
                # then recompute the shield via Tempest.
                writeSamplesToFile(training_data)
                model = run_JAlergia(path_to_data_file='alergiaSamples.txt', automaton_type='mdp', eps=0.005,
                                     path_to_jAlergia_jar='alergia.jar', heap_memory='-Xmx12g')
                # training_data.clear()
                tempest_interface = TempestInterface("GOAL", model, 2, "death", threshold)
        state = env.reset()
        if training_type == 'shielded' and tempest_interface is not None:
            tempest_interface.reset()
        episode_steps = [get_abstract_output(state, 0)]
        culmalative_reward = 0
        done = False
        while not done:
            # Epsilon-greedy action selection, restricted by the shield when available.
            if random.random() < epsilon:
                if training_type == 'shielded' and tempest_interface is not None:
                    action = env.actions_dict[random.sample(tempest_interface.get_safe_action_space(), 1)[0]]
                else:
                    action = env.action_space.sample()
            else:
                if training_type != 'shielded' or tempest_interface is None:
                    action = np.argmax(q_table[state])
                else:
                    action = safe_argmax(q_table, state, tempest_interface)
            # if training_type == 'shielded' and tempest_interface is not None: print(f"playerlocation: {env.player_location[0]}, {env.player_location[1]} action: {env.action_space_to_act_map[action]}, shield state: {tempest_interface.current_state}")
            next_state, reward, done, info = env.step(action)
            x, y = env.player_location[0], env.player_location[1]
            # add for Alergia
            if training_type == 'shielded':
                output = get_abstract_output(next_state, reward)
                action_name = env.action_space_to_act_map[action]
                if tempest_interface is not None:
                    tempest_interface.step_to(action_name, output)
                episode_steps.append((action_name, output))
            # If forbidden state is reached
            if env.abstract_world[x][y] == 'd':
                training_forbidden += 1
                if training_type == 'penalty' or training_type == 'shielded':
                    reward = forbidden_state_reward
                    done = True
            # print(f"Episode: {episode_steps}, location: {env.player_location}")
            # input("")
            # Standard Q-learning update.
            old_value = q_table[state, action]
            next_max = np.max(q_table[next_state])
            new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)
            q_table[state, action] = new_value
            culmalative_reward += reward
            if done:
                ep_reward.append(culmalative_reward)
                training_data.append(episode_steps)
            state = next_state
    print("Training finished.")
    deleteSampleFile()
    return q_table, tempest_interface
def train_all_agents():
    """Train one agent per reward-shaping setting and return all three results."""
    all_settings = ('no_penalty', 'penalty', 'shielded')
    return tuple(train_agent(current) for current in all_settings)
def evaluate_agent(agent_q_table, shield=None, num_ep=100):
    """Run the greedy policy of `agent_q_table` for `num_ep` episodes and print stats.

    When a `shield` is given, action selection is restricted to shield-safe
    actions and the shield state is advanced alongside the environment.
    """
    episodes = num_ep
    goals_reached = 0
    forbidden_state_reached = 0
    total_steps = 0
    for _ in range(episodes):
        state = env.reset()
        if shield:
            shield.reset()
        done = False
        while not done:
            # BUG FIX: the shielded branch previously read the module-level
            # `q_table` instead of the `agent_q_table` argument.
            action = np.argmax(agent_q_table[state]) if shield is None else safe_argmax(agent_q_table, state, shield)
            state, reward, done, info = env.step(action)
            if shield:
                output = get_abstract_output(state, reward)
                shield.step_to(env.action_space_to_act_map[action], output)
            x, y = env.player_location[0], env.player_location[1]
            # Entering a forbidden tile terminates the episode with a penalty.
            if env.abstract_world[x][y] == 'd':
                reward = forbidden_state_reward
                forbidden_state_reached += 1
                done = True
            if reward == env.goal_reward and done:
                goals_reached += 1
            total_steps += 1
    print(f"Results after {episodes} episodes:")
    print(f"Total Number of Goal reached: {goals_reached}")
    print(f"Average timesteps per episode: {total_steps / episodes}")
# Script entry: train one agent with the CLI-selected setting, then evaluate
# the learned policy (with its shield, if any) over 1000 episodes.
q_table, shield = train_agent(setting, verbose=True)
# shield.print_shield()
# input("")
evaluate_agent(q_table, shield, 1000)
| 9,297 | 33.954887 | 251 | py |
Automata-Learning-meets-Shielding | Automata-Learning-meets-Shielding-master/test_worlds/plot.py | #!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
import csv
import copy
settings=["shielded", "penalty", "no"]
color_dict={"shielded": 0x00ff00, "no": 0x110000, "penalty": 0xff0000}
#filename = "penalty_summary.csv"
epochs = [1000,2000,3000,4000,5000,6000,7000,8000,9000,10000]
subplotrows=3
subplotcols=2
fig, axs = plt.subplots(subplotrows, subplotcols)
subplotrow=0
subplotcol=0
for setting in settings:
data = []
filename = setting + "_summary.csv"
print(f"Plotting {setting}:")
with open(filename,'r') as csvfile:
plots = csv.reader(csvfile, delimiter = ';')
for row in plots:
data.append(row)
for i, row in enumerate(data[1::2]):
row = np.array(list(filter(None, row))).astype(float)
color=copy.deepcopy(color_dict[setting])
color+=0x0000aa * i
color_string=str('0x{0:0{1}x}'.format(color,6)).replace("0x","#")
print(f"{i}th {setting}: {color_string}")
#color=str(hex(color)).replace("0x", "#")
#print(color)
axs[subplotrow%subplotrows, subplotcol%subplotcols].plot(epochs, row, color=color_string)
subplotcol=subplotcol+1
if subplotcol == subplotcols:
subplotrow=subplotrow+1
subplotcol = 0
subplotrow=0
subplotcol=0
for i, row in enumerate(data[0::2]):
delim=row[0].find('.')
axs[subplotrow%subplotrows, subplotcol%subplotcols].set_title(row[0][0:delim])
subplotcol=subplotcol+1
if subplotcol == subplotcols:
subplotrow=subplotrow+1
subplotcol = 0
for ax in axs.flat:
ax.set(xlabel='epochs', ylabel='reward')
ax.set_yticks(np.arange(-120, 100, step=10)) # Set label locations.
#plt.legend()
#plt.xlabel('Epochs')
#plt.ylabel('Avg. Reward')
#plt.title('dummy')
plt.show()
| 1,837 | 31.245614 | 97 | py |
Automata-Learning-meets-Shielding | Automata-Learning-meets-Shielding-master/experiments/wall_result1/plot.py | #!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
import csv
import copy
plot_y_min=-100
plot_y_max=140
plot_y_step=25
subplotrows=4
subplotcols=2
settings=["shielded", "penalty"]#, "no"]
color_dict={"shielded": 0x00ff00,"penalty": 0xff0000}#, "no": 0x110000, }
epochs = [1000,2000,3000,4000,5000,6000,7000,8000,9000,10000,11000,12000,13000,14000,15000,16000,17000,18000,19000,20000,21000,22000,23000,24000,25000,26000,27000,28000,29000,30000]
fig, axs = plt.subplots(subplotrows, subplotcols)
for setting in settings:
subplotrow=0
subplotcol=0
data = []
filename = setting + "_learning_summary.csv"
#print(f"Plotting {setting}:")
with open(filename,'r') as csvfile:
plots = csv.reader(csvfile, delimiter = ';')
for row in plots:
data.append(row)
for i, row in enumerate(data[1::4]):
row = np.array(list(filter(None, row))).astype(float)
color=copy.deepcopy(color_dict[setting])
#color+=0x00001f * i
color_string=str('0x{0:0{1}x}'.format(color,6)).replace("0x","#")
axs[subplotrow%subplotrows, subplotcol%subplotcols].plot(epochs, row, color=color_string, label=row[0])
axs[subplotrow%subplotrows, subplotcol%subplotcols].legend()
subplotcol=subplotcol+1
if subplotcol == subplotcols:
subplotrow=subplotrow+1
subplotcol = 0
mins = []
maxs = []
for i, row in enumerate(data[2::4]):
row = np.array(list(filter(None, row))).astype(float)
mins.append(row)
for i, row in enumerate(data[3::4]):
row = np.array(list(filter(None, row))).astype(float)
maxs.append(row)
subplotrow=0
subplotcol=0
for i, row in enumerate(mins):
color=copy.deepcopy(color_dict[setting])
#color+=0x00001f * i
color_string=str('0x{0:0{1}x}'.format(color,6)).replace("0x","#")
axs[subplotrow%subplotrows, subplotcol%subplotcols].fill_between(epochs, mins[i], maxs[i], color=color_string, alpha=0.1)
axs[subplotrow%subplotrows, subplotcol%subplotcols].legend()
subplotcol=subplotcol+1
if subplotcol == subplotcols:
subplotrow=subplotrow+1
subplotcol = 0
subplotrow=0
subplotcol=0
for i, row in enumerate(data[0::4]):
delim=row[0].find('.')
axs[subplotrow%subplotrows, subplotcol%subplotcols].set_title(row[0][0:delim])
subplotcol=subplotcol+1
if subplotcol == subplotcols:
subplotrow=subplotrow+1
subplotcol = 0
for ax in axs.flat:
ax.set(xlabel='epochs', ylabel='reward')
ax.set_yticks(np.arange(plot_y_min, plot_y_max, step=plot_y_step)) # Set label locations.
#plt.legend()
#plt.xlabel('Epochs')
#plt.ylabel('Avg. Reward')
#plt.title('dummy')
plt.show()
#tikzplotlib.save("result_wall1.tex")
| 2,854 | 34.246914 | 181 | py |
Automata-Learning-meets-Shielding | Automata-Learning-meets-Shielding-master/experiments/wall_result1/plot_automata_sizes.py | #!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
import csv
import copy
import tikzplotlib
settings=["shielded"]#, "penalty"]#, "no"]
color_dict={"shielded": 0x00ff00}#,"penalty": 0xff0000}#, "no": 0x110000, }
model_sizes = [5*9-6,8*9-12,8*14-20,11*14-30,11*18-39,14*18-52,14*22-64,17*22-80]
epochs = [1000,2000,3000,4000,5000,6000,7000,8000,9000,10000,11000,12000,13000,14000,15000,16000,17000,18000,19000,20000,21000,22000,23000,24000,25000,26000,27000,28000,29000,30000]
def constant_function(x):
return np.full(len(epochs), x)
subplotrows=4
subplotcols=2
fig, axs = plt.subplots(subplotrows, subplotcols)
for setting in settings:
subplotrow=0
subplotcol=0
data = []
filename = "automata_sizes.csv"
#print(f"Plotting {setting}:")
with open(filename,'r') as csvfile:
plots = csv.reader(csvfile, delimiter = ';')
for i, row in enumerate(plots):
data.append(row)
for i, row in enumerate(data[1::4]):
row = np.array(list(filter(None, row))).astype(float)
color=copy.deepcopy(color_dict[setting])
#color+=0x00001f * i
color_string=str('0x{0:0{1}x}'.format(color,6)).replace("0x","#")
#print(f"{i}th {setting}: {color_string}")
axs[subplotrow%subplotrows, subplotcol%subplotcols].plot(epochs, row, color=color_string, label=row[0])
axs[subplotrow%subplotrows, subplotcol%subplotcols].plot(epochs, constant_function(model_sizes[i]), color='#000000')
#axs[subplotrow%subplotrows, subplotcol%subplotcols].legend()
subplotcol=subplotcol+1
if subplotcol == subplotcols:
subplotrow=subplotrow+1
subplotcol = 0
mins = []
maxs = []
for i, row in enumerate(data[2::4]):
row = np.array(list(filter(None, row))).astype(float)
mins.append(row)
for i, row in enumerate(data[3::4]):
row = np.array(list(filter(None, row))).astype(float)
maxs.append(row)
subplotrow=0
subplotcol=0
for i, row in enumerate(mins):
color=copy.deepcopy(color_dict[setting])
#color+=0x00001f * i
color_string=str('0x{0:0{1}x}'.format(color,6)).replace("0x","#")
#print(f"{i}th {setting}: {color_string}")
axs[subplotrow%subplotrows, subplotcol%subplotcols].fill_between(epochs, mins[i], maxs[i], color=color_string, alpha=0.1)
#axs[subplotrow%subplotrows, subplotcol%subplotcols].legend()
subplotcol=subplotcol+1
if subplotcol == subplotcols:
subplotrow=subplotrow+1
subplotcol = 0
subplotrow=0
subplotcol=0
for i, row in enumerate(data[0::4]):
delim=row[0].find('.')
axs[subplotrow%subplotrows, subplotcol%subplotcols].set_title(row[0][0:delim])
subplotcol=subplotcol+1
if subplotcol == subplotcols:
subplotrow=subplotrow+1
subplotcol = 0
for ax in axs.flat:
ax.set(xlabel='epochs', ylabel='reward')
ax.set_yticks(np.arange(0, 300, step=50)) # Set label locations.
#plt.xlabel('Epochs')
#plt.ylabel('Avg. Reward')
#plt.title('dummy')
plt.show()
#tikzplotlib.save("result_unsafe1.tex")
| 3,169 | 35.860465 | 181 | py |
Automata-Learning-meets-Shielding | Automata-Learning-meets-Shielding-master/experiments/unsafe_result1/plot.py | #!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
import csv
import copy
settings=["shielded", "penalty"]#, "no"]
color_dict={"shielded": 0x00ff00,"penalty": 0xff0000}#, "no": 0x110000, }
epochs = [1000,2000,3000,4000,5000,6000,7000,8000,9000,10000,11000,12000,13000,14000,15000,16000,17000,18000,19000,20000,21000,22000,23000,24000,25000,26000,27000,28000,29000,30000]
subplotrows=4
subplotcols=2
fig, axs = plt.subplots(subplotrows, subplotcols)
for setting in settings:
subplotrow=0
subplotcol=0
data = []
filename = setting + "_learning_summary.csv"
#print(f"Plotting {setting}:")
with open(filename,'r') as csvfile:
plots = csv.reader(csvfile, delimiter = ';')
for i, row in enumerate(plots):
data.append(row)
for i, row in enumerate(data[1::4]):
if i == 0 or i == 1:
continue
row = np.array(list(filter(None, row))).astype(float)
color=copy.deepcopy(color_dict[setting])
#color+=0x00001f * i
color_string=str('0x{0:0{1}x}'.format(color,6)).replace("0x","#")
#print(f"{i}th {setting}: {color_string}")
axs[subplotrow%subplotrows, subplotcol%subplotcols].plot(epochs, row, color=color_string, label=row[0])
#axs[subplotrow%subplotrows, subplotcol%subplotcols].legend()
subplotcol=subplotcol+1
if subplotcol == subplotcols:
subplotrow=subplotrow+1
subplotcol = 0
mins = []
maxs = []
for i, row in enumerate(data[2::4]):
row = np.array(list(filter(None, row))).astype(float)
mins.append(row)
for i, row in enumerate(data[3::4]):
row = np.array(list(filter(None, row))).astype(float)
maxs.append(row)
subplotrow=0
subplotcol=0
for i, row in enumerate(mins):
if i == 0 or i == 1:
continue
color=copy.deepcopy(color_dict[setting])
#color+=0x00001f * i
color_string=str('0x{0:0{1}x}'.format(color,6)).replace("0x","#")
#print(f"{i}th {setting}: {color_string}")
axs[subplotrow%subplotrows, subplotcol%subplotcols].fill_between(epochs, mins[i], maxs[i], color=color_string, alpha=0.1)
#axs[subplotrow%subplotrows, subplotcol%subplotcols].legend()
subplotcol=subplotcol+1
if subplotcol == subplotcols:
subplotrow=subplotrow+1
subplotcol = 0
subplotrow=0
subplotcol=0
for i, row in enumerate(data[0::4]):
delim=row[0].find('.')
axs[subplotrow%subplotrows, subplotcol%subplotcols].set_title(row[0][0:delim])
subplotcol=subplotcol+1
if subplotcol == subplotcols:
subplotrow=subplotrow+1
subplotcol = 0
for ax in axs.flat:
ax.set(xlabel='epochs', ylabel='reward')
ax.set_yticks(np.arange(-120, 100, step=10)) # Set label locations.
#plt.xlabel('Epochs')
#plt.ylabel('Avg. Reward')
#plt.title('dummy')
plt.show()
#tikzplotlib.save("result_unsafe1.tex")
| 2,993 | 34.642857 | 181 | py |
Automata-Learning-meets-Shielding | Automata-Learning-meets-Shielding-master/experiments/slipery_result1/plot.py | #!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
import csv
import copy
plot_y_min=0
plot_y_max=150
plot_y_step=25
subplotrows=4
subplotcols=2
settings=["shielded", "penalty"]#, "no"]
color_dict={"shielded": 0x00ff00,"penalty": 0xff0000}#, "no": 0x110000, }
epochs = [1000,2000,3000,4000,5000,6000,7000,8000,9000,10000,11000,12000,13000,14000,15000,16000,17000,18000,19000,20000,21000,22000,23000,24000,25000,26000,27000,28000,29000,30000]
def constant_function(x):
return np.full(len(epochs), x)
fig, axs = plt.subplots(subplotrows, subplotcols)
for setting in settings:
subplotrow=0
subplotcol=0
data = []
filename = setting + "_learning_summary.csv"
#print(f"Plotting {setting}:")
with open(filename,'r') as csvfile:
plots = csv.reader(csvfile, delimiter = ';')
for row in plots:
data.append(row)
for i, row in enumerate(data[1::4]):
row = np.array(list(filter(None, row))).astype(float)
color=copy.deepcopy(color_dict[setting])
#color+=0x00001f * i
color_string=str('0x{0:0{1}x}'.format(color,6)).replace("0x","#")
#print(f"{i}th {setting}: {color_string}")
axs[subplotrow%subplotrows, subplotcol%subplotcols].plot(epochs, row, color=color_string, label=row[0])
#axs[subplotrow%subplotrows, subplotcol%subplotcols].plot(epochs, constant_function(model_sizes[i]), color='#000000')
#axs[subplotrow%subplotrows, subplotcol%subplotcols].legend()
subplotcol=subplotcol+1
if subplotcol == subplotcols:
subplotrow=subplotrow+1
subplotcol = 0
mins = []
maxs = []
for i, row in enumerate(data[2::4]):
row = np.array(list(filter(None, row))).astype(float)
mins.append(row)
for i, row in enumerate(data[3::4]):
row = np.array(list(filter(None, row))).astype(float)
maxs.append(row)
subplotrow=0
subplotcol=0
for i, row in enumerate(mins):
color=copy.deepcopy(color_dict[setting])
#color+=0x00001f * i
color_string=str('0x{0:0{1}x}'.format(color,6)).replace("0x","#")
#print(f"{i}th {setting}: {color_string}")
axs[subplotrow%subplotrows, subplotcol%subplotcols].fill_between(epochs, mins[i], maxs[i], color=color_string, alpha=0.1)
#axs[subplotrow%subplotrows, subplotcol%subplotcols].legend()
subplotcol=subplotcol+1
if subplotcol == subplotcols:
subplotrow=subplotrow+1
subplotcol = 0
subplotrow=0
subplotcol=0
for i, row in enumerate(data[0::4]):
delim=row[0].find('.')
axs[subplotrow%subplotrows, subplotcol%subplotcols].set_title(row[0][0:delim])
subplotcol=subplotcol+1
if subplotcol == subplotcols:
subplotrow=subplotrow+1
subplotcol = 0
for ax in axs.flat:
ax.set(xlabel='epochs', ylabel='reward')
ax.set_yticks(np.arange(plot_y_min, plot_y_max, step=plot_y_step)) # Set label locations.
#plt.legend()
#plt.xlabel('Epochs')
#plt.ylabel('Avg. Reward')
#plt.title('dummy')
plt.show()
| 3,104 | 35.529412 | 181 | py |
Automata-Learning-meets-Shielding | Automata-Learning-meets-Shielding-master/gym_partially_observable_grid/utils.py | from collections import defaultdict
from random import choices
class StochasticTile:
    """Stochastic action behaviour for one tile type, identified by `rule_id`.

    Each registered rule maps an intended action to a distribution over the
    actions that actually get executed.
    """

    def __init__(self, rule_id):
        self.rule_id = rule_id
        # intended action -> list of (actual_action, probability) pairs
        self.behaviour = dict()

    def add_stochastic_action(self, action, new_action_probabilities):
        """Register the outcome distribution for `action`.

        Probabilities must sum to 1 (checked to 5 decimal places; raises
        AssertionError otherwise, matching the original contract).
        """
        self.behaviour[action] = new_action_probabilities
        assert round(sum(p[1] for p in new_action_probabilities), 5) == 1.0

    def get_action(self, action):
        """Sample the actual action for `action`; pass through if no rule exists."""
        if action not in self.behaviour:  # idiom: no need for .keys()
            return action
        outcomes = self.behaviour[action]
        actions = [a[0] for a in outcomes]
        prob_dist = [a[1] for a in outcomes]
        return choices(actions, prob_dist, k=1)[0]

    def get_all_actions(self):
        """Return every action that can result from any registered rule."""
        return list({action_prob_pair[0] for rule in self.behaviour.values() for action_prob_pair in rule})
class PartiallyObsGridworldParser:
    """Parses a grid-world description file into layout, abstraction,
    stochastic-behaviour rules, and reward mappings.

    The file consists of named sections ('Layout', 'Abstraction',
    'Behaviour', 'Rewards'); '//' lines are comments. Each section holds a
    map (same size as the layout) optionally followed by key:value lines.
    """

    def __init__(self, path_to_file):
        # Section name -> list of non-empty, non-comment lines.
        self.content = defaultdict(list)
        # State space
        self.state_space = 0
        # Representation of concrete ((x,y) coordinates) world and abstract world
        self.world, self.abstract_world = None, None
        # Map of stochastic tiles, where each tile is identified by rule_id
        self.rules = dict()
        # Map of locations to rule_ids, that is, tile has stochastic behaviour
        self.stochastic_tile = dict()
        # Map of locations that return a reward
        self.reward_tiles = dict()
        # Map of symbols to rewards
        self.symbol_reward_map = dict()
        # Map of abstract symbols to their names (if any)
        self.abstract_symbol_name_map = dict()
        # Variables
        self.initial_location = None
        self.player_location = None
        self.goal_location = set()
        self.terminal_locations = set()
        self.behavioral_toggles = set()
        # Parsing order matters: later passes read self.world/self.content.
        self._parse_file(path_to_file)
        self._parse_world_and_abstract_world()
        self._parse_abstraction_mappings()
        self._parse_layout_variables()
        self._parse_rewards()
        self._parse_rules()

    def _parse_file(self, path_to_file):
        """Split the file into sections, dropping blanks and '//' comments."""
        file = open(path_to_file, 'r')
        current_section = None
        sections = ['Layout', 'Abstraction', 'Behaviour', 'Rewards']
        for line in file.readlines():
            line = line.strip()
            if not line or line.startswith('//'):
                continue
            # A line containing a section name switches the current section.
            is_header = False
            for section_name in sections:
                if section_name in line:
                    current_section = section_name
                    is_header = True
            if is_header:
                continue
            self.content[current_section].append(line)

    def _parse_world_and_abstract_world(self):
        """Build the concrete and (optional) abstract tile grids."""
        self.world = [list(l.strip()) for l in self.content['Layout']]
        self.abstract_world = [list(l.strip()) for l in self.content['Abstraction'][:len(self.world)]]
        if not self.content['Abstraction']:
            self.abstract_world = None
        else:
            assert len(self.world) == len(self.abstract_world)

    def _parse_abstraction_mappings(self):
        """Collect abstract tile symbols and their optional 'id:name' aliases."""
        abstract_tiles = set()
        for i, line in enumerate(self.content['Abstraction']):
            if i <= len(self.world) - 1:
                for abstract_tile in list(line):
                    if abstract_tile not in {'#', 'D', 'G', 'E', ' '}:
                        abstract_tiles.add(abstract_tile)
            # add custom names
            else:
                id_name_pair = line.split(':')
                id, name = id_name_pair[0], id_name_pair[1]
                self.abstract_symbol_name_map[id] = name
        # if some abstract tile does not have defined name, it will just be itself :)
        for at in list(abstract_tiles):
            if at not in self.abstract_symbol_name_map.keys():
                self.abstract_symbol_name_map[at] = at

    def _parse_layout_variables(self):
        """Locate player start 'E', goals 'G', terminals 'T', toggles '@' and count states."""
        for x, line in enumerate(self.content['Layout']):
            for y, tile in enumerate(line):
                if tile == 'E':
                    self.player_location = (x, y)
                    self.initial_location = (x, y)
                    # The start marker is replaced by a normal walkable tile.
                    self.world[self.player_location[0]][self.player_location[1]] = ' '
                if tile == 'G':
                    self.goal_location.add((x, y))
                if tile == 'T':
                    self.terminal_locations.add((x, y))
                if tile == '@':
                    self.behavioral_toggles.add((x, y))
                # Everything except walls and doors counts as a state.
                if tile != '#' and tile != 'D':
                    self.state_space += 1
        assert self.player_location and self.goal_location

    def _parse_rewards(self):
        """Map reward-tile coordinates to numeric rewards via 'symbol:value' lines."""
        for x, line in enumerate(self.content['Rewards']):
            if x <= len(self.world) - 1:
                for y, tile in enumerate(line):
                    if tile not in {'#', 'D', 'G', ' '}:
                        self.reward_tiles[(x, y)] = tile
            else:
                symbol_value_pair = line.split(':')
                symbol, value = symbol_value_pair[0], symbol_value_pair[1]
                self.symbol_reward_map[symbol] = int(value)
        # Replace the stored symbols with their numeric values.
        for k, v in self.reward_tiles.items():
            self.reward_tiles[k] = self.symbol_reward_map[v]

    def _parse_rules(self):
        """Read the stochastic-behaviour map and its rule definition lines."""
        # Extract the rules layout
        rule_world = []
        for index, line in enumerate(self.content['Behaviour']):
            # First part of the rules section corresponds to a map
            if index <= len(self.world) - 1:
                rule_world.append(line)
            else:
                self._parse_and_process_rule(line)
        for x, line in enumerate(rule_world):
            for y, tile in enumerate(line):
                if tile in self.rules.keys():
                    tile_xy = (x, y)
                    self.stochastic_tile[tile_xy] = tile

    def _parse_and_process_rule(self, rule):
        """Parse a 'rule_id-action-[act:prob,...]' line into a StochasticTile."""
        actions_dict = {'up': 0, 'down': 1, 'left': 2, 'right': 3}
        rule = ''.join(rule)
        rule = rule.replace(" ", '')
        rule_parts = rule.split('-')
        rule_id = rule_parts[0]
        rule_action = actions_dict[rule_parts[1]]
        rule_mappings = rule_parts[2]
        rule_mappings = rule_mappings.lstrip('[').rstrip(']')
        if rule_id not in self.rules.keys():
            self.rules[rule_id] = StochasticTile(rule_id)
        action_prob_pairs = []
        for action_prob in rule_mappings.split(','):
            ap = action_prob.split(':')
            action = actions_dict[ap[0]]
            prob = float(ap[1])
            action_prob_pairs.append((action, prob))
        self.rules[rule_id].add_stochastic_action(rule_action, action_prob_pairs)
| 6,717 | 37.170455 | 107 | py |
Automata-Learning-meets-Shielding | Automata-Learning-meets-Shielding-master/gym_partially_observable_grid/__init__.py | from gym.envs.registration import register
# Register the grid world with Gym so it can be created via gym.make('poge-v1').
register(
    id='poge-v1',
    entry_point='gym_partially_observable_grid.envs:PartiallyObservableWorld',
)
Automata-Learning-meets-Shielding | Automata-Learning-meets-Shielding-master/gym_partially_observable_grid/envs/PartiallyObsGridEnv.py | from copy import deepcopy
import gym
from gym import spaces
from gym_partially_observable_grid.utils import PartiallyObsGridworldParser
class PartiallyObservableWorld(gym.Env):
def __init__(self,
             world_file_path,
             force_determinism=False,
             indicate_slip=False,
             is_partially_obs=True,
             indicate_wall=False,
             max_ep_len=200,
             goal_reward=100,
             one_time_rewards=True,
             step_penalty=0):
    """Build the environment from a grid-world description file.

    world_file_path: path to the layout/abstraction/behaviour/rewards file.
    force_determinism: disable all stochastic tiles.
    indicate_slip: append slip info to observations (partially-obs mode).
    is_partially_obs: emit abstract observations instead of (x,y) states.
    indicate_wall: add '_wall' observations on bumping into a wall.
    max_ep_len / goal_reward / one_time_rewards / step_penalty: episode config.
    """
    # Available actions
    self.actions_dict = {'up': 0, 'down': 1, 'left': 2, 'right': 3}
    self.action_space_to_act_map = {i: k for k, i in self.actions_dict.items()}
    self.actions = [0, 1, 2, 3]
    parser = PartiallyObsGridworldParser(world_file_path)
    # State space size from layout file
    self.state_space = parser.state_space
    # Representation of concrete ((x,y) coordinates) world and abstract world
    self.world, self.abstract_world = parser.world, parser.abstract_world
    # Map of abstract symbols to their names (if any)
    self.abstract_symbol_name_map = parser.abstract_symbol_name_map
    # Map of stochastic tiles, where each tile is identified by rule_id
    self.rules = parser.rules
    # Map of locations to rule_ids, that is, tile has stochastic behaviour
    # force_determinism - this option exist if you want to make a stochastic env. deterministic
    self.stochastic_tile = parser.stochastic_tile if not force_determinism else dict()
    # Map of locations that return a reward
    self.reward_tiles = parser.reward_tiles
    # If one_time_rewards set to True, reward for that tile will be receive only once during the episode
    self.one_time_rewards = one_time_rewards
    self.collected_rewards = set()
    # If true, once the executed action is not the same as the desired action,
    # 'slip' will be added to abstract output
    self.indicate_slip = indicate_slip
    self.slip_action = None
    # If indicate_wall is set to True, suffix '_wall' will be added once the agent runs into the wall
    self.indicate_wall = indicate_wall
    # Indicate whether observations will be abstracted or will they be x-y coordinates
    self.is_partially_obs = is_partially_obs
    # If abstraction is not defined, environment cannot be partially observable
    if self.abstract_world is None:
        self.is_partially_obs = False
    # Layout variables
    self.initial_location = parser.initial_location
    self.player_location = parser.player_location
    self.goal_locations = parser.goal_location
    self.terminal_locations = parser.terminal_locations
    self.behavioral_toggles = parser.behavioral_toggles
    # Should stochastic behaviour be enabled
    self.use_stochastic_tiles = True
    # Reward reached when reaching goal or negative amount when terminal state will be reached
    self.goal_reward = goal_reward
    # Step penalty that will be returned every every step if the reward is not reached
    self.step_penalty = step_penalty if step_penalty < 0 else step_penalty * -1
    # Episode length
    self.max_ep_len = max_ep_len
    self.step_counter = 0
    # Action and Observation Space
    # BUG FIX: the original assigned self.one_hot_2_state_map twice; the
    # second target was clearly meant to be state_2_one_hot_map (both are
    # rebuilt inside _get_obs_space below).
    self.one_hot_2_state_map, self.state_2_one_hot_map = None, None
    self.action_space = spaces.Discrete(4)
    self.observation_space = spaces.Discrete(self._get_obs_space())
    def _get_obs_space(self):
        """Build the observation encoding and return the observation-space size.

        Populates ``self.state_2_one_hot_map`` (observation -> integer index)
        and its inverse ``self.one_hot_2_state_map``, enumerating:
          - every walkable tile (or its abstract symbol when partially
            observable),
          - '<state>_slip_<action>' variants when stochastic rules exist
            (``self.rules`` non-empty),
          - '<state>_wall' variants when ``self.indicate_wall`` is set.

        Returns:
            int: the number of distinct observations (size of the Discrete
            observation space).
        """
        self.state_2_one_hot_map = {}
        counter = 0
        abstract_symbols = set()
        # Enumerate the abstract map when partially observable, else the raw grid.
        world_to_process = self.world if not self.is_partially_obs else self.abstract_world
        for x, row in enumerate(world_to_process):
            for y, tile in enumerate(row):
                # '#', 'D' and 'E' tiles are never emitted as observations.
                if tile not in {'#', 'D', 'E'}:
                    if self.is_partially_obs:
                        if tile == ' ' or tile == 'G':
                            # Blank and 'G' cells keep their concrete coordinates.
                            self.state_2_one_hot_map[(x, y)] = counter
                            counter += 1
                        else:
                            # An abstract symbol can cover many cells; index it once.
                            abstraction = self.abstract_symbol_name_map[tile]
                            if abstraction not in abstract_symbols:
                                abstract_symbols.add(abstraction)
                                self.state_2_one_hot_map[abstraction] = counter
                                counter += 1
                    else:
                        self.state_2_one_hot_map[(x, y)] = counter
                        counter += 1
        if self.rules:
            # Stochastic rules exist, so '<state>_slip_<action>' may be observed.
            for state in list(self.state_2_one_hot_map.keys()):
                for act in self.actions_dict.keys():
                    slip_state = f'{state}_slip_{act}'
                    self.state_2_one_hot_map[slip_state] = counter
                    counter += 1
        if self.indicate_wall:
            # Every observation so far (incl. slip variants) gets a '_wall' twin.
            for output in list(self.state_2_one_hot_map.keys()):
                self.state_2_one_hot_map[f'{output}_wall'] = counter
                counter += 1
        self.one_hot_2_state_map = {v: k for k, v in self.state_2_one_hot_map.items()}
        return counter
    def step(self, action):
        """Advance the environment by one action.

        Args:
            action: an integer in ``self.actions`` (0=up, 1=down, 2=left,
                3=right — see ``move``).

        Returns:
            Tuple ``(encoded_observation, reward, done, info)`` following the
            classic 4-tuple gym step API; ``info`` is always an empty dict.
        """
        assert action in self.actions
        self.step_counter += 1
        new_location = self._get_new_location(action)
        # Bumping into a wall leaves the agent in place and pays step_penalty.
        if self.world[new_location[0]][new_location[1]] == '#':
            observation = self.get_observation()
            if self.indicate_wall:
                observation = f'{observation}_wall'
            done = True if self.step_counter >= self.max_ep_len else False
            return self.encode(observation), self.step_penalty, done, {}
        # If you open the door, perform that step once more and enter new room
        if self.world[new_location[0]][new_location[1]] == 'D':
            self.player_location = new_location
            new_location = self._get_new_location(action)
        # Update player location
        self.player_location = new_location
        # Account for behavioural toggle (disable/enable stochastic behaviour)
        if self.player_location in self.behavioral_toggles:
            self.use_stochastic_tiles = not self.use_stochastic_tiles
        # Collect tile reward; with one_time_rewards only the first visit pays.
        reward = 0
        if self.player_location in self.reward_tiles.keys():
            if self.one_time_rewards and self.player_location not in self.collected_rewards:
                reward = self.reward_tiles[self.player_location]
            elif not self.one_time_rewards:
                reward = self.reward_tiles[self.player_location]
            self.collected_rewards.add(self.player_location)
        done = False
        # Goal tiles pay +goal_reward, terminal tiles -goal_reward; both end the episode.
        if self.player_location in self.goal_locations:
            reward = self.goal_reward
            done = True
        if self.player_location in self.terminal_locations:
            reward = self.goal_reward * -1
            done = True
        if self.step_counter >= self.max_ep_len:
            done = True
        # Fall back to the (non-positive) step penalty when nothing else paid.
        if self.step_penalty != 0 and reward == 0:
            reward = self.step_penalty
        observation = self.get_observation()
        return self.encode(observation), reward, done, {}
def get_observation(self):
if self.is_partially_obs:
if self.indicate_slip and self.slip_action is not None:
observation = f'{self.get_abstraction()}_slip_{self.slip_action}'
else:
observation = self.get_abstraction()
else:
observation = self.player_location
return observation
def _get_new_location(self, action):
old_action = action
self.slip_action = None
if self.player_location in self.stochastic_tile.keys() and self.use_stochastic_tiles:
action = self.rules[self.stochastic_tile[self.player_location]].get_action(action)
if old_action != action:
self.slip_action = self.action_space_to_act_map[action]
return self.move(action)
def move(self, action):
if action == 0: # up
return self.player_location[0] - 1, self.player_location[1]
if action == 1: # down
return self.player_location[0] + 1, self.player_location[1]
if action == 2: # left
return self.player_location[0], self.player_location[1] - 1
if action == 3: # right
return self.player_location[0], self.player_location[1] + 1
    def encode(self, state):
        """Map an observation to its integer index (built by _get_obs_space)."""
        return self.state_2_one_hot_map[state]
    def decode(self, one_hot_enc):
        """Inverse of encode(): map an integer index back to its observation."""
        return self.one_hot_2_state_map[one_hot_enc]
def get_abstraction(self):
abstract_tile = self.abstract_world[self.player_location[0]][self.player_location[1]]
if abstract_tile != ' ':
return self.abstract_symbol_name_map[abstract_tile]
else:
return self.player_location
def reset(self):
self.step_counter = 0
self.slip_action = None
self.use_stochastic_tiles = True
self.player_location = self.initial_location[0], self.initial_location[1]
self.collected_rewards.clear()
return self.encode(self.get_observation())
def render(self, mode='human'):
world_copy = deepcopy(self.world)
world_copy[self.player_location[0]][self.player_location[1]] = 'E'
for l in world_copy:
print("".join(l))
    def play(self):
        """Interactively control the agent from stdin.

        Reads one of 'w'/'s'/'a'/'d' per step (up/down/left/right), renders
        the grid, and prints the decoded step result. The loop never exits on
        its own (use an interrupt); any other key raises KeyError on the
        dictionary lookup.
        """
        self.reset()
        user_input_map = {'w': 0, 's': 1, 'a': 2, 'd': 3}
        print('Agent is controlled with w,a,s,d; for up,left,down,right actions.')
        while True:
            self.render()
            action = input('Action: ', )
            output, reward, done, info = self.step(user_input_map[action])
            print(f'Output: {self.decode(output), reward, done, info}')
| 9,926 | 40.190871 | 108 | py |
Automata-Learning-meets-Shielding | Automata-Learning-meets-Shielding-master/gym_partially_observable_grid/envs/__init__.py | from gym_partially_observable_grid.envs.PartiallyObsGridEnv import PartiallyObservableWorld | 91 | 91 | 91 | py |
spegg | spegg-master/tests/googletest/googletest/xcode/Scripts/versiongenerate.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by squre brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re

# Read the command line arguments (the directory containing configure.ac and
# the output directory for Version.h).
if len(sys.argv) < 3:
  # print() as a function works under both Python 2 and Python 3; the
  # original "print ..." statement form is a syntax error under Python 3.
  print("Usage: versiongenerate.py input_dir output_dir")
  sys.exit(1)
else:
  input_dir = sys.argv[1]
  output_dir = sys.argv[2]

# Read the first 1024 characters of the configure.ac file; AC_INIT is
# assumed to appear within this prefix (see the module docstring).
buffer_size = 1024
# 'with' guarantees the file is closed even if reading fails.
with open("%s/configure.ac" % input_dir, 'r') as config_file:
  opening_string = config_file.read(buffer_size)

# Extract the version string from the AC_INIT macro
# The following expression means:
#   Extract three integers separated by periods and surrounded by square
#   brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
#   (*? is the non-greedy flag) since that would pull in everything between
#   the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
                                re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)

# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
with open("%s/Version.h" % output_dir, 'w') as version_file:
  version_file.write(file_data)
| 4,536 | 43.920792 | 80 | py |
spegg | spegg-master/tests/googletest/googletest/test/gtest_list_tests_unittest.py | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) the command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
import re
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*( __ptr64)?
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*( __ptr64)?
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
  """Invokes gtest_list_tests_unittest_ with *args*; returns its stdout."""
  process = gtest_test_utils.Subprocess([EXE_PATH] + args,
                                        capture_stderr=False)
  return process.output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
  """Tests using the --gtest_list_tests flag to list all tests."""
  def RunAndVerify(self, flag_value, expected_output_re, other_flag):
    """Runs gtest_list_tests_unittest_ and verifies that it prints
    the correct tests.
    Args:
      flag_value: value of the --gtest_list_tests flag;
          None if the flag should not be present.
      expected_output_re: regular expression that matches the expected
          output after running the command; None means the full test
          list must NOT be printed.
      other_flag: a different flag to be passed to the command
          along with gtest_list_tests;
          None if the flag should not be present.
    """
    # Translate flag_value into the actual command-line flag and a
    # human-readable description used in failure messages.
    if flag_value is None:
      flag = ''
      flag_expression = 'not set'
    elif flag_value == '0':
      flag = '--%s=0' % LIST_TESTS_FLAG
      flag_expression = '0'
    else:
      flag = '--%s' % LIST_TESTS_FLAG
      flag_expression = '1'
    args = [flag]
    if other_flag is not None:
      args += [other_flag]
    output = Run(args)
    if expected_output_re:
      self.assert_(
          expected_output_re.match(output),
          ('when %s is %s, the output of "%s" is "%s",\n'
           'which does not match regex "%s"' %
           (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
            expected_output_re.pattern)))
    else:
      # No expectation given: the binary must not have listed all tests.
      self.assert_(
          not EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
          ('when %s is %s, the output of "%s" is "%s"'%
           (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))
  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(flag_value=None,
                      expected_output_re=None,
                      other_flag=None)
  def testFlag(self):
    """Tests using the --gtest_list_tests flag."""
    self.RunAndVerify(flag_value='0',
                      expected_output_re=None,
                      other_flag=None)
    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
                      other_flag=None)
  def testOverrideNonFilterFlags(self):
    """Tests that --gtest_list_tests overrides the non-filter flags."""
    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
                      other_flag='--gtest_break_on_failure')
  def testWithFilterFlags(self):
    """Tests that --gtest_list_tests takes into account the
    --gtest_filter flag."""
    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
                      other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
| 6,537 | 30.432692 | 79 | py |
spegg | spegg-master/tests/googletest/googletest/test/gtest_throw_on_failure_test.py | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
  """Sets *env_var* (upper-cased) to *value*; a value of None unsets it."""
  name = env_var.upper()
  if value is None:
    # Unsetting a variable that is not set is a no-op.
    if name in os.environ:
      del os.environ[name]
  else:
    os.environ[name] = value
def Run(command):
  """Executes *command*; True iff the process exited with code 0."""
  print('Running "%s". . .' % ' '.join(command))
  proc = gtest_test_utils.Subprocess(command)
  exited_cleanly = proc.exited and proc.exit_code == 0
  return exited_cleanly
# The tests. TODO(wan@google.com): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
  """Tests the throw-on-failure mode."""
  def RunAndVerify(self, env_var_value, flag_value, should_fail):
    """Runs gtest_throw_on_failure_test_ and verifies that it does
    (or does not) exit with a non-zero code.
    Args:
      env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
          variable; None if the variable should be unset.
      flag_value: value of the --gtest_break_on_failure flag;
          None if the flag should not be present.
      should_fail: True iff the program is expected to fail.
    """
    SetEnvVar(THROW_ON_FAILURE, env_var_value)
    # Build a human-readable description of the env var for the message.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % THROW_ON_FAILURE
    else:
      flag = '--%s' % THROW_ON_FAILURE
    command = [EXE_PATH]
    if flag:
      command.append(flag)
    if should_fail:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'
    failed = not Run(command)
    # Unset the variable so later tests start from a clean environment.
    SetEnvVar(THROW_ON_FAILURE, None)
    msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
           'exit code.' %
           (THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(failed == should_fail, msg)
  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)
  def testThrowOnFailureEnvVar(self):
    """Tests using the GTEST_THROW_ON_FAILURE environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      should_fail=True)
  def testThrowOnFailureFlag(self):
    """Tests using the --gtest_throw_on_failure flag."""
    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      should_fail=True)
  def testThrowOnFailureFlagOverridesEnvVar(self):
    """Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      should_fail=True)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      should_fail=True)
if __name__ == '__main__':
gtest_test_utils.Main()
| 5,767 | 32.534884 | 79 | py |
spegg | spegg-master/tests/googletest/googletest/test/gtest_xml_outfiles_test.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "keith.ray@gmail.com (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
  """Unit test for Google Test's XML output functionality."""
  def setUp(self):
    # We want the trailing '/' that the last "" provides in os.path.join, for
    # telling Google Test to create an output directory instead of a single
    # file for xml output.
    self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
                                    GTEST_OUTPUT_SUBDIR, "")
    self.DeleteFilesAndDir()
  def tearDown(self):
    """Removes any XML output left behind by the test."""
    self.DeleteFilesAndDir()
  def DeleteFilesAndDir(self):
    """Best-effort removal of both output files and their directory.

    os.error (e.g. the file was never created) is deliberately swallowed.
    """
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
    except os.error:
      pass
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
    except os.error:
      pass
    try:
      os.rmdir(self.output_dir_)
    except os.error:
      pass
  def testOutfile1(self):
    """Verifies the XML output file produced by gtest_xml_outfile1_test_."""
    self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
  def testOutfile2(self):
    """Verifies the XML output file produced by gtest_xml_outfile2_test_."""
    self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
  def _TestOutFile(self, test_name, expected_xml):
    """Runs *test_name* with xml output into output_dir_ and compares the
    produced XML against *expected_xml* (after normalization)."""
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
    command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
    p = gtest_test_utils.Subprocess(command,
                                    working_dir=gtest_test_utils.GetTempDir())
    self.assert_(p.exited)
    self.assertEquals(0, p.exit_code)
    # TODO(wan@google.com): libtool causes the built test binary to be
    # named lt-gtest_xml_outfiles_test_ instead of
    # gtest_xml_outfiles_test_. To account for this possibillity, we
    # allow both names in the following code. We should remove this
    # hack when Chandler Carruth's libtool replacement tool is ready.
    output_file_name1 = test_name + ".xml"
    output_file1 = os.path.join(self.output_dir_, output_file_name1)
    output_file_name2 = 'lt-' + output_file_name1
    output_file2 = os.path.join(self.output_dir_, output_file_name2)
    self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
                 output_file1)
    expected = minidom.parseString(expected_xml)
    if os.path.isfile(output_file1):
      actual = minidom.parse(output_file1)
    else:
      actual = minidom.parse(output_file2)
    self.NormalizeXml(actual.documentElement)
    self.AssertEquivalentNodes(expected.documentElement,
                               actual.documentElement)
    expected.unlink()
    actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
| 5,340 | 39.157895 | 140 | py |
spegg | spegg-master/tests/googletest/googletest/test/gtest_filter_unittest.py | #!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
try:
from sets import Set as set # For Python 2.3 compatibility
except ImportError:
pass
import sys
import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' nor 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print(\'EMPTY_VAR\' in os.environ)'])
CAN_PASS_EMPTY_ENV = eval(child.output)
# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print(\'UNSET_VAR\' not in os.environ)'])
CAN_UNSET_ENV = eval(child.output)
# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
if SUPPORTS_DEATH_TESTS:
DEATH_TESTS = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
]
else:
DEATH_TESTS = []
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS
param_tests_present = None
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # pop with a default makes "unset when absent" a no-op.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def RunAndReturnOutput(args = None):
  """Runs the test program and returns its output."""
  full_command = [COMMAND] + (args or [])
  process = gtest_test_utils.Subprocess(full_command, env=environ)
  return process.output
def RunAndExtractTestList(args = None):
  """Runs the test program and returns its exit code and a list of tests run."""

  p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
  tests_run = []
  test_case = ''
  for line in p.output.split('\n'):
    case_match = TEST_CASE_REGEX.match(line)
    if case_match:
      # Remember the current test case; subsequent RUN lines belong to it.
      test_case = case_match.group(1)
      continue
    run_match = TEST_REGEX.match(line)
    if run_match:
      tests_run.append(test_case + '.' + run_match.group(1))
  return (tests_run, p.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
  """Runs the given function and arguments in a modified environment."""

  # Snapshot the module-level environment, overlay the extras, and make sure
  # the snapshot is restored (in place, same dict object) no matter what.
  original_env = environ.copy()
  environ.update(extra_env)
  try:
    return function(*args, **kwargs)
  finally:
    environ.clear()
    environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
  """Runs a test program shard and returns exit code and a list of tests run."""

  sharding_env = {
      SHARD_INDEX_ENV_VAR: str(shard_index),
      TOTAL_SHARDS_ENV_VAR: str(total_shards),
  }
  return InvokeWithModifiedEnv(sharding_env, RunAndExtractTestList, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
  """Tests the env variable or the command line flag to filter tests."""

  # Utilities.

  def AssertSetEqual(self, lhs, rhs):
    """Asserts that two sets are equal."""

    for elem in lhs:
      self.assert_(elem in rhs, '%s in %s' % (elem, rhs))

    for elem in rhs:
      self.assert_(elem in lhs, '%s in %s' % (elem, lhs))

  def AssertPartitionIsValid(self, set_var, list_of_sets):
    """Asserts that list_of_sets is a valid partition of set_var."""

    full_partition = []
    for slice_var in list_of_sets:
      full_partition.extend(slice_var)
    # Equal lengths plus equal sets implies no duplicates across slices.
    self.assertEqual(len(set_var), len(full_partition))
    self.assertEqual(set(set_var), set(full_partition))

  def AdjustForParameterizedTests(self, tests_to_run):
    """Adjust tests_to_run in case value parameterized tests are disabled."""

    global param_tests_present
    if not param_tests_present:
      return list(set(tests_to_run) - set(PARAM_TESTS))
    else:
      return tests_to_run

  def RunAndVerify(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for a given filter."""

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # First, tests using the environment variable.

    # Windows removes empty variables from the environment when passing it
    # to a new process.  This means it is impossible to pass an empty filter
    # into a process using the environment variable.  However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      tests_run = RunAndExtractTestList()[0]
      SetEnvVar(FILTER_ENV_VAR, None)
      self.AssertSetEqual(tests_run, tests_to_run)
    # pylint: enable-msg=C6403

    # Next, tests using the command line flag.

    if gtest_filter is None:
      args = []
    else:
      args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]

    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)

  def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
                               args=None, check_exit_0=False):
    """Checks that binary runs correct tests for the given filter and shard.

    Runs all shards of gtest_filter_unittest_ with the given filter, and
    verifies that the right set of tests were run. The union of tests run
    on each shard should be identical to tests_to_run, without duplicates.

    Args:
      gtest_filter: A filter to apply to the tests.
      total_shards: A total number of shards to split test run into.
      tests_to_run: A set of tests expected to run.
      args: Arguments to pass to the test binary.
      check_exit_0: When set to a true value, make sure that all shards
                    return 0.
    """

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Windows removes empty variables from the environment when passing it
    # to a new process.  This means it is impossible to pass an empty filter
    # into a process using the environment variable.  However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      partition = []
      for i in range(0, total_shards):
        (tests_run, exit_code) = RunWithSharding(total_shards, i, args)
        if check_exit_0:
          self.assertEqual(0, exit_code)
        partition.append(tests_run)

      self.AssertPartitionIsValid(tests_to_run, partition)
      SetEnvVar(FILTER_ENV_VAR, None)
    # pylint: enable-msg=C6403

  def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for the given filter.

    Runs gtest_filter_unittest_ with the given filter, and enables
    disabled tests. Verifies that the right set of tests were run.

    Args:
      gtest_filter: A filter to apply to the tests.
      tests_to_run: A set of tests expected to run.
    """

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Construct the command line.
    # NOTE: ALSO_RUN_DISABED_TESTS_FLAG is spelled this way ('DISABED', sic)
    # where it is defined earlier in this file; do not "fix" it here alone.
    args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG]
    if gtest_filter is not None:
      args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))

    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)

  def setUp(self):
    """Sets up test case.

    Determines whether value-parameterized tests are enabled in the binary and
    sets the flags accordingly.
    """

    # Probe the binary only once; the result is cached module-wide.
    global param_tests_present
    if param_tests_present is None:
      param_tests_present = PARAM_TEST_REGEX.search(
          RunAndReturnOutput()) is not None

  def testDefaultBehavior(self):
    """Tests the behavior of not specifying the filter."""

    self.RunAndVerify(None, ACTIVE_TESTS)

  def testDefaultBehaviorWithShards(self):
    """Tests the behavior without the filter, with sharding enabled."""

    self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)

  def testEmptyFilter(self):
    """Tests an empty filter."""

    self.RunAndVerify('', [])
    self.RunAndVerifyWithSharding('', 1, [])
    self.RunAndVerifyWithSharding('', 2, [])

  def testBadFilter(self):
    """Tests a filter that matches nothing."""

    self.RunAndVerify('BadFilter', [])
    self.RunAndVerifyAllowingDisabled('BadFilter', [])

  def testFullName(self):
    """Tests filtering by full name."""

    self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])

  def testUniversalFilters(self):
    """Tests filters that match everything."""

    self.RunAndVerify('*', ACTIVE_TESTS)
    self.RunAndVerify('*.*', ACTIVE_TESTS)
    self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
    self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
    self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)

  def testFilterByTestCase(self):
    """Tests filtering by test case name."""

    self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])

    BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
    self.RunAndVerify('BazTest.*', BAZ_TESTS)
    self.RunAndVerifyAllowingDisabled('BazTest.*',
                                      BAZ_TESTS + ['BazTest.DISABLED_TestC'])

  def testFilterByTest(self):
    """Tests filtering by test name."""

    self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])

  def testFilterDisabledTests(self):
    """Select only the disabled tests to run."""

    self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
                                      ['DISABLED_FoobarTest.Test1'])

    self.RunAndVerify('*DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)

    self.RunAndVerify('*.DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
        'BarTest.DISABLED_TestFour',
        'BarTest.DISABLED_TestFive',
        'BazTest.DISABLED_TestC',
        'DISABLED_FoobarTest.DISABLED_Test2',
        ])

    self.RunAndVerify('DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_*', [
        'DISABLED_FoobarTest.Test1',
        'DISABLED_FoobarTest.DISABLED_Test2',
        'DISABLED_FoobarbazTest.TestA',
        ])

  def testWildcardInTestCaseName(self):
    """Tests using wildcard in the test case name."""

    self.RunAndVerify('*a*.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',

        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)

  def testWildcardInTestName(self):
    """Tests using wildcard in the test name."""

    self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])

  def testFilterWithoutDot(self):
    """Tests a filter that has no '.' in it."""

    self.RunAndVerify('*z*', [
        'FooTest.Xyz',

        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB',
        ])

  def testTwoPatterns(self):
    """Tests filters that consist of two patterns."""

    self.RunAndVerify('Foo*.*:*A*', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BazTest.TestA',
        ])

    # An empty pattern + a non-empty one
    self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])

  def testThreePatterns(self):
    """Tests filters that consist of three patterns."""

    self.RunAndVerify('*oo*:*A*:*One', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',

        'BazTest.TestOne',
        'BazTest.TestA',
        ])

    # The 2nd pattern is empty.
    self.RunAndVerify('*oo*::*One', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',

        'BazTest.TestOne',
        ])

    # The last 2 patterns are empty.
    self.RunAndVerify('*oo*::', [
        'FooTest.Abc',
        'FooTest.Xyz',
        ])

  def testNegativeFilters(self):
    """Tests filters with negative (excluding) patterns after a '-'."""

    self.RunAndVerify('*-BazTest.TestOne', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',

        'BazTest.TestA',
        'BazTest.TestB',
        ] + DEATH_TESTS + PARAM_TESTS)

    self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
        'FooTest.Xyz',

        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)

    self.RunAndVerify('BarTest.*-BarTest.TestOne', [
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ])

    # Tests without leading '*'.
    self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)

    # Value parameterized tests.
    self.RunAndVerify('*/*', PARAM_TESTS)

    # Value parameterized tests filtering by the sequence name.
    self.RunAndVerify('SeqP/*', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestX/1',
        'SeqP/ParamTest.TestY/0',
        'SeqP/ParamTest.TestY/1',
        ])

    # Value parameterized tests filtering by the test name.
    self.RunAndVerify('*/0', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestY/0',
        'SeqQ/ParamTest.TestX/0',
        'SeqQ/ParamTest.TestY/0',
        ])

  def testFlagOverridesEnvVar(self):
    """Tests that the filter flag overrides the filtering env. variable."""

    SetEnvVar(FILTER_ENV_VAR, 'Foo*')
    args = ['--%s=%s' % (FILTER_FLAG, '*One')]
    tests_run = RunAndExtractTestList(args)[0]
    SetEnvVar(FILTER_ENV_VAR, None)

    self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])

  def testShardStatusFileIsCreated(self):
    """Tests that the shard file is created if specified in the environment."""

    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file')
    self.assert_(not os.path.exists(shard_status_file))

    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
    finally:
      self.assert_(os.path.exists(shard_status_file))
      os.remove(shard_status_file)

  def testShardStatusFileIsCreatedWithListTests(self):
    """Tests that the shard file is created with the "list_tests" flag."""

    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file2')
    self.assert_(not os.path.exists(shard_status_file))

    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      output = InvokeWithModifiedEnv(extra_env,
                                     RunAndReturnOutput,
                                     [LIST_TESTS_FLAG])
    finally:
      # This assertion ensures that Google Test enumerated the tests as
      # opposed to running them.
      self.assert_('[==========]' not in output,
                   'Unexpected output during test enumeration.\n'
                   'Please ensure that LIST_TESTS_FLAG is assigned the\n'
                   'correct flag value for listing Google Test tests.')

      self.assert_(os.path.exists(shard_status_file))
      os.remove(shard_status_file)

  if SUPPORTS_DEATH_TESTS:
    def testShardingWorksWithDeathTests(self):
      """Tests integration with death tests and sharding."""

      gtest_filter = 'HasDeathTest.*:SeqP/*'
      expected_tests = [
          'HasDeathTest.Test1',
          'HasDeathTest.Test2',

          'SeqP/ParamTest.TestX/0',
          'SeqP/ParamTest.TestX/1',
          'SeqP/ParamTest.TestY/0',
          'SeqP/ParamTest.TestY/1',
          ]

      for flag in ['--gtest_death_test_style=threadsafe',
                   '--gtest_death_test_style=fast']:
        self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
                                      check_exit_0=True, args=[flag])
        self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
                                      check_exit_0=True, args=[flag])
# Script entry point: delegates to the shared gtest test-runner utilities.
if __name__ == '__main__':
  gtest_test_utils.Main()
| 21,325 | 32.478807 | 80 | py |
spegg | spegg-master/tests/googletest/googletest/test/gtest_xml_test_utils.py | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_xml_output"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import re
from xml.dom import minidom, Node
import gtest_test_utils
# Name of the Google Test command line flag that selects report output.
GTEST_OUTPUT_FLAG = '--gtest_output'
# NOTE(review): presumably the default XML report file name produced by the
# binaries under test — confirm against the callers of this module.
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml'
class GTestXMLTestCase(gtest_test_utils.TestCase):
  """
  Base class for tests of Google Test's XML output functionality.
  """

  def AssertEquivalentNodes(self, expected_node, actual_node):
    """
    Asserts that actual_node (a DOM node object) is equivalent to
    expected_node (another DOM node object), in that either both of
    them are CDATA nodes and have the same value, or both are DOM
    elements and actual_node meets all of the following conditions:

    *  It has the same tag name as expected_node.
    *  It has the same set of attributes as expected_node, each with
       the same value as the corresponding attribute of expected_node.
       Exceptions are any attribute named "time", which needs only be
       convertible to a floating-point number and any attribute named
       "type_param" which only has to be non-empty.
    *  It has an equivalent set of child nodes (including elements and
       CDATA sections) as expected_node.  Note that we ignore the
       order of the children as they are not guaranteed to be in any
       particular order.
    """

    # CDATA nodes compare by value only; recursion bottoms out here.
    if expected_node.nodeType == Node.CDATA_SECTION_NODE:
      self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
      self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
      return

    self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
    self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
    self.assertEquals(expected_node.tagName, actual_node.tagName)

    expected_attributes = expected_node.attributes
    actual_attributes = actual_node.attributes
    self.assertEquals(
        expected_attributes.length, actual_attributes.length,
        'attribute numbers differ in element %s:\nExpected: %r\nActual: %r' % (
            actual_node.tagName, expected_attributes.keys(),
            actual_attributes.keys()))
    for i in range(expected_attributes.length):
      expected_attr = expected_attributes.item(i)
      actual_attr = actual_attributes.get(expected_attr.name)
      self.assert_(
          actual_attr is not None,
          'expected attribute %s not found in element %s' %
          (expected_attr.name, actual_node.tagName))
      self.assertEquals(
          expected_attr.value, actual_attr.value,
          ' values of attribute %s in element %s differ: %s vs %s' %
          (expected_attr.name, actual_node.tagName,
           expected_attr.value, actual_attr.value))

    # Children are matched by identifying attribute (see _GetChildren), not
    # by position, so their order is irrelevant.
    expected_children = self._GetChildren(expected_node)
    actual_children = self._GetChildren(actual_node)
    self.assertEquals(
        len(expected_children), len(actual_children),
        'number of child elements differ in element ' + actual_node.tagName)
    for child_id, child in expected_children.items():
      self.assert_(child_id in actual_children,
                   '<%s> is not in <%s> (in element %s)' %
                   (child_id, actual_children, actual_node.tagName))
      self.AssertEquivalentNodes(child, actual_children[child_id])

  # Maps a known element tag name to the attribute that identifies a child
  # of that kind among its siblings.
  identifying_attribute = {
      'testsuites': 'name',
      'testsuite': 'name',
      'testcase': 'name',
      'failure': 'message',
      }

  def _GetChildren(self, element):
    """
    Fetches all of the child nodes of element, a DOM Element object.
    Returns them as the values of a dictionary keyed by the IDs of the
    children.  For <testsuites>, <testsuite> and <testcase> elements, the ID
    is the value of their "name" attribute; for <failure> elements, it is
    the value of the "message" attribute; CDATA sections and non-whitespace
    text nodes are concatenated into a single CDATA section with ID
    "detail".  An exception is raised if any element other than the above
    four is encountered, if two child elements with the same identifying
    attributes are encountered, or if any other type of node is encountered.
    """

    children = {}
    for child in element.childNodes:
      if child.nodeType == Node.ELEMENT_NODE:
        self.assert_(child.tagName in self.identifying_attribute,
                     'Encountered unknown element <%s>' % child.tagName)
        childID = child.getAttribute(self.identifying_attribute[child.tagName])
        self.assert_(childID not in children)
        children[childID] = child
      elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
        if 'detail' not in children:
          # Whitespace-only text nodes are ignored; everything else starts
          # the synthetic "detail" CDATA child.
          if (child.nodeType == Node.CDATA_SECTION_NODE or
              not child.nodeValue.isspace()):
            children['detail'] = child.ownerDocument.createCDATASection(
                child.nodeValue)
        else:
          children['detail'].nodeValue += child.nodeValue
      else:
        self.fail('Encountered unexpected node type %d' % child.nodeType)
    return children

  def NormalizeXml(self, element):
    """
    Normalizes Google Test's XML output to eliminate references to transient
    information that may change from run to run.

    *  The "time" attribute of <testsuites>, <testsuite> and <testcase>
       elements is replaced with a single asterisk, if it contains
       only digit characters.
    *  The "timestamp" attribute of <testsuites> elements is replaced with a
       single asterisk, if it contains a valid ISO8601 datetime value.
    *  The "type_param" attribute of <testcase> elements is replaced with a
       single asterisk (if it is non-empty) as it is the type name returned
       by the compiler and is platform dependent.
    *  The line info reported in the first line of the "message"
       attribute and CDATA section of <failure> elements is replaced with the
       file's basename and a single asterisk for the line number.
    *  The directory names in file paths are removed.
    *  The stack traces are removed.
    """

    if element.tagName == 'testsuites':
      timestamp = element.getAttributeNode('timestamp')
      timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$',
                               '*', timestamp.value)
    if element.tagName in ('testsuites', 'testsuite', 'testcase'):
      time = element.getAttributeNode('time')
      time.value = re.sub(r'^\d+(\.\d+)?$', '*', time.value)
      type_param = element.getAttributeNode('type_param')
      if type_param and type_param.value:
        type_param.value = '*'
    elif element.tagName == 'failure':
      source_line_pat = r'^.*[/\\](.*:)\d+\n'
      # Replaces the source line information with a normalized form.
      message = element.getAttributeNode('message')
      message.value = re.sub(source_line_pat, '\\1*\n', message.value)
      for child in element.childNodes:
        if child.nodeType == Node.CDATA_SECTION_NODE:
          # Replaces the source line information with a normalized form.
          cdata = re.sub(source_line_pat, '\\1*\n', child.nodeValue)
          # Removes the actual stack trace.
          child.nodeValue = re.sub(r'\nStack trace:\n(.|\n)*',
                                   '', cdata)
    # Recurse into all element children.
    for child in element.childNodes:
      if child.nodeType == Node.ELEMENT_NODE:
        self.NormalizeXml(child)
| 8,872 | 44.502564 | 79 | py |
spegg | spegg-master/tests/googletest/googletest/test/gtest_test_utils.py | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
# unittest supplies the TestCase base class (aliased below) and main().
_test_module = unittest

# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
  import subprocess
  _SUBPROCESS_MODULE_AVAILABLE = True
except:
  # NOTE: bare except is deliberate here — it covers pre-2.4 Pythons where
  # the subprocess module does not exist; popen2 is the fallback.
  import popen2
  _SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204

GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'

IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]

# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'

# Module-level copy of the environment, mutated by SetEnvVar() below.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
  """Sets/unsets an environment variable to a given value."""

  if value is None:
    # Removing a missing key is a no-op, matching the original behavior.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase  # pylint: disable-msg=C6409

# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
             'build_dir': os.path.dirname(sys.argv[0])}
# Guard that makes _ParseAndStripGTestFlags() idempotent.
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
  """Parses and strips Google Test flags from argv.  This is idempotent.

  For each known flag, precedence is: command line > environment variable >
  default.  Matched '--<flag>=<value>' entries are deleted from argv in
  place so that unittest.main() never sees them.
  """

  # Suppresses the lint complaint about a global variable since we need it
  # here to maintain module-wide state.
  global _gtest_flags_are_parsed  # pylint: disable-msg=W0603
  if _gtest_flags_are_parsed:
    return

  _gtest_flags_are_parsed = True
  for flag in _flag_map:
    # The environment variable overrides the default value.
    if flag.upper() in os.environ:
      _flag_map[flag] = os.environ[flag.upper()]

    # The command line flag overrides the environment variable.
    i = 1  # Skips the program name.
    while i < len(argv):
      prefix = '--' + flag + '='
      if argv[i].startswith(prefix):
        _flag_map[flag] = argv[i][len(prefix):]
        del argv[i]
        break
      else:
        # We don't increment i in case we just found a --gtest_* flag
        # and removed it from argv.
        i += 1
def GetFlag(flag):
  """Returns the value of the given flag."""

  # GetFlag() may run before Main(); parsing is idempotent, so always make
  # sure the --gtest_* flags have been consumed first.
  _ParseAndStripGTestFlags(sys.argv)
  return _flag_map[flag]
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""

  source_dir = GetFlag('source_dir')
  return os.path.abspath(source_dir)
def GetBuildDir():
  """Returns the absolute path of the directory where the test binaries are."""

  build_dir = GetFlag('build_dir')
  return os.path.abspath(build_dir)
# Path of the lazily-created temp dir; None until GetTempDir() is called.
_temp_dir = None

def _RemoveTempDir():
  """Deletes the temp dir created by GetTempDir(), if any."""
  if not _temp_dir:
    return
  shutil.rmtree(_temp_dir, ignore_errors=True)

atexit.register(_RemoveTempDir)
def GetTempDir():
  """Returns a directory for temporary files, creating it on first use."""

  global _temp_dir
  # Create the directory lazily and cache it for the rest of the run.
  _temp_dir = _temp_dir or tempfile.mkdtemp()
  return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting file
  doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.
    build_dir:       directory where to look for executables, by default
                     the result of GetBuildDir().

  Returns:
    The absolute path of the test binary.
  """

  directory = build_dir or GetBuildDir()
  path = os.path.abspath(os.path.join(directory, executable_name))
  # Windows and Cygwin binaries carry an .exe suffix.
  if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
    path += '.exe'

  if os.path.exists(path):
    return path

  message = (
      'Unable to find the test binary "%s". Please make sure to provide\n'
      'a path to the binary via the --build_dir flag or the BUILD_DIR\n'
      'environment variable.' % path)
  sys.stdout.write(message)
  sys.exit(1)
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """

  if os.name == 'nt':
    # On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
    # the argument to exit() directly.
    return exit_code
  # On Unix, os.WEXITSTATUS() must be used to extract the exit status
  # from the result of os.system().
  if not os.WIFEXITED(exit_code):
    return -1
  return os.WEXITSTATUS(exit_code)
class Subprocess:
  """Runs a command in a subprocess and records its outcome.

  See __init__ for the attributes describing the child's result.
  """

  def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
    """Changes into a specified directory, if provided, and executes a command.

    Restores the old directory afterwards.

    Args:
      command:        The command to run, in the form of sys.argv.
      working_dir:    The directory to change into.
      capture_stderr: Determines whether to capture stderr in the output member
                      or to discard it.
      env:            Dictionary with environment to pass to the subprocess.

    Returns:
      An object that represents outcome of the executed process. It has the
      following attributes:
        terminated_by_signal   True iff the child process has been terminated
                               by a signal.
        signal                 Signal that terminated the child process.
        exited                 True iff the child process exited normally.
        exit_code              The code with which the child process exited.
        output                 Child process's stdout and stderr output
                               combined in a string.
    """

    # The subprocess module is the preferable way of running programs
    # since it is available and behaves consistently on all platforms,
    # including Windows.  But it is only available starting in python 2.4.
    # In earlier python versions, we revert to the popen2 module, which is
    # available in python 2.0 and later but doesn't provide required
    # functionality (Popen4) under Windows.  This allows us to support Mac
    # OS X 10.4 Tiger, which has python 2.3 installed.
    if _SUBPROCESS_MODULE_AVAILABLE:
      if capture_stderr:
        stderr = subprocess.STDOUT
      else:
        stderr = subprocess.PIPE

      p = subprocess.Popen(command,
                           stdout=subprocess.PIPE, stderr=stderr,
                           cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file object for the child's
      # output.
      self.output = p.communicate()[0]
      self._return_code = p.returncode
    else:
      old_dir = os.getcwd()

      def _ReplaceEnvDict(dest, src):
        # Changes made by os.environ.clear are not inheritable by child
        # processes until Python 2.6. To produce inheritable changes we have
        # to delete environment items with the del statement.
        for key in dest.keys():
          del dest[key]
        dest.update(src)

      # When 'env' is not None, backup the environment variables and replace
      # them with the passed 'env'. When 'env' is None, we simply use the
      # current 'os.environ' for compatibility with the subprocess.Popen
      # semantics used above.
      if env is not None:
        old_environ = os.environ.copy()
        _ReplaceEnvDict(os.environ, env)

      try:
        if working_dir is not None:
          os.chdir(working_dir)
        if capture_stderr:
          p = popen2.Popen4(command)
        else:
          p = popen2.Popen3(command)
        p.tochild.close()
        self.output = p.fromchild.read()
        ret_code = p.wait()
      finally:
        os.chdir(old_dir)

        # Restore the old environment variables
        # if they were replaced.
        if env is not None:
          _ReplaceEnvDict(os.environ, old_environ)

      # Converts ret_code to match the semantics of
      # subprocess.Popen.returncode.
      if os.WIFSIGNALED(ret_code):
        self._return_code = -os.WTERMSIG(ret_code)
      else:  # os.WIFEXITED(ret_code) should return True here.
        self._return_code = os.WEXITSTATUS(ret_code)

    # A negative return code means the child was killed by a signal
    # (subprocess.Popen.returncode convention).
    if self._return_code < 0:
      self.terminated_by_signal = True
      self.exited = False
      self.signal = -self._return_code
    else:
      self.terminated_by_signal = False
      self.exited = True
      self.exit_code = self._return_code
def Main():
  """Runs the unit test."""

  # _ParseAndStripGTestFlags() must run before unittest.main(); otherwise
  # the latter would be confused by the --gtest_* flags.
  _ParseAndStripGTestFlags(sys.argv)
  # The tested binaries should not be writing XML output files unless the
  # script explicitly instructs them to.
  # TODO(vladl@google.com): Move this into Subprocess when we implement
  # passing environment into it as a parameter.
  os.environ.pop(GTEST_OUTPUT_VAR_NAME, None)
  _test_module.main()
| 10,823 | 32.719626 | 79 | py |
spegg | spegg-master/tests/googletest/googletest/test/gtest_shuffle_test.py | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'

# Filter used by the *FILTERED* lists below.
TEST_FILTER = 'A*.A:A*.B:C*'

# Lazily-populated caches of test lists; CalculateTestLists() fills each one
# on first use (an empty list means "not computed yet").
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []

# The same four lists, as produced under --gtest_shuffle with seed 1.
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
  """Returns the command-line flag that also runs disabled tests."""
  return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
  """Returns a --gtest_filter flag that selects tests matching test_filter."""
  return '--gtest_filter=' + str(test_filter)
def RepeatFlag(n):
  """Returns a --gtest_repeat flag that repeats the tests n times."""
  return '--gtest_repeat=' + str(n)
def ShuffleFlag():
  """Returns the command-line flag that enables test shuffling."""
  return '--gtest_shuffle'
def RandomSeedFlag(n):
  """Returns a --gtest_random_seed flag with seed n."""
  return '--gtest_random_seed=' + str(n)
def RunAndReturnOutput(extra_env, args):
  """Runs the test program and returns its output.

  Args:
    extra_env: a map of extra environment variables layered on top of a
        copy of os.environ (os.environ itself is not modified).
    args: command-line flags passed to the gtest_shuffle_test_ binary.
  """
  environ_copy = os.environ.copy()
  environ_copy.update(extra_env)
  return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
  """Runs the test program and returns a list of test lists.

  Args:
    extra_env: a map from environment variables to their values
    args: command line flags to pass to gtest_shuffle_test_

  Returns:
    A list where the i-th element is the list of tests run in the i-th
    test iteration.
  """
  test_iterations = []
  for line in RunAndReturnOutput(extra_env, args).split('\n'):
    if line.startswith('----'):
      # A dashed separator line marks the start of a new iteration.
      tests = []
      test_iterations.append(tests)
    elif line.strip():
      tests.append(line.strip())  # 'TestCaseName.TestName'
  return test_iterations
def GetTestCases(tests):
  """Returns the test cases named in the given full test names.

  Args:
    tests: a list of full test names of the form 'TestCaseName.TestName'

  Returns:
    A list of the distinct test case names from 'tests', in order of first
    appearance.  All duplicates are removed, not just consecutive ones
    (the previous docstring incorrectly claimed only consecutive
    duplicates were removed).
  """
  seen = set()  # O(1) membership checks instead of rescanning the result.
  test_cases = []
  for test in tests:
    test_case = test.split('.')[0]
    if test_case not in seen:
      seen.add(test_case)
      test_cases.append(test_case)
  return test_cases
def CalculateTestLists():
  """Calculates the list of tests run under different flags.

  Populates the module-level test-list caches on first call; subsequent
  calls are no-ops because each list is only extended while empty.
  """
  if not ALL_TESTS:
    ALL_TESTS.extend(
        GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])

  if not ACTIVE_TESTS:
    ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])

  if not FILTERED_TESTS:
    FILTERED_TESTS.extend(
        GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])

  if not SHARDED_TESTS:
    # Shard 1 of 3, selected via environment variables.
    SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [])[0])

  if not SHUFFLED_ALL_TESTS:
    SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
        {}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])

  if not SHUFFLED_ACTIVE_TESTS:
    SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])[0])

  if not SHUFFLED_FILTERED_TESTS:
    SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])

  if not SHUFFLED_SHARDED_TESTS:
    SHUFFLED_SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
  """Tests test shuffling."""

  def setUp(self):
    # Populates the module-level test-list caches on first use.
    CalculateTestLists()

  def testShufflePreservesNumberOfTests(self):
    # Shuffling must not add or drop tests in any mode.
    self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
    self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
    self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
    self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))

  def testShuffleChangesTestOrder(self):
    # The shuffled order must differ from the unshuffled order.
    self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
    self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
    self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
                 SHUFFLED_FILTERED_TESTS)
    self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
                 SHUFFLED_SHARDED_TESTS)

  def testShuffleChangesTestCaseOrder(self):
    # The order of test cases (not just individual tests) must change, too.
    self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
                 GetTestCases(SHUFFLED_ALL_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
        GetTestCases(SHUFFLED_ACTIVE_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
        GetTestCases(SHUFFLED_FILTERED_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
        GetTestCases(SHUFFLED_SHARDED_TESTS))

  def testShuffleDoesNotRepeatTest(self):
    # Every test name must appear exactly once in each shuffled list.
    for test in SHUFFLED_ALL_TESTS:
      self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
                       '%s appears more than once' % (test,))

  def testShuffleDoesNotCreateNewTest(self):
    # Shuffled lists may only contain tests from the corresponding
    # unshuffled list.
    for test in SHUFFLED_ALL_TESTS:
      self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))

  def testShuffleIncludesAllTests(self):
    # Conversely, no test from the unshuffled list may be dropped.
    for test in ALL_TESTS:
      self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
    for test in ACTIVE_TESTS:
      self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
    for test in FILTERED_TESTS:
      self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
    for test in SHARDED_TESTS:
      self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))

  def testShuffleLeavesDeathTestsAtFront(self):
    # Once a non-death test has been seen, no death test may follow it.
    non_death_test_found = False
    for test in SHUFFLED_ACTIVE_TESTS:
      if 'DeathTest.' in test:
        self.assert_(not non_death_test_found,
                     '%s appears after a non-death test' % (test,))
      else:
        non_death_test_found = True

  def _VerifyTestCasesDoNotInterleave(self, tests):
    # Helper: asserts that all tests of any given test case appear
    # contiguously in 'tests'.
    test_cases = []
    for test in tests:
      [test_case, _] = test.split('.')
      if test_cases and test_cases[-1] != test_case:
        test_cases.append(test_case)
        self.assertEqual(1, test_cases.count(test_case),
                         'Test case %s is not grouped together in %s' %
                         (test_case, tests))

  def testShuffleDoesNotInterleaveTestCases(self):
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)

  def testShuffleRestoresOrderAfterEachIteration(self):
    # Get the test lists in all 3 iterations, using random seed 1, 2,
    # and 3 respectively. Google Test picks a different seed in each
    # iteration, and this test depends on the current implementation
    # picking successive numbers. This dependency is not ideal, but
    # makes the test much easier to write.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    # Make sure running the tests with random seed 1 gets the same
    # order as in iteration 1 above.
    [tests_with_seed1] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])
    self.assertEqual(tests_in_iteration1, tests_with_seed1)

    # Make sure running the tests with random seed 2 gets the same
    # order as in iteration 2 above. Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 2.
    [tests_with_seed2] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(2)])
    self.assertEqual(tests_in_iteration2, tests_with_seed2)

    # Make sure running the tests with random seed 3 gets the same
    # order as in iteration 3 above. Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 3.
    [tests_with_seed3] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(3)])
    self.assertEqual(tests_in_iteration3, tests_with_seed3)

  def testShuffleGeneratesNewOrderInEachIteration(self):
    # Each repeated iteration must be shuffled with a fresh seed, so the
    # three iterations must all differ pairwise.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    self.assert_(tests_in_iteration1 != tests_in_iteration2,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration1 != tests_in_iteration3,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration2 != tests_in_iteration3,
                 tests_in_iteration2)

  def testShuffleShardedTestsPreservesPartition(self):
    # If we run M tests on N shards, the same M tests should be run in
    # total, regardless of the random seeds used by the shards.
    [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '0'},
                                        [ShuffleFlag(), RandomSeedFlag(1)])
    [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '1'},
                                        [ShuffleFlag(), RandomSeedFlag(20)])
    [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '2'},
                                        [ShuffleFlag(), RandomSeedFlag(25)])
    sorted_sharded_tests = tests1 + tests2 + tests3
    sorted_sharded_tests.sort()
    sorted_active_tests = []
    sorted_active_tests.extend(ACTIVE_TESTS)
    sorted_active_tests.sort()
    self.assertEqual(sorted_active_tests, sorted_sharded_tests)
# Script entry point: delegates to the shared gtest test runner.
if __name__ == '__main__':
  gtest_test_utils.Main()
| 12,549 | 37.496933 | 79 | py |
spegg | spegg-master/tests/googletest/googletest/test/gtest_env_var_test.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Platform detection used to gate platform-specific flags below.
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'

COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')

# Mutable copy of the environment; SetEnvVar edits this copy, never
# os.environ itself.
environ = os.environ.copy()
def AssertEq(expected, actual):
  """Raises AssertionError (after printing both values) unless equal."""
  if expected == actual:
    return
  print('Expected: %s' % (expected,))
  print('  Actual: %s' % (actual,))
  raise AssertionError
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # pop() with a default is a no-op when the variable is absent.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def GetFlag(flag):
  """Runs gtest_env_var_test_ and returns its output.

  Args:
    flag: a command-line flag to pass to the binary, or None for none.
  """
  args = [COMMAND]
  if flag is not None:
    args += [flag]
  # Runs under the module-level 'environ' copy so SetEnvVar takes effect.
  return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var.

  Args:
    flag: name of a Google Test flag (without the '--gtest_' prefix).
    test_val: value to set through the GTEST_<FLAG> environment variable.
    default_val: expected flag value once the variable is unset again.
  """
  env_var = 'GTEST_' + flag.upper()
  SetEnvVar(env_var, test_val)
  AssertEq(test_val, GetFlag(flag))
  SetEnvVar(env_var, None)
  AssertEq(default_val, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
  """Verifies GTEST_* environment variables drive the matching flags."""

  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""
    TestFlag('break_on_failure', '1', '0')
    TestFlag('color', 'yes', 'auto')
    TestFlag('filter', 'FooTest.Bar', '*')
    SetEnvVar('XML_OUTPUT_FILE', None)  # For 'output' test
    TestFlag('output', 'xml:tmp/foo.xml', '')
    TestFlag('print_time', '0', '1')
    TestFlag('repeat', '999', '1')
    TestFlag('throw_on_failure', '1', '0')
    TestFlag('death_test_style', 'threadsafe', 'fast')
    TestFlag('catch_exceptions', '0', '1')

    if IS_LINUX:
      # These two flags are only meaningful on Linux.
      TestFlag('death_test_use_fork', '1', '0')
      TestFlag('stack_trace_depth', '0', '100')

  def testXmlOutputFile(self):
    """Tests that $XML_OUTPUT_FILE affects the output flag."""
    SetEnvVar('GTEST_OUTPUT', None)
    SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
    AssertEq('xml:tmp/bar.xml', GetFlag('output'))

  def testXmlOutputFileOverride(self):
    """Tests that $XML_OUTPUT_FILE is overridden by $GTEST_OUTPUT"""
    SetEnvVar('GTEST_OUTPUT', 'xml:tmp/foo.xml')
    SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
    AssertEq('xml:tmp/foo.xml', GetFlag('output'))
# Script entry point: delegates to the shared gtest test runner.
if __name__ == '__main__':
  gtest_test_utils.Main()
| 4,036 | 33.211864 | 79 | py |
spegg | spegg-master/tests/googletest/googletest/test/gtest_help_test.py | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
re.sub('^--', '/', LIST_TESTS_FLAG),
re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
[PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
FLAG_PREFIX + r'list_tests.*' +
FLAG_PREFIX + r'filter=.*' +
FLAG_PREFIX + r'also_run_disabled_tests.*' +
FLAG_PREFIX + r'repeat=.*' +
FLAG_PREFIX + r'shuffle.*' +
FLAG_PREFIX + r'random_seed=.*' +
FLAG_PREFIX + r'color=.*' +
FLAG_PREFIX + r'print_time.*' +
FLAG_PREFIX + r'output=.*' +
FLAG_PREFIX + r'break_on_failure.*' +
FLAG_PREFIX + r'throw_on_failure.*' +
FLAG_PREFIX + r'catch_exceptions=0.*',
re.DOTALL)
def RunWithFlag(flag):
  """Runs gtest_help_test_ with the given flag.

  Returns:
    the exit code and the text output as a tuple.
  Args:
    flag: the command-line flag to pass to gtest_help_test_, or None.
  """
  if flag is None:
    command = [PROGRAM_PATH]
  else:
    command = [PROGRAM_PATH, flag]
  child = gtest_test_utils.Subprocess(command)
  return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
  """Tests the --help flag and its equivalent forms."""

  def TestHelpFlag(self, flag):
    """Verifies correct behavior when help flag is specified.

    The right message must be printed and the tests must
    skipped when the given flag is specified.

    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    self.assertEquals(0, exit_code)
    self.assert_(HELP_REGEX.search(output), output)

    # --gtest_stream_result_to is only advertised on Linux.
    if IS_LINUX:
      self.assert_(STREAM_RESULT_TO_FLAG in output, output)
    else:
      self.assert_(STREAM_RESULT_TO_FLAG not in output, output)

    # --gtest_death_test_style only appears when the binary supports
    # death tests and we are not on Windows.
    if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
      self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
    else:
      self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)

  def TestNonHelpFlag(self, flag):
    """Verifies correct behavior when no help flag is specified.

    Verifies that when no help flag is specified, the tests are run
    and the help message is not printed.

    Args:
      flag: A flag to pass to the binary or None.
    """
    # The binary intentionally contains failing tests, hence exit code != 0.
    exit_code, output = RunWithFlag(flag)
    self.assert_(exit_code != 0)
    self.assert_(not HELP_REGEX.search(output), output)

  def testPrintsHelpWithFullFlag(self):
    self.TestHelpFlag('--help')

  def testPrintsHelpWithShortFlag(self):
    self.TestHelpFlag('-h')

  def testPrintsHelpWithQuestionFlag(self):
    self.TestHelpFlag('-?')

  def testPrintsHelpWithWindowsStyleQuestionFlag(self):
    self.TestHelpFlag('/?')

  def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
    self.TestHelpFlag(UNKNOWN_FLAG)

  def testPrintsHelpWithIncorrectFlagStyle(self):
    for incorrect_flag in INCORRECT_FLAG_VARIANTS:
      self.TestHelpFlag(incorrect_flag)

  def testRunsTestsWithoutHelpFlag(self):
    """Verifies that when no help flag is specified, the tests are run
    and the help message is not printed."""
    self.TestNonHelpFlag(None)

  def testRunsTestsWithGtestInternalFlag(self):
    """Verifies that the tests are run and no help message is printed when
    a flag starting with Google Test prefix and 'internal_' is supplied."""
    self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
# Script entry point: delegates to the shared gtest test runner.
if __name__ == '__main__':
  gtest_test_utils.Main()
| 5,856 | 32.855491 | 75 | py |
spegg | spegg-master/tests/googletest/googletest/test/gtest_break_on_failure_unittest.py | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
  """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
  # Runs under the module-level 'environ' map so SetEnvVar takes effect.
  p = gtest_test_utils.Subprocess(command, env=environ)
  if p.terminated_by_signal:
    return 1
  else:
    return 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
  """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
  the --gtest_break_on_failure flag to turn assertion failures into
  segmentation faults.
  """

  def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
    """Runs gtest_break_on_failure_unittest_ and verifies that it does
    (or does not) have a seg-fault.

    Args:
      env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
                     variable; None if the variable should be unset.
      flag_value: value of the --gtest_break_on_failure flag;
                  None if the flag should not be present.
      expect_seg_fault: 1 if the program is expected to generate a seg-fault;
                        0 otherwise.
    """
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value

    # Translates flag_value into an actual command-line argument
    # (''/'--flag=0'/'--flag').
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
    else:
      flag = '--%s' % BREAK_ON_FAILURE_FLAG

    command = [EXE_PATH]
    if flag:
      command.append(flag)

    if expect_seg_fault:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'

    has_seg_fault = Run(command)

    # Always restores the environment variable before asserting.
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
           (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(has_seg_fault == expect_seg_fault, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(env_var_value=None,
                      flag_value=None,
                      expect_seg_fault=0)

  def testEnvVar(self):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      expect_seg_fault=1)

  def testFlag(self):
    """Tests using the --gtest_break_on_failure flag."""
    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      expect_seg_fault=1)

  def testFlagOverridesEnvVar(self):
    """Tests that the flag overrides the environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      expect_seg_fault=1)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      expect_seg_fault=1)

  def testBreakOnFailureOverridesThrowOnFailure(self):
    """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
    SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
    try:
      self.RunAndVerify(env_var_value=None,
                        flag_value='1',
                        expect_seg_fault=1)
    finally:
      SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)

  # This test only exists (and only makes sense) on Windows.
  if IS_WINDOWS:
    def testCatchExceptionsDoesNotInterfere(self):
      """Tests that gtest_catch_exceptions doesn't interfere."""
      SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
      try:
        self.RunAndVerify(env_var_value='1',
                          flag_value='1',
                          expect_seg_fault=1)
      finally:
        SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
# Script entry point: delegates to the shared gtest test runner.
if __name__ == '__main__':
  gtest_test_utils.Main()
| 7,339 | 33.460094 | 79 | py |
spegg | spegg-master/tests/googletest/googletest/test/gtest_output_test.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import difflib
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# At least one command we exercise must not have the
# 'internal_skip_environment_and_ad_hoc_tests' argument.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
  """Changes all Windows/Mac line endings in s to UNIX line endings."""
  # '\r\n?' matches a Windows CRLF pair before it can match a lone
  # Mac-style '\r', so each pair collapses to exactly one '\n'.
  return re.sub(r'\r\n?', '\n', s)
def RemoveLocations(test_output):
  """Removes all file location info from a Google Test program's output.

  Args:
    test_output: the output of a Google Test program.

  Returns:
    output with all file location info (in the form of
    'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
    'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
    'FILE_NAME:#: '.
  """
  # Group 1 captures the bare file name; the ':123' or '(123)' line
  # number is collapsed into the placeholder '#'.
  location_re = r'.*[/\\](.+)(\:\d+|\(\d+\))\: '
  return re.sub(location_re, r'\1:#: ', test_output)
def RemoveStackTraceDetails(output):
  """Replaces each stack trace in the output with '(omitted)'."""
  # The non-greedy (.|\n)*? keeps each match to one trace: a trace ends
  # at the first blank line.
  trace_re = re.compile(r'Stack trace:(.|\n)*?\n\n')
  return trace_re.sub('Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
  """Removes all traces of stack traces from a Google Test program's output."""
  # Non-greedy match: each trace runs up to (and including) the first
  # blank line after the 'Stack trace:' header.
  trace_re = re.compile(r'Stack trace:(.|\n)*?\n\n')
  return trace_re.sub('', output)
def RemoveTime(output):
  """Removes all time information from a Google Test program's output."""
  # Masks elapsed-time reports such as '(123 ms' with a fixed placeholder.
  elapsed_re = re.compile(r'\(\d+ ms')
  return elapsed_re.sub('(? ms', output)
def RemoveTypeInfoDetails(test_output):
  """Removes compiler-specific type info from Google Test program's output.

  Args:
    test_output: the output of a Google Test program.

  Returns:
    output with type information normalized to canonical form.
  """
  # Some compilers print the type 'unsigned int' as just 'unsigned'; a
  # literal replace suffices since the needle has no regex metacharacters.
  return test_output.replace('unsigned int', 'unsigned')
def NormalizeToCurrentPlatform(test_output):
  """Normalizes platform specific output details for easier comparison."""
  if IS_WINDOWS:
    # Removes the color information that is not present on Windows.
    test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
    # Changes failure message headers into the Windows format.
    test_output = re.sub(r': Failure\n', r': error: ', test_output)
    # Changes file(line_number) to file:line_number.
    test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
  # On non-Windows platforms the output is returned unchanged.
  return test_output
def RemoveTestCounts(output):
  """Removes test counts from a Google Test program's output."""
  # Each (pattern, replacement) pair masks one count format.  The order
  # matters: the more specific patterns must run before the generic
  # '\d+ tests?.' one.
  substitutions = (
      (r'\d+ tests?, listed below', '? tests, listed below'),
      (r'\d+ FAILED TESTS', '? FAILED TESTS'),
      (r'\d+ tests? from \d+ test cases?', '? tests from ? test cases'),
      (r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1'),
      (r'\d+ tests?\.', '? tests.'),
  )
  for pattern, replacement in substitutions:
    output = re.sub(pattern, replacement, output)
  return output
def RemoveMatchingTests(test_output, pattern):
  """Removes output of specified tests from a Google Test program's output.

  This function strips not only the beginning and the end of a test but also
  all output in between.

  Args:
    test_output: A string containing the test output.
    pattern: A regex string that matches names of test cases or
             tests to remove.

  Returns:
    Contents of test_output with tests whose names match pattern removed.
  """
  # First drop every '[ RUN ]' ... '[ FAILED / OK ]' span of a matching
  # test, including whatever the test printed in between (non-greedy so
  # each match covers exactly one test).
  run_span_re = (r'.*\[ RUN      \] .*%s(.|\n)*?\[(  FAILED  |       OK )\] '
                 r'.*%s.*\n' % (pattern, pattern))
  test_output = re.sub(run_span_re, '', test_output)
  # Then drop any remaining single lines that mention a matching test
  # (e.g. entries in the final [ PASSED ]/[ FAILED ] summaries).
  return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
  """Normalizes output (the output of gtest_output_test_.exe)."""
  # Applies each scrubbing pass in order; the line-ending conversion runs
  # first because the later regex passes assume '\n' endings.
  for scrub in (ToUnixLineEnding, RemoveLocations,
                RemoveStackTraceDetails, RemoveTime):
    output = scrub(output)
  return output
def GetShellCommandOutput(env_cmd):
  """Runs a command in a sub-process, and returns its output in a string.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.

  Returns:
    A string with the command's combined standard and diagnostic output.
  """
  extra_env, command = env_cmd
  # Run with the current environment augmented by the requested variables.
  run_env = os.environ.copy()
  run_env.update(extra_env)
  return gtest_test_utils.Subprocess(command, env=run_env).output
def GetCommandOutput(env_cmd):
  """Runs a command and returns its output with all file location
  info stripped off.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.
  """
  extra_env, cmdline = env_cmd
  # Work on a copy so the caller's dict is not mutated; setting the
  # catch-exceptions variable disables exception pop-ups on Windows.
  run_env = dict(extra_env)
  run_env[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
  return NormalizeOutput(GetShellCommandOutput((run_env, cmdline)))
def GetOutputOfAllCommands():
  """Returns concatenated output from several representative commands."""
  commands = (COMMAND_WITH_COLOR,
              COMMAND_WITH_TIME,
              COMMAND_WITH_DISABLED,
              COMMAND_WITH_SHARDING)
  # Concatenation order matches the golden file's layout.
  return ''.join(GetCommandOutput(command) for command in commands)
# Probe the test binary once for the optional features it was built with;
# the golden-file comparison below is adjusted accordingly.
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
# Stack traces are never compared against the golden file.
SUPPORTS_STACK_TRACES = False

# The golden file can only be (re)generated when every optional feature is
# available and we are not on Windows (whose output format differs).
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
                            SUPPORTS_TYPED_TESTS and
                            SUPPORTS_THREADS and
                            not IS_WINDOWS)
class GTestOutputTest(gtest_test_utils.TestCase):
  """Compares gtest_output_test_'s combined output against the golden file."""

  def RemoveUnsupportedTests(self, test_output):
    # Strips from the golden output every test that this build of the test
    # binary does not support, so the comparison stays meaningful on
    # feature-limited platforms.
    if not SUPPORTS_DEATH_TESTS:
      test_output = RemoveMatchingTests(test_output, 'DeathTest')
    if not SUPPORTS_TYPED_TESTS:
      test_output = RemoveMatchingTests(test_output, 'TypedTest')
      test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
      test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
    if not SUPPORTS_THREADS:
      test_output = RemoveMatchingTests(test_output,
                                        'ExpectFailureWithThreadsTest')
      test_output = RemoveMatchingTests(test_output,
                                        'ScopedFakeTestPartResultReporterTest')
      test_output = RemoveMatchingTests(test_output,
                                        'WorksConcurrently')
    if not SUPPORTS_STACK_TRACES:
      test_output = RemoveStackTraces(test_output)
    return test_output

  def testOutput(self):
    output = GetOutputOfAllCommands()

    golden_file = open(GOLDEN_PATH, 'r')
    # A mis-configured source control system can cause \r appear in EOL
    # sequences when we read the golden file irrespective of an operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.
    golden = ToUnixLineEnding(golden_file.read())
    golden_file.close()

    # We want the test to pass regardless of certain features being
    # supported or not.
    # We still have to remove type name specifics in all cases.
    normalized_actual = RemoveTypeInfoDetails(output)
    normalized_golden = RemoveTypeInfoDetails(golden)

    if CAN_GENERATE_GOLDEN_FILE:
      # Full-fidelity comparison; on mismatch, show a unified diff.
      self.assertEqual(normalized_golden, normalized_actual,
                       '\n'.join(difflib.unified_diff(
                           normalized_golden.split('\n'),
                           normalized_actual.split('\n'),
                           'golden', 'actual')))
    else:
      # Feature-limited build: also erase counts/platform specifics and
      # drop unsupported tests from the golden before comparing.
      normalized_actual = NormalizeToCurrentPlatform(
          RemoveTestCounts(normalized_actual))
      normalized_golden = NormalizeToCurrentPlatform(
          RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))

      # This code is very handy when debugging golden file differences:
      # NOTE(review): the files are opened in binary mode but written a str;
      # fine under Python 2, would need encoding under Python 3 -- confirm
      # the intended interpreter.
      if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_actual.txt'), 'wb').write(
                normalized_actual)
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_golden.txt'), 'wb').write(
                normalized_golden)

      self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
  # Passing --gengolden regenerates the golden file instead of comparing
  # against it; this only works on a fully-featured, non-Windows build.
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    if CAN_GENERATE_GOLDEN_FILE:
      output = GetOutputOfAllCommands()
      golden_file = open(GOLDEN_PATH, 'wb')
      golden_file.write(output)
      golden_file.close()
    else:
      message = (
          """Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
      sys.stderr.write(message)
      sys.exit(1)
  else:
    gtest_test_utils.Main()
| 12,259 | 34.953079 | 79 | py |
spegg | spegg-master/tests/googletest/googletest/test/gtest_catch_exceptions_test.py | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = 'vladl@google.com (Vlad Losev)'

import os
import gtest_test_utils

# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'

# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_catch_exceptions_ex_test_')

# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_catch_exceptions_no_ex_test_')

environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar

# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)

# Discover which tests the no-exceptions binary contains; SEH support is
# inferred from the presence of the ThrowsSehException test.
TEST_LIST = gtest_test_utils.Subprocess(
    [EXE_PATH, LIST_TESTS_FLAG], env=environ).output

SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST

if SUPPORTS_SEH_EXCEPTIONS:
  BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output

EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
    [EX_EXE_PATH], env=environ).output
# The tests.

# SEH (structured exception handling) is Windows-only, so this test case is
# defined only when the binary actually contains SEH tests.
if SUPPORTS_SEH_EXCEPTIONS:
  # pylint:disable-msg=C6302
  class CatchSehExceptionsTest(gtest_test_utils.TestCase):
    """Tests exception-catching behavior."""

    def TestSehExceptions(self, test_output):
      # Every phase of the fixture life cycle must report the SEH exception
      # (code 0x2a) thrown in it.
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s constructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s destructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUp()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDown()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in the test body'
                   in test_output)

    def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
      self.TestSehExceptions(EX_BINARY_OUTPUT)

    def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
      self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
  """Tests C++ exception-catching behavior.

  Tests in this test case verify that:
  * C++ exceptions are caught and logged as C++ (not SEH) exceptions
  * Exception thrown affect the remainder of the test work flow in the
    expected manner.
  """

  def testCatchesCxxExceptionsInFixtureConstructor(self):
    self.assert_('C++ exception with description '
                 '"Standard C++ exception" thrown '
                 'in the test fixture\'s constructor'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInConstructorTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')

  # This method is defined only when the corresponding test appears in the
  # captured binary output (i.e. it was not compiled out).
  if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
      EX_BINARY_OUTPUT):

    def testCatchesCxxExceptionsInFixtureDestructor(self):
      self.assert_('C++ exception with description '
                   '"Standard C++ exception" thrown '
                   'in the test fixture\'s destructor'
                   in EX_BINARY_OUTPUT)
      self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
                   'called as expected.'
                   in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUpTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUpTestCase()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest test body '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTearDownTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDownTestCase()'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUp(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUp()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInSetUpTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')

  def testCatchesCxxExceptionsInTearDown(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDown()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTestBody(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in the test body'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesNonStdCxxExceptions(self):
    self.assert_('Unknown C++ exception thrown in the test body'
                 in EX_BINARY_OUTPUT)

  def testUnhandledCxxExceptionsAbortTheProgram(self):
    # Filters out SEH exception tests on Windows. Unhandled SEH exceptions
    # cause tests to show pop-up windows there.
    # NOTE(review): 'FITLER' below is a typo for 'FILTER'; it is a local
    # name only, so behavior is unaffected.
    FITLER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
    # By default, Google Test doesn't catch the exceptions.
    uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
        [EX_EXE_PATH,
         NO_CATCH_EXCEPTIONS_FLAG,
         FITLER_OUT_SEH_TESTS_FLAG],
        env=environ).output

    self.assert_('Unhandled C++ exception terminating the program'
                 in uncaught_exceptions_ex_binary_output)
    self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
  # Standard googletest-script entry point: runs all TestCase classes above.
  gtest_test_utils.Main()
| 9,901 | 40.605042 | 78 | py |
spegg | spegg-master/tests/googletest/googletest/test/gtest_uninitialized_test.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
__author__ = 'wan@google.com (Zhanyong Wan)'

import gtest_test_utils

# Path to the gtest_uninitialized_test_ binary exercised below.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_uninitialized_test_')
def Assert(condition):
  """Raises AssertionError unless condition is truthy."""
  if condition:
    return
  raise AssertionError
def AssertEq(expected, actual):
  """Raises AssertionError (after printing both values) unless expected == actual."""
  if expected == actual:
    return
  # Show the mismatch before failing, mirroring unittest-style output.
  print('Expected: %s' % (expected,))
  print(' Actual: %s' % (actual,))
  raise AssertionError
def TestExitCodeAndOutput(command):
  """Runs the given command and verifies its exit code and output."""
  # The uninitialized binary must exit (not crash), return code 1, and
  # mention InitGoogleTest in its warning message.
  proc = gtest_test_utils.Subprocess(command)
  Assert(proc.exited)
  AssertEq(1, proc.exit_code)
  Assert('InitGoogleTest' in proc.output)
class GTestUninitializedTest(gtest_test_utils.TestCase):
  """Verifies the warning printed when InitGoogleTest() was never called."""

  def testExitCodeAndOutput(self):
    TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
  # Standard googletest-script entry point.
  gtest_test_utils.Main()
| 2,482 | 33.971831 | 77 | py |
spegg | spegg-master/tests/googletest/googletest/test/gtest_color_test.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'

import os
import gtest_test_utils
# True iff we are running on Windows.
# BUG FIX: the original line read "IS_WINDOWS = os.name = 'nt'" -- a chained
# *assignment* that clobbered os.name on every platform and made IS_WINDOWS
# the always-truthy string 'nt', so the non-Windows branches of the tests
# below were silently skipped everywhere.  '==' is the intended comparison.
IS_WINDOWS = os.name == 'nt'

# Names of the environment variable and flag that control color output.
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # pop() with a default is a no-op when the variable is absent.
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def UsesColor(term, color_env_var, color_flag):
  """Runs gtest_color_test_ and returns its exit code."""
  # Arrange TERM and GTEST_COLOR as requested (None means unset), then pass
  # --gtest_color only when a flag value was supplied.
  SetEnvVar('TERM', term)
  SetEnvVar(COLOR_ENV_VAR, color_env_var)

  flags = [] if color_flag is None else ['--%s=%s' % (COLOR_FLAG, color_flag)]
  proc = gtest_test_utils.Subprocess([COMMAND] + flags)
  # Truthy when the binary did not exit normally or exited non-zero.
  return not proc.exited or proc.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
  """Checks the color decision for combinations of TERM, GTEST_COLOR and flag."""

  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    if not IS_WINDOWS:
      # Non-color-capable terminals must not get color by default.
      self.assert_(not UsesColor('dumb', None, None))
      self.assert_(not UsesColor('emacs', None, None))
      self.assert_(not UsesColor('xterm-mono', None, None))
      self.assert_(not UsesColor('unknown', None, None))
      self.assert_(not UsesColor(None, None, None))
    self.assert_(UsesColor('linux', None, None))
    self.assert_(UsesColor('cygwin', None, None))
    self.assert_(UsesColor('xterm', None, None))
    self.assert_(UsesColor('xterm-color', None, None))
    self.assert_(UsesColor('xterm-256color', None, None))

  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    self.assert_(not UsesColor('dumb', None, 'no'))
    self.assert_(not UsesColor('xterm-color', None, 'no'))
    if not IS_WINDOWS:
      # 'auto' defers to the terminal's capability.
      self.assert_(not UsesColor('emacs', None, 'auto'))
    self.assert_(UsesColor('xterm', None, 'auto'))
    self.assert_(UsesColor('dumb', None, 'yes'))
    self.assert_(UsesColor('xterm', None, 'yes'))

  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    self.assert_(not UsesColor('dumb', 'no', None))
    self.assert_(not UsesColor('xterm-color', 'no', None))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', 'auto', None))
    self.assert_(UsesColor('xterm-color', 'auto', None))
    self.assert_(UsesColor('dumb', 'yes', None))
    self.assert_(UsesColor('xterm-color', 'yes', None))

  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color."""
    # The flag takes precedence over the environment variable.
    self.assert_(not UsesColor('xterm-color', 'no', 'no'))
    self.assert_(UsesColor('dumb', 'no', 'yes'))
    self.assert_(UsesColor('xterm-color', 'no', 'auto'))

  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    self.assert_(UsesColor('dumb', None, 'true'))
    self.assert_(UsesColor('dumb', None, 'YES'))
    self.assert_(UsesColor('dumb', None, 'T'))
    self.assert_(UsesColor('dumb', None, '1'))

    self.assert_(not UsesColor('xterm', None, 'f'))
    self.assert_(not UsesColor('xterm', None, 'false'))
    self.assert_(not UsesColor('xterm', None, '0'))
    self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
  # Standard googletest-script entry point.
  gtest_test_utils.Main()
| 4,911 | 36.496183 | 76 | py |
spegg | spegg-master/tests/googletest/googletest/test/gtest_xml_output_unittest.py | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'

import datetime
import errno
import os
import re
import sys
from xml.dom import minidom, Node

import gtest_test_utils
import gtest_xml_test_utils

# Flags understood by the test binaries, and related constants.
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"

# Stack traces are not currently emitted into the XML output, so the golden
# template below collapses to the empty string.
SUPPORTS_STACK_TRACES = False

if SUPPORTS_STACK_TRACES:
  STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
  STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*
 Expected: 1
To be equal to: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected: 1
To be equal to: 2%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*
 Expected: 1
To be equal to: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected: 1
To be equal to: 2%(stack)s]]></failure>
<failure message="gtest_xml_output_unittest_.cc:*
 Expected: 2
To be equal to: 3" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected: 2
To be equal to: 3%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" SetUpTestCase="yes" TearDownTestCase="aye">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests">
</testsuites>"""
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)

# Typed tests show up in --gtest_list_tests output only when the binary was
# built with typed-test support.
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
    [GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
# This test currently breaks on platforms that do not support typed and
# type-parameterized tests, so we don't run it under them.
if SUPPORTS_TYPED_TESTS:
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""Verifies XML output for a Google Test binary without actual tests.
Runs a test program that generates an empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0)
def testTimestampValue(self):
"""Checks whether the timestamp attribute in the XML output is valid.
Runs a test program that generates an empty XML output, and checks if
the timestamp attribute in the testsuites tag is valid.
"""
actual = self._GetXmlOutput('gtest_no_test_unittest', [], 0)
date_time_str = actual.documentElement.getAttributeNode('timestamp').value
# datetime.strptime() is only available in Python 2.5+ so we have to
# parse the expected datetime manually.
match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
self.assertTrue(
re.match,
'XML datettime string %s has incorrect format' % date_time_str)
date_time_from_xml = datetime.datetime(
year=int(match.group(1)), month=int(match.group(2)),
day=int(match.group(3)), hour=int(match.group(4)),
minute=int(match.group(5)), second=int(match.group(6)))
time_delta = abs(datetime.datetime.now() - date_time_from_xml)
# timestamp value should be near the current local time
self.assertTrue(time_delta < datetime.timedelta(seconds=600),
'time_delta is %s' % time_delta)
actual.unlink()
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
'gtest_no_test_unittest')
try:
os.remove(output_file)
except OSError:
e = sys.exc_info()[1]
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + 'out.xml')
if os.path.isfile(xml_path):
os.remove(xml_path)
command = [GTEST_PROGRAM_PATH,
'%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
'--shut_down_xml']
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
# p.signal is avalable only if p.terminated_by_signal is True.
self.assertFalse(
p.terminated_by_signal,
'%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def testFilteredTestXmlOutput(self):
"""Verifies XML output when a filter is applied.
Runs a test program that executes only some tests and verifies that
non-selected tests do not show up in the XML output.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])
  def _GetXmlOutput(self, gtest_prog_name, extra_args, expected_exit_code):
    """
    Returns the xml output generated by running the program gtest_prog_name.
    Furthermore, the program's exit code must be expected_exit_code.
    """
    xml_path = os.path.join(gtest_test_utils.GetTempDir(),
                            gtest_prog_name + 'out.xml')
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
    command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
               extra_args)
    p = gtest_test_utils.Subprocess(command)
    if p.terminated_by_signal:
      # A crash is always a test failure, regardless of expected_exit_code.
      self.assert_(False,
                   '%s was killed by signal %d' % (gtest_prog_name, p.signal))
    else:
      self.assert_(p.exited)
      self.assertEquals(expected_exit_code, p.exit_code,
                        "'%s' exited with code %s, which doesn't match "
                        'the expected exit code %s.'
                        % (command, p.exit_code, expected_exit_code))
    # Parse the report the child program wrote; caller must unlink() it.
    actual = minidom.parse(xml_path)
    return actual
def _TestXmlOutput(self, gtest_prog_name, expected_xml,
expected_exit_code, extra_args=None):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
expected_exit_code)
expected = minidom.parseString(expected_xml)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == '__main__':
  # Sets GTEST_STACK_TRACE_DEPTH for the gtest binaries launched by the
  # tests above; '1' keeps their failure output short.
  os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
  gtest_test_utils.Main()
| 14,677 | 46.501618 | 225 | py |
spegg | spegg-master/tests/googletest/googletest/scripts/fuse_gtest_files.py | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
try:
from sets import Set as set # For Python 2.3 compatibility
except ImportError:
pass
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')
# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')
# Where to find the source seed files (paths relative to the gtest root).
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'
# Where to put the generated files (paths relative to the output directory).
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
  """Aborts the program unless directory/relative_path is an existing file.

  relative_path is interpreted relative to the given directory.
  """
  candidate = os.path.join(directory, relative_path)
  if os.path.isfile(candidate):
    return
  print('ERROR: Cannot find %s in directory %s.' % (relative_path,
                                                    directory))
  print('Please either specify a valid project root directory '
        'or omit it on the command line.')
  sys.exit(1)
def ValidateGTestRootDir(gtest_root):
  """Aborts unless gtest_root looks like a Google Test source tree.

  A valid root must contain both seed files that the fusion starts from.
  """
  for seed in (GTEST_H_SEED, GTEST_ALL_CC_SEED):
    VerifyFileExists(gtest_root, seed)
def VerifyOutputFile(output_dir, relative_path):
  """Verifies that the given output file path is valid.

  relative_path is relative to the output_dir directory.  If the file
  already exists, the user is asked (interactively, via stdin) whether
  to overwrite it; answering anything but 'y'/'Y' aborts the program.
  """
  # Makes sure the output file either doesn't exist or can be overwritten.
  output_file = os.path.join(output_dir, relative_path)
  if os.path.exists(output_file):
    # TODO(wan@google.com): The following user-interaction doesn't
    # work with automated processes.  We should provide a way for the
    # Makefile to force overwriting the files.
    print('%s already exists in directory %s - overwrite it? (y/N) ' %
          (relative_path, output_dir))
    answer = sys.stdin.readline().strip()
    if answer not in ['y', 'Y']:
      print('ABORTED.')
      sys.exit(1)
  # Makes sure the directory holding the output file exists; creates
  # it and all its ancestors if necessary.
  parent_directory = os.path.dirname(output_file)
  if not os.path.isdir(parent_directory):
    os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
  """Checks (and if needed prepares) both output paths, aborting on failure.

  Each target file is confirmed to be writable/overwritable and its parent
  directory is created when missing.
  """
  for target in (GTEST_H_OUTPUT, GTEST_ALL_CC_OUTPUT):
    VerifyOutputFile(output_dir, target)
def FuseGTestH(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest.h in output_dir.

  Starting from GTEST_H_SEED, every '#include "gtest/..."' directive is
  expanded in place, each header at most once; all other lines are copied
  verbatim into the fused output.
  """
  processed_files = set()  # Holds all gtest headers we've processed.

  def ProcessFile(gtest_header_path, output_file):
    """Appends the given gtest header (and its gtest includes) to output_file."""
    # We don't process the same header twice.
    if gtest_header_path in processed_files:
      return
    processed_files.add(gtest_header_path)
    # 'with' closes each input handle deterministically; the original code
    # left one handle open per processed header.
    with open(os.path.join(gtest_root, gtest_header_path), 'r') as input_file:
      # Reads each line in the given gtest header.
      for line in input_file:
        m = INCLUDE_GTEST_FILE_REGEX.match(line)
        if m:
          # It's '#include "gtest/..."' - let's process it recursively.
          ProcessFile('include/' + m.group(1), output_file)
        else:
          # Otherwise we copy the line unchanged to the output file.
          output_file.write(line)

  # 'with' guarantees the fused output is flushed/closed even on error.
  with open(os.path.join(output_dir, GTEST_H_OUTPUT), 'w') as output_file:
    ProcessFile(GTEST_H_SEED, output_file)
def FuseGTestAllCcToFile(gtest_root, output_file):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file.

  output_file is an already-open writable file object; the caller owns
  (and closes) it.
  """
  processed_files = set()

  def ProcessFile(gtest_source_file):
    """Appends the given gtest source file, recursively fused, to output_file."""
    # We don't process the same #included file twice.
    if gtest_source_file in processed_files:
      return
    processed_files.add(gtest_source_file)
    # 'with' closes the input handle deterministically; the original code
    # leaked one open handle per processed file.
    with open(os.path.join(gtest_root, gtest_source_file), 'r') as input_file:
      # Reads each line in the given gtest source file.
      for line in input_file:
        m = INCLUDE_GTEST_FILE_REGEX.match(line)
        if m:
          if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
            # It's '#include "gtest/gtest-spi.h"'.  This file is not
            # #included by "gtest/gtest.h", so we need to process it.
            ProcessFile(GTEST_SPI_H_SEED)
          else:
            # It's '#include "gtest/foo.h"' where foo is not gtest-spi.
            # We treat it as '#include "gtest/gtest.h"', as all other
            # gtest headers are being fused into gtest.h and cannot be
            # #included directly.
            # There is no need to #include "gtest/gtest.h" more than once.
            if not GTEST_H_SEED in processed_files:
              processed_files.add(GTEST_H_SEED)
              output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
        else:
          m = INCLUDE_SRC_FILE_REGEX.match(line)
          if m:
            # It's '#include "src/foo"' - let's process it recursively.
            ProcessFile(m.group(1))
          else:
            output_file.write(line)

  ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
  # 'with' ensures the output file is flushed and closed even if fusion
  # raises part-way through (the original only closed it on success).
  with open(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w') as output_file:
    FuseGTestAllCcToFile(gtest_root, output_file)
def FuseGTest(gtest_root, output_dir):
  """Fuses gtest.h and gtest-all.cc.

  Validates both directories first (each validator aborts the program on
  failure), then generates the two fused files under output_dir.
  """
  ValidateGTestRootDir(gtest_root)
  ValidateOutputDir(output_dir)
  FuseGTestH(gtest_root, output_dir)
  FuseGTestAllCc(gtest_root, output_dir)
def main():
  """Entry point: dispatches on the number of command-line arguments."""
  args = sys.argv[1:]
  if len(args) == 1:
    # fuse_gtest_files.py OUTPUT_DIR
    FuseGTest(DEFAULT_GTEST_ROOT_DIR, args[0])
  elif len(args) == 2:
    # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
    FuseGTest(args[0], args[1])
  else:
    print(__doc__)
    sys.exit(1)
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
  main()
| 8,884 | 33.980315 | 78 | py |
spegg | spegg-master/tests/googletest/googletest/scripts/upload.py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import md5
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
try:
import readline
except ImportError:
pass
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
verbosity = 1
# Max size of patch or base file, in bytes (900 KiB); larger files are
# skipped by UploadBaseFiles.
MAX_UPLOAD_SIZE = 900 * 1024
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.

  The last used email address is saved to a file and offered up as a suggestion
  to the user. If the user presses enter without typing in anything the last
  used email address is used. If the user enters a new address, it is saved
  for next time we prompt.
  """
  last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
  last_email = ""
  if os.path.exists(last_email_file_name):
    try:
      last_email_file = open(last_email_file_name, "r")
      last_email = last_email_file.readline().strip("\n")
      last_email_file.close()
      prompt += " [%s]" % last_email
    except IOError, e:
      # Best-effort: an unreadable cache file simply means no suggestion.
      pass
  email = raw_input(prompt + ": ").strip()
  if email:
    try:
      last_email_file = open(last_email_file_name, "w")
      last_email_file.write(email)
      last_email_file.close()
    except IOError, e:
      # Best-effort: failing to persist the address is not fatal.
      pass
  else:
    # Empty input means "reuse the cached address".
    email = last_email
  return email
def StatusUpdate(msg):
  """Print a status message to stdout.

  If the module-level 'verbosity' is greater than 0, print the message.

  Args:
    msg: The string to print.
  """
  if verbosity > 0:
    print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit with status 1."""
  print >>sys.stderr, msg
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""

  def __init__(self, url, code, msg, headers, args):
    # The HTTPError body (fp) is deliberately None; the parsed response
    # dict is kept in self.args instead.
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    self.args = args
    # 'Error' is the ClientLogin failure code (e.g. "BadAuthentication").
    self.reason = args["Error"]
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server."""

  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False):
    """Creates a new HttpRpcServer.

    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
    """
    # NOTE(review): extra_headers={} is a mutable default argument shared
    # across calls; kept as-is to preserve the public signature, but callers
    # should not mutate the dict they pass in.
    self.host = host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    # Subclass hook: builds the urllib2 opener used for every request.
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)

  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.

    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()

  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request with the Host override and extra headers."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req

  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.

    Args:
      email: The user's email address
      password: The user's password

    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.

    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = "GOOGLE"
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # The body is newline-separated 'key=value' pairs; skip empty lines.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # 403 carries a parseable ClientLogin error body; surface it as a
        # ClientLoginError so _Authenticate can branch on e.reason.
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise

  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.

    Args:
      auth_token: The authentication token returned by ClientLogin.

    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("http://%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # The opener ignores redirects, so a successful login surfaces here
      # as an HTTPError carrying the 302 response.
      response = e
    # Success means a 302 redirect back to our dummy continue_location.
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True

  def _Authenticate(self):
    """Authenticates the user.

    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to redirect
        us to the URL we provided.

    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response and directs us to
    authenticate ourselves with ClientLogin.
    """
    # Allow up to three attempts; only "BadAuthentication" re-prompts, every
    # other ClientLogin failure breaks out (leaving us unauthenticated).
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        if e.reason == "BadAuthentication":
          print >>sys.stderr, "Invalid username or password."
          continue
        if e.reason == "CaptchaRequired":
          print >>sys.stderr, (
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human. Then try again.")
          break
        if e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
          break
        if e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
          break
        if e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
          break
        if e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          break
        if e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
          break
        if e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
          break
        raise
      self._GetAuthCookie(auth_token)
      return

  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication.  Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()
    # Temporarily override the process-wide default socket timeout.
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "http://%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          if tries > 3:
            raise
          elif e.code == 401:
            # Auth cookie expired or missing: re-authenticate and retry.
            self._Authenticate()
##          elif e.code >= 500 and e.code < 600:
##            # Server Error - try again.
##            continue
          else:
            raise
    finally:
      # Always restore the previous default timeout.
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""

  def _Authenticate(self):
    """Save the cookie jar to disk after authentication (when enabled)."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()

  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    No HTTPRedirectHandler is installed, so redirects surface as errors
    (which _GetAuthCookie relies on to detect the 302 login redirect).

    Returns:
      A urllib2.OpenerDirector object.
    """
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          # A loadable cookie file is assumed to contain valid credentials.
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file to keep credentials private
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Command-line interface: one shared OptionParser, with options grouped by
# concern (logging, review server, issue metadata, patch handling).
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default="codereview.appspot.com",
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
def GetRpcServer(options):
  """Returns an instance of an AbstractRpcServer.

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  rpc_server_class = HttpRpcServer

  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = options.email
    if email is None:
      email = GetEmail("Email (login for uploading to %s)" % options.server)
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)

  # If this is the dev_appserver, use fake authentication.
  host = (options.host or options.server).lower()
  if host == "localhost" or host.startswith("localhost:"):
    email = options.email
    if email is None:
      email = "test@example.com"
      logging.info("Using debug user %s.  Override with --email" % email)
    server = rpc_server_class(
        options.server,
        # Fixed fake credentials; never prompts.
        lambda: (email, "password"),
        host_override=options.host,
        # dev_appserver accepts login via this magic cookie.
        extra_headers={"Cookie":
                       'dev_appserver_login="%s:False"' % email},
        save_cookies=options.save_cookies)
    # Don't try to talk to ClientLogin.
    server.authenticated = True
    return server

  return rpc_server_class(options.server, GetUserCredentials,
                          host_override=options.host,
                          save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
      uploaded as files.
  Returns:
    (content_type, body) ready for httplib.HTTP instance.
  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'
  parts = []
  for (key, value) in fields:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"' % key,
        '',
        value,
    ])
  for (key, filename, value) in files:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"; filename="%s"' %
        (key, filename),
        'Content-Type: %s' % GetContentType(filename),
        '',
        value,
    ])
  # Closing boundary plus a trailing CRLF.
  parts.extend(['--' + BOUNDARY + '--', ''])
  body = CRLF.join(parts)
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, body
def GetContentType(filename):
  """Helper to guess the content-type from the filename."""
  guessed = mimetypes.guess_type(filename)[0]
  # Fall back to the generic binary type when the extension is unknown.
  return guessed or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# Passed as the shell= argument of subprocess.Popen in
# RunShellWithReturnCode below.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns the output from stdout and the return code.

  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).

  Returns:
    Tuple (output, return code)
  """
  logging.info("Running %s", command)
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines)
  if print_output:
    # Echo stdout line-by-line while also accumulating it for the caller.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  p.wait()
  # stderr is read only after the process exits; it is echoed (not returned)
  # when print_output is set.
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False):
  """Runs command and returns its stdout, aborting the program on failure.

  A non-zero exit status always aborts; empty output also aborts unless
  silent_ok is set.
  """
  output, exit_code = RunShellWithReturnCode(
      command, print_output, universal_newlines)
  if exit_code:
    ErrorExit("Got error status from %s:\n%s" % (command, output))
  if not silent_ok and not output:
    ErrorExit("No output from %s" % command)
  return output
class VersionControlSystem(object):
  """Abstract base class providing an interface to the VCS."""

  def __init__(self, options):
    """Constructor.

    Args:
      options: Command line options.
    """
    self.options = options

  def GenerateDiff(self, args):
    """Return the current diff as a string.

    Args:
      args: Extra arguments to pass to the diff command.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def CheckForUnknownFiles(self):
    """Show an "are you sure?" prompt if there are unknown files."""
    unknown_files = self.GetUnknownFiles()
    if unknown_files:
      print "The following files are not added to version control:"
      for line in unknown_files:
        print line
      prompt = "Are you sure to continue?(y/N) "
      answer = raw_input(prompt).strip()
      if answer != "y":
        ErrorExit("User aborted")

  def GetBaseFile(self, filename):
    """Get the content of the upstream version of a file.

    Returns:
      A tuple (base_content, new_content, is_binary, status)
        base_content: The contents of the base file.
        new_content: For text files, this is empty.  For binary files, this is
          the contents of the new file, since the diff output won't contain
          information to reconstruct the current file.
        is_binary: True iff the file is binary.
        status: The status of the file.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetBaseFiles(self, diff):
    """Helper that calls GetBase file for each file in the patch.

    Returns:
      A dictionary that maps from filename to GetBaseFile's tuple. Filenames
      are retrieved based on lines that start with "Index:" or
      "Property changes on:".
    """
    files = {}
    for line in diff.splitlines(True):
      if line.startswith('Index:') or line.startswith('Property changes on:'):
        unused, filename = line.split(':', 1)
        # On Windows if a file has property changes its filename uses '\'
        # instead of '/'.
        filename = filename.strip().replace('\\', '/')
        files[filename] = self.GetBaseFile(filename)
    return files

  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                      files):
    """Uploads the base files (and if necessary, the current ones as well)."""

    def UploadFile(filename, file_id, content, is_binary, status, is_base):
      """Uploads a file to the server."""
      file_too_large = False
      if is_base:
        type = "base"
      else:
        type = "current"
      # Files over MAX_UPLOAD_SIZE are registered with the server but their
      # content is replaced by an empty string.
      if len(content) > MAX_UPLOAD_SIZE:
        print ("Not uploading the %s file for %s because it's too large." %
               (type, filename))
        file_too_large = True
        content = ""
      checksum = md5.new(content).hexdigest()
      if options.verbose > 0 and not file_too_large:
        print "Uploading %s file for %s" % (type, filename)
      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
      form_fields = [("filename", filename),
                     ("status", status),
                     ("checksum", checksum),
                     ("is_binary", str(is_binary)),
                     ("is_current", str(not is_base)),
                    ]
      if file_too_large:
        form_fields.append(("file_too_large", "1"))
      if options.email:
        form_fields.append(("user", options.email))
      ctype, body = EncodeMultipartFormData(form_fields,
                                            [("data", filename, content)])
      response_body = rpc_server.Send(url, body,
                                      content_type=ctype)
      # Any response other than "OK..." is fatal.
      if not response_body.startswith("OK"):
        StatusUpdate(" --> %s" % response_body)
        sys.exit(1)

    # Invert patch_list into a filename -> file_id_str map (side-effect
    # list comprehension kept as-is).
    patches = dict()
    [patches.setdefault(v, k) for k, v in patch_list]
    for filename in patches.keys():
      base_content, new_content, is_binary, status = files[filename]
      file_id_str = patches.get(filename)
      # A "nobase" marker means the base revision is unavailable; strip the
      # marker and skip the base upload.
      if file_id_str.find("nobase") != -1:
        base_content = None
        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
      file_id = int(file_id_str)
      if base_content != None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content != None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)

  def IsImage(self, filename):
    """Returns true if the filename has an image extension."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False
    return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Subversion."""

  def __init__(self, options):
    super(SubversionVCS, self).__init__(options)
    if self.options.revision:
      # Accept "REV" or "REV_START:REV_END".
      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
      if not match:
        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
      self.rev_start = match.group(1)
      self.rev_end = match.group(3)
    else:
      self.rev_start = self.rev_end = None
    # Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
    self.svnls_cache = {}
    # SVN base URL is required to fetch files deleted in an older revision.
    # Result is cached to not guess it over and over again in GetBaseFile().
    required = self.options.download_base or self.options.revision is not None
    self.svn_base = self._GuessBase(required)

  def GuessBase(self, required):
    """Wrapper for _GuessBase."""
    return self.svn_base

  def _GuessBase(self, required):
    """Returns the SVN base URL.

    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    info = RunShell(["svn", "info"])
    for line in info.splitlines():
      words = line.split()
      if len(words) == 2 and words[0] == "URL:":
        url = words[1]
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        username, netloc = urllib.splituser(netloc)
        if username:
          logging.info("Removed username from base URL")
        # Known hosting services get hand-crafted *checkout* URLs; anything
        # else falls through to the generic reconstruction below.
        if netloc.endswith("svn.python.org"):
          if netloc == "svn.python.org":
            if path.startswith("/projects/"):
              path = path[9:]
          elif netloc != "pythondev@svn.python.org":
            ErrorExit("Unrecognized Python URL: %s" % url)
          base = "http://svn.python.org/view/*checkout*%s/" % path
          logging.info("Guessed Python base = %s", base)
        elif netloc.endswith("svn.collab.net"):
          if path.startswith("/repos/"):
            path = path[6:]
          base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
          logging.info("Guessed CollabNet base = %s", base)
        elif netloc.endswith(".googlecode.com"):
          path = path + "/"
          base = urlparse.urlunparse(("http", netloc, path, params,
                                      query, fragment))
          logging.info("Guessed Google Code base = %s", base)
        else:
          path = path + "/"
          base = urlparse.urlunparse((scheme, netloc, path, params,
                                      query, fragment))
          logging.info("Guessed base = %s", base)
        return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None

  def GenerateDiff(self, args):
    """Returns the output of "svn diff", exiting if it contains no patches."""
    cmd = ["svn", "diff"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(args)
    data = RunShell(cmd)
    count = 0
    for line in data.splitlines():
      if line.startswith("Index:") or line.startswith("Property changes on:"):
        count += 1
        logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from svn diff")
    return data

  def _CollapseKeywords(self, content, keyword_str):
    """Collapses SVN keywords."""
    # svn cat translates keywords but svn diff doesn't. As a result of this
    # behavior patching.PatchChunks() fails with a chunk mismatch error.
    # This part was originally written by the Review Board development team
    # who had the same problem (http://reviews.review-board.org/r/276/).
    # Mapping of keywords to known aliases
    svn_keywords = {
        # Standard keywords
        'Date': ['Date', 'LastChangedDate'],
        'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
        'Author': ['Author', 'LastChangedBy'],
        'HeadURL': ['HeadURL', 'URL'],
        'Id': ['Id'],

        # Aliases
        'LastChangedDate': ['LastChangedDate', 'Date'],
        'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
        'LastChangedBy': ['LastChangedBy', 'Author'],
        'URL': ['URL', 'HeadURL'],
    }

    def repl(m):
      # Preserve the fixed-width form "$Kw:: ...$" so line lengths (and
      # therefore the diff) stay consistent.
      if m.group(2):
        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
      return "$%s$" % m.group(1)

    keywords = [keyword
                for name in keyword_str.split(" ")
                for keyword in svn_keywords.get(name, [])]
    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)

  def GetUnknownFiles(self):
    """Returns the lines "svn status" reports as unversioned ('?')."""
    status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files

  def ReadFile(self, filename):
    """Returns the contents of a file."""
    file = open(filename, 'rb')
    result = ""
    try:
      result = file.read()
    finally:
      file.close()
    return result

  def GetStatus(self, filename):
    """Returns the status of a file."""
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n". See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      old_files, new_files = self.svnls_cache[dirname]
      # Synthesized statuses are padded to the width of real "svn status"
      # output so the status[3] / status[0:5] accesses in GetBaseFile work.
      if relfilename in old_files and relfilename not in new_files:
        status = "D         "
      elif relfilename in old_files and relfilename in new_files:
        status = "M         "
      else:
        status = "A         "
    return status

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status[0:5]) for a
    file, fetching base contents via "svn cat" as needed."""
    status = self.GetStatus(filename)
    base_content = None
    new_content = None

    # If a file is copied its status will be "A  +", which signifies
    # "addition-with-history". See "svn st" for more information. We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = mimetype and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      get_base = False
      is_binary = mimetype and not mimetype.startswith("text/")
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True, silent_ok=True)
        else:
          base_content = ""
      else:
        get_base = True

      if get_base:
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content = RunShell(["svn", "cat", filename],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        if not is_binary:
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          if keywords and not returncode:
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""

  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> hash of base file.
    self.base_hashes = {}

  def GenerateDiff(self, extra_args):
    """Returns "git diff" output converted to svn-style diff format."""
    # This is more complicated than svn's GenerateDiff because we must convert
    # the diff output to include an svn-style "Index:" line as well as record
    # the hashes of the base files, so we can upload them along with our diff.
    if self.options.revision:
      extra_args = [self.options.revision] + extra_args
    gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args)
    svndiff = []
    filecount = 0
    filename = None
    for line in gitdiff.splitlines():
      match = re.match(r"diff --git a/(.*) b/.*$", line)
      if match:
        filecount += 1
        filename = match.group(1)
        svndiff.append("Index: %s\n" % filename)
      else:
        # The "index" line in a git diff looks like this (long hashes elided):
        # index 82c0d44..b2cee3f 100755
        # We want to save the left hash, as that identifies the base file.
        match = re.match(r"index (\w+)\.\.", line)
        if match:
          self.base_hashes[filename] = match.group(1)
      svndiff.append(line + "\n")
    if not filecount:
      ErrorExit("No valid patches found in output from git diff")
    return "".join(svndiff)

  def GetUnknownFiles(self):
    """Returns files git sees as untracked (not ignored)."""
    status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                      silent_ok=True)
    return status.splitlines()

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) using the
    base hash recorded by GenerateDiff."""
    hash = self.base_hashes[filename]
    base_content = None
    new_content = None
    is_binary = False
    if hash == "0" * 40:  # All-zero hash indicates no base file.
      status = "A"
      base_content = ""
    else:
      status = "M"
      base_content, returncode = RunShellWithReturnCode(["git", "show", hash])
      if returncode:
        ErrorExit("Got error status from 'git show %s'" % hash)
    return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # Default base is the working copy's parent revision.
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), filename
    return filename[len(self.subdir):].lstrip(r"\/")

  def GenerateDiff(self, extra_args):
    """Returns "hg diff --git" output converted to svn-style diff format."""
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                      silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) via hg cat."""
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    if len(out) > 1:
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    else:
      status, _ = out[0].split(' ', 1)
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True, universal_newlines=False)
    # Only image binaries keep their new content for upload.
    if not is_binary or not self.IsImage(relpath):
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
      pertaining to filename.
  """
  pieces = []
  current_name = None
  current_lines = []
  for text_line in data.splitlines(True):
    next_name = None
    if text_line.startswith('Index:'):
      _, next_name = text_line.split(':', 1)
      next_name = next_name.strip()
    elif text_line.startswith('Property changes on:'):
      _, prop_name = text_line.split(':', 1)
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows. Make them the same
      # otherwise the file shows up twice.
      prop_name = prop_name.strip().replace('\\', '/')
      if prop_name != current_name:
        # File has property changes but no modifications, create a new diff.
        next_name = prop_name
    if next_name:
      # Flush the piece collected so far and start a new one.
      if current_name and current_lines:
        pieces.append((current_name, ''.join(current_lines)))
      current_name = next_name
      current_lines = [text_line]
      continue
    if current_lines is not None:
      current_lines.append(text_line)
  if current_name and current_lines:
    pieces.append((current_name, ''.join(current_lines)))
  return pieces
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.

  Returns a list of [patch_key, filename] for each file.
  """
  patches = SplitPatch(data)
  rv = []
  for patch in patches:
    # Oversized patches are skipped, not uploaded.
    if len(patch[1]) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + patch[0] +
             " because the file is too large.")
      continue
    form_fields = [("filename", patch[0])]
    if not options.download_base:
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", patch[1])]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    print "Uploading patch for " + patch[0]
    response_body = rpc_server.Send(url, body, content_type=ctype)
    # Server replies "OK\n<patch_key>" on success.
    lines = response_body.splitlines()
    if not lines or lines[0] != "OK":
      StatusUpdate("  --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], patch[0]])
  return rv
def GuessVCS(options):
  """Helper to guess the version control system.

  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns an instance of the appropriate class. Exit with an
  error if we can't figure it out.

  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  try:
    out, returncode = RunShellWithReturnCode(["hg", "root"])
    if returncode == 0:
      return MercurialVCS(options, out.strip())
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have hg installed.
      raise
  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return SubversionVCS(options)
  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                              "--is-inside-work-tree"])
    if returncode == 0:
      return GitVCS(options)
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have git installed.
      raise
  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def RealMain(argv, data=None):
  """The real main function.

  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().

  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  os.environ['LC_ALL'] = 'C'
  options, args = parser.parse_args(argv[1:])
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)
  vcs = GuessVCS(options)
  if isinstance(vcs, SubversionVCS):
    # base field is only allowed for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    base = vcs.GuessBase(options.download_base)
  else:
    base = None
  # NOTE(review): this assignment looks redundant (download_base is already
  # truthy inside the branch) -- kept as-is to preserve behavior.
  if not base and options.download_base:
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  files = vcs.GetBaseFiles(data)
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options)
  form_fields = [("subject", message)]
  if base:
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  if options.reviewers:
    # Shallow sanity check: addresses with '@' must have exactly one dot
    # after it.
    for reviewer in options.reviewers.split(','):
      if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      if "@" in cc and not cc.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % cc)
    form_fields.append(("cc", options.cc))
  description = options.description
  if options.description_file:
    if options.description:
      ErrorExit("Can't specify description and description_file")
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5.new(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  if len(data) > MAX_UPLOAD_SIZE:
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  if not options.download_base or not uploaded_diff_file:
    # Server response: message line, patchset id line, then patch id lines.
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  StatusUpdate(msg)
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    sys.exit(0)
  # The issue id is the last path component of the issue URL in msg.
  issue = msg[msg.rfind("/")+1:]
  if not uploaded_diff_file:
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result
  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
  if options.send_mail:
    rpc_server.Send("/" + issue + "/mail", payload="")
  return issue, patchset
def main():
  """Entry point: runs RealMain and maps Ctrl-C to a clean exit."""
  try:
    RealMain(sys.argv)
  except KeyboardInterrupt:
    # Python 2 bare print: emit a newline before the status message.
    print
    StatusUpdate("Interrupted.")
    sys.exit(1)


if __name__ == "__main__":
  main()
| 51,024 | 35.761527 | 80 | py |
spegg | spegg-master/tests/googletest/googletest/scripts/pump.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
# Regexes recognizing Pump meta-language tokens, paired with the token type
# each produces.  Order matters: FindFirstInLine keeps the earliest match and,
# on ties at the same column, the first entry in this table -- so the specific
# '$xxx' keywords must precede the bare '$' entry.
TOKEN_TABLE = [
    (re.compile(r'\$var\s+'), '$var'),
    (re.compile(r'\$elif\s+'), '$elif'),
    (re.compile(r'\$else\s+'), '$else'),
    (re.compile(r'\$for\s+'), '$for'),
    (re.compile(r'\$if\s+'), '$if'),
    (re.compile(r'\$range\s+'), '$range'),
    (re.compile(r'\$[_A-Za-z]\w*'), '$id'),
    (re.compile(r'\$\(\$\)'), '$($)'),
    (re.compile(r'\$'), '$'),
    (re.compile(r'\[\[\n?'), '[['),
    (re.compile(r'\]\]\n?'), ']]'),
    ]
class Cursor:
  """Represents a position (line and column) in a text file."""

  def __init__(self, line=-1, column=-1):
    self.line = line
    self.column = column

  def __eq__(self, rhs):
    return (self.line, self.column) == (rhs.line, rhs.column)

  def __ne__(self, rhs):
    return not self == rhs

  def __lt__(self, rhs):
    # Compare line first, then column within the same line.
    return (self.line, self.column) < (rhs.line, rhs.column)

  def __le__(self, rhs):
    return self < rhs or self == rhs

  def __gt__(self, rhs):
    return rhs < self

  def __ge__(self, rhs):
    return rhs <= self

  def __str__(self):
    if self == Eof():
      return 'EOF'
    return '%s(%s)' % (self.line + 1, self.column)

  def __add__(self, offset):
    return Cursor(self.line, self.column + offset)

  def __sub__(self, offset):
    return Cursor(self.line, self.column - offset)

  def Clone(self):
    """Returns a copy of self."""
    return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
  """Returns the special cursor to denote the end-of-file."""
  eof_cursor = Cursor(-1, -1)
  return eof_cursor
class Token:
  """Represents a token in a Pump source file."""

  def __init__(self, start=None, end=None, value=None, token_type=None):
    # Missing cursors default to the end-of-file sentinel.
    self.start = Eof() if start is None else start
    self.end = Eof() if end is None else end
    self.value = value
    self.token_type = token_type

  def __str__(self):
    return 'Token @%s: \'%s\' type=%s' % (
        self.start, self.value, self.token_type)

  def Clone(self):
    """Returns a copy of self."""
    return Token(self.start.Clone(), self.end.Clone(), self.value,
                 self.token_type)
def StartsWith(lines, pos, string):
  """Returns True iff the given position in lines starts with 'string'."""
  remainder = lines[pos.line][pos.column:]
  return remainder.startswith(string)
def FindFirstInLine(line, token_table):
  """Returns (start_column, length, token_type) for the earliest regex match
  in line, or None if no table entry matches.  On ties at the same column
  the first table entry wins."""
  best = None  # (start_column, match_length, token_type)
  for regex, token_type in token_table:
    m = regex.search(line)
    if m and (best is None or m.start() < best[0]):
      best = (m.start(), m.end() - m.start(), token_type)
  return best
def FindFirst(lines, token_table, cursor):
  """Finds the first occurrence of any token regex at or after cursor.

  Returns a Token, or None if nothing matches through end-of-input.
  """
  origin = cursor.Clone()
  for line_number, line in enumerate(lines[origin.line:], origin.line):
    on_first_line = (line_number == origin.line)
    if on_first_line:
      # Only search past the cursor column on the starting line.
      line = line[origin.column:]
    hit = FindFirstInLine(line, token_table)
    if hit:
      start_column, length, token_type = hit
      if on_first_line:
        # Translate back into whole-line column coordinates.
        start_column += origin.column
      token_start = Cursor(line_number, start_column)
      return MakeToken(lines, token_start, token_start + length, token_type)
  # No token found anywhere at or after the cursor.
  return None
def SubString(lines, start, end):
  """Returns the text between cursors start and end ('' if start >= end)."""
  if end == Eof():
    # Normalize EOF to a concrete position just past the last character.
    end = Cursor(len(lines) - 1, len(lines[-1]))
  if start >= end:
    return ''
  if start.line == end.line:
    return lines[start.line][start.column:end.column]
  pieces = [lines[start.line][start.column:]]
  pieces.extend(lines[start.line + 1:end.line])
  pieces.append(lines[end.line][:end.column])
  return ''.join(pieces)
def StripMetaComments(str):
  """Strip meta comments ($$ ...) from each line in the given string."""
  # First, completely remove a meta-comment-only line at the very start of
  # the string (the '^' anchor is not MULTILINE).  Then strip trailing meta
  # comments -- the leading '\s*' also swallows the newline before a
  # comment-only line, removing it entirely.
  without_leading = re.sub(r'^\s*\$\$.*\n', '', str)
  return re.sub(r'\s*\$\$.*', '', without_leading)
def MakeToken(lines, start, end, token_type):
  """Creates a new instance of Token covering [start, end) in lines."""
  value = SubString(lines, start, end)
  return Token(start, end, value, token_type)
def ParseToken(lines, pos, regex, token_type):
  """Returns the token matched by regex starting exactly at pos; exits with
  an error naming token_type if the match is absent or not anchored there."""
  line = lines[pos.line][pos.column:]
  m = regex.search(line)
  # 'not m.start()' means the match begins at pos itself.
  if m and not m.start():
    return MakeToken(lines, pos, pos + m.end(), token_type)
  else:
    print 'ERROR: %s expected at %s.' % (token_type, pos)
    sys.exit(1)
# Lexical helpers shared by the tokenizer below.
ID_REGEX = re.compile(r'[_A-Za-z]\w*')             # Pump identifier.
EQ_REGEX = re.compile(r'=')                        # '=' in $var definitions.
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')  # Up to EOL or $$ comment.
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')                # Separator in $range.
def Skip(lines, pos, regex):
  """Returns the position just past a regex match anchored at pos, or pos
  itself when the regex does not match there."""
  remainder = lines[pos.line][pos.column:]
  m = re.search(regex, remainder)
  if m and m.start() == 0:
    return pos + m.end()
  return pos
def SkipUntil(lines, pos, regex, token_type):
  """Returns the position of the first regex match at or after pos on the
  current line; exits with an error naming token_type if there is none."""
  line = lines[pos.line][pos.column:]
  m = re.search(regex, line)
  if m:
    return pos + m.start()
  else:
    # token_type is only used to make the error message readable.
    print ('ERROR: %s expected on line %s after column %s.' %
           (token_type, pos.line + 1, pos.column))
    sys.exit(1)
def ParseExpTokenInParens(lines, pos):
  """Parses a parenthesized expression starting at pos, balancing nested
  parens, and returns it as an 'exp' token (parens included)."""

  def ParseInParens(pos):
    # Consume optional whitespace, '(', the balanced interior, then ')'.
    pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
    pos = Skip(lines, pos, r'\(')
    pos = Parse(pos)
    pos = Skip(lines, pos, r'\)')
    return pos

  def Parse(pos):
    # Scan to the next paren; recurse on '(' to balance nesting, stop at ')'.
    pos = SkipUntil(lines, pos, r'\(|\)', ')')
    if SubString(lines, pos, pos + 1) == '(':
      pos = Parse(pos + 1)
      pos = Skip(lines, pos, r'\)')
      return Parse(pos)
    else:
      return pos

  start = pos.Clone()
  pos = ParseInParens(pos)
  return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
  """Returns token with one trailing newline removed from its value, if any;
  otherwise returns token unchanged."""
  if not token.value.endswith('\n'):
    return token
  return Token(token.start, token.end, token.value[:-1], token.token_type)
def TokenizeLines(lines, pos):
  """Generator yielding the stream of Pump tokens in lines starting at pos.
  Raw text between meta tokens is emitted as 'code' tokens."""
  while True:
    found = FindFirst(lines, TOKEN_TABLE, pos)
    if not found:
      # No more meta tokens: the rest of the input is one final code token.
      yield MakeToken(lines, pos, Eof(), 'code')
      return

    if found.start == pos:
      prev_token = None
      prev_token_rstripped = None
    else:
      # Text between the current position and the found token is raw code.
      prev_token = MakeToken(lines, pos, found.start, 'code')
      prev_token_rstripped = RStripNewLineFromToken(prev_token)

    if found.token_type == '$var':
      # $var ID = EXPRESSION  |  $var ID = [[ CODE ]]
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

      eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
      yield eq_token
      pos = Skip(lines, eq_token.end, r'\s*')

      if SubString(lines, pos, pos + 2) != '[[':
        # The value is a one-line expression; consume the rest of the line.
        exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
        yield exp_token
        pos = Cursor(exp_token.end.line + 1, 0)
    elif found.token_type == '$for':
      # $for ID SEPARATOR [[ CODE ]]
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
    elif found.token_type == '$range':
      # $range ID EXP..EXP
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

      dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
      yield MakeToken(lines, pos, dots_pos, 'exp')
      yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
      pos = dots_pos + 2
      # The upper bound runs to the end of the line.
      new_pos = Cursor(pos.line + 1, 0)
      yield MakeToken(lines, pos, new_pos, 'exp')
      pos = new_pos
    elif found.token_type == '$':
      # $(EXPRESSION) -- expression substitution.
      if prev_token:
        yield prev_token
      yield found
      exp_token = ParseExpTokenInParens(lines, found.end)
      yield exp_token
      pos = exp_token.end
    elif (found.token_type == ']]' or found.token_type == '$if' or
          found.token_type == '$elif' or found.token_type == '$else'):
      # Structural tokens: drop the trailing newline of the preceding code.
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      pos = found.end
    else:
      if prev_token:
        yield prev_token
      yield found
      pos = found.end
def Tokenize(s):
  """A generator that yields the tokens in the given string."""
  if s == '':
    # Empty input produces no tokens at all.
    return
  source_lines = s.splitlines(True)
  for token in TokenizeLines(source_lines, Cursor(0, 0)):
    yield token
class CodeNode:
  """AST node: an ordered list of atomic code nodes forming a code section."""
  def __init__(self, atomic_code_list=None):
    self.atomic_code = atomic_code_list
class VarNode:
  """AST node for a $var definition: identifier bound to atomic code."""
  def __init__(self, identifier=None, atomic_code=None):
    self.identifier = identifier
    self.atomic_code = atomic_code
class RangeNode:
  """AST node for a $range declaration: identifier ranges over the integer
  values of exp1 .. exp2 (inclusive, see RunAtomicCode)."""

  def __init__(self, identifier=None, exp1=None, exp2=None):
    self.identifier = identifier
    self.exp1 = exp1
    self.exp2 = exp2
class ForNode:
  """AST node for a $for loop: identifier iterates its declared range,
  code is the loop body, and sep (if any) is emitted between iterations."""

  def __init__(self, identifier=None, sep=None, code=None):
    self.identifier = identifier
    self.sep = sep
    self.code = code
class ElseNode:
  """AST node wrapping an else branch."""

  def __init__(self, else_branch=None):
    self.else_branch = else_branch
class IfNode:
  """AST node for an $if construct: exp is the condition expression,
  else_branch may be None."""

  def __init__(self, exp=None, then_branch=None, else_branch=None):
    self.exp = exp
    self.then_branch = then_branch
    self.else_branch = else_branch
class RawCodeNode:
  """AST node for literal output text (a 'code' token) emitted verbatim."""

  def __init__(self, token=None):
    self.raw_code = token
class LiteralDollarNode:
  """AST node for the '$($)' escape, which produces a literal '$'."""

  def __init__(self, token):
    self.token = token
class ExpNode:
  """AST node for a meta expression: the source token plus its translation
  into a Python expression (evaluated later by Env.EvalExp)."""

  def __init__(self, token, python_exp):
    self.token = token
    self.python_exp = python_exp
def PopFront(a_list):
  """Removes and returns the first element of a_list (in place)."""
  return a_list.pop(0)
def PushFront(a_list, elem):
  """Inserts elem at the front of a_list (in place)."""
  a_list.insert(0, elem)
def PopToken(a_list, token_type=None):
  """Pops and returns the first token of a_list.

  If token_type is given and the popped token's type differs, prints an
  error and aborts the whole script (Pump treats this as a syntax error).
  """
  token = PopFront(a_list)
  if token_type is not None and token.token_type != token_type:
    print 'ERROR: %s expected at %s' % (token_type, token.start)
    print 'ERROR: %s found instead' % (token,)
    sys.exit(1)
  return token
def PeekToken(a_list):
  """Returns the first element of a_list without removing it, or None if
  the list is empty."""
  return a_list[0] if a_list else None
def ParseExpNode(token):
  """Builds an ExpNode from a Pump expression token.

  Every identifier in the expression is a meta variable; it is rewritten
  as a deferred environment lookup, evaluated later by Env.EvalExp (where
  'self' is the Env instance).
  """
  lookup_form = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
  return ExpNode(token, lookup_form)
def ParseElseNode(tokens):
  """Parses the optional $else/$elif continuation of an $if construct.

  Consumes the tokens it uses from the front of `tokens` and returns the
  code node for the else branch, or None if there is no else branch.
  """

  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  lookahead = PeekToken(tokens)
  if not lookahead:
    return None
  if lookahead.token_type == '$else':
    Pop('$else')
    Pop('[[')
    branch = ParseCodeNode(tokens)
    Pop(']]')
    return branch
  if lookahead.token_type == '$elif':
    # $elif desugars into a nested $if inside the else branch.
    Pop('$elif')
    condition = Pop('code')
    Pop('[[')
    branch = ParseCodeNode(tokens)
    Pop(']]')
    nested_else = ParseElseNode(tokens)
    return CodeNode([IfNode(ParseExpNode(condition), branch, nested_else)])
  if not lookahead.value.strip():
    # Whitespace-only raw code between the $if body and a following
    # $else/$elif is skipped.
    Pop('code')
    return ParseElseNode(tokens)
  return None
def ParseAtomicCodeNode(tokens):
  """Parses one atomic Pump construct from the front of `tokens`.

  Returns the corresponding AST node, or None (after pushing the token
  back) if the next token does not start an atomic construct.
  """

  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  first = PopFront(tokens)
  kind = first.token_type
  if kind == 'code':
    return RawCodeNode(first)
  if kind == '$var':
    name_token = Pop('id')
    Pop('=')
    lookahead = PeekToken(tokens)
    if lookahead.token_type == 'exp':
      # Short form: $var id = expression
      value_token = Pop()
      return VarNode(name_token, ParseExpNode(value_token))
    # Long form: $var id = [[ code ]]
    Pop('[[')
    body = ParseCodeNode(tokens)
    Pop(']]')
    return VarNode(name_token, body)
  if kind == '$for':
    name_token = Pop('id')
    lookahead = PeekToken(tokens)
    if lookahead.token_type == 'code':
      # Optional separator text between loop iterations.
      separator = lookahead
      Pop('code')
    else:
      separator = None
    Pop('[[')
    body = ParseCodeNode(tokens)
    Pop(']]')
    return ForNode(name_token, separator, body)
  if kind == '$if':
    condition = Pop('code')
    Pop('[[')
    then_branch = ParseCodeNode(tokens)
    Pop(']]')
    else_branch = ParseElseNode(tokens)
    return IfNode(ParseExpNode(condition), then_branch, else_branch)
  if kind == '$range':
    name_token = Pop('id')
    low = Pop('exp')
    Pop('..')
    high = Pop('exp')
    return RangeNode(name_token, ParseExpNode(low), ParseExpNode(high))
  if kind == '$id':
    # Strip the leading '$' so the identifier itself becomes the expression.
    return ParseExpNode(Token(first.start + 1, first.end, first.value[1:], 'id'))
  if kind == '$($)':
    return LiteralDollarNode(first)
  if kind == '$':
    return ParseExpNode(Pop('exp'))
  if kind == '[[':
    body = ParseCodeNode(tokens)
    Pop(']]')
    return body
  # Not the start of an atomic construct: put the token back and give up.
  PushFront(tokens, first)
  return None
def ParseCodeNode(tokens):
  """Parses a maximal sequence of atomic constructs into a CodeNode."""
  nodes = []
  while tokens:
    node = ParseAtomicCodeNode(tokens)
    if not node:
      break
    nodes.append(node)
  return CodeNode(nodes)
def ParseToAST(pump_src_text):
  """Converts the given Pump source text into an AST (a CodeNode tree)."""
  token_list = list(Tokenize(pump_src_text))
  return ParseCodeNode(token_list)
class Env:
  """The environment a Pump program runs in: stacks of meta-variable and
  range bindings (the innermost binding for a name is found first)."""

  def __init__(self):
    # Stacks of (name, value) and (name, lower, upper) tuples.
    self.variables = []
    self.ranges = []

  def Clone(self):
    """Returns a shallow copy; pushes/pops on the clone don't affect self."""
    clone = Env()
    clone.variables = self.variables[:]
    clone.ranges = self.ranges[:]
    return clone

  def PushVariable(self, var, value):
    """Binds var to value; the newest binding shadows older ones."""
    # If value looks like an int, store it as an int.
    try:
      int_value = int(value)
      if ('%s' % int_value) == value:
        value = int_value
    except Exception:
      pass
    self.variables[:0] = [(var, value)]

  def PopVariable(self):
    # Removes the innermost variable binding.
    self.variables[:1] = []

  def PushRange(self, var, lower, upper):
    """Binds var to the inclusive integer range [lower, upper]."""
    self.ranges[:0] = [(var, lower, upper)]

  def PopRange(self):
    # Removes the innermost range binding.
    self.ranges[:1] = []

  def GetValue(self, identifier):
    """Returns the innermost value bound to identifier; aborts if unbound."""
    for (var, value) in self.variables:
      if identifier == var:
        return value
    print 'ERROR: meta variable %s is undefined.' % (identifier,)
    sys.exit(1)

  def EvalExp(self, exp):
    """Evaluates the given ExpNode's translated Python expression against
    this environment (the expression references 'self'); aborts the script
    on any evaluation error.

    NOTE: uses eval(); the expressions come from the Pump source file
    being processed, not from end users.
    """
    try:
      result = eval(exp.python_exp)
    except Exception, e:
      print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
      print ('ERROR: failed to evaluate meta expression %s at %s' %
             (exp.python_exp, exp.token.start))
      sys.exit(1)
    return result

  def GetRange(self, identifier):
    """Returns the (lower, upper) range bound to identifier; aborts if
    unbound."""
    for (var, lower, upper) in self.ranges:
      if identifier == var:
        return (lower, upper)
    print 'ERROR: range %s is undefined.' % (identifier,)
    sys.exit(1)
class Output:
  """Accumulates generated text in a single string buffer."""

  def __init__(self):
    self.string = ''

  def GetLastLine(self):
    """Returns the text after the last newline, or '' if the buffer
    contains no newline at all."""
    _, newline, tail = self.string.rpartition('\n')
    if newline:
      return tail
    return ''

  def Append(self, s):
    """Appends s to the buffer."""
    self.string += s
def RunAtomicCode(env, node, output):
  """Executes one AST node against env, appending generated text to output.

  Aborts the whole script on an unknown node type.
  """
  if isinstance(node, VarNode):
    # $var: evaluate the definition in a child env, then bind the result
    # in the *current* env so later siblings can see it.
    identifier = node.identifier.value.strip()
    result = Output()
    RunAtomicCode(env.Clone(), node.atomic_code, result)
    value = result.string
    env.PushVariable(identifier, value)
  elif isinstance(node, RangeNode):
    # $range: record the loop variable's inclusive integer bounds.
    identifier = node.identifier.value.strip()
    lower = int(env.EvalExp(node.exp1))
    upper = int(env.EvalExp(node.exp2))
    env.PushRange(identifier, lower, upper)
  elif isinstance(node, ForNode):
    # $for: iterate the loop variable over its declared range, emitting
    # the separator between (but not after) iterations.
    identifier = node.identifier.value.strip()
    if node.sep is None:
      sep = ''
    else:
      sep = node.sep.value
    (lower, upper) = env.GetRange(identifier)
    for i in range(lower, upper + 1):
      new_env = env.Clone()
      new_env.PushVariable(identifier, i)
      RunCode(new_env, node.code, output)
      if i != upper:
        output.Append(sep)
  elif isinstance(node, RawCodeNode):
    output.Append(node.raw_code.value)
  elif isinstance(node, IfNode):
    cond = env.EvalExp(node.exp)
    if cond:
      RunCode(env.Clone(), node.then_branch, output)
    elif node.else_branch is not None:
      RunCode(env.Clone(), node.else_branch, output)
  elif isinstance(node, ExpNode):
    # Bare $expression: emit its value as text.
    value = env.EvalExp(node)
    output.Append('%s' % (value,))
  elif isinstance(node, LiteralDollarNode):
    output.Append('$')
  elif isinstance(node, CodeNode):
    RunCode(env.Clone(), node, output)
  else:
    print 'BAD'
    print node
    sys.exit(1)
def RunCode(env, code_node, output):
  """Executes every atomic node of code_node in order against env,
  appending the generated text to output."""
  for child in code_node.atomic_code:
    RunAtomicCode(env, child, output)
def IsSingleLineComment(cur_line):
  """Returns True iff the line contains a '//' comment marker."""
  return cur_line.find('//') >= 0
def IsInPreprocessorDirective(prev_lines, cur_line):
  """Returns a truthy value iff cur_line is part of a preprocessor
  directive (either starts one, or continues one from prev_lines)."""
  stripped = cur_line.lstrip()
  if stripped.startswith('#'):
    return True
  # A directive also continues from a previous line ending in a backslash.
  return prev_lines and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
  """Wraps a line containing a '//' comment so it fits in 80 columns,
  appending the resulting line(s) to the output list.

  Any code before the comment is emitted first on its own line; the
  wrapped comment is indented to match that code.
  """
  marker_pos = line.find('//')
  code_part = line[:marker_pos].rstrip()
  if code_part:
    output.append(code_part)
    indent = len(code_part) - len(code_part.lstrip())
  else:
    indent = marker_pos
  prefix = ' ' * indent + '// '
  limit = 80 - len(prefix)
  comment_text = line[marker_pos + 2:].strip()
  # Split on word boundaries so wrapping never breaks inside a word.
  pieces = [p for p in re.split(r'(\w+\W*)', comment_text) if p]
  pending = ''
  for piece in pieces:
    if len((pending + piece).rstrip()) < limit:
      pending += piece
    else:
      if pending.strip():
        output.append(prefix + pending.rstrip())
      pending = piece.lstrip()
  if pending.strip():
    output.append(prefix + pending.strip())
def WrapCode(line, line_concat, output):
  """Wraps one over-long code line to 80 columns, appending the pieces to
  the output list.

  line_concat (e.g. ' \\' for preprocessor directives, '' for plain code)
  is appended to every emitted piece except the last; continuation lines
  are indented 4 extra spaces.
  """
  indent = len(line) - len(line.lstrip())
  prefix = indent*' ' # Prefix of the current line
  max_len = 80 - indent - len(line_concat) # Maximum length of the current line
  new_prefix = prefix + 4*' ' # Prefix of a continuation line
  new_max_len = max_len - 4 # Maximum length of a continuation line
  # Prefers to wrap a line after a ',' or ';'.
  segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
  cur_line = '' # The current line without leading spaces.
  for seg in segs:
    # If the line is still too long, wrap at a space.
    while cur_line == '' and len(seg.strip()) > max_len:
      seg = seg.lstrip()
      split_at = seg.rfind(' ', 0, max_len)
      output.append(prefix + seg[:split_at].strip() + line_concat)
      seg = seg[split_at + 1:]
      prefix = new_prefix
      max_len = new_max_len
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line = (cur_line + seg).lstrip()
    else:
      output.append(prefix + cur_line.rstrip() + line_concat)
      prefix = new_prefix
      max_len = new_max_len
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
  """Wraps a long preprocessor directive, continuing each piece with ' \\'."""
  WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
  """Wraps a long plain code line with no continuation marker."""
  WrapCode(line, '', output)
def IsMultiLineIWYUPragma(line):
  """Returns a truthy match iff line contains a '/* IWYU pragma: ' marker
  (such lines must not be wrapped)."""
  return re.search(r'/\* IWYU pragma: ', line)
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
  """Returns a truthy match iff line is a header guard, an #include, or a
  one-line IWYU pragma (all of which may legitimately exceed 80 columns)."""
  guard = re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line)
  if guard:
    return guard
  include = re.match(r'^#include\s', line)
  if include:
    return include
  # Don't break IWYU pragmas, either; that causes iwyu.py problems.
  return re.search(r'// IWYU pragma: ', line)
def WrapLongLine(line, output):
  """Appends line to output, wrapping it to fit in 80 columns when it is
  too long and wrapping is allowed for its kind."""
  line = line.rstrip()
  if len(line) <= 80:
    output.append(line)
    return
  if IsSingleLineComment(line) or IsInPreprocessorDirective(output, line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      # The style guide made an exception to allow long header guard lines,
      # includes and IWYU pragmas.
      output.append(line)
    elif IsSingleLineComment(line):
      WrapComment(line, output)
    else:
      WrapPreprocessorDirective(line, output)
  elif IsMultiLineIWYUPragma(line):
    output.append(line)
  else:
    WrapPlainCode(line, output)
def BeautifyCode(string):
  """Wraps long lines and strips trailing whitespace; returns the
  beautified text, always ending in a single newline."""
  wrapped = []
  for line in string.splitlines():
    WrapLongLine(line, wrapped)
  return '\n'.join(line.rstrip() for line in wrapped) + '\n'
def ConvertFromPumpSource(src_text):
  """Return the text generated from the given Pump source text."""
  # Parse (after dropping $$ meta comments), interpret, then re-wrap.
  ast = ParseToAST(StripMetaComments(src_text))
  generated = Output()
  RunCode(Env(), ast, generated)
  return BeautifyCode(generated.string)
def main(argv):
  """Converts the .pump file named by the last argument.

  The result is written next to the input (dropping the '.pump'
  extension), or to stdout when the input doesn't end in '.pump'.
  """
  if len(argv) == 1:
    print __doc__
    sys.exit(1)
  file_path = argv[-1]
  output_str = ConvertFromPumpSource(file(file_path, 'r').read())
  if file_path.endswith('.pump'):
    output_file_path = file_path[:-5]
  else:
    # '-' is the conventional name for stdout.
    output_file_path = '-'
  if output_file_path == '-':
    # Trailing comma: don't add a newline after the generated text.
    print output_str,
  else:
    output_file = file(output_file_path, 'w')
    output_file.write('// This file was GENERATED by command:\n')
    output_file.write('// %s %s\n' %
                      (os.path.basename(__file__), os.path.basename(file_path)))
    output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
    output_file.write(output_str)
    output_file.close()
if __name__ == '__main__':
  # Command-line entry point: pump.py <file.pump>.
  main(sys.argv)
| 23,673 | 26.656542 | 80 | py |
spegg | spegg-master/tests/googletest/googletest/scripts/gen_gtest_pred_impl.py | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
import time
# Where this script is; the output paths below are relative to it.
SCRIPT_DIR = os.path.dirname(sys.argv[0])
# Where to store the generated header.
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')
# Where to store the generated unit test.
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
  """Returns the preamble for the header file.

  Args:
    n:  the maximum arity of the predicate macros to be generated.
  """

  # A map that defines the values used in the preamble template.
  # The template below is %-formatted with it.
  DEFS = {
      'today' : time.strftime('%m/%d/%Y'),
      'year' : time.strftime('%Y'),
      'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
      'n' : n
      }

  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
# error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
#endif // GTEST_INCLUDE_GTEST_GTEST_H_
// This header implements a family of generic predicate assertion
// macros:
//
// ASSERT_PRED_FORMAT1(pred_format, v1)
// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
// ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult. See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
// ASSERT_PRED1(pred, v1)
// ASSERT_PRED2(pred, v1, v2)
// ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email googletestframework@googlegroups.com if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce. Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
if (const ::testing::AssertionResult gtest_ar = (expression)) \\
; \\
else \\
on_failure(gtest_ar.failure_message())
""" % DEFS)
def Arity(n):
  """Returns the English name of the given arity, or None if n < 0."""
  if n < 0:
    return None
  names = ('nullary', 'unary', 'binary', 'ternary')
  if n < len(names):
    return names[n]
  return '%s-ary' % n
def Title(word):
  """Returns the given word in title case.  Unlike str.title(), only the
  first character is upper-cased: Title('4-ary') is '4-ary' while
  '4-ary'.title() is '4-Ary'."""
  head, tail = word[0], word[1:]
  return head.upper() + tail
def OneTo(n):
  """Returns the list [1, 2, 3, ..., n]."""
  # Note: under Python 2, range() itself returns a list.
  return range(1, n + 1)
def Iter(n, format, sep=''):
  """Formats the integers 1..n with the given format string and joins the
  results with sep.

  The format string may contain zero or more '%s' specs; each spec is
  filled with the same index i.

  Example:
    Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
  """
  # How many '%s' specs are in format?
  spec_count = len(format.split('%s')) - 1
  pieces = [format % (spec_count * (i,)) for i in OneTo(n)]
  return sep.join(pieces)
def ImplementationForArity(n):
  """Returns the implementation of n-ary predicate assertions."""

  # A map the defines the values used in the implementation template.
  DEFS = {
      'n' : str(n),
      'vs' : Iter(n, 'v%s', sep=', '),
      'vts' : Iter(n, '#v%s', sep=', '),
      'arity' : Arity(n),
      'Arity' : Title(Arity(n))
      }

  # The AssertPredNHelper function template: succeeds when the predicate
  # holds, otherwise builds a failure message listing every argument.
  impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
template <typename Pred""" % DEFS

  impl += Iter(n, """,
typename T%s""")

  impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS

  impl += Iter(n, """,
const char* e%s""")

  impl += """,
Pred pred"""

  impl += Iter(n, """,
const T%s& v%s""")

  impl += """) {
if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS

  impl += ' return AssertionFailure() << pred_text << "("'

  impl += Iter(n, """
<< e%s""", sep=' << ", "')

  impl += ' << ") evaluates to false, where"'

  impl += Iter(n, """
<< "\\n" << e%s << " evaluates to " << v%s""")

  # The internal and public macro definitions for this arity.
  impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS

  impl += Iter(n, """, \\
#v%s""")

  impl += """, \\
pred"""

  impl += Iter(n, """, \\
v%s""")

  impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS

  return impl
def HeaderPostamble():
  """Returns the postamble (the closing include guard) for the header file."""

  return """
#endif  // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
def GenerateFile(path, content):
  """Given a file path and a content string, overwrites it with the
  given content."""
  print 'Updating file %s . . .' % path
  f = file(path, 'w+')
  # Python 2 syntax: redirect print to f; the trailing comma suppresses
  # the extra newline after content.
  print >>f, content,
  f.close()
  print 'File %s has been updated.' % path
def GenerateHeader(n):
  """Given the maximum arity n, updates the header file that implements
  the predicate assertions."""
  # Preamble, then one implementation section per arity, then postamble.
  body = ''.join(ImplementationForArity(i) for i in OneTo(n))
  GenerateFile(HEADER, HeaderPreamble(n) + body + HeaderPostamble())
def UnitTestPreamble():
  """Returns the preamble for the unit test file."""

  # A map that defines the values used in the preamble template.
  # sys.argv[1] is the max arity passed on the command line.
  DEFS = {
      'today' : time.strftime('%m/%d/%Y'),
      'year' : time.strftime('%Y'),
      'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
      }

  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long. If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions. We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon. In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
explicit Bool(int val) : value(val != 0) {}
bool operator>(int n) const { return value > Bool(n).value; }
Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
bool operator==(const Bool& rhs) const { return value == rhs.value; }
bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
  """Returns the tests for n-ary predicate assertions."""

  # A map that defines the values used in the template for the tests.
  DEFS = {
      'n' : n,
      'es' : Iter(n, 'e%s', sep=', '),
      'vs' : Iter(n, 'v%s', sep=', '),
      'vts' : Iter(n, '#v%s', sep=', '),
      'tvs' : Iter(n, 'T%s v%s', sep=', '),
      'int_vs' : Iter(n, 'int v%s', sep=', '),
      'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
      'types' : Iter(n, 'typename T%s', sep=', '),
      'v_sum' : Iter(n, 'v%s', sep=' + '),
      'arity' : Arity(n),
      'Arity' : Title(Arity(n)),
      }

  # Sample predicates (plain function, functor, and the two
  # predicate-formatter flavors) exercised by the generated tests.
  tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problem with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
return %(v_sum)s > 0;
}
""" % DEFS)

  tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
template <%(types)s>
bool operator()(""" % DEFS
  tests += Iter(n, 'const T%s& v%s', sep=""",
""")
  tests += """) {
return %(v_sum)s > 0;
}
};
""" % DEFS

  tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
  tests += Iter(n, 'const char* e%s', sep=""",
""")
  tests += Iter(n, """,
const T%s& v%s""")
  tests += """) {
if (PredFunction%(n)s(%(vs)s))
return testing::AssertionSuccess();
return testing::AssertionFailure()
<< """ % DEFS
  tests += Iter(n, 'e%s', sep=' << " + " << ')
  tests += """
<< " is expected to be positive, but evaluates to "
<< %(v_sum)s << ".";
}
""" % DEFS

  tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
template <%(types)s>
testing::AssertionResult operator()(""" % DEFS
  tests += Iter(n, 'const char* e%s', sep=""",
""")
  tests += Iter(n, """,
const T%s& v%s""")
  tests += """) const {
return PredFormatFunction%(n)s(%(es)s, %(vs)s);
}
};
""" % DEFS

  # A test fixture that counts how many times each argument is evaluated
  # and whether the test body ran to completion.
  tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
protected:
virtual void SetUp() {
expected_to_finish_ = true;
finished_ = false;""" % DEFS

  tests += """
""" + Iter(n, 'n%s_ = ') + """0;
}
"""

  tests += """
virtual void TearDown() {
// Verifies that each of the predicate's arguments was evaluated
// exactly once."""

  tests += ''.join(["""
EXPECT_EQ(1, n%s_) <<
"The predicate assertion didn't evaluate argument %s "
"exactly once.";""" % (i, i + 1) for i in OneTo(n)])

  tests += """
// Verifies that the control flow in the test function is expected.
if (expected_to_finish_ && !finished_) {
FAIL() << "The predicate assertion unexpactedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
// true iff the test function is expected to run to finish.
static bool expected_to_finish_;
// true iff the test function did run to finish.
static bool finished_;
""" % DEFS

  tests += Iter(n, """
static int n%s_;""")

  tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS

  # '%%' survives the first formatting pass so '%(n)s' is substituted here.
  tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS

  tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS

  def GenTest(use_format, use_assert, expect_failure,
              use_functor, use_user_type):
    """Returns the test for a predicate assertion macro.

    Args:
      use_format:     true iff the assertion is a *_PRED_FORMAT*.
      use_assert:     true iff the assertion is a ASSERT_*.
      expect_failure: true iff the assertion is expected to fail.
      use_functor:    true iff the first argument of the assertion is
                      a functor (as opposed to a function)
      use_user_type:  true iff the predicate functor/function takes
                      argument(s) of a user-defined type.

    Example:
      GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
      of a successful EXPECT_PRED_FORMATn() that takes a functor
      whose arguments have built-in types.
    """
    if use_assert:
      # 'assert' is reserved, so we cannot use that identifier here.
      assrt = 'ASSERT'
    else:
      assrt = 'EXPECT'

    assertion = assrt + '_PRED'

    if use_format:
      pred_format = 'PredFormat'
      assertion += '_FORMAT'
    else:
      pred_format = 'Pred'

    assertion += '%(n)s' % DEFS

    if use_functor:
      pred_format_type = 'functor'
      pred_format += 'Functor%(n)s()'
    else:
      pred_format_type = 'function'
      pred_format += 'Function%(n)s'
      if not use_format:
        # Picks one of the two non-template gcc-workaround functions.
        if use_user_type:
          pred_format += 'Bool'
        else:
          pred_format += 'Int'

    test_name = pred_format_type.title()

    if use_user_type:
      arg_type = 'user-defined type (Bool)'
      test_name += 'OnUserType'
      if expect_failure:
        arg = 'Bool(n%s_++)'
      else:
        arg = 'Bool(++n%s_)'
    else:
      arg_type = 'built-in type (int)'
      test_name += 'OnBuiltInType'
      if expect_failure:
        arg = 'n%s_++'
      else:
        arg = '++n%s_'

    if expect_failure:
      successful_or_failed = 'failed'
      expected_or_not = 'expected.'
      test_name += 'Failure'
    else:
      successful_or_failed = 'successful'
      expected_or_not = 'UNEXPECTED!'
      test_name += 'Success'

    # A map that defines the values used in the test template.
    defs = DEFS.copy()
    defs.update({
        'assert' : assrt,
        'assertion' : assertion,
        'test_name' : test_name,
        'pf_type' : pred_format_type,
        'pf' : pred_format,
        'arg_type' : arg_type,
        'arg' : arg,
        'successful' : successful_or_failed,
        'expected' : expected_or_not,
        })

    test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs

    indent = (len(assertion) + 3)*' '
    extra_indent = ''

    if expect_failure:
      extra_indent = ' '
      if use_assert:
        test += """
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE({ // NOLINT"""
      else:
        test += """
EXPECT_NONFATAL_FAILURE({ // NOLINT"""

    test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs

    # The second formatting pass substitutes the '%(n)s' still embedded
    # in pieces such as pred_format.
    test = test % defs
    test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
    test += ');\n' + extra_indent + ' finished_ = true;\n'

    if expect_failure:
      test += ' }, "");\n'

    test += '}\n'
    return test

  # Generates tests for all 2**5 = 32 combinations of the five flags.
  tests += ''.join([GenTest(use_format, use_assert, expect_failure,
                            use_functor, use_user_type)
                    for use_format in [0, 1]
                    for use_assert in [0, 1]
                    for expect_failure in [0, 1]
                    for use_functor in [0, 1]
                    for use_user_type in [0, 1]
                    ])

  return tests
def UnitTestPostamble():
  """Returns the postamble for the tests."""
  # Nothing currently follows the generated tests.
  return ''
def GenerateUnitTest(n):
  """Returns the tests for up-to n-ary predicate assertions."""
  # Preamble, then one test section per arity, then postamble.
  body = ''.join(TestsForArity(i) for i in OneTo(n))
  GenerateFile(UNIT_TEST, UnitTestPreamble() + body + UnitTestPostamble())
def _Main():
  """The entry point of the script.  Generates the header file and its
  unit test."""
  # Usage: the single command-line argument is the maximum arity.
  if len(sys.argv) != 2:
    print __doc__
    print 'Author: ' + __author__
    sys.exit(1)
  n = int(sys.argv[1])
  GenerateHeader(n)
  GenerateUnitTest(n)
if __name__ == '__main__':
  # Command-line entry point; see __doc__ for usage.
  _Main()
| 21,986 | 29.077975 | 76 | py |
spegg | spegg-master/tests/googletest/googletest/scripts/upload_gtest.py | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
--cc=googletestframework@googlegroups.com to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = 'googletestframework@googlegroups.com'
def main():
  """Runs upload.py with the user's flags, CC'ing the gtest mailing list.

  Locates upload.py next to this script, makes sure GTEST_GROUP appears in
  the --cc= flag (appending one if absent), and then replaces the current
  process with upload.py via os.execv().
  """
  # upload.py is expected to live in the same directory as this script.
  script_dir = os.path.dirname(os.path.abspath(__file__))
  upload_py_path = os.path.join(script_dir, 'upload.py')

  new_argv = [upload_py_path]
  saw_cc_flag = False
  for flag in sys.argv[1:]:
    if not flag.startswith(CC_FLAG):
      new_argv.append(flag)
      continue
    saw_cc_flag = True
    # Parse the existing cc list and add GTEST_GROUP if it's missing.
    cc_list = [addr for addr in flag[len(CC_FLAG):].split(',') if addr]
    if GTEST_GROUP not in cc_list:
      cc_list.append(GTEST_GROUP)
    new_argv.append(CC_FLAG + ','.join(cc_list))
  if not saw_cc_flag:
    new_argv.append(CC_FLAG + GTEST_GROUP)

  # Replace the current process with upload.py.
  os.execv(upload_py_path, new_argv)
if __name__ == '__main__':
  # Script entry point: delegates to upload.py with the adjusted flags.
  main()
| 2,851 | 35.101266 | 72 | py |
spegg | spegg-master/tests/googletest/googletest/scripts/common.py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Shared utilities for writing scripts for Google Test/Mock."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
# Matches the line from 'svn info .' output that describes what SVN
# path the current local directory corresponds to. For example, in
# a googletest SVN workspace's trunk/test directory, the output will be:
#
# URL: https://googletest.googlecode.com/svn/trunk/test
_SVN_INFO_URL_RE = re.compile(r'^URL: https://(\w+)\.googlecode\.com/svn(.*)')
def GetCommandOutput(command):
  """Runs `command` in a shell and returns its stdout as a list of
  whitespace-stripped lines."""
  pipe = os.popen(command, 'r')
  try:
    return [raw_line.strip() for raw_line in pipe.readlines()]
  finally:
    # Always close the pipe, even if reading raised.
    pipe.close()
def GetSvnInfo():
  """Returns (project, root) for the current SVN workspace.

  project is 'googletest' or 'googlemock'; root is the workspace's absolute
  root path. Returns (None, None) if 'svn info .' produces no recognizable
  URL line.
  """
  for info_line in GetCommandOutput('svn info .'):
    match = _SVN_INFO_URL_RE.match(info_line)
    if not match:
      continue
    project_name = match.group(1)  # googletest or googlemock
    rel_path = match.group(2)
    # Walk up one directory level per '/' in the relative SVN path.
    workspace_root = os.path.realpath(rel_path.count('/') * '../')
    return project_name, workspace_root
  return None, None
def GetSvnTrunk():
  """Returns the current SVN workspace's trunk root path, or None."""
  _, workspace_root = GetSvnInfo()
  if workspace_root:
    return workspace_root + '/trunk'
  return None
def IsInGTestSvn():
  """Returns True iff the current directory is in a googletest workspace."""
  project_name, _ = GetSvnInfo()
  return project_name == 'googletest'
def IsInGMockSvn():
  """Returns True iff the current directory is in a googlemock workspace."""
  project_name, _ = GetSvnInfo()
  return project_name == 'googlemock'
| 2,919 | 33.761905 | 78 | py |
spegg | spegg-master/tests/googletest/googletest/scripts/release_docs.py | #!/usr/bin/env python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for branching Google Test/Mock wiki pages for a new version.
SYNOPSIS
release_docs.py NEW_RELEASE_VERSION
Google Test and Google Mock's external user documentation is in
interlinked wiki files. When we release a new version of
Google Test or Google Mock, we need to branch the wiki files
such that users of a specific version of Google Test/Mock can
look up documenation relevant for that version. This script
automates that process by:
- branching the current wiki pages (which document the
behavior of the SVN trunk head) to pages for the specified
version (e.g. branching FAQ.wiki to V2_6_FAQ.wiki when
NEW_RELEASE_VERSION is 2.6);
- updating the links in the branched files to point to the branched
version (e.g. a link in V2_6_FAQ.wiki that pointed to
Primer.wiki#Anchor will now point to V2_6_Primer.wiki#Anchor).
NOTE: NEW_RELEASE_VERSION must be a NEW version number for
which the wiki pages don't yet exist; otherwise you'll get SVN
errors like "svn: Path 'V1_7_PumpManual.wiki' is not a
directory" when running the script.
EXAMPLE
$ cd PATH/TO/GTEST_SVN_WORKSPACE/trunk
$ scripts/release_docs.py 2.6 # create wiki pages for v2.6
$ svn status # verify the file list
$ svn diff # verify the file contents
$ svn commit -m "release wiki pages for v2.6"
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import common
# Wiki pages that shouldn't be branched for every gtest/gmock release.
GTEST_UNVERSIONED_WIKIS = ['DevGuide.wiki']
GMOCK_UNVERSIONED_WIKIS = [
'DesignDoc.wiki',
'DevGuide.wiki',
'KnownIssues.wiki'
]
def DropWikiSuffix(wiki_filename):
  """Strips one trailing '.wiki' extension from the filename, if present."""
  suffix = '.wiki'
  if wiki_filename.endswith(suffix):
    return wiki_filename[:-len(suffix)]
  return wiki_filename
class WikiBrancher(object):
  """Branches the versionable wiki pages for a new gtest/gmock release.

  Copies each unversioned .wiki page to a version-prefixed copy (e.g.
  FAQ.wiki -> V2_6_FAQ.wiki) via 'svn cp', then rewrites intra-wiki links
  in the branched copies so they point at the branched pages.
  """

  def __init__(self, dot_version):
    # The script must run inside a gtest/gmock SVN workspace.
    self.project, svn_root_path = common.GetSvnInfo()
    if self.project not in ('googletest', 'googlemock'):
      sys.exit('This script must be run in a gtest or gmock SVN workspace.')
    self.wiki_dir = svn_root_path + '/wiki'
    # Turn '2.6' to 'V2_6_'.
    self.version_prefix = 'V' + dot_version.replace('.', '_') + '_'
    self.files_to_branch = self.GetFilesToBranch()
    page_names = [DropWikiSuffix(f) for f in self.files_to_branch]
    # A link to Foo.wiki is in one of the following forms:
    #   [Foo words]
    #   [Foo#Anchor words]
    #   [http://code.google.com/.../wiki/Foo words]
    #   [http://code.google.com/.../wiki/Foo#Anchor words]
    # We want to replace 'Foo' with 'V2_6_Foo' in the above cases.
    self.search_for_re = re.compile(
        # This regex matches either
        #   [Foo
        # or
        #   /wiki/Foo
        # followed by a space or a #, where Foo is the name of an
        # unversioned wiki page.
        r'(\[|/wiki/)(%s)([ #])' % '|'.join(page_names))
    self.replace_with = r'\1%s\2\3' % (self.version_prefix,)

  def GetFilesToBranch(self):
    """Returns a list of .wiki file names that need to be branched."""
    unversioned_wikis = (GTEST_UNVERSIONED_WIKIS if self.project == 'googletest'
                         else GMOCK_UNVERSIONED_WIKIS)
    return [f for f in os.listdir(self.wiki_dir)
            if (f.endswith('.wiki') and
                not re.match(r'^V\d', f) and  # Excludes versioned .wiki files.
                f not in unversioned_wikis)]

  def BranchFiles(self):
    """Branches the .wiki files needed to be branched via 'svn cp'."""
    # print() parses under both Python 2 and 3; the original used the
    # Python-2-only print statement.
    print('Branching %d .wiki files:' % (len(self.files_to_branch),))
    os.chdir(self.wiki_dir)
    for f in self.files_to_branch:
      command = 'svn cp %s %s%s' % (f, self.version_prefix, f)
      print(command)
      os.system(command)

  def UpdateLinksInBranchedFiles(self):
    """Rewrites intra-wiki links in each branched page to the new version."""
    for f in self.files_to_branch:
      source_file = os.path.join(self.wiki_dir, f)
      versioned_file = os.path.join(self.wiki_dir, self.version_prefix + f)
      print('Updating links in %s.' % (versioned_file,))
      # open() instead of the Python-2-only file() builtin; 'with' makes
      # sure the output is flushed and closed (the original never closed it).
      with open(source_file, 'r') as source:
        text = source.read()
      new_text = self.search_for_re.sub(self.replace_with, text)
      with open(versioned_file, 'w') as versioned:
        versioned.write(new_text)
def main():
  """Entry point: branches the wiki pages for the version on the command line."""
  args = sys.argv[1:]
  if len(args) != 1:
    sys.exit(__doc__)

  release_brancher = WikiBrancher(args[0])
  release_brancher.BranchFiles()
  release_brancher.UpdateLinksInBranchedFiles()
if __name__ == '__main__':
  # Script entry point.
  main()
| 6,132 | 37.572327 | 80 | py |
spegg | spegg-master/tests/googletest/googlemock/test/gmock_leak_test.py | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests that leaked mock objects can be caught be Google Mock."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gmock_test_utils
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_leak_test_')
TEST_WITH_EXPECT_CALL = [PROGRAM_PATH, '--gtest_filter=*ExpectCall*']
TEST_WITH_ON_CALL = [PROGRAM_PATH, '--gtest_filter=*OnCall*']
TEST_MULTIPLE_LEAKS = [PROGRAM_PATH, '--gtest_filter=*MultipleLeaked*']
environ = gmock_test_utils.environ
SetEnvVar = gmock_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gmock_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
class GMockLeakTest(gmock_test_utils.TestCase):
  """Verifies Google Mock's leaked-mock detection via a child test program.

  Each test runs the gmock_leak_test_ binary as a sub-process with a
  particular --gmock_catch_leaked_mocks setting and checks the child's
  exit code: non-zero when a leak should be reported, zero otherwise.
  """

  def testCatchesLeakedMockByDefault(self):
    """Leak detection is on by default for both EXPECT_CALL and ON_CALL."""
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL,
                                    env=environ).exit_code)
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_ON_CALL,
                                    env=environ).exit_code)

  def testDoesNotCatchLeakedMockWhenDisabled(self):
    """--gmock_catch_leaked_mocks=0 suppresses the leak report."""
    # assertEqual instead of the deprecated assertEquals alias
    # (the alias was removed in Python 3.12).
    self.assertEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
                                    ['--gmock_catch_leaked_mocks=0'],
                                    env=environ).exit_code)
    self.assertEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
                                    ['--gmock_catch_leaked_mocks=0'],
                                    env=environ).exit_code)

  def testCatchesLeakedMockWhenEnabled(self):
    """An explicit --gmock_catch_leaked_mocks flag reports leaks."""
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
                                    ['--gmock_catch_leaked_mocks'],
                                    env=environ).exit_code)
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
                                    ['--gmock_catch_leaked_mocks'],
                                    env=environ).exit_code)

  def testCatchesLeakedMockWhenEnabledWithExplictFlagValue(self):
    """--gmock_catch_leaked_mocks=1 also reports leaks."""
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
                                    ['--gmock_catch_leaked_mocks=1'],
                                    env=environ).exit_code)

  def testCatchesMultipleLeakedMocks(self):
    """Several mocks leaked in one test are all caught."""
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_MULTIPLE_LEAKS +
                                    ['--gmock_catch_leaked_mocks'],
                                    env=environ).exit_code)
if __name__ == '__main__':
  # Run the leak tests via the shared gmock test runner.
  gmock_test_utils.Main()
| 4,384 | 39.229358 | 73 | py |
spegg | spegg-master/tests/googletest/googlemock/test/gmock_test_utils.py | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Mocking Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
# Determines path to gtest_test_utils and imports it.
SCRIPT_DIR = os.path.dirname(__file__) or '.'
# isdir resolves symbolic links.
gtest_tests_util_dir = os.path.join(SCRIPT_DIR, '../gtest/test')
if os.path.isdir(gtest_tests_util_dir):
GTEST_TESTS_UTIL_DIR = gtest_tests_util_dir
else:
GTEST_TESTS_UTIL_DIR = os.path.join(SCRIPT_DIR, '../../gtest/test')
sys.path.append(GTEST_TESTS_UTIL_DIR)
import gtest_test_utils # pylint: disable-msg=C6204
def GetSourceDir():
  """Returns the absolute path of the directory containing the .py files.

  Delegates to the shared gtest test-utility implementation.
  """
  source_dir = gtest_test_utils.GetSourceDir()
  return source_dir
def GetTestExecutablePath(executable_name):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting
  file doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.

  Returns:
    The absolute path of the test binary.
  """
  # All path resolution is shared with the gtest test utilities.
  executable_path = gtest_test_utils.GetTestExecutablePath(executable_name)
  return executable_path
def GetExitStatus(exit_code):
  """Extracts the argument passed to exit() from an os.system() result.

  Args:
    exit_code: the raw result value of os.system(command).

  Returns:
    The exit status, or -1 if the process did not terminate via exit().
  """
  if os.name == 'nt':
    # On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
    # the argument to exit() directly.
    return exit_code
  # On Unix, the wait-status word must be decoded with os.WEXITSTATUS().
  if not os.WIFEXITED(exit_code):
    return -1
  return os.WEXITSTATUS(exit_code)
# Suppresses the "Invalid const name" lint complaint
# pylint: disable-msg=C6409
# Exposes utilities from gtest_test_utils.
Subprocess = gtest_test_utils.Subprocess
TestCase = gtest_test_utils.TestCase
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
PREMATURE_EXIT_FILE_ENV_VAR = gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR
# pylint: enable-msg=C6409
def Main():
  """Runs the enclosing unit test."""
  # Delegate all work to the shared gtest test runner.
  gtest_test_utils.Main()
| 3,684 | 31.610619 | 79 | py |
spegg | spegg-master/tests/googletest/googlemock/test/gmock_output_test.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Mocking Framework.
SYNOPSIS
gmock_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gmock_output_test_ file.
gmock_output_test.py --gengolden
gmock_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import gmock_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_output_test_')
COMMAND = [PROGRAM_PATH, '--gtest_stack_trace_depth=0', '--gtest_print_time=0']
GOLDEN_NAME = 'gmock_output_test_golden.txt'
GOLDEN_PATH = os.path.join(gmock_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
  """Converts all Windows (CRLF) and Mac (CR) line endings in s to Unix (LF)."""
  # Handle CRLF first so the lone-CR pass doesn't double-convert it.
  normalized = s.replace('\r\n', '\n')
  return normalized.replace('\r', '\n')
def RemoveReportHeaderAndFooter(output):
  """Removes Google Test result report's header and footer from the output."""
  # Each pattern matches one kind of boilerplate line emitted by gtest;
  # they are applied in the same order as before.
  boilerplate_patterns = [
      r'.*gtest_main.*\n',
      r'\[.*\d+ tests.*\n',
      r'\[.* test environment .*\n',
      r'\[=+\] \d+ tests .* ran.*',
      r'.* FAILED TESTS\n',
  ]
  for pattern in boilerplate_patterns:
    output = re.sub(pattern, '', output)
  return output
def RemoveLocations(output):
  """Removes all file location info from a Google Test program's output.

  Args:
    output: the output of a Google Test program.

  Returns:
    output with all file location info (in the form of
    'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
    'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
    'FILE:#: '.
  """
  location_re = re.compile(r'.*[/\\](.+)(\:\d+|\(\d+\))\:')
  return location_re.sub('FILE:#:', output)
def NormalizeErrorMarker(output):
  """Normalizes the error marker, which is different on Windows vs on Linux."""
  # The pattern is a plain literal, so str.replace is equivalent to re.sub.
  return output.replace(' error: ', ' Failure\n')
def RemoveMemoryAddresses(output):
  """Masks memory addresses ('@' followed by word chars) with '@0x#'."""
  address_re = re.compile(r'@\w+')
  return address_re.sub('@0x#', output)
def RemoveTestNamesOfLeakedMocks(output):
  """Drops the '(used in test ...) ' annotations from the test output."""
  annotation_re = re.compile(r'\(used in test .+\) ')
  return annotation_re.sub('', output)
def GetLeakyTests(output):
  """Returns the names of all tests reported as leaking mock objects.

  For example, if '(used in test FooTest.Bar)' appears in output, the
  returned list contains 'FooTest.Bar'.
  """
  leaky_test_re = re.compile(r'\(used in test (.+)\)')
  return leaky_test_re.findall(output)
def GetNormalizedOutputAndLeakyTests(output):
  """Normalizes the output of gmock_output_test_.

  Args:
    output: The raw test output.

  Returns:
    A tuple (the normalized test output, the list of test names that have
    leaked mocks).
  """
  # Apply the normalization passes in sequence; the order matters.
  for normalize in (ToUnixLineEnding,
                    RemoveReportHeaderAndFooter,
                    NormalizeErrorMarker,
                    RemoveLocations,
                    RemoveMemoryAddresses):
    output = normalize(output)
  return (RemoveTestNamesOfLeakedMocks(output), GetLeakyTests(output))
def GetShellCommandOutput(cmd):
  """Runs cmd in a sub-process and returns its standard output as a string."""
  child = gmock_test_utils.Subprocess(cmd, capture_stderr=False)
  return child.output
def GetNormalizedCommandOutputAndLeakyTests(cmd):
  """Runs a command and returns its normalized output and leaky test names.

  Args:
    cmd: the shell command.
  """
  # Disables exception pop-ups on Windows before spawning the child.
  os.environ['GTEST_CATCH_EXCEPTIONS'] = '1'
  raw_output = GetShellCommandOutput(cmd)
  return GetNormalizedOutputAndLeakyTests(raw_output)
class GMockOutputTest(gmock_test_utils.TestCase):
  """Compares gmock_output_test_'s normalized output with the golden file."""

  def testOutput(self):
    """The normalized output must match the golden file exactly."""
    (output, leaky_tests) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
    # 'with' makes sure the golden file is closed even if read() raises
    # (the original leaked the handle on failure).
    with open(GOLDEN_PATH, 'rb') as golden_file:
      golden = golden_file.read()

    # The normalized output should match the golden file.
    # assertEqual instead of the deprecated assertEquals alias
    # (the alias was removed in Python 3.12).
    self.assertEqual(golden, output)

    # The raw output should contain 2 leaked mock object errors for
    # test GMockOutputTest.CatchesLeakedMocks.
    self.assertEqual(['GMockOutputTest.CatchesLeakedMocks',
                      'GMockOutputTest.CatchesLeakedMocks'],
                     leaky_tests)
if __name__ == '__main__':
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    # --gengolden mode: regenerate the golden file from the current output.
    (output, _) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
    golden_file = open(GOLDEN_PATH, 'wb')
    golden_file.write(output)
    golden_file.close()
  else:
    # Normal mode: run the unit test.
    gmock_test_utils.Main()
| 5,999 | 32.149171 | 80 | py |
spegg | spegg-master/tests/googletest/googlemock/scripts/fuse_gmock_files.py | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gmock_files.py v0.1.0
Fuses Google Mock and Google Test source code into two .h files and a .cc file.
SYNOPSIS
fuse_gmock_files.py [GMOCK_ROOT_DIR] OUTPUT_DIR
Scans GMOCK_ROOT_DIR for Google Mock and Google Test source
code, assuming Google Test is in the GMOCK_ROOT_DIR/../googletest
directory, and generates three files:
OUTPUT_DIR/gtest/gtest.h, OUTPUT_DIR/gmock/gmock.h, and
OUTPUT_DIR/gmock-gtest-all.cc. Then you can build your tests
by adding OUTPUT_DIR to the include search path and linking
with OUTPUT_DIR/gmock-gtest-all.cc. These three files contain
everything you need to use Google Mock. Hence you can
"install" Google Mock by copying them to wherever you want.
GMOCK_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gmock_files.py fused_gmock
./fuse_gmock_files.py path/to/unpacked/gmock fused_gmock
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Mock or Google Test headers. Please
report any problems to googlemock@googlegroups.com. You can read
http://code.google.com/p/googlemock/wiki/CookBook for more
information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Mock root directory.
DEFAULT_GMOCK_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# We need to call into googletest/scripts/fuse_gtest_files.py.
sys.path.append(os.path.join(DEFAULT_GMOCK_ROOT_DIR, '../googletest/scripts'))
import fuse_gtest_files
gtest = fuse_gtest_files
# Regex for matching '#include "gmock/..."'.
INCLUDE_GMOCK_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gmock/.+)"')
# Where to find the source seed files.
GMOCK_H_SEED = 'include/gmock/gmock.h'
GMOCK_ALL_CC_SEED = 'src/gmock-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GMOCK_H_OUTPUT = 'gmock/gmock.h'
GMOCK_GTEST_ALL_CC_OUTPUT = 'gmock-gtest-all.cc'
def GetGTestRootDir(gmock_root):
  """Returns the Google Test root directory, a sibling of the gmock root."""
  gtest_root = os.path.join(gmock_root, '../googletest')
  return gtest_root
def ValidateGMockRootDir(gmock_root):
  """Makes sure gmock_root points to a valid gmock root directory.

  The function aborts the program (via the gtest helpers) on failure.
  """
  gtest.ValidateGTestRootDir(GetGTestRootDir(gmock_root))
  for seed in (GMOCK_H_SEED, GMOCK_ALL_CC_SEED):
    gtest.VerifyFileExists(gmock_root, seed)
def ValidateOutputDir(output_dir):
  """Makes sure output_dir points to a valid output directory.

  The function aborts the program (via the gtest helpers) on failure.
  """
  for output_name in (gtest.GTEST_H_OUTPUT,
                      GMOCK_H_OUTPUT,
                      GMOCK_GTEST_ALL_CC_OUTPUT):
    gtest.VerifyOutputFile(output_dir, output_name)
def FuseGMockH(gmock_root, output_dir):
  """Scans folder gmock_root to generate gmock/gmock.h in output_dir.

  Recursively inlines every '#include "gmock/..."' header; any
  '#include "gtest/..."' line is rewritten to include the fused
  gtest/gtest.h exactly once.
  """
  # open() instead of the Python-2-only file() builtin.
  output_file = open(os.path.join(output_dir, GMOCK_H_OUTPUT), 'w')
  # Built-in set() instead of sets.Set (the sets module was removed
  # in Python 3). Holds all gmock headers we've processed.
  processed_files = set()

  def ProcessFile(gmock_header_path):
    """Processes the given gmock header file."""
    # We don't process the same header twice.
    if gmock_header_path in processed_files:
      return

    processed_files.add(gmock_header_path)

    # Reads each line in the given gmock header.  'with' makes sure the
    # input header is closed (the original relied on GC).
    with open(os.path.join(gmock_root, gmock_header_path), 'r') as header_file:
      for line in header_file:
        m = INCLUDE_GMOCK_FILE_REGEX.match(line)
        if m:
          # It's '#include "gmock/..."' - let's process it recursively.
          ProcessFile('include/' + m.group(1))
        else:
          m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
          if m:
            # It's '#include "gtest/foo.h"'. We translate it to
            # "gtest/gtest.h", regardless of what foo is, since all
            # gtest headers are fused into gtest/gtest.h.

            # There is no need to #include gtest.h twice.
            if not gtest.GTEST_H_SEED in processed_files:
              processed_files.add(gtest.GTEST_H_SEED)
              output_file.write('#include "%s"\n' % (gtest.GTEST_H_OUTPUT,))
          else:
            # Otherwise we copy the line unchanged to the output file.
            output_file.write(line)

  ProcessFile(GMOCK_H_SEED)
  output_file.close()
def FuseGMockAllCcToFile(gmock_root, output_file):
  """Scans folder gmock_root to fuse gmock-all.cc into output_file.

  '#include "gmock/..."' lines become a single include of the fused
  gmock/gmock.h; '#include "gtest/..."' lines are dropped (gtest-all.cc
  already provides them); '#include "src/..."' lines are inlined
  recursively.
  """
  # Built-in set() instead of sets.Set (the sets module was removed
  # in Python 3).
  processed_files = set()

  def ProcessFile(gmock_source_file):
    """Processes the given gmock source file."""
    # We don't process the same #included file twice.
    if gmock_source_file in processed_files:
      return

    processed_files.add(gmock_source_file)

    # Reads each line in the given gmock source file.  open() instead of
    # the Python-2-only file() builtin; 'with' makes sure the input file
    # is closed (the original relied on GC).
    with open(os.path.join(gmock_root, gmock_source_file), 'r') as source_file:
      for line in source_file:
        m = INCLUDE_GMOCK_FILE_REGEX.match(line)
        if m:
          # It's '#include "gmock/foo.h"'. We treat it as '#include
          # "gmock/gmock.h"', as all other gmock headers are being fused
          # into gmock.h and cannot be #included directly.

          # There is no need to #include "gmock/gmock.h" more than once.
          if not GMOCK_H_SEED in processed_files:
            processed_files.add(GMOCK_H_SEED)
            output_file.write('#include "%s"\n' % (GMOCK_H_OUTPUT,))
        else:
          m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
          if m:
            # It's '#include "gtest/..."'.
            # There is no need to #include gtest.h as it has been
            # #included by gtest-all.cc.
            pass
          else:
            m = gtest.INCLUDE_SRC_FILE_REGEX.match(line)
            if m:
              # It's '#include "src/foo"' - let's process it recursively.
              ProcessFile(m.group(1))
            else:
              # Otherwise we copy the line unchanged to the output file.
              output_file.write(line)

  ProcessFile(GMOCK_ALL_CC_SEED)
def FuseGMockGTestAllCc(gmock_root, output_dir):
  """Scans folder gmock_root to generate gmock-gtest-all.cc in output_dir."""
  # open() instead of the Python-2-only file() builtin; 'with' makes sure
  # the output is closed even if fusing raises.
  with open(os.path.join(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT),
            'w') as output_file:
    # First, fuse gtest-all.cc into gmock-gtest-all.cc.
    gtest.FuseGTestAllCcToFile(GetGTestRootDir(gmock_root), output_file)
    # Next, append fused gmock-all.cc to gmock-gtest-all.cc.
    FuseGMockAllCcToFile(gmock_root, output_file)
def FuseGMock(gmock_root, output_dir):
  """Generates fused gtest.h, gmock.h, and gmock-gtest-all.cc in output_dir."""
  # Fail fast on a bad source or destination directory.
  ValidateGMockRootDir(gmock_root)
  ValidateOutputDir(output_dir)

  gtest_root = GetGTestRootDir(gmock_root)
  gtest.FuseGTestH(gtest_root, output_dir)
  FuseGMockH(gmock_root, output_dir)
  FuseGMockGTestAllCc(gmock_root, output_dir)
def main():
  """Parses the command line and fuses the gmock/gtest sources.

  Accepts either 'fuse_gmock_files.py OUTPUT_DIR' or
  'fuse_gmock_files.py GMOCK_ROOT_DIR OUTPUT_DIR'; anything else prints
  the usage text and exits with status 1.
  """
  argc = len(sys.argv)
  if argc == 2:
    # fuse_gmock_files.py OUTPUT_DIR
    FuseGMock(DEFAULT_GMOCK_ROOT_DIR, sys.argv[1])
  elif argc == 3:
    # fuse_gmock_files.py GMOCK_ROOT_DIR OUTPUT_DIR
    FuseGMock(sys.argv[1], sys.argv[2])
  else:
    # print(...) parses under both Python 2 and 3; the original
    # 'print __doc__' statement is a syntax error under Python 3.
    print(__doc__)
    sys.exit(1)
if __name__ == '__main__':
  # Script entry point.
  main()
| 8,631 | 34.817427 | 79 | py |
spegg | spegg-master/tests/googletest/googlemock/scripts/upload.py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import md5
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
try:
import readline
except ImportError:
pass
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# Max size of patch or base file.
# Payloads larger than this are skipped and flagged with "file_too_large"
# (see VersionControlSystem.UploadBaseFiles).
MAX_UPLOAD_SIZE = 900 * 1024
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.

  The last used email address is saved to a file and offered up as a
  suggestion to the user. If the user presses enter without typing in
  anything the last used email address is used. If the user enters a new
  address, it is saved for next time we prompt.

  Args:
    prompt: The prompt text, without a trailing ": ".

  Returns:
    The email address entered by the user, or the remembered one if the
    user just pressed enter.
  """
  last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
  last_email = ""
  if os.path.exists(last_email_file_name):
    try:
      # "with" guarantees the cache file is closed even if readline() fails.
      with open(last_email_file_name, "r") as last_email_file:
        last_email = last_email_file.readline().strip("\n")
      prompt += " [%s]" % last_email
    except IOError:
      # Best effort only: an unreadable cache simply means no suggestion.
      pass
  email = raw_input(prompt + ": ").strip()
  if email:
    try:
      with open(last_email_file_name, "w") as last_email_file:
        last_email_file.write(email)
    except IOError:
      # Failing to remember the address for next time is not fatal.
      pass
  else:
    email = last_email
  return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
  """Write msg (plus a newline) to stderr and terminate with status 1."""
  sys.stderr.write("%s\n" % msg)
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""
  def __init__(self, url, code, msg, headers, args):
    # fp=None: the error body has already been consumed by the caller.
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    # NOTE(review): this shadows BaseException.args with the ClientLogin
    # response dict; callers (see _Authenticate) use 'reason' below instead.
    self.args = args
    # 'Error' is the failure code from ClientLogin, e.g. "BadAuthentication"
    # or "CaptchaRequired".
    self.reason = args["Error"]
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server."""

  def __init__(self, host, auth_function, host_override=None,
               extra_headers=None, save_cookies=False):
    """Creates a new HttpRpcServer.

    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
        Defaults to an empty dict; a fresh dict is created per instance so
        instances never share (and accidentally mutate) one default dict.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
    """
    self.host = host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    # Fix for the mutable-default-argument pitfall: the old signature used
    # "extra_headers={}", which is shared across all calls.
    self.extra_headers = extra_headers if extra_headers is not None else {}
    self.save_cookies = save_cookies
    # The opener is supplied by the subclass (see HttpRpcServer._GetOpener).
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)

  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.

    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()

  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request, applying host override and extra headers."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req

  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.

    Args:
      email: The user's email address
      password: The user's password

    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.

    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = "GOOGLE"
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # The ClientLogin response body is "key=value" pairs, one per line.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError as e:
      if e.code == 403:
        # 403 carries a structured error body; surface it as ClientLoginError.
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise

  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.

    Args:
      auth_token: The authentication token returned by ClientLogin.

    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("http://%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError as e:
      # The opener deliberately has no redirect handler, so the expected
      # 302 surfaces as an HTTPError; treat it as the response.
      response = e
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    # A redirect back to continue_location means the cookie was issued.
    self.authenticated = True

  def _Authenticate(self):
    """Authenticates the user.

    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to redirect
        us to the URL we provided.

    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response and directs us to
    authenticate ourselves with ClientLogin.
    """
    # Give the user up to three attempts at a correct password.
    for _ in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError as e:
        # Only a bad password is retryable; every other ClientLogin failure
        # either aborts the loop or is re-raised.
        if e.reason == "BadAuthentication":
          sys.stderr.write("Invalid username or password.\n")
          continue
        if e.reason == "CaptchaRequired":
          sys.stderr.write(
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human. Then try again.\n")
          break
        if e.reason == "NotVerified":
          sys.stderr.write("Account not verified.\n")
          break
        if e.reason == "TermsNotAgreed":
          sys.stderr.write("User has not agreed to TOS.\n")
          break
        if e.reason == "AccountDeleted":
          sys.stderr.write("The user account has been deleted.\n")
          break
        if e.reason == "AccountDisabled":
          sys.stderr.write("The user account has been disabled.\n")
          break
        if e.reason == "ServiceDisabled":
          sys.stderr.write("The user's access to the service has been "
                           "disabled.\n")
          break
        if e.reason == "ServiceUnavailable":
          sys.stderr.write("The service is not available; try again later.\n")
          break
        raise
      self._GetAuthCookie(auth_token)
      return

  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication. Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "http://%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError as e:
          if tries > 3:
            raise
          elif e.code == 401:
            # Expired/missing cookie: re-authenticate and retry the request.
            # Only 401 triggers a retry; all other errors propagate.
            self._Authenticate()
          else:
            raise
    finally:
      # Always restore the process-wide default socket timeout.
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""
  def _Authenticate(self):
    """Save the cookie jar to disk after authentication (if enabled)."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()
  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    Note that HTTPRedirectHandler is deliberately omitted from the handler
    list: _GetAuthCookie relies on seeing the raw 302 response.

    Returns:
      A urllib2.OpenerDirector object.
    """
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      # Persist cookies in the user's home directory so later runs can skip
      # the interactive login.
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file: it holds auth material, so it must not
      # be readable by other users.
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Command-line option definitions for the upload tool, grouped by topic.
# The parsed values feed GetRpcServer and the VCS classes below.
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default="codereview.appspot.com",
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
def GetRpcServer(options):
  """Returns an instance of an AbstractRpcServer.

  Args:
    options: The parsed command-line options (server, email, host,
      save_cookies are read here).

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  rpc_server_class = HttpRpcServer
  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = options.email
    if email is None:
      email = GetEmail("Email (login for uploading to %s)" % options.server)
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)
  # If this is the dev_appserver, use fake authentication.
  host = (options.host or options.server).lower()
  if host == "localhost" or host.startswith("localhost:"):
    email = options.email
    if email is None:
      email = "test@example.com"
      logging.info("Using debug user %s. Override with --email" % email)
    # The dev_appserver accepts a pre-baked login cookie, so no real
    # ClientLogin round-trip is performed.
    server = rpc_server_class(
        options.server,
        lambda: (email, "password"),
        host_override=options.host,
        extra_headers={"Cookie":
                       'dev_appserver_login="%s:False"' % email},
        save_cookies=options.save_cookies)
    # Don't try to talk to ClientLogin.
    server.authenticated = True
    return server
  return rpc_server_class(options.server, GetUserCredentials,
                          host_override=options.host,
                          save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
      uploaded as files.
  Returns:
    (content_type, body) ready for httplib.HTTP instance.
  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'
  parts = []
  # Plain form fields: header, blank line, value.
  for (name, value) in fields:
    parts.extend(['--' + BOUNDARY,
                  'Content-Disposition: form-data; name="%s"' % name,
                  '',
                  value])
  # File parts additionally carry a filename and a guessed Content-Type.
  for (name, filename, value) in files:
    parts.extend(['--' + BOUNDARY,
                  'Content-Disposition: form-data; name="%s"; filename="%s"' %
                  (name, filename),
                  'Content-Type: %s' % GetContentType(filename),
                  '',
                  value])
  # Closing boundary, plus a trailing CRLF via the final empty element.
  parts.extend(['--' + BOUNDARY + '--', ''])
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, CRLF.join(parts)
def GetContentType(filename):
  """Return the MIME type guessed from filename, or a binary default."""
  guessed, _ = mimetypes.guess_type(filename)
  if guessed:
    return guessed
  return 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# (On POSIX the command list is executed directly, without a shell.)
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns the output from stdout and the return code.

  Args:
    command: Command to execute, as an argument list.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).

  Returns:
    Tuple (output, return code)
  """
  logging.info("Running %s", command)
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines)
  if print_output:
    # Echo stdout line by line while also capturing it.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  p.wait()
  # NOTE(review): stderr is only drained after stdout; a child that fills the
  # stderr pipe buffer first could block -- consider p.communicate() if that
  # ever bites.  TODO: confirm against the commands this script actually runs.
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False):
  """Run command and return its stdout; abort the program on failure.

  Exits via ErrorExit when the command returns a non-zero status, or when
  it produced no output and silent_ok is False.
  """
  output, exit_code = RunShellWithReturnCode(command, print_output,
                                             universal_newlines)
  if exit_code:
    ErrorExit("Got error status from %s:\n%s" % (command, output))
  if not (silent_ok or output):
    ErrorExit("No output from %s" % command)
  return output
class VersionControlSystem(object):
  """Abstract base class providing an interface to the VCS."""
  def __init__(self, options):
    """Constructor.

    Args:
      options: Command line options.
    """
    self.options = options
  def GenerateDiff(self, args):
    """Return the current diff as a string.

    Args:
      args: Extra arguments to pass to the diff command.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def CheckForUnknownFiles(self):
    """Show an "are you sure?" prompt if there are unknown files."""
    unknown_files = self.GetUnknownFiles()
    if unknown_files:
      print "The following files are not added to version control:"
      for line in unknown_files:
        print line
      prompt = "Are you sure to continue?(y/N) "
      answer = raw_input(prompt).strip()
      if answer != "y":
        ErrorExit("User aborted")
  def GetBaseFile(self, filename):
    """Get the content of the upstream version of a file.

    Returns:
      A tuple (base_content, new_content, is_binary, status)
        base_content: The contents of the base file.
        new_content: For text files, this is empty. For binary files, this is
          the contents of the new file, since the diff output won't contain
          information to reconstruct the current file.
        is_binary: True iff the file is binary.
        status: The status of the file.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def GetBaseFiles(self, diff):
    """Helper that calls GetBase file for each file in the patch.

    Returns:
      A dictionary that maps from filename to GetBaseFile's tuple. Filenames
      are retrieved based on lines that start with "Index:" or
      "Property changes on:".
    """
    files = {}
    for line in diff.splitlines(True):
      if line.startswith('Index:') or line.startswith('Property changes on:'):
        unused, filename = line.split(':', 1)
        # On Windows if a file has property changes its filename uses '\'
        # instead of '/'.
        filename = filename.strip().replace('\\', '/')
        files[filename] = self.GetBaseFile(filename)
    return files
  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                      files):
    """Uploads the base files (and if necessary, the current ones as well)."""
    def UploadFile(filename, file_id, content, is_binary, status, is_base):
      """Uploads a file to the server."""
      # Files over MAX_UPLOAD_SIZE are not sent; the server is told via the
      # "file_too_large" form field instead.
      file_too_large = False
      if is_base:
        type = "base"
      else:
        type = "current"
      if len(content) > MAX_UPLOAD_SIZE:
        print ("Not uploading the %s file for %s because it's too large." %
               (type, filename))
        file_too_large = True
        content = ""
      # md5 checksum of the (possibly emptied) content accompanies the upload;
      # presumably the server uses it to verify integrity -- TODO confirm.
      checksum = md5.new(content).hexdigest()
      if options.verbose > 0 and not file_too_large:
        print "Uploading %s file for %s" % (type, filename)
      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
      form_fields = [("filename", filename),
                     ("status", status),
                     ("checksum", checksum),
                     ("is_binary", str(is_binary)),
                     ("is_current", str(not is_base)),
                    ]
      if file_too_large:
        form_fields.append(("file_too_large", "1"))
      if options.email:
        form_fields.append(("user", options.email))
      ctype, body = EncodeMultipartFormData(form_fields,
                                            [("data", filename, content)])
      response_body = rpc_server.Send(url, body,
                                      content_type=ctype)
      if not response_body.startswith("OK"):
        StatusUpdate("  --> %s" % response_body)
        sys.exit(1)
    # Invert patch_list into {file_id_str: filename} -- the list comprehension
    # is used purely for its setdefault side effect.
    patches = dict()
    [patches.setdefault(v, k) for k, v in patch_list]
    for filename in patches.keys():
      base_content, new_content, is_binary, status = files[filename]
      file_id_str = patches.get(filename)
      # A "nobase" marker in the id means the server does not want the base
      # file; the numeric id follows the last underscore.
      if file_id_str.find("nobase") != -1:
        base_content = None
        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
      file_id = int(file_id_str)
      if base_content != None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content != None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)
  def IsImage(self, filename):
    """Returns true if the filename has an image extension."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False
    return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Subversion."""
  def __init__(self, options):
    super(SubversionVCS, self).__init__(options)
    if self.options.revision:
      # Accept "START" or "START:END"; rev_end stays None for a single rev.
      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
      if not match:
        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
      self.rev_start = match.group(1)
      self.rev_end = match.group(3)
    else:
      self.rev_start = self.rev_end = None
    # Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
    self.svnls_cache = {}
    # SVN base URL is required to fetch files deleted in an older revision.
    # Result is cached to not guess it over and over again in GetBaseFile().
    required = self.options.download_base or self.options.revision is not None
    self.svn_base = self._GuessBase(required)
  def GuessBase(self, required):
    """Wrapper for _GuessBase."""
    return self.svn_base
  def _GuessBase(self, required):
    """Returns the SVN base URL.

    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    info = RunShell(["svn", "info"])
    for line in info.splitlines():
      words = line.split()
      if len(words) == 2 and words[0] == "URL:":
        url = words[1]
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        username, netloc = urllib.splituser(netloc)
        if username:
          logging.info("Removed username from base URL")
        # Known hosting setups are special-cased so the base URL points at a
        # plain-HTTP file viewer the review server can fetch from.
        if netloc.endswith("svn.python.org"):
          if netloc == "svn.python.org":
            if path.startswith("/projects/"):
              path = path[9:]
          elif netloc != "pythondev@svn.python.org":
            ErrorExit("Unrecognized Python URL: %s" % url)
          base = "http://svn.python.org/view/*checkout*%s/" % path
          logging.info("Guessed Python base = %s", base)
        elif netloc.endswith("svn.collab.net"):
          if path.startswith("/repos/"):
            path = path[6:]
          base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
          logging.info("Guessed CollabNet base = %s", base)
        elif netloc.endswith(".googlecode.com"):
          path = path + "/"
          base = urlparse.urlunparse(("http", netloc, path, params,
                                      query, fragment))
          logging.info("Guessed Google Code base = %s", base)
        else:
          path = path + "/"
          base = urlparse.urlunparse((scheme, netloc, path, params,
                                      query, fragment))
          logging.info("Guessed base = %s", base)
        return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None
  def GenerateDiff(self, args):
    """Return the output of "svn diff", aborting if it contains no patches."""
    cmd = ["svn", "diff"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(args)
    data = RunShell(cmd)
    count = 0
    for line in data.splitlines():
      if line.startswith("Index:") or line.startswith("Property changes on:"):
        count += 1
        logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from svn diff")
    return data
  def _CollapseKeywords(self, content, keyword_str):
    """Collapses SVN keywords."""
    # svn cat translates keywords but svn diff doesn't. As a result of this
    # behavior patching.PatchChunks() fails with a chunk mismatch error.
    # This part was originally written by the Review Board development team
    # who had the same problem (http://reviews.review-board.org/r/276/).
    # Mapping of keywords to known aliases
    svn_keywords = {
      # Standard keywords
      'Date': ['Date', 'LastChangedDate'],
      'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
      'Author': ['Author', 'LastChangedBy'],
      'HeadURL': ['HeadURL', 'URL'],
      'Id': ['Id'],
      # Aliases
      'LastChangedDate': ['LastChangedDate', 'Date'],
      'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
      'LastChangedBy': ['LastChangedBy', 'Author'],
      'URL': ['URL', 'HeadURL'],
    }
    def repl(m):
      # Replace an expanded keyword with its collapsed "$Keyword$" form,
      # preserving width for the "$Keyword:: value $" fixed-length variant.
      if m.group(2):
        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
      return "$%s$" % m.group(1)
    keywords = [keyword
                for name in keyword_str.split(" ")
                for keyword in svn_keywords.get(name, [])]
    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
  def GetUnknownFiles(self):
    """Return unversioned entries; note these are whole "svn status" lines."""
    status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files
  def ReadFile(self, filename):
    """Returns the contents of a file."""
    # NOTE(review): the local name shadows the 'file' builtin (harmless here).
    file = open(filename, 'rb')
    result = ""
    try:
      result = file.read()
    finally:
      file.close()
    return result
  def GetStatus(self, filename):
    """Returns the status of a file."""
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n". See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      old_files, new_files = self.svnls_cache[dirname]
      # Presence in old vs. new listing determines Deleted/Modified/Added.
      if relfilename in old_files and relfilename not in new_files:
        status = "D   "
      elif relfilename in old_files and relfilename in new_files:
        status = "M   "
      else:
        status = "A   "
    return status
  def GetBaseFile(self, filename):
    """See VersionControlSystem.GetBaseFile."""
    status = self.GetStatus(filename)
    base_content = None
    new_content = None
    # If a file is copied its status will be "A  +", which signifies
    # "addition-with-history". See "svn st" for more information. We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = mimetype and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      get_base = False
      is_binary = mimetype and not mimetype.startswith("text/")
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True, silent_ok=True)
        else:
          base_content = ""
      else:
        get_base = True
      if get_base:
        # Binary content must not go through newline translation.
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content = RunShell(["svn", "cat", filename],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        if not is_binary:
          # Collapse expanded SVN keywords so the diff applies cleanly
          # (see _CollapseKeywords).
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          if keywords and not returncode:
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""
  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> hash of base file.
    self.base_hashes = {}
  def GenerateDiff(self, extra_args):
    """Return an svn-style diff of the working tree (or given revision)."""
    # This is more complicated than svn's GenerateDiff because we must convert
    # the diff output to include an svn-style "Index:" line as well as record
    # the hashes of the base files, so we can upload them along with our diff.
    if self.options.revision:
      extra_args = [self.options.revision] + extra_args
    # --full-index keeps complete blob hashes in the "index" lines; they are
    # stored in base_hashes and later passed to "git show" by GetBaseFile.
    gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args)
    svndiff = []
    filecount = 0
    filename = None
    for line in gitdiff.splitlines():
      match = re.match(r"diff --git a/(.*) b/.*$", line)
      if match:
        filecount += 1
        filename = match.group(1)
        svndiff.append("Index: %s\n" % filename)
      else:
        # The "index" line in a git diff looks like this (long hashes elided):
        #   index 82c0d44..b2cee3f 100755
        # We want to save the left hash, as that identifies the base file.
        match = re.match(r"index (\w+)\.\.", line)
        if match:
          self.base_hashes[filename] = match.group(1)
        svndiff.append(line + "\n")
    if not filecount:
      ErrorExit("No valid patches found in output from git diff")
    return "".join(svndiff)
  def GetUnknownFiles(self):
    """Return untracked files, per "git ls-files --others --exclude-standard"."""
    status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                      silent_ok=True)
    return status.splitlines()
  def GetBaseFile(self, filename):
    """See VersionControlSystem.GetBaseFile."""
    # NOTE(review): the local name shadows the 'hash' builtin (harmless here).
    hash = self.base_hashes[filename]
    base_content = None
    new_content = None
    is_binary = False
    if hash == "0" * 40:  # All-zero hash indicates no base file.
      status = "A"
      base_content = ""
    else:
      status = "M"
      base_content, returncode = RunShellWithReturnCode(["git", "show", hash])
      if returncode:
        ErrorExit("Got error status from 'git show %s'" % hash)
    return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""
  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # "hg parent -q" output is presumably "rev:hash"; keep the part after
      # the colon as the base revision.  TODO confirm for all hg versions.
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
assert filename.startswith(self.subdir), filename
return filename[len(self.subdir):].lstrip(r"\/")
def GenerateDiff(self, extra_args):
# If no file specified, restrict to the current subdir
extra_args = extra_args or ["."]
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
m = re.match("diff --git a/(\S+) b/(\S+)", line)
if m:
# Modify line to make it look like as it comes from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
logging.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
args = []
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def GetBaseFile(self, filename):
# "hg status" and "hg cat" both take a path relative to the current subdir
# rather than to the repo root, but "hg diff" has given us the full path
# to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
# "hg status -C" returns two lines for moved/copied files, one otherwise
out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
out = out.splitlines()
# HACK: strip error message about missing file/directory if it isn't in
# the working copy
if out[0].startswith('%s: ' % relpath):
out = out[1:]
if len(out) > 1:
# Moved/copied => considered as modified, use old filename to
# retrieve base contents
oldrelpath = out[1].strip()
status = "M"
else:
status, _ = out[0].split(' ', 1)
if status != "A":
base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
silent_ok=True)
is_binary = "\0" in base_content # Mercurial's heuristic
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or "\0" in new_content
if is_binary and base_content:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary or not self.IsImage(relpath):
new_content = None
return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.
  Args:
    data: A string containing the output of svn diff.
  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
    pertaining to filename.
  """
  patches = []
  current_name = None
  current_lines = []
  for line in data.splitlines(True):
    next_name = None
    if line.startswith('Index:'):
      next_name = line.split(':', 1)[1].strip()
    elif line.startswith('Property changes on:'):
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows.  Normalize them
      # so the same file does not show up twice.
      prop_name = line.split(':', 1)[1].strip().replace('\\', '/')
      if prop_name != current_name:
        # File has property changes but no modifications: start a new diff.
        next_name = prop_name
    if next_name:
      if current_name and current_lines:
        patches.append((current_name, ''.join(current_lines)))
      current_name = next_name
      current_lines = [line]
      continue
    current_lines.append(line)
  if current_name and current_lines:
    patches.append((current_name, ''.join(current_lines)))
  return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.
  Returns a list of [patch_key, filename] for each file.
  """
  rv = []
  for filename, text in SplitPatch(data):
    # Oversized patches are skipped entirely; the server rejects them anyway.
    if len(text) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + filename +
             " because the file is too large.")
      continue
    form_fields = [("filename", filename)]
    if not options.download_base:
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", text)]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    print ("Uploading patch for " + filename)
    response_body = rpc_server.Send(url, body, content_type=ctype)
    lines = response_body.splitlines()
    # A successful upload replies "OK" on the first line and the patch key
    # on the second.
    if not lines or lines[0] != "OK":
      StatusUpdate("  --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], filename])
  return rv
def GuessVCS(options):
  """Helper to guess the version control system.
  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns an instance of the appropriate class. Exit with an
  error if we can't figure it out.
  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  try:
    out, returncode = RunShellWithReturnCode(["hg", "root"])
    if returncode == 0:
      return MercurialVCS(options, out.strip())
  except OSError as e:
    # Was 'except OSError, (errno, message)': that tuple-unpacking form is
    # invalid in Python 3 and assumes OSError always carries exactly two
    # args; inspect the errno attribute instead (valid since Python 2.6).
    if e.errno != 2:  # ENOENT -- they don't have hg installed.
      raise
  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return SubversionVCS(options)
  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                              "--is-inside-work-tree"])
    if returncode == 0:
      return GitVCS(options)
  except OSError as e:
    if e.errno != 2:  # ENOENT -- they don't have git installed.
      raise
  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def RealMain(argv, data=None):
  """The real main function.
  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().
  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  # Force a predictable locale so that VCS output parses the same everywhere.
  os.environ['LC_ALL'] = 'C'
  options, args = parser.parse_args(argv[1:])
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)
  vcs = GuessVCS(options)
  if isinstance(vcs, SubversionVCS):
    # base field is only allowed for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    base = vcs.GuessBase(options.download_base)
  else:
    base = None
  if not base and options.download_base:
    # NOTE(review): this assignment is a no-op (download_base is already
    # true inside this branch); presumably kept for the log line -- confirm.
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  files = vcs.GetBaseFiles(data)
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options)
  form_fields = [("subject", message)]
  if base:
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  if options.reviewers:
    # Sanity check only: require user@domain with a single dot in the domain.
    # NOTE(review): this rejects addresses like user@a.b.com -- confirm.
    for reviewer in options.reviewers.split(','):
      if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      if "@" in cc and not cc.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % cc)
    form_fields.append(("cc", options.cc))
  description = options.description
  if options.description_file:
    if options.description:
      ErrorExit("Can't specify description and description_file")
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5.new(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  if len(data) > MAX_UPLOAD_SIZE:
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  if not options.download_base or not uploaded_diff_file:
    # Server reply: first line is a status message, second the patchset id,
    # remaining lines are "<patch key> <filename>" pairs.
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  StatusUpdate(msg)
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    sys.exit(0)
  # The status message ends with the issue URL; the id is its last component.
  issue = msg[msg.rfind("/")+1:]
  if not uploaded_diff_file:
    # The diff was too large to send inline; upload it one file at a time.
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result
  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
  if options.send_mail:
    rpc_server.Send("/" + issue + "/mail", payload="")
  return issue, patchset
def main():
  """Entry point: runs RealMain and maps Ctrl-C onto a clean exit."""
  try:
    RealMain(sys.argv)
  except KeyboardInterrupt:
    print  # Bare Python 2 print: emits a newline after the interrupted prompt.
    StatusUpdate("Interrupted.")
    sys.exit(1)
if __name__ == "__main__":
  main()
| 51,024 | 35.761527 | 80 | py |
spegg | spegg-master/tests/googletest/googlemock/scripts/upload_gmock.py | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gmock.py v0.1.0 -- uploads a Google Mock patch for review.
This simple wrapper passes all command line flags and
--cc=googlemock@googlegroups.com to upload.py.
USAGE: upload_gmock.py [options for upload.py]
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GMOCK_GROUP = 'googlemock@googlegroups.com'
def main():
  """Re-execs upload.py with googlemock@googlegroups.com added to --cc."""
  # upload.py is expected to live next to this wrapper script.
  script_dir = os.path.dirname(os.path.abspath(__file__))
  upload_py_path = os.path.join(script_dir, 'upload.py')
  # Rebuild the argument list, making sure the Google Mock discussion
  # group appears on the cc list exactly once.
  new_argv = [upload_py_path]
  saw_cc_flag = False
  for arg in sys.argv[1:]:
    if not arg.startswith(CC_FLAG):
      new_argv.append(arg)
      continue
    saw_cc_flag = True
    addresses = [addr for addr in arg[len(CC_FLAG):].split(',') if addr]
    if GMOCK_GROUP not in addresses:
      addresses.append(GMOCK_GROUP)
    new_argv.append(CC_FLAG + ','.join(addresses))
  if not saw_cc_flag:
    new_argv.append(CC_FLAG + GMOCK_GROUP)
  # Replace the current process with upload.py.
  os.execv(upload_py_path, new_argv)
if __name__ == '__main__':
  main()
| 2,833 | 34.873418 | 72 | py |
spegg | spegg-master/tests/googletest/googlemock/scripts/gmock_doctor.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Converts compiler's errors in code using Google Mock to plain English."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import re
import sys
_VERSION = '1.0.3'  # Reported by main() at startup.
_EMAIL = 'googlemock@googlegroups.com'  # Where users are told to send reports.
_COMMON_GMOCK_SYMBOLS = [
# Matchers
'_',
'A',
'AddressSatisfies',
'AllOf',
'An',
'AnyOf',
'ContainerEq',
'Contains',
'ContainsRegex',
'DoubleEq',
'ElementsAre',
'ElementsAreArray',
'EndsWith',
'Eq',
'Field',
'FloatEq',
'Ge',
'Gt',
'HasSubstr',
'IsInitializedProto',
'Le',
'Lt',
'MatcherCast',
'Matches',
'MatchesRegex',
'NanSensitiveDoubleEq',
'NanSensitiveFloatEq',
'Ne',
'Not',
'NotNull',
'Pointee',
'Property',
'Ref',
'ResultOf',
'SafeMatcherCast',
'StartsWith',
'StrCaseEq',
'StrCaseNe',
'StrEq',
'StrNe',
'Truly',
'TypedEq',
'Value',
# Actions
'Assign',
'ByRef',
'DeleteArg',
'DoAll',
'DoDefault',
'IgnoreResult',
'Invoke',
'InvokeArgument',
'InvokeWithoutArgs',
'Return',
'ReturnNew',
'ReturnNull',
'ReturnRef',
'SaveArg',
'SetArgReferee',
'SetArgPointee',
'SetArgumentPointee',
'SetArrayArgument',
'SetErrnoAndReturn',
'Throw',
'WithArg',
'WithArgs',
'WithoutArgs',
# Cardinalities
'AnyNumber',
'AtLeast',
'AtMost',
'Between',
'Exactly',
# Sequences
'InSequence',
'Sequence',
# Misc
'DefaultValue',
'Mock',
]
# Regex for matching source file path and line number in the compiler's errors.
# GCC may omit the column number, so it is optional here.
_GCC_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):(\d+:)?\s+'
# Clang always reports a column; capture it as a named group.
_CLANG_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):(?P<column>\d+):\s+'
# Like the above, but only matches locations whose basename does not start
# with "gmock-", i.e. lines pointing at user code rather than gmock headers.
_CLANG_NON_GMOCK_FILE_LINE_RE = (
    r'(?P<file>.*[/\\^](?!gmock-)[^/\\]+):(?P<line>\d+):(?P<column>\d+):\s+')
def _FindAllMatches(regex, s):
"""Generates all matches of regex in string s."""
r = re.compile(regex)
return r.finditer(s)
def _GenericDiagnoser(short_name, long_name, diagnoses, msg):
  """Diagnoses the given disease by pattern matching.
  Can provide different diagnoses for different patterns.
  Args:
    short_name: Short name of the disease.
    long_name:  Long name of the disease.
    diagnoses:  A list of pairs (regex, pattern for formatting the diagnosis
                for matching regex).
    msg:        Compiler's error messages.
  Yields:
    Tuples of the form
      (short name of disease, long name of disease, diagnosis).
  """
  for regex, template in diagnoses:
    if not re.search(regex, msg):
      continue
    # Prefix every diagnosis with the file/line the regex captured.
    located_template = '%(file)s:%(line)s:' + template
    for match in _FindAllMatches(regex, msg):
      yield (short_name, long_name, located_template % match.groupdict())
def _NeedToReturnReferenceDiagnoser(msg):
  """Diagnoses the NRR disease, given the error messages by the compiler."""
  # One pattern per compiler dialect; the named group 'type' (clang only)
  # fills the %(type)s placeholder in the diagnosis template.
  gcc_regex = (r'In member function \'testing::internal::ReturnAction<R>.*\n'
               + _GCC_FILE_LINE_RE + r'instantiated from here\n'
               r'.*gmock-actions\.h.*error: creating array with negative size')
  clang_regex = (r'error:.*array.*negative.*\r?\n'
                 r'(.*\n)*?' +
                 _CLANG_NON_GMOCK_FILE_LINE_RE +
                 r'note: in instantiation of function template specialization '
                 r'\'testing::internal::ReturnAction<(?P<type>.*)>'
                 r'::operator Action<.*>\' requested here')
  clang11_re = (r'use_ReturnRef_instead_of_Return_to_return_a_reference.*'
                r'(.*\n)*?' + _CLANG_NON_GMOCK_FILE_LINE_RE)
  diagnosis = """
You are using a Return() action in a function that returns a reference to
%(type)s. Please use ReturnRef() instead."""
  return _GenericDiagnoser('NRR', 'Need to Return Reference',
                           [(clang_regex, diagnosis),
                            (clang11_re, diagnosis % {'type': 'a type'}),
                            (gcc_regex, diagnosis % {'type': 'a type'})],
                           msg)
def _NeedToReturnSomethingDiagnoser(msg):
  """Diagnoses the NRS disease, given the error messages by the compiler."""
  # clang reports the expected return type (named group 'return_type');
  # gcc does not, so a generic placeholder is substituted for it.
  gcc_regex = (_GCC_FILE_LINE_RE + r'(instantiated from here\n.'
               r'*gmock.*actions\.h.*error: void value not ignored)'
               r'|(error: control reaches end of non-void function)')
  clang_regex1 = (_CLANG_FILE_LINE_RE +
                  r'error: cannot initialize return object '
                  r'of type \'Result\' \(aka \'(?P<return_type>.*)\'\) '
                  r'with an rvalue of type \'void\'')
  clang_regex2 = (_CLANG_FILE_LINE_RE +
                  r'error: cannot initialize return object '
                  r'of type \'(?P<return_type>.*)\' '
                  r'with an rvalue of type \'void\'')
  diagnosis = """
You are using an action that returns void, but it needs to return
%(return_type)s. Please tell it *what* to return. Perhaps you can use
the pattern DoAll(some_action, Return(some_value))?"""
  return _GenericDiagnoser(
      'NRS',
      'Need to Return Something',
      [(gcc_regex, diagnosis % {'return_type': '*something*'}),
       (clang_regex1, diagnosis),
       (clang_regex2, diagnosis)],
      msg)
def _NeedToReturnNothingDiagnoser(msg):
  """Diagnoses the NRN disease, given the error messages by the compiler."""
  # Two clang variants: one for a plain Return() action, one for a Return()
  # inside DoAll() (DoBothAction); both capture the offending return type.
  gcc_regex = (_GCC_FILE_LINE_RE + r'instantiated from here\n'
               r'.*gmock-actions\.h.*error: instantiation of '
               r'\'testing::internal::ReturnAction<R>::Impl<F>::value_\' '
               r'as type \'void\'')
  clang_regex1 = (r'error: field has incomplete type '
                  r'\'Result\' \(aka \'void\'\)(\r)?\n'
                  r'(.*\n)*?' +
                  _CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
                  r'of function template specialization '
                  r'\'testing::internal::ReturnAction<(?P<return_type>.*)>'
                  r'::operator Action<void \(.*\)>\' requested here')
  clang_regex2 = (r'error: field has incomplete type '
                  r'\'Result\' \(aka \'void\'\)(\r)?\n'
                  r'(.*\n)*?' +
                  _CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
                  r'of function template specialization '
                  r'\'testing::internal::DoBothAction<.*>'
                  r'::operator Action<(?P<return_type>.*) \(.*\)>\' '
                  r'requested here')
  diagnosis = """
You are using an action that returns %(return_type)s, but it needs to return
void. Please use a void-returning action instead.
All actions but the last in DoAll(...) must return void. Perhaps you need
to re-arrange the order of actions in a DoAll(), if you are using one?"""
  return _GenericDiagnoser(
      'NRN',
      'Need to Return Nothing',
      [(gcc_regex, diagnosis % {'return_type': '*something*'}),
       (clang_regex1, diagnosis),
       (clang_regex2, diagnosis)],
      msg)
def _IncompleteByReferenceArgumentDiagnoser(msg):
  """Diagnoses the IBRA disease, given the error messages by the compiler."""
  # Both patterns key off gtest's value printer failing sizeof() on an
  # incomplete type; the named group 'type' fills the diagnosis template.
  gcc_regex = (_GCC_FILE_LINE_RE + r'instantiated from here\n'
               r'.*gtest-printers\.h.*error: invalid application of '
               r'\'sizeof\' to incomplete type \'(?P<type>.*)\'')
  clang_regex = (r'.*gtest-printers\.h.*error: invalid application of '
                 r'\'sizeof\' to an incomplete type '
                 r'\'(?P<type>.*)( const)?\'\r?\n'
                 r'(.*\n)*?' +
                 _CLANG_NON_GMOCK_FILE_LINE_RE +
                 r'note: in instantiation of member function '
                 r'\'testing::internal2::TypeWithoutFormatter<.*>::'
                 r'PrintValue\' requested here')
  diagnosis = """
In order to mock this function, Google Mock needs to see the definition
of type "%(type)s" - declaration alone is not enough. Either #include
the header that defines it, or change the argument to be passed
by pointer."""
  return _GenericDiagnoser('IBRA', 'Incomplete By-Reference Argument Type',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
def _OverloadedFunctionMatcherDiagnoser(msg):
  """Diagnoses the OFM disease, given the error messages by the compiler."""
  # Triggered when an overloaded function is passed to Truly() without a
  # disambiguating cast; no named groups beyond file/line are needed.
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for '
               r'call to \'Truly\(<unresolved overloaded function type>\)')
  clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching function for '
                 r'call to \'Truly')
  diagnosis = """
The argument you gave to Truly() is an overloaded function. Please tell
your compiler which overloaded version you want to use.
For example, if you want to use the version whose signature is
  bool Foo(int n);
you should write
  Truly(static_cast<bool (*)(int n)>(Foo))"""
  return _GenericDiagnoser('OFM', 'Overloaded Function Matcher',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
def _OverloadedFunctionActionDiagnoser(msg):
  """Diagnoses the OFA disease, given the error messages by the compiler."""
  # Same root cause as OFM but for Invoke() with a free function argument.
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for call to '
               r'\'Invoke\(<unresolved overloaded function type>')
  clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching '
                 r'function for call to \'Invoke\'\r?\n'
                 r'(.*\n)*?'
                 r'.*\bgmock-generated-actions\.h:\d+:\d+:\s+'
                 r'note: candidate template ignored:\s+'
                 r'couldn\'t infer template argument \'FunctionImpl\'')
  diagnosis = """
Function you are passing to Invoke is overloaded. Please tell your compiler
which overloaded version you want to use.
For example, if you want to use the version whose signature is
  bool MyFunction(int n, double x);
you should write something like
  Invoke(static_cast<bool (*)(int n, double x)>(MyFunction))"""
  return _GenericDiagnoser('OFA', 'Overloaded Function Action',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
def _OverloadedMethodActionDiagnoser(msg):
  """Diagnoses the OMA disease, given the error messages by the compiler."""
  # Like OFA, but for the two-argument Invoke(object, &Class::Method) form.
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for '
               r'call to \'Invoke\(.+, <unresolved overloaded function '
               r'type>\)')
  clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching function '
                 r'for call to \'Invoke\'\r?\n'
                 r'(.*\n)*?'
                 r'.*\bgmock-generated-actions\.h:\d+:\d+: '
                 r'note: candidate function template not viable: '
                 r'requires .*, but 2 (arguments )?were provided')
  diagnosis = """
The second argument you gave to Invoke() is an overloaded method. Please
tell your compiler which overloaded version you want to use.
For example, if you want to use the version whose signature is
  class Foo {
    ...
    bool Bar(int n, double x);
  };
you should write something like
  Invoke(foo, static_cast<bool (Foo::*)(int n, double x)>(&Foo::Bar))"""
  return _GenericDiagnoser('OMA', 'Overloaded Method Action',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
def _MockObjectPointerDiagnoser(msg):
  """Diagnoses the MOP disease, given the error messages by the compiler."""
  # gcc captures the mock object, method and class names; clang only reports
  # the class name, so placeholders are substituted for the other two below.
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: request for member '
               r'\'gmock_(?P<method>.+)\' in \'(?P<mock_object>.+)\', '
               r'which is of non-class type \'(.*::)*(?P<class_name>.+)\*\'')
  clang_regex = (_CLANG_FILE_LINE_RE + r'error: member reference type '
                 r'\'(?P<class_name>.*?) *\' is a pointer; '
                 r'(did you mean|maybe you meant) to use \'->\'\?')
  diagnosis = """
The first argument to ON_CALL() and EXPECT_CALL() must be a mock *object*,
not a *pointer* to it. Please write '*(%(mock_object)s)' instead of
'%(mock_object)s' as your first argument.
For example, given the mock class:
  class %(class_name)s : public ... {
    ...
    MOCK_METHOD0(%(method)s, ...);
  };
and the following mock instance:
  %(class_name)s* mock_ptr = ...
you should use the EXPECT_CALL like this:
  EXPECT_CALL(*mock_ptr, %(method)s(...));"""
  return _GenericDiagnoser(
      'MOP',
      'Mock Object Pointer',
      [(gcc_regex, diagnosis),
       (clang_regex, diagnosis % {'mock_object': 'mock_object',
                                  'method': 'method',
                                  'class_name': '%(class_name)s'})],
      msg)
def _NeedToUseSymbolDiagnoser(msg):
  """Diagnoses the NUS disease, given the error messages by the compiler."""
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: \'(?P<symbol>.+)\' '
               r'(was not declared in this scope|has not been declared)')
  clang_regex = (_CLANG_FILE_LINE_RE +
                 r'error: (use of undeclared identifier|unknown type name|'
                 r'no template named) \'(?P<symbol>[^\']+)\'')
  diagnosis = """
'%(symbol)s' is defined by Google Mock in the testing namespace.
Did you forget to write
  using testing::%(symbol)s;
?"""
  # Unlike most diagnosers this one cannot delegate to _GenericDiagnoser:
  # it must additionally filter the captured symbol against the known
  # Google Mock names, so unrelated undeclared identifiers are ignored.
  for m in (list(_FindAllMatches(gcc_regex, msg)) +
            list(_FindAllMatches(clang_regex, msg))):
    symbol = m.groupdict()['symbol']
    if symbol in _COMMON_GMOCK_SYMBOLS:
      yield ('NUS', 'Need to Use Symbol', diagnosis % m.groupdict())
def _NeedToUseReturnNullDiagnoser(msg):
  """Diagnoses the NRNULL disease, given the error messages by the compiler."""
  # Bug fix: the gcc pattern used '(:?long )?' -- an optional group matching
  # an optional colon followed by 'long ' -- which is a typo for the
  # non-capturing group '(?:long )?' that makes the 'long ' prefix optional.
  gcc_regex = ('instantiated from \'testing::internal::ReturnAction<R>'
               '::operator testing::Action<Func>\(\) const.*\n' +
               _GCC_FILE_LINE_RE + r'instantiated from here\n'
               r'.*error: no matching function for call to \'ImplicitCast_\('
               r'(?:long )?int&\)')
  clang_regex = (r'\bgmock-actions.h:.* error: no matching function for '
                 r'call to \'ImplicitCast_\'\r?\n'
                 r'(.*\n)*?' +
                 _CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
                 r'of function template specialization '
                 r'\'testing::internal::ReturnAction<(int|long)>::operator '
                 r'Action<(?P<type>.*)\(\)>\' requested here')
  diagnosis = """
You are probably calling Return(NULL) and the compiler isn't sure how to turn
NULL into %(type)s. Use ReturnNull() instead.
Note: the line number may be off; please fix all instances of Return(NULL)."""
  return _GenericDiagnoser(
      'NRNULL', 'Need to use ReturnNull',
      [(clang_regex, diagnosis),
       (gcc_regex, diagnosis % {'type': 'the right type'})],
      msg)
def _TypeInTemplatedBaseDiagnoser(msg):
  """Diagnoses the TTB disease, given the error messages by the compiler."""
  # This version works when the type is used as the mock function's return
  # type.
  gcc_4_3_1_regex_type_in_retval = (
      r'In member function \'int .*\n' + _GCC_FILE_LINE_RE +
      r'error: a function call cannot appear in a constant-expression')
  gcc_4_4_0_regex_type_in_retval = (
      r'error: a function call cannot appear in a constant-expression'
      + _GCC_FILE_LINE_RE + r'error: template argument 1 is invalid\n')
  # This version works when the type is used as the mock function's sole
  # parameter type.
  gcc_regex_type_of_sole_param = (
      _GCC_FILE_LINE_RE +
      r'error: \'(?P<type>.+)\' was not declared in this scope\n'
      r'.*error: template argument 1 is invalid\n')
  # This version works when the type is used as a parameter of a mock
  # function that has multiple parameters.
  gcc_regex_type_of_a_param = (
      r'error: expected `;\' before \'::\' token\n'
      + _GCC_FILE_LINE_RE +
      r'error: \'(?P<type>.+)\' was not declared in this scope\n'
      r'.*error: template argument 1 is invalid\n'
      r'.*error: \'.+\' was not declared in this scope')
  clang_regex_type_of_retval_or_sole_param = (
      _CLANG_FILE_LINE_RE +
      r'error: use of undeclared identifier \'(?P<type>.*)\'\n'
      r'(.*\n)*?'
      r'(?P=file):(?P=line):\d+: error: '
      r'non-friend class member \'Result\' cannot have a qualified name'
      )
  clang_regex_type_of_a_param = (
      _CLANG_FILE_LINE_RE +
      r'error: C\+\+ requires a type specifier for all declarations\n'
      r'(.*\n)*?'
      r'(?P=file):(?P=line):(?P=column): error: '
      r'C\+\+ requires a type specifier for all declarations'
      )
  clang_regex_unknown_type = (
      _CLANG_FILE_LINE_RE +
      r'error: unknown type name \'(?P<type>[^\']+)\''
      )
  diagnosis = """
In a mock class template, types or typedefs defined in the base class
template are *not* automatically visible. This is how C++ works. Before
you can use a type or typedef named %(type)s defined in base class Base<T>, you
need to make it visible. One way to do it is:
  typedef typename Base<T>::%(type)s %(type)s;"""
  # Most of the patterns can be handled generically; where the compiler
  # does not capture the type name, a 'Foo' placeholder is substituted.
  for diag in _GenericDiagnoser(
      'TTB', 'Type in Template Base',
      [(gcc_4_3_1_regex_type_in_retval, diagnosis % {'type': 'Foo'}),
       (gcc_4_4_0_regex_type_in_retval, diagnosis % {'type': 'Foo'}),
       (gcc_regex_type_of_sole_param, diagnosis),
       (gcc_regex_type_of_a_param, diagnosis),
       (clang_regex_type_of_retval_or_sole_param, diagnosis),
       (clang_regex_type_of_a_param, diagnosis % {'type': 'Foo'})],
      msg):
    yield diag
  # Avoid overlap with the NUS pattern.
  for m in _FindAllMatches(clang_regex_unknown_type, msg):
    type_ = m.groupdict()['type']
    if type_ not in _COMMON_GMOCK_SYMBOLS:
      yield ('TTB', 'Type in Template Base', diagnosis % m.groupdict())
def _WrongMockMethodMacroDiagnoser(msg):
  """Diagnoses the WMM disease, given the error messages by the compiler."""
  gcc_regex = (_GCC_FILE_LINE_RE +
               r'.*this_method_does_not_take_(?P<wrong_args>\d+)_argument.*\n'
               r'.*\n'
               r'.*candidates are.*FunctionMocker<[^>]+A(?P<args>\d+)\)>')
  # Bug fix: the line-ending part of this pattern read '.*r?\n' (a literal,
  # optional 'r'); every other clang pattern in this file matches an optional
  # carriage return with '\r?\n', so do the same here.
  clang_regex = (_CLANG_NON_GMOCK_FILE_LINE_RE +
                 r'error:.*array.*negative.*\r?\n'
                 r'(.*\n)*?'
                 r'(?P=file):(?P=line):(?P=column): error: too few arguments '
                 r'to function call, expected (?P<args>\d+), '
                 r'have (?P<wrong_args>\d+)')
  clang11_re = (_CLANG_NON_GMOCK_FILE_LINE_RE +
                r'.*this_method_does_not_take_'
                r'(?P<wrong_args>\d+)_argument.*')
  diagnosis = """
You are using MOCK_METHOD%(wrong_args)s to define a mock method that has
%(args)s arguments. Use MOCK_METHOD%(args)s (or MOCK_CONST_METHOD%(args)s,
MOCK_METHOD%(args)s_T, MOCK_CONST_METHOD%(args)s_T as appropriate) instead."""
  return _GenericDiagnoser('WMM', 'Wrong MOCK_METHODn Macro',
                           [(gcc_regex, diagnosis),
                            (clang11_re, diagnosis % {'wrong_args': 'm',
                                                      'args': 'n'}),
                            (clang_regex, diagnosis)],
                           msg)
def _WrongParenPositionDiagnoser(msg):
  """Diagnoses the WPP disease, given the error messages by the compiler."""
  # Triggered when a clause like .WillOnce() is written inside the matcher
  # parentheses; the named group 'method' captures the misplaced clause.
  gcc_regex = (_GCC_FILE_LINE_RE +
               r'error:.*testing::internal::MockSpec<.* has no member named \''
               r'(?P<method>\w+)\'')
  clang_regex = (_CLANG_NON_GMOCK_FILE_LINE_RE +
                 r'error: no member named \'(?P<method>\w+)\' in '
                 r'\'testing::internal::MockSpec<.*>\'')
  diagnosis = """
The closing parenthesis of ON_CALL or EXPECT_CALL should be *before*
".%(method)s". For example, you should write:
  EXPECT_CALL(my_mock, Foo(_)).%(method)s(...);
instead of:
  EXPECT_CALL(my_mock, Foo(_).%(method)s(...));"""
  return _GenericDiagnoser('WPP', 'Wrong Parenthesis Position',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
# All known diagnosers, in the order Diagnose() tries them.
_DIAGNOSERS = [
    _IncompleteByReferenceArgumentDiagnoser,
    _MockObjectPointerDiagnoser,
    _NeedToReturnNothingDiagnoser,
    _NeedToReturnReferenceDiagnoser,
    _NeedToReturnSomethingDiagnoser,
    _NeedToUseReturnNullDiagnoser,
    _NeedToUseSymbolDiagnoser,
    _OverloadedFunctionActionDiagnoser,
    _OverloadedFunctionMatcherDiagnoser,
    _OverloadedMethodActionDiagnoser,
    _TypeInTemplatedBaseDiagnoser,
    _WrongMockMethodMacroDiagnoser,
    _WrongParenPositionDiagnoser,
    ]
def Diagnose(msg):
  """Generates all possible diagnoses given the compiler error message.

  Args:
    msg: str, raw compiler output to analyze.

  Returns:
    list of str: unique formatted diagnoses, in first-seen order.
  """
  msg = re.sub(r'\x1b\[[^m]*m', '', msg)  # Strips all color formatting.
  # Assuming the string is using the UTF-8 encoding, replaces the left and
  # the right single quote characters with apostrophes.
  # NOTE(review): these are byte escapes from the Python 2 era; on Python 3
  # str input they match the literal chars '\xe2\x80\x98', not U+2018 --
  # confirm intent before changing.
  msg = re.sub(r'(\xe2\x80\x98|\xe2\x80\x99)', "'", msg)
  diagnoses = []
  for diagnoser in _DIAGNOSERS:
    for diag in diagnoser(msg):
      diagnosis = '[%s - %s]\n%s' % diag
      # Deduplicate while preserving discovery order.  (Idiom fix: was
      # `if not diagnosis in diagnoses`.)
      if diagnosis not in diagnoses:
        diagnoses.append(diagnosis)
  return diagnoses
def main():
  """Reads compiler output from stdin and prints any gMock diagnoses found."""
  print ('Google Mock Doctor v%s - '
         'diagnoses problems in code using Google Mock.' % _VERSION)

  if sys.stdin.isatty():
    print ('Please copy and paste the compiler errors here. Press c-D when '
           'you are done:')
  else:
    print ('Waiting for compiler errors on stdin . . .')

  msg = sys.stdin.read().strip()
  diagnoses = Diagnose(msg)
  count = len(diagnoses)
  if count:
    print ('------------------------------------------------------------')
    print ('Your code appears to have the following',)
    if count > 1:
      print ('%s diseases:' % (count,))
    else:
      print ('disease:')
    # Only number the diagnoses when there is more than one of them.
    for index, diagnosis in enumerate(diagnoses, 1):
      if count > 1:
        print ('\n#%s:' % (index,))
      print (diagnosis)
    print ("""
How did I do? If you think I'm wrong or unhelpful, please send your
source code and the compiler's error messages to %s.
Then you can be helped and I can get smarter -- I promise I won't be upset!""" %
           _EMAIL)
  else:
    print ("""
Your compiler complained:
8<------------------------------------------------------------
%s
------------------------------------------------------------>8
Uh-oh, I'm not smart enough to figure out what the problem is. :-(
However...
If you send your source code and the compiler's error messages to
%s, you can be helped and I can get smarter --
win-win for us!""" % (msg, _EMAIL))
# Allow use both as a library (via Diagnose) and as a command-line tool.
if __name__ == '__main__':
  main()
| 24,131 | 36.647426 | 80 | py |
spegg | spegg-master/tests/googletest/googlemock/scripts/generator/gmock_gen.py | #!/usr/bin/env python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Driver for starting up Google Mock class generator."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import sys
if __name__ == '__main__':
  # Add the directory of this script to the path so we can import gmock_class.
  sys.path.append(os.path.dirname(__file__))

  from cpp import gmock_class
  # Fix the docstring in case they require the usage, so that help/error
  # output names this wrapper script instead of gmock_class.py.
  gmock_class.__doc__ = gmock_class.__doc__.replace('gmock_class.py', __file__)
  gmock_class.main()
| 1,091 | 33.125 | 79 | py |
spegg | spegg-master/tests/googletest/googlemock/scripts/generator/cpp/gmock_class.py | #!/usr/bin/env python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate Google Mock classes from base classes.
This program will read in a C++ source file and output the Google Mock
classes for the specified classes. If no class is specified, all
classes in the source file are emitted.
Usage:
gmock_class.py header-file.h [ClassName]...
Output is sent to stdout.
"""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import re
import sys
from cpp import ast
from cpp import utils
# Preserve compatibility with Python 2.3.
try:
  _dummy = set
except NameError:
  # Pre-2.4 Python has no builtin set; fall back to the sets module.
  import sets
  set = sets.Set

_VERSION = (1, 0, 1)  # The version of this script.
# How many spaces to indent. Can set me with the INDENT environment variable.
_INDENT = 2
def _GenerateMethods(output_lines, source, class_node):
  """Appends MOCK_METHOD* lines for every virtual method of class_node.

  Args:
    output_lines: list of str, the mock class body being built; mutated
        in place.
    source: str, the full original source text (used to recover the exact
        parameter text of each method).
    class_node: ast.Class whose virtual, non-ctor/dtor methods are mocked.
  """
  function_type = (ast.FUNCTION_VIRTUAL | ast.FUNCTION_PURE_VIRTUAL |
                   ast.FUNCTION_OVERRIDE)
  ctor_or_dtor = ast.FUNCTION_CTOR | ast.FUNCTION_DTOR
  indent = ' ' * _INDENT

  for node in class_node.body:
    # We only care about virtual functions.
    if (isinstance(node, ast.Function) and
        node.modifiers & function_type and
        not node.modifiers & ctor_or_dtor):
      # Pick out all the elements we need from the original function.
      const = ''
      if node.modifiers & ast.FUNCTION_CONST:
        const = 'CONST_'
      return_type = 'void'
      if node.return_type:
        # Add modifiers like 'const'.
        modifiers = ''
        if node.return_type.modifiers:
          modifiers = ' '.join(node.return_type.modifiers) + ' '
        return_type = modifiers + node.return_type.name
        template_args = [arg.name for arg in node.return_type.templated_types]
        if template_args:
          return_type += '<' + ', '.join(template_args) + '>'
          if len(template_args) > 1:
            for line in [
                '// The following line won\'t really compile, as the return',
                '// type has multiple template arguments. To fix it, use a',
                '// typedef for the return type.']:
              output_lines.append(indent + line)
        if node.return_type.pointer:
          return_type += '*'
        if node.return_type.reference:
          return_type += '&'
      num_parameters = len(node.parameters)
      if len(node.parameters) == 1:
        first_param = node.parameters[0]
        if source[first_param.start:first_param.end].strip() == 'void':
          # We must treat T(void) as a function with no parameters.
          num_parameters = 0
      tmpl = ''
      if class_node.templated_types:
        tmpl = '_T'
      mock_method_macro = 'MOCK_%sMETHOD%d%s' % (const, num_parameters, tmpl)
      args = ''
      if node.parameters:
        # Due to the parser limitations, it is impossible to keep comments
        # while stripping the default parameters. When defaults are
        # present, we choose to strip them and comments (and produce
        # compilable code).
        # TODO(nnorwitz@google.com): Investigate whether it is possible to
        # preserve parameter name when reconstructing parameter text from
        # the AST.
        if len([param for param in node.parameters if param.default]) > 0:
          args = ', '.join(param.type.name for param in node.parameters)
        else:
          # Get the full text of the parameters from the start
          # of the first parameter to the end of the last parameter.
          start = node.parameters[0].start
          end = node.parameters[-1].end
          # Remove // comments.
          args_strings = re.sub(r'//.*', '', source[start:end])
          # Condense multiple spaces and eliminate newlines putting the
          # parameters together on a single line. Ensure there is a
          # space in an argument which is split by a newline without
          # intervening whitespace, e.g.: int\nBar
          args = re.sub(' +', ' ', args_strings.replace('\n', ' '))
      # Create the mock method definition.
      output_lines.extend(['%s%s(%s,' % (indent, mock_method_macro, node.name),
                           '%s%s(%s));' % (indent*3, return_type, args)])
def _GenerateMocks(filename, source, ast_list, desired_class_names):
  """Builds the full text of a mock class for each selected class.

  Args:
    filename: str, the input file name (only used in error messages).
    source: str, the full original source text.
    ast_list: iterable of AST nodes parsed from source.
    desired_class_names: set of str naming the classes to mock, or None
        to mock every class found.

  Returns:
    list of str: the generated code, one line per element.  Writes a
    warning to stderr for requested classes that were not found.
  """
  processed_class_names = set()
  lines = []
  for node in ast_list:
    if (isinstance(node, ast.Class) and node.body and
        # desired_class_names being None means that all classes are selected.
        (not desired_class_names or node.name in desired_class_names)):
      class_name = node.name
      parent_name = class_name
      processed_class_names.add(class_name)
      class_node = node
      # Add namespace before the class.
      if class_node.namespace:
        lines.extend(['namespace %s {' % n for n in class_node.namespace])  # }
        lines.append('')

      # Add template args for templated classes.
      if class_node.templated_types:
        # TODO(paulchang): The AST doesn't preserve template argument order,
        # so we have to make up names here.
        # TODO(paulchang): Handle non-type template arguments (e.g.
        # template<typename T, int N>).
        template_arg_count = len(class_node.templated_types.keys())
        template_args = ['T%d' % n for n in range(template_arg_count)]
        template_decls = ['typename ' + arg for arg in template_args]
        lines.append('template <' + ', '.join(template_decls) + '>')
        parent_name += '<' + ', '.join(template_args) + '>'

      # Add the class prolog.
      lines.append('class Mock%s : public %s {'  # }
                   % (class_name, parent_name))
      lines.append('%spublic:' % (' ' * (_INDENT // 2)))

      # Add all the methods.
      _GenerateMethods(lines, source, class_node)

      # Close the class.
      if lines:
        # If there are no virtual methods, no need for a public label.
        if len(lines) == 2:
          del lines[-1]

        # Only close the class if there really is a class.
        lines.append('};')
        lines.append('')  # Add an extra newline.

      # Close the namespace.
      if class_node.namespace:
        for i in range(len(class_node.namespace)-1, -1, -1):
          lines.append('} // namespace %s' % class_node.namespace[i])
        lines.append('')  # Add an extra newline.

  if desired_class_names:
    missing_class_name_list = list(desired_class_names - processed_class_names)
    if missing_class_name_list:
      missing_class_name_list.sort()
      sys.stderr.write('Class(es) not found in %s: %s\n' %
                       (filename, ', '.join(missing_class_name_list)))
  elif not processed_class_names:
    sys.stderr.write('No class found in %s\n' % filename)

  return lines
def main(argv=sys.argv):
  """Command-line entry point: prints mock classes for a C++ header.

  Args:
    argv: list of str; argv[1] is the header file and any further
        arguments name the classes to mock (default: all classes).

  Returns:
    1 on usage or read errors, otherwise None; may call sys.exit(1) on
    parse failure.
  """
  if len(argv) < 2:
    sys.stderr.write('Google Mock Class Generator v%s\n\n' %
                     '.'.join(map(str, _VERSION)))
    sys.stderr.write(__doc__)
    return 1

  global _INDENT
  try:
    _INDENT = int(os.environ['INDENT'])
  except KeyError:
    pass  # INDENT not set; keep the default.
  except ValueError:
    # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and real bugs; only a non-integer INDENT value belongs here.
    sys.stderr.write('Unable to use indent of %s\n' % os.environ.get('INDENT'))

  filename = argv[1]
  desired_class_names = None  # None means all classes in the source file.
  if len(argv) >= 3:
    desired_class_names = set(argv[2:])
  source = utils.ReadFile(filename)
  if source is None:
    return 1

  builder = ast.BuilderFromSource(source, filename)
  try:
    entire_ast = filter(None, builder.Generate())
  except KeyboardInterrupt:
    return
  except Exception:
    # Fix: was a bare `except:`.  An error message was already printed
    # since we couldn't parse.
    sys.exit(1)
  else:
    lines = _GenerateMocks(filename, source, entire_ast, desired_class_names)
    sys.stdout.write('\n'.join(lines))
# Run only when executed directly; importing this module has no side effects.
if __name__ == '__main__':
  main(sys.argv)
| 8,293 | 35.377193 | 79 | py |
spegg | spegg-master/tests/googletest/googlemock/scripts/generator/cpp/keywords.py | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""C++ keywords and helper utilities for determining keywords."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
# Locate the builtins module under either major Python version so we can
# probe it for a native set type below.
try:
  # Python 3.x
  import builtins
except ImportError:
  # Python 2.x
  import __builtin__ as builtins

if not hasattr(builtins, 'set'):
  # Nominal support for Python 2.3.
  from sets import Set as set
# C++ keywords grouped by category.  Each is a set so the membership tests
# in IsKeyword()/IsBuiltinType() are O(1).
TYPES = set('bool char int long short double float void wchar_t unsigned signed'.split())
TYPE_MODIFIERS = set('auto register const inline extern static virtual volatile mutable'.split())
ACCESS = set('public protected private friend'.split())
CASTS = set('static_cast const_cast dynamic_cast reinterpret_cast'.split())
OTHERS = set('true false asm class namespace using explicit this operator sizeof'.split())
OTHER_TYPES = set('new delete typedef struct union enum typeid typename template'.split())
CONTROL = set('case switch default if else return goto'.split())
EXCEPTION = set('try catch throw'.split())
LOOP = set('while do for break continue'.split())

# Union of every category above: the full keyword list this module knows.
ALL = TYPES | TYPE_MODIFIERS | ACCESS | CASTS | OTHERS | OTHER_TYPES | CONTROL | EXCEPTION | LOOP
def IsKeyword(token):
  """Returns True if token is any C++ keyword known to this module."""
  return token in ALL
def IsBuiltinType(token):
  """Returns True if token names a builtin type or a type modifier."""
  method_only = ('virtual', 'inline')
  if token in method_only:
    # These only apply to methods; they can't be types by themselves.
    return False
  return (token in TYPES) or (token in TYPE_MODIFIERS)
| 2,004 | 32.416667 | 97 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.